hexsha
stringlengths 40
40
| size
int64 4
996k
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
996k
| avg_line_length
float64 1.33
58.2k
| max_line_length
int64 2
323k
| alphanum_fraction
float64 0
0.97
| content_no_comment
stringlengths 0
946k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79077364150e9401f096da60497a80960ce1bc89
| 12,938
|
py
|
Python
|
category_encoders/count.py
|
JoshuaC3/categorical-encoding
|
169aaeb26b96c264c82fd2bc7eedff75f2b91ae5
|
[
"BSD-3-Clause"
] | null | null | null |
category_encoders/count.py
|
JoshuaC3/categorical-encoding
|
169aaeb26b96c264c82fd2bc7eedff75f2b91ae5
|
[
"BSD-3-Clause"
] | null | null | null |
category_encoders/count.py
|
JoshuaC3/categorical-encoding
|
169aaeb26b96c264c82fd2bc7eedff75f2b91ae5
|
[
"BSD-3-Clause"
] | null | null | null |
"""Count Encoder"""
import numpy as np
import pandas as pd
import category_encoders.utils as util
from copy import copy
from sklearn.base import BaseEstimator, TransformerMixin
__author__ = 'joshua t. dunn'
class CountEncoder(BaseEstimator, TransformerMixin):
    def __init__(self, verbose=0, cols=None, drop_invariant=False,
                 return_df=True, handle_unknown=None,
                 handle_missing='count',
                 min_group_size=None, combine_min_nan_groups=True,
                 min_group_name=None, normalize=False):
        """Count encoding for categorical features.

        For a given categorical feature, replace the names of the groups
        with the group counts.

        Parameters
        ----------
        verbose: int
            integer indicating verbosity of output. 0 for none.
        cols: list
            a list of columns to encode, if None, all string and categorical columns
            will be encoded.
        drop_invariant: bool
            boolean for whether or not to drop columns with 0 variance.
        return_df: bool
            boolean for whether to return a pandas DataFrame from transform
            (otherwise it will be a numpy array).
        handle_missing: str
            how to handle missing values at fit time. Options are 'error', 'return_nan',
            and 'count'. Default 'count', which treats NaNs as a countable category at
            fit time.
        handle_unknown: str, int or dict of.
            how to handle unknown labels at transform time. Options are 'error',
            'return_nan' and an int. Defaults to None which uses NaN behaviour
            specified at fit time. Passing an int will fill with this int value.
        normalize: bool or dict of.
            whether to normalize the counts to the range (0, 1). See Pandas `value_counts`
            for more details.
        min_group_size: int, float or dict of.
            the minimal count threshold of a group needed to ensure it is not
            combined into a "leftovers" group. If float in the range (0, 1),
            `min_group_size` is calculated as int(X.shape[0] * min_group_size).
            Note: This value may change type based on the `normalize` variable. If True
            this will become a float. If False, it will be an int.
        min_group_name: None, str or dict of.
            Set the name of the combined minimum groups when the defaults become
            too long. Default None. In this case the category names will be joined
            alphabetically with a `_` delimiter.
            Note: The default name can be long and may keep changing, for example,
            in cross-validation.
        combine_min_nan_groups: bool or dict of.
            whether to combine the leftovers group with NaN group. Default True. Can
            also be forced to combine with 'force' meaning small groups are effectively
            counted as NaNs. Force can only be used when 'handle_missing' is 'count' or 'error'.

        Example
        -------
        >>> import pandas as pd
        >>> from sklearn.datasets import load_boston
        >>> from category_encoders import CountEncoder
        >>> bunch = load_boston()
        >>> y = bunch.target
        >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
        >>> enc = CountEncoder(cols=['CHAS', 'RAD']).fit(X, y)
        >>> numeric_dataset = enc.transform(X)
        >>> print(numeric_dataset.info())
        <class 'pandas.core.frame.DataFrame'>
        RangeIndex: 506 entries, 0 to 505
        Data columns (total 13 columns):
        CRIM       506 non-null float64
        ZN         506 non-null float64
        INDUS      506 non-null float64
        CHAS       506 non-null int64
        NOX        506 non-null float64
        RM         506 non-null float64
        AGE        506 non-null float64
        DIS        506 non-null float64
        RAD        506 non-null int64
        TAX        506 non-null float64
        PTRATIO    506 non-null float64
        B          506 non-null float64
        LSTAT      506 non-null float64
        dtypes: float64(11), int64(2)
        memory usage: 51.5 KB
        None

        References
        ----------
        """
        self.return_df = return_df
        self.drop_invariant = drop_invariant
        self.drop_cols = []
        self.verbose = verbose
        self.cols = cols
        self._dim = None
        self.mapping = None
        self.handle_unknown = handle_unknown
        self.handle_missing = handle_missing
        self.normalize = normalize
        self.min_group_size = min_group_size
        self.min_group_name = min_group_name
        self.combine_min_nan_groups = combine_min_nan_groups
        # Per-column working copies of the (possibly scalar) constructor options;
        # populated by _check_set_create_dict_attrs() at fit time.
        self._min_group_categories = {}
        self._normalize = {}
        self._min_group_name = {}
        self._combine_min_nan_groups = {}
        self._min_group_size = {}
        self._handle_unknown = {}
        self._handle_missing = {}

    def fit(self, X, y=None, **kwargs):
        """Fit encoder according to X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : encoder
            Returns self.
        """
        # first check the type
        X = util.convert_input(X)
        self._dim = X.shape[1]

        # if columns aren't passed, just use every string column
        if self.cols is None:
            self.cols = util.get_obj_cols(X)
        else:
            self.cols = util.convert_cols_to_list(self.cols)

        self._check_set_create_dict_attrs()
        self._fit_count_encode(X, y)

        if self.drop_invariant:
            self.drop_cols = []
            X_temp = self.transform(X)
            generated_cols = util.get_generated_cols(X, X_temp, self.cols)
            self.drop_cols = [
                x for x in generated_cols if X_temp[x].var() <= 10e-5
            ]

        return self

    def transform(self, X, y=None):
        """Perform the transformation to new categorical data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples]

        Returns
        -------
        p : array, shape = [n_samples, n_numeric + N]
            Transformed values with encoding applied.
        """
        if self._dim is None:
            raise ValueError(
                'Must train encoder before it can be used to transform data.'
            )

        # first check the type
        X = util.convert_input(X)

        # then make sure that it is the right size
        if X.shape[1] != self._dim:
            raise ValueError(
                'Unexpected input dimension %d, expected %d'
                % (X.shape[1], self._dim,)
            )

        if not self.cols:
            return X

        X, _ = self._transform_count_encode(X, y)

        if self.drop_invariant:
            for col in self.drop_cols:
                # Use the axis keyword: positional `axis` for DataFrame.drop was
                # deprecated in pandas 1.x and removed in pandas 2.0.
                X.drop(col, axis=1, inplace=True)

        if self.return_df:
            return X
        else:
            return X.values

    def _fit_count_encode(self, X_in, y):
        """Perform the count encoding."""
        X = X_in.copy(deep=True)

        if self.cols is None:
            self.cols = X.columns.values

        self.mapping = {}
        for col in self.cols:
            if X[col].isna().any():
                if self._handle_missing[col] == 'error':
                    raise ValueError(
                        'Missing data found in column %s at fit time.'
                        % (col,)
                    )
                elif self._handle_missing[col] not in ['count', 'return_nan', 'error']:
                    # BUG FIX: the message previously offered `value`, which is not
                    # an accepted option for `handle_missing`.
                    raise ValueError(
                        '%s key in `handle_missing` should be one of: '
                        ' `count`, `return_nan` and `error`.'
                        % (col,)
                    )

            self.mapping[col] = X[col].value_counts(
                normalize=self._normalize[col],
                dropna=False
            )

            if self._handle_missing[col] == 'return_nan':
                # np.nan (lowercase): the np.NaN alias was removed in NumPy 2.0.
                self.mapping[col][np.nan] = np.nan

        if any([val is not None for val in self._min_group_size.values()]):
            self.combine_min_categories(X)

    def _transform_count_encode(self, X_in, y):
        """Perform the transform count encoding."""
        X = X_in.copy(deep=True)

        for col in self.cols:
            # BUG FIX: test the per-column threshold. `self._min_group_size` is a
            # dict here, so `self._min_group_size is not None` was always True.
            if self._min_group_size[col] is not None:
                if col in self._min_group_categories.keys():
                    # Fold small categories into their combined group name first.
                    X[col] = (
                        X[col].map(self._min_group_categories[col])
                        .fillna(X[col])
                    )
            X[col] = X[col].map(self.mapping[col])

            # BUG FIX: also accept plain Python ints. `isinstance(5, np.integer)`
            # is False, so a user-supplied int fill value was silently ignored.
            if isinstance(self._handle_unknown[col], (int, np.integer)):
                X[col] = X[col].fillna(self._handle_unknown[col])
            elif (
                self._handle_unknown[col] == 'error'
                and X[col].isna().any()
            ):
                raise ValueError(
                    'Missing data found in column %s at transform time.'
                    % (col,)
                )
        return X, self.mapping

    def combine_min_categories(self, X):
        """Combine small categories into a single category."""
        for col, mapper in self.mapping.items():
            if self._normalize[col] and isinstance(self._min_group_size[col], int):
                # Counts are normalized but the threshold is absolute: convert it
                # to a fraction of the number of rows.
                self._min_group_size[col] = self._min_group_size[col] / X.shape[0]
            elif not self._normalize[col] and isinstance(self._min_group_size[col], float):
                # BUG FIX: previously tested `not self._normalize` (the whole dict,
                # truthy whenever non-empty), so a fractional threshold was never
                # rescaled to an absolute count.
                self._min_group_size[col] = self._min_group_size[col] * X.shape[0]

            if self._combine_min_nan_groups[col] is True:
                min_groups_idx = mapper < self._min_group_size[col]
            elif self._combine_min_nan_groups[col] == 'force':
                # Small groups are merged together with the NaN group.
                min_groups_idx = (
                    (mapper < self._min_group_size[col])
                    | (mapper.index.isna())
                )
            else:
                # NaN keeps its own count; only small non-NaN groups are merged.
                min_groups_idx = (
                    (mapper < self._min_group_size[col])
                    & (~mapper.index.isna())
                )

            min_groups_sum = mapper.loc[min_groups_idx].sum()

            if min_groups_sum > 0 and min_groups_idx.sum() > 1:
                if isinstance(self._min_group_name[col], str):
                    # BUG FIX: use the per-column name; previously the whole
                    # `_min_group_name` dict was assigned as the group label.
                    min_group_mapper_name = self._min_group_name[col]
                else:
                    # Default label: alphabetical join of the merged categories.
                    min_group_mapper_name = '_'.join([
                        str(idx)
                        for idx
                        in mapper.loc[min_groups_idx].index.astype(str).sort_values()
                    ])

                self._min_group_categories[col] = {
                    cat: min_group_mapper_name
                    for cat
                    in mapper.loc[min_groups_idx].index.tolist()
                }

                if not min_groups_idx.all():
                    mapper = mapper.loc[~min_groups_idx]
                    # `Index.is_categorical()` was removed in pandas 2.0; use an
                    # explicit isinstance check instead.
                    if isinstance(mapper.index, pd.CategoricalIndex):
                        mapper.index = mapper.index.add_categories(
                            min_group_mapper_name
                        )
                    mapper[min_group_mapper_name] = min_groups_sum
                    self.mapping[col] = mapper

    def _check_set_create_dict_attrs(self):
        """Check attributes that can be dicts and format for all self.cols."""
        dict_attrs = {
            'normalize': False,
            'min_group_name': None,
            'combine_min_nan_groups': True,
            'min_group_size': None,
            'handle_unknown': 'value',
            'handle_missing': 'value',
        }

        for attr_name, attr_default in dict_attrs.items():
            attr = copy(getattr(self, attr_name))
            if isinstance(attr, dict):
                # Fill in the default for any column the user did not mention.
                for col in self.cols:
                    if col not in attr:
                        attr[col] = attr_default
                setattr(self, '_' + attr_name, attr)
            else:
                # Scalar option: broadcast the same value to every column.
                attr_dict = {}
                for col in self.cols:
                    attr_dict[col] = attr
                setattr(self, '_' + attr_name, attr_dict)

        for col in self.cols:
            if (
                self._handle_missing[col] == 'return_nan'
                and self._combine_min_nan_groups[col] == 'force'
            ):
                raise ValueError(
                    "Cannot have `handle_missing` == 'return_nan' and "
                    "'combine_min_nan_groups' == 'force' for columns `%s`."
                    % (col,)
                )
| 36.139665
| 93
| 0.541738
|
import numpy as np
import pandas as pd
import category_encoders.utils as util
from copy import copy
from sklearn.base import BaseEstimator, TransformerMixin
__author__ = 'joshua t. dunn'
class CountEncoder(BaseEstimator, TransformerMixin):
    """Count encoder: replaces each categorical level with its group count
    (or frequency when ``normalize`` is True), with optional merging of
    rare categories into a single "leftovers" group.
    """

    def __init__(self, verbose=0, cols=None, drop_invariant=False,
                 return_df=True, handle_unknown=None,
                 handle_missing='count',
                 min_group_size=None, combine_min_nan_groups=True,
                 min_group_name=None, normalize=False):
        """Store the configuration; no fitting happens here.

        The `normalize`, `min_group_*`, `combine_min_nan_groups` and
        `handle_*` options accept either a scalar (applied to every column)
        or a per-column dict.
        """
        self.return_df = return_df
        self.drop_invariant = drop_invariant
        self.drop_cols = []
        self.verbose = verbose
        self.cols = cols
        self._dim = None
        self.mapping = None
        self.handle_unknown = handle_unknown
        self.handle_missing = handle_missing
        self.normalize = normalize
        self.min_group_size = min_group_size
        self.min_group_name = min_group_name
        self.combine_min_nan_groups = combine_min_nan_groups
        # Per-column working copies, populated by _check_set_create_dict_attrs().
        self._min_group_categories = {}
        self._normalize = {}
        self._min_group_name = {}
        self._combine_min_nan_groups = {}
        self._min_group_size = {}
        self._handle_unknown = {}
        self._handle_missing = {}

    def fit(self, X, y=None, **kwargs):
        """Fit the encoder on X and return self."""
        X = util.convert_input(X)
        self._dim = X.shape[1]
        # Default to every object/categorical column when none were given.
        if self.cols is None:
            self.cols = util.get_obj_cols(X)
        else:
            self.cols = util.convert_cols_to_list(self.cols)
        self._check_set_create_dict_attrs()
        self._fit_count_encode(X, y)
        if self.drop_invariant:
            self.drop_cols = []
            X_temp = self.transform(X)
            generated_cols = util.get_generated_cols(X, X_temp, self.cols)
            self.drop_cols = [
                x for x in generated_cols if X_temp[x].var() <= 10e-5
            ]
        return self

    def transform(self, X, y=None):
        """Apply the fitted count encoding to new data."""
        if self._dim is None:
            raise ValueError(
                'Must train encoder before it can be used to transform data.'
            )
        # first check the type
        X = util.convert_input(X)
        # then make sure that it is the right size
        if X.shape[1] != self._dim:
            raise ValueError(
                'Unexpected input dimension %d, expected %d'
                % (X.shape[1], self._dim,)
            )
        if not self.cols:
            return X
        X, _ = self._transform_count_encode(X, y)
        if self.drop_invariant:
            for col in self.drop_cols:
                # Keyword axis: positional `axis` for DataFrame.drop was removed
                # in pandas 2.0.
                X.drop(col, axis=1, inplace=True)
        if self.return_df:
            return X
        else:
            return X.values

    def _fit_count_encode(self, X_in, y):
        """Build the category -> count mapping for every configured column."""
        X = X_in.copy(deep=True)
        if self.cols is None:
            self.cols = X.columns.values
        self.mapping = {}
        for col in self.cols:
            if X[col].isna().any():
                if self._handle_missing[col] == 'error':
                    raise ValueError(
                        'Missing data found in column %s at fit time.'
                        % (col,)
                    )
                elif self._handle_missing[col] not in ['count', 'return_nan', 'error']:
                    # BUG FIX: message previously listed `value`, which is not an
                    # accepted option.
                    raise ValueError(
                        '%s key in `handle_missing` should be one of: '
                        ' `count`, `return_nan` and `error`.'
                        % (col,)
                    )
            self.mapping[col] = X[col].value_counts(
                normalize=self._normalize[col],
                dropna=False
            )
            if self._handle_missing[col] == 'return_nan':
                # np.nan: the np.NaN alias was removed in NumPy 2.0.
                self.mapping[col][np.nan] = np.nan
        if any([val is not None for val in self._min_group_size.values()]):
            self.combine_min_categories(X)

    def _transform_count_encode(self, X_in, y):
        """Map each column through its fitted counts and handle unknowns."""
        X = X_in.copy(deep=True)
        for col in self.cols:
            # BUG FIX: check the per-column threshold; `self._min_group_size`
            # itself is a dict and was always `is not None`.
            if self._min_group_size[col] is not None:
                if col in self._min_group_categories.keys():
                    # Fold rare categories into their combined group name first.
                    X[col] = (
                        X[col].map(self._min_group_categories[col])
                        .fillna(X[col])
                    )
            X[col] = X[col].map(self.mapping[col])
            # BUG FIX: also accept plain Python ints; `isinstance(5, np.integer)`
            # is False, so a user-supplied int fill value was silently ignored.
            if isinstance(self._handle_unknown[col], (int, np.integer)):
                X[col] = X[col].fillna(self._handle_unknown[col])
            elif (
                self._handle_unknown[col] == 'error'
                and X[col].isna().any()
            ):
                raise ValueError(
                    'Missing data found in column %s at transform time.'
                    % (col,)
                )
        return X, self.mapping

    def combine_min_categories(self, X):
        """Merge categories below `min_group_size` into one combined group."""
        for col, mapper in self.mapping.items():
            if self._normalize[col] and isinstance(self._min_group_size[col], int):
                # Normalized counts but absolute threshold: convert to fraction.
                self._min_group_size[col] = self._min_group_size[col] / X.shape[0]
            elif not self._normalize[col] and isinstance(self._min_group_size[col], float):
                # BUG FIX: previously tested `not self._normalize` (the whole
                # dict, always truthy when non-empty), so a fractional threshold
                # was never rescaled to an absolute count.
                self._min_group_size[col] = self._min_group_size[col] * X.shape[0]

            if self._combine_min_nan_groups[col] is True:
                min_groups_idx = mapper < self._min_group_size[col]
            elif self._combine_min_nan_groups[col] == 'force':
                # Merge small groups together with the NaN group.
                min_groups_idx = (
                    (mapper < self._min_group_size[col])
                    | (mapper.index.isna())
                )
            else:
                # NaN keeps its own count; only small non-NaN groups are merged.
                min_groups_idx = (
                    (mapper < self._min_group_size[col])
                    & (~mapper.index.isna())
                )

            min_groups_sum = mapper.loc[min_groups_idx].sum()
            if min_groups_sum > 0 and min_groups_idx.sum() > 1:
                if isinstance(self._min_group_name[col], str):
                    # BUG FIX: use the per-column name; previously the whole
                    # `_min_group_name` dict was assigned as the label.
                    min_group_mapper_name = self._min_group_name[col]
                else:
                    # Default label: alphabetical join of the merged categories.
                    min_group_mapper_name = '_'.join([
                        str(idx)
                        for idx
                        in mapper.loc[min_groups_idx].index.astype(str).sort_values()
                    ])
                self._min_group_categories[col] = {
                    cat: min_group_mapper_name
                    for cat
                    in mapper.loc[min_groups_idx].index.tolist()
                }
                if not min_groups_idx.all():
                    mapper = mapper.loc[~min_groups_idx]
                    # `Index.is_categorical()` was removed in pandas 2.0.
                    if isinstance(mapper.index, pd.CategoricalIndex):
                        mapper.index = mapper.index.add_categories(
                            min_group_mapper_name
                        )
                    mapper[min_group_mapper_name] = min_groups_sum
                    self.mapping[col] = mapper

    def _check_set_create_dict_attrs(self):
        """Expand scalar-or-dict options into per-column dicts and validate."""
        dict_attrs = {
            'normalize': False,
            'min_group_name': None,
            'combine_min_nan_groups': True,
            'min_group_size': None,
            'handle_unknown': 'value',
            'handle_missing': 'value',
        }
        for attr_name, attr_default in dict_attrs.items():
            attr = copy(getattr(self, attr_name))
            if isinstance(attr, dict):
                # Fill in the default for any column the user did not mention.
                for col in self.cols:
                    if col not in attr:
                        attr[col] = attr_default
                setattr(self, '_' + attr_name, attr)
            else:
                # Scalar option: broadcast the same value to every column.
                attr_dict = {}
                for col in self.cols:
                    attr_dict[col] = attr
                setattr(self, '_' + attr_name, attr_dict)
        for col in self.cols:
            if (
                self._handle_missing[col] == 'return_nan'
                and self._combine_min_nan_groups[col] == 'force'
            ):
                raise ValueError(
                    "Cannot have `handle_missing` == 'return_nan' and "
                    "'combine_min_nan_groups' == 'force' for columns `%s`."
                    % (col,)
                )
| true
| true
|
790774071b5e09178748078702b5453337cd49f7
| 1,664
|
py
|
Python
|
P5/Brasilia/Q7 - BR.py
|
Boa-Thomas/Eletricidade
|
7cbd62f2d56cbb1430ed0b8818ffc878b480b3c1
|
[
"MIT"
] | null | null | null |
P5/Brasilia/Q7 - BR.py
|
Boa-Thomas/Eletricidade
|
7cbd62f2d56cbb1430ed0b8818ffc878b480b3c1
|
[
"MIT"
] | null | null | null |
P5/Brasilia/Q7 - BR.py
|
Boa-Thomas/Eletricidade
|
7cbd62f2d56cbb1430ed0b8818ffc878b480b3c1
|
[
"MIT"
] | 2
|
2022-02-16T00:08:07.000Z
|
2022-03-07T13:43:37.000Z
|
# Q7 - BR: steady-state analysis of a three-phase induction motor using its
# per-phase equivalent circuit (Y connection, 254 V phase voltage, slip 1.8%).
import cmath
import math
# Motor nameplate data
cv =150  # rated output power [cv] (metric horsepower); not used below
cvconv = 736  # conversion factor, 1 cv = 736 W; not used below
t1 =440  # line voltage [V]; not used below
t2 = 254  # phase voltage [V]
polos = 10  # number of poles
freq = 60  # supply frequency [Hz]
# Per-phase equivalent-circuit parameters [Ohm]
r1 = 0.012  # stator resistance
R2L = 0.018  # rotor resistance referred to the stator
X1 = 0.08  # stator leakage reactance
X2L = X1  # rotor leakage reactance referred to the stator
Rp = 58  # core-loss resistance (shunt branch)
Xm = 54  # magnetizing reactance (shunt branch)
print("\nConsidere que o motor é alimentado com tensão de fase igual a 254 V, conexão Y e atinge escorregamento igual a 1,8%")
print("\nA - Corrente no estator\n")
s = 0.018  # slip
print("R2L_s = ", R2L/s, "Ohm")
print("(1-s)*(R2L_s) = ", (1-s)*(R2L/s), "Ohm")
Z1 = r1+complex(0,X1)  # stator impedance
print("Z1 = ", Z1, "Ohm")
Z2 = R2L/s+complex(0,X2L)  # rotor-branch impedance at slip s
print("Z2 = ", Z2, "Ohm")
Zn = Rp*complex(0,Xm)/complex(Rp,Xm)  # shunt branch: Rp in parallel with jXm
print("Zn = ", Zn, "Ohm")
Zeq1 = Zn*Z2/(Zn+Z2)  # shunt branch in parallel with the rotor branch
print("Zeq1 = ", Zeq1, "Ohm")
Zeq2 = Z1+Zeq1  # total per-phase input impedance
print("Zeq2 = ", Zeq2, "Ohm")
I1 = t2/Zeq2  # stator phase current (phasor)
print("I1 = ", I1, "A")
I1p = cmath.polar(I1)  # (magnitude, angle [rad]) form
print("\nB - Fator de pontecia\n")
FP = cmath.cos(I1p[1])  # power factor = cos(current angle); cmath.cos returns a complex value
FPreal = round(FP.real,5)
print("FP = ", FPreal)
print("\nC - Potencia de entrada\n")
Pe = t2*I1p[0]*cmath.cos(I1p[1])  # per-phase input power
pereal = round(Pe.real,3)
print("Pe = ", pereal, "W")
Pe3 = 3*pereal  # three-phase input power
print("Pe3 = ", Pe3, "W")
print("\nD - Corrente no rotor\n")
E1 = t2-Z1*I1  # air-gap voltage across the shunt branch
E1p = cmath.polar(E1)
print("E1 = ", E1p, "V")
I2L = E1/Z2  # rotor current referred to the stator
I2Lp = cmath.polar(I2L)
print("I2L = ", I2Lp, "A")
print("\nE - Potencia na carga\n")
# the instructor uses the polar (magnitude) form here
Ps = ((R2L*(1-s))/s)*I2Lp[0]*I2Lp[0]  # per-phase converted (shaft) power
print("Ps = ", Ps, "W")
Ps3 = 3*Ps  # three-phase shaft power
print("Ps3 = ", Ps3, "W")
print("\nF - Velocidade do eixo\n")
ns = 120*freq/polos  # synchronous speed [rpm]
print("ns = ", ns, "rpm")
n = (1-s)*ns  # shaft speed [rpm]
print("n = ", n, "rpm")
w = 2*math.pi*n/60  # shaft angular speed [rad/s]
w = round(w,3)
print("w = ", w, "rad/s")
print("\nG - Torque na carga\n")
t = Ps3/w  # load torque [N*m]
print("t = ", t, "Nm")
print("\nH - Rendimento do motor\n")
eni = Ps3/Pe3*100  # motor efficiency [%]
print("eni = ", eni, "%")
| 17.333333
| 126
| 0.582933
|
# Q7 - BR: steady-state analysis of a three-phase induction motor using its
# per-phase equivalent circuit (Y connection, 254 V phase voltage, slip 1.8%).
import cmath
import math
# Motor nameplate data
cv =150  # rated output power [cv] (metric horsepower); not used below
cvconv = 736  # conversion factor, 1 cv = 736 W; not used below
t1 =440  # line voltage [V]; not used below
t2 = 254  # phase voltage [V]
polos = 10  # number of poles
freq = 60  # supply frequency [Hz]
# Per-phase equivalent-circuit parameters [Ohm]
r1 = 0.012  # stator resistance
R2L = 0.018  # rotor resistance referred to the stator
X1 = 0.08  # stator leakage reactance
X2L = X1  # rotor leakage reactance referred to the stator
Rp = 58  # core-loss resistance (shunt branch)
Xm = 54  # magnetizing reactance (shunt branch)
print("\nConsidere que o motor é alimentado com tensão de fase igual a 254 V, conexão Y e atinge escorregamento igual a 1,8%")
print("\nA - Corrente no estator\n")
s = 0.018  # slip
print("R2L_s = ", R2L/s, "Ohm")
print("(1-s)*(R2L_s) = ", (1-s)*(R2L/s), "Ohm")
Z1 = r1+complex(0,X1)  # stator impedance
print("Z1 = ", Z1, "Ohm")
Z2 = R2L/s+complex(0,X2L)  # rotor-branch impedance at slip s
print("Z2 = ", Z2, "Ohm")
Zn = Rp*complex(0,Xm)/complex(Rp,Xm)  # shunt branch: Rp in parallel with jXm
print("Zn = ", Zn, "Ohm")
Zeq1 = Zn*Z2/(Zn+Z2)  # shunt branch in parallel with the rotor branch
print("Zeq1 = ", Zeq1, "Ohm")
Zeq2 = Z1+Zeq1  # total per-phase input impedance
print("Zeq2 = ", Zeq2, "Ohm")
I1 = t2/Zeq2  # stator phase current (phasor)
print("I1 = ", I1, "A")
I1p = cmath.polar(I1)  # (magnitude, angle [rad]) form
print("\nB - Fator de pontecia\n")
FP = cmath.cos(I1p[1])  # power factor = cos(current angle); cmath.cos returns a complex value
FPreal = round(FP.real,5)
print("FP = ", FPreal)
print("\nC - Potencia de entrada\n")
Pe = t2*I1p[0]*cmath.cos(I1p[1])  # per-phase input power
pereal = round(Pe.real,3)
print("Pe = ", pereal, "W")
Pe3 = 3*pereal  # three-phase input power
print("Pe3 = ", Pe3, "W")
print("\nD - Corrente no rotor\n")
E1 = t2-Z1*I1  # air-gap voltage across the shunt branch
E1p = cmath.polar(E1)
print("E1 = ", E1p, "V")
I2L = E1/Z2  # rotor current referred to the stator
I2Lp = cmath.polar(I2L)
print("I2L = ", I2Lp, "A")
print("\nE - Potencia na carga\n")
Ps = ((R2L*(1-s))/s)*I2Lp[0]*I2Lp[0]  # per-phase converted (shaft) power, using the polar magnitude
print("Ps = ", Ps, "W")
Ps3 = 3*Ps  # three-phase shaft power
print("Ps3 = ", Ps3, "W")
print("\nF - Velocidade do eixo\n")
ns = 120*freq/polos  # synchronous speed [rpm]
print("ns = ", ns, "rpm")
n = (1-s)*ns  # shaft speed [rpm]
print("n = ", n, "rpm")
w = 2*math.pi*n/60  # shaft angular speed [rad/s]
w = round(w,3)
print("w = ", w, "rad/s")
print("\nG - Torque na carga\n")
t = Ps3/w  # load torque [N*m]
print("t = ", t, "Nm")
print("\nH - Rendimento do motor\n")
eni = Ps3/Pe3*100  # motor efficiency [%]
print("eni = ", eni, "%")
| true
| true
|
790776b7ac11feed73af6aae9673f127ed003258
| 1,113
|
py
|
Python
|
examples/hacker_news/setup.py
|
kbd/dagster
|
14affaf1372fcb5169e6c2d5d53621eeed954767
|
[
"Apache-2.0"
] | null | null | null |
examples/hacker_news/setup.py
|
kbd/dagster
|
14affaf1372fcb5169e6c2d5d53621eeed954767
|
[
"Apache-2.0"
] | null | null | null |
examples/hacker_news/setup.py
|
kbd/dagster
|
14affaf1372fcb5169e6c2d5d53621eeed954767
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages, setup

# Packaging metadata for the hacker_news example project.
setup(
    name="hacker_news",
    version="dev",
    author="Elementl",
    author_email="hello@elementl.com",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
    ],
    packages=find_packages(exclude=["test"]),
    # Ship the dbt project files alongside the Python package.
    package_data={"hacker_news": ["hacker_news_dbt/*"]},
    install_requires=[
        "aiobotocore==1.3.3",
        "dagster",
        "dagster-aws",
        "dagster-dbt",
        "dagster-pandas",
        "dagster-pyspark",
        "dagster-slack",
        "dagster-postgres",
        "dagstermill",
        "dbt>=0.19.0",
        "mock",
        # DataFrames were not written to Snowflake, causing errors
        "pandas<1.4.0",
        "pyarrow>=4.0.0",
        "pyspark",
        "requests",
        "fsspec",
        "s3fs",
        "scipy",
        # BUG FIX: the "sklearn" PyPI name is a deprecated dummy package that now
        # fails to install; the actual distribution is named "scikit-learn".
        "scikit-learn",
        "snowflake-sqlalchemy",
        "matplotlib",
    ],
    extras_require={"tests": ["mypy", "pylint", "pytest"]},
)
| 26.5
| 66
| 0.539982
|
from setuptools import find_packages, setup

# Packaging metadata for the hacker_news example project.
setup(
    name="hacker_news",
    version="dev",
    author="Elementl",
    author_email="hello@elementl.com",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
    ],
    packages=find_packages(exclude=["test"]),
    # Ship the dbt project files alongside the Python package.
    package_data={"hacker_news": ["hacker_news_dbt/*"]},
    install_requires=[
        "aiobotocore==1.3.3",
        "dagster",
        "dagster-aws",
        "dagster-dbt",
        "dagster-pandas",
        "dagster-pyspark",
        "dagster-slack",
        "dagster-postgres",
        "dagstermill",
        "dbt>=0.19.0",
        "mock",
        "pandas<1.4.0",
        "pyarrow>=4.0.0",
        "pyspark",
        "requests",
        "fsspec",
        "s3fs",
        "scipy",
        # BUG FIX: the "sklearn" PyPI name is a deprecated dummy package that now
        # fails to install; the actual distribution is named "scikit-learn".
        "scikit-learn",
        "snowflake-sqlalchemy",
        "matplotlib",
    ],
    extras_require={"tests": ["mypy", "pylint", "pytest"]},
)
| true
| true
|
790776caca4bebab86893c741e794ed61cf7a24c
| 16,245
|
py
|
Python
|
bmtk/utils/reports/spike_trains/plotting.py
|
mjhyman/bmtk
|
42dcce944fe8ff8cab02b19d2d983f73a8cbc0d1
|
[
"BSD-3-Clause"
] | 1
|
2020-08-12T23:02:05.000Z
|
2020-08-12T23:02:05.000Z
|
bmtk/utils/reports/spike_trains/plotting.py
|
mjhyman/bmtk
|
42dcce944fe8ff8cab02b19d2d983f73a8cbc0d1
|
[
"BSD-3-Clause"
] | null | null | null |
bmtk/utils/reports/spike_trains/plotting.py
|
mjhyman/bmtk
|
42dcce944fe8ff8cab02b19d2d983f73a8cbc0d1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2020. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import six
import matplotlib.pyplot as plt
import types
import copy
from functools import partial
from .spike_trains import SpikeTrains
from .spike_trains_api import SpikeTrainsAPI
def __get_spike_trains(spike_trains):
    """Normalize the *spike_trains* argument to a SpikeTrainsAPI-compatible object.

    Accepts an already-built SpikeTrains/SpikeTrainsAPI instance or a path to a
    (SONATA) spikes file, which is then loaded from disk.
    """
    if isinstance(spike_trains, (SpikeTrains, SpikeTrainsAPI)):
        return spike_trains
    if isinstance(spike_trains, six.string_types):
        # A file path was passed in; load the spikes from disk.
        return SpikeTrains.load(spike_trains)
    raise AttributeError('Could not parse spiketrains. Pass in file-path, SpikeTrains object, or list of the previous')
def __get_population(spike_trains, population):
    """Resolve which node population to use, validating the user's choice.

    When *population* is None the SpikeTrains object must contain exactly one
    population; otherwise the given name must exist in the SpikeTrains object.
    """
    pops = spike_trains.populations
    if population is not None:
        if population not in pops:
            raise Exception('Could not find node population "{}" in SpikeTrains, only found {}'.format(population, pops))
        return population

    # No population specified: only unambiguous when a single one exists.
    if len(pops) > 1:
        raise Exception('SpikeTrains contains more than one population of nodes. Use "population" parameter '
                        'to specify population to display.')
    return pops[0]
def __get_node_groups(spike_trains, node_groups, population):
    """Parse the 'node_groups' parameter into (groups, all selected node_ids).

    With no groups given, a single default group covering every node in the
    population is created. Otherwise the groups are deep-copied (they get
    mutated downstream) and their node_ids are concatenated.
    """
    if node_groups is None:
        # Default: one group containing every node in the population.
        all_nodes = spike_trains.node_ids(population=population)
        return [{'node_ids': all_nodes, 'c': 'b'}], all_nodes

    # Deep-copy since the caller's dictionaries may be altered later.
    node_groups = copy.deepcopy(node_groups)
    combined_ids = np.array(node_groups[0]['node_ids'])
    for grp in node_groups[1:]:
        if 'node_ids' not in grp:
            raise AttributeError('Could not find "node_ids" key in node_groups parameter.')
        combined_ids = np.concatenate((combined_ids, np.array(grp['node_ids'])))

    return node_groups, combined_ids
def plot_raster(spike_trains, with_histogram=True, population=None, node_groups=None, times=None, title=None,
                show=True, save_as=None):
    """will create a raster plot (plus optional histogram) from a SpikeTrains object or SONATA Spike-Trains file. Will
    return the figure

    By default will display all nodes, if you want to only display a subset of nodes and/or group together different
    nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts::

        plot_raster('/path/to/my/spike.h5',
                    node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},  # first 70 nodes are blue pyr cells
                                 {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}])  # last 30 nodes are red inh cells

    The histogram will not be grouped.

    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param with_histogram: If True the a histogram will be shown as a small subplot below the scatter plot. Default
        True.
    :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
        nodes to actually plot. If only one population exists and population=None then the function will find it by
        default.
    :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
        be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
        label and color. If None all nodes will be labeled and colored the same.
    :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
        data.
    :param title: str, Use to add a title. Default no tile
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)

    # Only show a legend if one of the node_groups have an explicit label, otherwise matplotlib will show an empty
    # legend box which looks bad
    show_legend = False

    # Situation where if the last (or first) M nodes don't spike matplotlib will cut off the y range, but it should
    # show these as empty rows. To do this need to keep track of range of all node_ids
    min_id, max_id = np.inf, -1

    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        # Explicit window: clamp the data to [times[0], times[1]].
        min_ts, max_ts = times[0], times[1]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        # No window given: infer the x-range from the spiking data itself.
        min_ts = np.min(spikes_df['timestamps'])
        max_ts = np.max(spikes_df['timestamps'])

    # Figure layout: with a histogram, stack a tall raster subplot over a short histogram subplot.
    if with_histogram:
        fig, axes = plt.subplots(2, 1, gridspec_kw={'height_ratios': [7, 1]}, squeeze=True)
        raster_axes = axes[0]
        bottom_axes = hist_axes = axes[1]
    else:
        fig, axes = plt.subplots(1, 1)
        bottom_axes = raster_axes = axes
        hist_axes = None

    for node_grp in node_groups:
        # Pop 'node_ids' so the remaining keys (e.g. 'c', 'label') pass straight to scatter() as kwargs.
        grp_ids = node_grp.pop('node_ids')
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]

        # If label exists for at-least one group we want to show
        show_legend = show_legend or 'label' in node_grp

        # Finds min/max node_id for all node groups
        min_id = np.min([np.min(grp_ids), min_id])
        max_id = np.max([np.max(grp_ids), max_id])

        raster_axes.scatter(grp_spikes['timestamps'], grp_spikes['node_ids'], lw=0, s=8, **node_grp)

    if show_legend:
        raster_axes.legend(loc='upper right')

    if title:
        raster_axes.set_title(title)

    raster_axes.set_ylabel('node_ids')
    raster_axes.set_ylim(min_id - 0.5, max_id + 1)  # add buffering to range else the rows at the ends look cut-off.
    raster_axes.set_xlim(min_ts, max_ts + 1)
    bottom_axes.set_xlabel('timestamps ({})'.format(spike_trains.units(population=pop)))

    if with_histogram:
        # Add the spike-count histogram beneath the raster; it shares the x-axis so hide the raster's ticks.
        hist_axes.hist(spikes_df['timestamps'], 100)
        hist_axes.set_xlim(min_ts - 0.5, max_ts + 1)
        hist_axes.axes.get_yaxis().set_visible(False)
        raster_axes.set_xticks([])

    if save_as:
        plt.savefig(save_as)

    if show:
        plt.show()

    return fig
def moving_average(data, window_size=10):
    """Smooth *data* with a centered moving average of width ~window_size.

    Each output element i is the mean of data[i - w//2 : i + w//2], with the
    window clipped at both ends of the sequence.
    """
    half = int(window_size / 2)
    n = len(data)
    smoothed = []
    for i in range(n):
        lo = i - half if i - half > 0 else 0
        hi = i + half if i + half < n else n
        smoothed.append(np.mean(data[lo:hi]))
    return smoothed
def plot_rates(spike_trains, population=None, node_groups=None, times=None, smoothing=False,
               smoothing_params=None, title=None, show=True, save_as=None):
    """Calculate and plot the rates of each node in a SpikeTrains object or SONATA Spike-Trains file. If start and stop
    times are not specified from the "times" parameter, will try to parse values from the timestamps data.

    If you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and
    labels then you can use the node_groups, which should be a list of dicts::

        plot_rates('/path/to/my/spike.h5',
                   node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},
                                {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}])

    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
        nodes to actually plot. If only one population exists and population=None then the function will find it by
        default.
    :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
        be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
        label and color. If None all nodes will be labeled and colored the same.
    :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
        data.
    :param smoothing: Bool or function. Used to smooth the data. By default (False) no smoothing will be done. If True
        will using a moving average smoothing function. Or use a function pointer.
    :param smoothing_params: dict, parameters when using a function pointer smoothing value.
    :param title: str, Use to add a title. Default no title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    # Determine if smoothing will be applied to the data
    smoothing_params = smoothing_params or {}  # pass in empty parameters
    if isinstance(smoothing, types.FunctionType):
        smoothing_fnc = partial(smoothing, **smoothing_params)
    elif smoothing:
        smoothing_fnc = partial(moving_average, **smoothing_params)
    else:
        smoothing_fnc = lambda d: d  # Use a filler function that won't do anything
    # get data, restricted to the requested nodes (and time window, if given)
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        recording_interval = times[1] - times[0]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        # No explicit window given: infer the recording span from the data itself.
        recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
    # Iterate through each group of nodes and add to the same plot
    fig, axes = plt.subplots()
    show_legend = False  # Only show labels if one of the node group has label value
    for node_grp in node_groups:
        show_legend = show_legend or 'label' in node_grp  # If label exists for at-least one group we want to show
        grp_ids = node_grp.pop('node_ids')
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # spikes-per-node divided by the interval; /1000.0 assumes timestamps are in ms -- TODO confirm against units()
        spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
        axes.plot(np.array(spike_rates.index), smoothing_fnc(spike_rates), '.', **node_grp)
    axes.set_ylabel('Firing Rates (Hz)')
    axes.set_xlabel('node_ids')
    if show_legend:
        axes.legend()  # loc='upper right')
    if title:
        axes.set_title(title)
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
    return fig
def plot_rates_boxplot(spike_trains, population=None, node_groups=None, times=None, title=None, show=True,
                       save_as=None):
    """Creates a box plot of the firing rates taken from a SpikeTrains object or SONATA Spike-Trains file. If start
    and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data.

    By default will plot all nodes together. To only display a subset of the nodes and/or create groups of nodes use
    the node_groups options::

        plot_rates_boxplot(
            '/path/to/my/spike.h5',
            node_groups=[{'node_ids': range(0, 70), 'label': 'pyr'},
                         {'node_ids': range(70, 100), 'label': 'inh'}]
        )

    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
        nodes to actually plot. If only one population exists and population=None then the function will find it by
        default.
    :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
        be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
        label and color. If None all nodes will be labeled and colored the same.
    :param times: (float, float). Used to set start and stop time. If not specified will try to find values from
        spiking data.
    :param title: str, Use to add a title. Default no title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        recording_interval = times[1] - times[0]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        # No explicit window given: infer the recording span from the data itself.
        recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
    fig, axes = plt.subplots()
    rates_data = []
    rates_labels = []
    # With a single unlabeled group, give the lone box a sensible tick label.
    if len(node_groups) == 1 and 'label' not in node_groups[0]:
        node_groups[0]['label'] = 'All Nodes'
    for i, node_grp in enumerate(node_groups):
        rates_labels.append(node_grp.get('label', 'Node Group {}'.format(i)))
        grp_ids = node_grp.pop('node_ids')
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # spikes-per-node divided by the interval; /1000.0 assumes timestamps are in ms -- TODO confirm against units()
        spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
        rates_data.append(spike_rates)
    axes.boxplot(rates_data)
    axes.set_ylabel('Firing Rates (Hz)')
    axes.set_xticklabels(rates_labels)
    if title:
        axes.set_title(title)
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
    return fig
| 47.639296
| 120
| 0.697999
|
import numpy as np
import six
import matplotlib.pyplot as plt
import types
import copy
from functools import partial
from .spike_trains import SpikeTrains
from .spike_trains_api import SpikeTrainsAPI
def __get_spike_trains(spike_trains):
    """Normalize *spike_trains* to a SpikeTrains-like object.

    An existing SpikeTrains/SpikeTrainsAPI instance is passed through
    unchanged; a string is treated as a file path and loaded.
    """
    if isinstance(spike_trains, (SpikeTrains, SpikeTrainsAPI)):
        return spike_trains
    if isinstance(spike_trains, six.string_types):
        return SpikeTrains.load(spike_trains)
    raise AttributeError('Could not parse spiketrains. Pass in file-path, SpikeTrains object, or list of the previous')
def __get_population(spike_trains, population):
    """Resolve which node population to use.

    If *population* is given it must exist in the spike-trains; otherwise the
    spike-trains must contain exactly one population, which is returned.
    """
    pops = spike_trains.populations
    if population is not None:
        # Caller named a population explicitly; it just has to exist.
        if population not in pops:
            raise Exception('Could not find node population "{}" in SpikeTrains, only found {}'.format(population, pops))
        return population
    if len(pops) > 1:
        raise Exception('SpikeTrains contains more than one population of nodes. Use "population" parameter '
                        'to specify population to display.')
    return pops[0]
def __get_node_groups(spike_trains, node_groups, population):
    """Split the requested node ids into display groups.

    :param spike_trains: SpikeTrains-like object (used only when node_groups is None).
    :param node_groups: None, or a list of dicts each containing a 'node_ids'
        key plus optional matplotlib kwargs ('label', 'c', ...).
    :param population: population name forwarded to spike_trains.node_ids().
    :return: (node_groups, selected_nodes) where selected_nodes is the
        concatenation of every group's ids.
    :raises AttributeError: if any group dict lacks a 'node_ids' key.
    """
    if node_groups is None:
        # Default: every node in the population as one blue group.
        selected_nodes = spike_trains.node_ids(population=population)
        return [{'node_ids': selected_nodes, 'c': 'b'}], selected_nodes
    # Deep-copy so that popping 'node_ids' downstream doesn't mutate the caller's dicts.
    node_groups = copy.deepcopy(node_groups)
    # Validate every group up-front. Previously only groups after the first
    # were checked, so a malformed first group raised a bare KeyError instead
    # of this explicit error.
    for grp in node_groups:
        if 'node_ids' not in grp:
            raise AttributeError('Could not find "node_ids" key in node_groups parameter.')
    selected_nodes = np.concatenate([np.array(grp['node_ids']) for grp in node_groups])
    return node_groups, selected_nodes
def plot_raster(spike_trains, with_histogram=True, population=None, node_groups=None, times=None, title=None,
                show=True, save_as=None):
    """Plot a spike raster (timestamps vs. node_ids) from a SpikeTrains object or SONATA spikes file.

    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param with_histogram: bool. If True also draw a histogram of spike times beneath the raster. default True.
    :param population: string. Node population to plot; may be None when the file has only one population.
    :param node_groups: None or list of dicts. Each dict needs a 'node_ids' key and may carry matplotlib
        kwargs such as 'label' and 'c'. If None all nodes are plotted as one group.
    :param times: (float, float) start/stop window; if None the window is inferred from the data.
    :param title: str, optional plot title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str. File name to save the figure to; falsy values skip saving.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    show_legend = False
    # show these as empty rows. To do this need to keep track of range of all node_ids
    min_id, max_id = np.inf, -1
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        min_ts, max_ts = times[0], times[1]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        min_ts = np.min(spikes_df['timestamps'])
        max_ts = np.max(spikes_df['timestamps'])
    # Lay out the axes: raster on top plus an optional (smaller) histogram below.
    if with_histogram:
        fig, axes = plt.subplots(2, 1, gridspec_kw={'height_ratios': [7, 1]}, squeeze=True)
        raster_axes = axes[0]
        bottom_axes = hist_axes = axes[1]
    else:
        fig, axes = plt.subplots(1, 1)
        bottom_axes = raster_axes = axes
        hist_axes = None
    for node_grp in node_groups:
        grp_ids = node_grp.pop('node_ids')
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # If label exists for at-least one group we want to show
        show_legend = show_legend or 'label' in node_grp
        # Finds min/max node_id for all node groups
        min_id = np.min([np.min(grp_ids), min_id])
        max_id = np.max([np.max(grp_ids), max_id])
        raster_axes.scatter(grp_spikes['timestamps'], grp_spikes['node_ids'], lw=0, s=8, **node_grp)
    if show_legend:
        raster_axes.legend(loc='upper right')
    if title:
        raster_axes.set_title(title)
    raster_axes.set_ylabel('node_ids')
    raster_axes.set_ylim(min_id - 0.5, max_id + 1)  # add buffering to range else the rows at the ends look cut-off.
    raster_axes.set_xlim(min_ts, max_ts + 1)
    bottom_axes.set_xlabel('timestamps ({})'.format(spike_trains.units(population=pop)))
    if with_histogram:
        # Add a histogram if necessarry
        hist_axes.hist(spikes_df['timestamps'], 100)
        hist_axes.set_xlim(min_ts - 0.5, max_ts + 1)
        hist_axes.axes.get_yaxis().set_visible(False)
        raster_axes.set_xticks([])
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
    return fig
def moving_average(data, window_size=10):
    """Return *data* smoothed with a centered moving-average window, clamped at the boundaries."""
    h = int(window_size / 2)  # half-window taken on each side of the current point
    x_max = len(data)
    return [np.mean(data[max(0, x - h):min(x_max, x + h)]) for x in range(0, x_max)]
def plot_rates(spike_trains, population=None, node_groups=None, times=None, smoothing=False,
               smoothing_params=None, title=None, show=True, save_as=None):
    """Plot per-node firing rates (Hz) from a SpikeTrains object or SONATA spikes file.

    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param population: string or None; node population to plot.
    :param node_groups: None or list of dicts, each with a 'node_ids' key plus optional matplotlib kwargs.
    :param times: (float, float) start/stop window; if None the window is inferred from the data.
    :param smoothing: False (no smoothing), True (moving average) or a smoothing function.
    :param smoothing_params: dict of kwargs forwarded to the smoothing function.
    :param title: str, optional plot title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str file name to save the figure to.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    # Determine if smoothing will be applied to the data
    smoothing_params = smoothing_params or {}  # pass in empty parameters
    if isinstance(smoothing, types.FunctionType):
        smoothing_fnc = partial(smoothing, **smoothing_params)
    elif smoothing:
        smoothing_fnc = partial(moving_average, **smoothing_params)
    else:
        smoothing_fnc = lambda d: d  # Use a filler function that won't do anything
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        recording_interval = times[1] - times[0]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        # No explicit window given: infer the recording span from the data itself.
        recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
    fig, axes = plt.subplots()
    show_legend = False
    for node_grp in node_groups:
        # Show a legend only if at least one group carries a 'label'.
        show_legend = show_legend or 'label' in node_grp
        grp_ids = node_grp.pop('node_ids')
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # spikes-per-node divided by the interval; /1000.0 assumes timestamps are in ms -- TODO confirm against units()
        spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
        axes.plot(np.array(spike_rates.index), smoothing_fnc(spike_rates), '.', **node_grp)
    axes.set_ylabel('Firing Rates (Hz)')
    axes.set_xlabel('node_ids')
    if show_legend:
        axes.legend()
    if title:
        axes.set_title(title)
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
    return fig
def plot_rates_boxplot(spike_trains, population=None, node_groups=None, times=None, title=None, show=True,
                       save_as=None):
    """Draw a box plot of per-node firing rates (Hz), one box per node group.

    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param population: string or None; node population to plot.
    :param node_groups: None or list of dicts, each with a 'node_ids' key and optional 'label'.
    :param times: (float, float) start/stop window; if None the window is inferred from the data.
    :param title: str, optional plot title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str file name to save the figure to.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        recording_interval = times[1] - times[0]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        # No explicit window given: infer the recording span from the data itself.
        recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
    fig, axes = plt.subplots()
    rates_data = []
    rates_labels = []
    # With a single unlabeled group, give the lone box a sensible tick label.
    if len(node_groups) == 1 and 'label' not in node_groups[0]:
        node_groups[0]['label'] = 'All Nodes'
    for i, node_grp in enumerate(node_groups):
        rates_labels.append(node_grp.get('label', 'Node Group {}'.format(i)))
        grp_ids = node_grp.pop('node_ids')
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # spikes-per-node divided by the interval; /1000.0 assumes timestamps are in ms -- TODO confirm against units()
        spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
        rates_data.append(spike_rates)
    axes.boxplot(rates_data)
    axes.set_ylabel('Firing Rates (Hz)')
    axes.set_xticklabels(rates_labels)
    if title:
        axes.set_title(title)
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
    return fig
| true
| true
|
7907797d313be675a9fff120b60dc370985e5f44
| 1,662
|
py
|
Python
|
tests/search_filter/test_search_filter_service.py
|
ymatsiuk/dispatch
|
cfc0b238f980d9f8140294dd50a5527ca4e1cdb8
|
[
"Apache-2.0"
] | null | null | null |
tests/search_filter/test_search_filter_service.py
|
ymatsiuk/dispatch
|
cfc0b238f980d9f8140294dd50a5527ca4e1cdb8
|
[
"Apache-2.0"
] | null | null | null |
tests/search_filter/test_search_filter_service.py
|
ymatsiuk/dispatch
|
cfc0b238f980d9f8140294dd50a5527ca4e1cdb8
|
[
"Apache-2.0"
] | null | null | null |
import pytest
def test_get(session, search_filter):
    """Fetching a search filter by id returns the matching record."""
    from dispatch.search_filter.service import get
    t_search_filter = get(db_session=session, search_filter_id=search_filter.id)
    assert t_search_filter.id == search_filter.id
def test_get_all(session, search_filters):
    """get_all returns every seeded search filter (fixture seeds more than one)."""
    from dispatch.search_filter.service import get_all
    t_search_filters = get_all(db_session=session).all()
    assert len(t_search_filters) > 1
def test_create(session, project):
    """Creating from a SearchFilterCreate payload persists and returns the filter."""
    from dispatch.search_filter.service import create
    from dispatch.search_filter.models import SearchFilterCreate
    name = "name"
    description = "description"
    expression = [{}]
    filter_type = "type"  # renamed local: `type` shadowed the builtin
    search_filter_in = SearchFilterCreate(
        name=name,
        description=description,
        expression=expression,
        type=filter_type,
        project=project,
    )
    search_filter = create(db_session=session, search_filter_in=search_filter_in)
    assert search_filter
@pytest.mark.skip
def test_update(session, search_filter):
    """Updating a search filter changes its name. (Currently skipped.)"""
    from dispatch.search_filter.service import update
    from dispatch.search_filter.models import SearchFilterUpdate
    name = "Updated name"
    search_filter_in = SearchFilterUpdate(
        name=name,
    )
    search_filter = update(
        db_session=session,
        search_filter=search_filter,
        search_filter_in=search_filter_in,
    )
    assert search_filter.name == name
def test_delete(session, search_filter):
    """Deleting a search filter makes it unretrievable."""
    from dispatch.search_filter.service import delete, get
    delete(db_session=session, search_filter_id=search_filter.id)
    assert not get(db_session=session, search_filter_id=search_filter.id)
| 27.245902
| 81
| 0.737665
|
import pytest
def test_get(session, search_filter):
    """Fetching a search filter by id returns the matching record."""
    from dispatch.search_filter.service import get
    t_search_filter = get(db_session=session, search_filter_id=search_filter.id)
    assert t_search_filter.id == search_filter.id
def test_get_all(session, search_filters):
    """get_all returns every seeded search filter (fixture seeds more than one)."""
    from dispatch.search_filter.service import get_all
    t_search_filters = get_all(db_session=session).all()
    assert len(t_search_filters) > 1
def test_create(session, project):
    """Creating from a SearchFilterCreate payload persists and returns the filter."""
    from dispatch.search_filter.service import create
    from dispatch.search_filter.models import SearchFilterCreate
    name = "name"
    description = "description"
    expression = [{}]
    type = "type"  # NOTE(review): local shadows the builtin `type`
    search_filter_in = SearchFilterCreate(
        name=name,
        description=description,
        expression=expression,
        type=type,
        project=project,
    )
    search_filter = create(db_session=session, search_filter_in=search_filter_in)
    assert search_filter
@pytest.mark.skip
def test_update(session, search_filter):
    """Updating a search filter changes its name. (Currently skipped.)"""
    from dispatch.search_filter.service import update
    from dispatch.search_filter.models import SearchFilterUpdate
    name = "Updated name"
    search_filter_in = SearchFilterUpdate(
        name=name,
    )
    search_filter = update(
        db_session=session,
        search_filter=search_filter,
        search_filter_in=search_filter_in,
    )
    assert search_filter.name == name
def test_delete(session, search_filter):
    """Deleting a search filter makes it unretrievable."""
    from dispatch.search_filter.service import delete, get
    delete(db_session=session, search_filter_id=search_filter.id)
    assert not get(db_session=session, search_filter_id=search_filter.id)
| true
| true
|
79077ab9eea1593a9c9e072b1c42a4cabb1739dc
| 6,323
|
py
|
Python
|
test/test_regex.py
|
clayne/asm2cfg
|
4d9089185ec8efb0bf82aa525762f5af84cc0c6d
|
[
"MIT"
] | null | null | null |
test/test_regex.py
|
clayne/asm2cfg
|
4d9089185ec8efb0bf82aa525762f5af84cc0c6d
|
[
"MIT"
] | null | null | null |
test/test_regex.py
|
clayne/asm2cfg
|
4d9089185ec8efb0bf82aa525762f5af84cc0c6d
|
[
"MIT"
] | null | null | null |
"""
Unittests of asm2cfg's regexes
"""
import unittest
from src.asm2cfg import asm2cfg
class FunctionHeaderTestCase(unittest.TestCase):
    """
    Tests of function header regex
    """
    def test_gdb_unstripped(self):
        # GDB disassembly of a binary with symbols: header carries the function name.
        line = 'Dump of assembler code for function test_function:'
        fmt, fun = asm2cfg.parse_function_header(line)
        self.assertEqual(fmt, asm2cfg.InputFormat.GDB)
        self.assertEqual(fun, 'test_function')
    def test_gdb_stripped(self):
        # Stripped binary: only an address range is available, used as the name.
        line = 'Dump of assembler code from 0x555555555faf to 0x555555557008:'
        fmt, fun = asm2cfg.parse_function_header(line)
        self.assertEqual(fmt, asm2cfg.InputFormat.GDB)
        self.assertEqual(fun, '0x555555555faf-0x555555557008')
    def test_objdump(self):
        # objdump-style header: "<address> <name>:".
        line = '000000000000100b <bar>:'
        fmt, fun = asm2cfg.parse_function_header(line)
        self.assertEqual(fmt, asm2cfg.InputFormat.OBJDUMP)
        self.assertEqual(fun, 'bar')
class ParseAddressTestCase(unittest.TestCase):
    """
    Tests of parse_address function
    """
    def test_absolute(self):
        # Bare absolute address: no base symbol, no offset.
        line = '0x000055555557259c: XYZ'
        address, rest = asm2cfg.parse_address(line)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x55555557259c)
        self.assertIs(address.base, None)
        self.assertIs(address.offset, None)
        self.assertEqual(rest, ' XYZ')
    def test_relative(self):
        # Address with a "<+offset>" relative annotation.
        line = '0x000055555557259c <+11340>: XYZ'
        address, rest = asm2cfg.parse_address(line)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x55555557259c)
        self.assertIs(address.base, None)
        self.assertEqual(address.offset, 11340)
        self.assertEqual(rest, ' XYZ')
class ParseBodyTestCase(unittest.TestCase):
    """
    Tests of asm2cfg.parse_body function
    """
    def setUp(self):
        # All cases parse x86 instruction bodies.
        self.target_info = asm2cfg.X86TargetInfo()
    def test_gdb_stripped_known(self):
        # Direct call with a symbolic target annotation.
        line = ' call 0x55555558add0 <_Z19exportDebugifyStats>'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'call 0x55555558add0')
        self.assertEqual(opcode, 'call')
        self.assertEqual(ops, ['0x55555558add0'])
        self.assertEqual(rest, '<_Z19exportDebugifyStats>')
    def test_gdb_stripped_pic(self):
        # RIP-relative indirect call; resolved target appears in the comment.
        line = ' call *0x26a16(%rip) # 0x5555555967a8'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'call *0x26a16(%rip)')
        self.assertEqual(opcode, 'call')
        self.assertEqual(ops, ['*0x26a16(%rip)'])
        self.assertEqual(rest, '# 0x5555555967a8')
    def test_gdb_plt(self):
        # Call through a PLT stub.
        line = ' callq 0x1020 <foo@plt>'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'callq 0x1020')
        self.assertEqual(opcode, 'callq')
        self.assertEqual(ops, ['0x1020'])
        self.assertEqual(rest, '<foo@plt>')
    def test_gdb_stripped_nonpic(self):
        # Direct call with no symbol annotation at all.
        line = ' call 0x555555555542'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'call 0x555555555542')
        self.assertEqual(opcode, 'call')
        self.assertEqual(ops, ['0x555555555542'])
        self.assertEqual(rest, '')
    def test_gdb_indirect_call(self):
        # Register-indirect call; operand keeps the '*' prefix.
        line = ' callq *(%rsi)'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'callq *(%rsi)')
        self.assertEqual(opcode, 'callq')
        self.assertEqual(ops, ['*(%rsi)'])
        self.assertEqual(rest, '')
class ParseTargetTestCase(unittest.TestCase):
    """
    Tests of parse_target function
    """
    def test_with_offset(self):
        # "<symbol+offset>" form.
        line = '<_Z19exportDebugifyStats+123>'
        address, rest = asm2cfg.parse_target(line)
        self.assertIsNot(address, None)
        self.assertIs(address.abs, None)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, 123)
        self.assertEqual(rest, '')
    def test_with_neg_offset(self):
        # Negative offsets are parsed too.
        line = '<_Z19exportDebugifyStats-123>'
        address, rest = asm2cfg.parse_target(line)
        self.assertIsNot(address, None)
        self.assertIs(address.abs, None)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, -123)
        self.assertEqual(rest, '')
    def test_without_offset(self):
        # Bare "<symbol>": offset defaults to 0.
        line = '<_Z19exportDebugifyStats>'
        address, rest = asm2cfg.parse_target(line)
        self.assertIsNot(address, None)
        self.assertIs(address.abs, None)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, 0)
        self.assertEqual(rest, '')
class ParseCommentTestCase(unittest.TestCase):
    """
    Tests of parse_comment function
    """
    def setUp(self):
        self.target_info = asm2cfg.X86TargetInfo()
    def test_absolute(self):
        # Comment with only an absolute address.
        line = '# 0x5555555967a8'
        address, rest = asm2cfg.parse_comment(line, self.target_info)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x5555555967a8)
        self.assertIs(address.base, None)
        self.assertIs(address.offset, None)
        self.assertEqual(rest, '')
    def test_symbolic(self):
        # Address plus bare symbol: offset defaults to 0.
        line = '# 0x5555555967a8 <foo>'
        address, rest = asm2cfg.parse_comment(line, self.target_info)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x5555555967a8)
        self.assertEqual(address.base, 'foo')
        self.assertIs(address.offset, 0)
        self.assertEqual(rest, '')
    def test_complete(self):
        # Address, symbol and hex offset all present.
        line = '# 3ff8 <foo+0x2ff8>'
        address, rest = asm2cfg.parse_comment(line, self.target_info)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x3ff8)  # FIXME: support hex offsets
        self.assertEqual(address.base, 'foo')
        self.assertEqual(address.offset, 0x2ff8)
        self.assertEqual(rest, '')
| 32.425641
| 78
| 0.652697
|
import unittest
from src.asm2cfg import asm2cfg
class FunctionHeaderTestCase(unittest.TestCase):
    """Tests of the function-header regex."""
    def test_gdb_unstripped(self):
        # GDB disassembly of a binary with symbols: header carries the function name.
        line = 'Dump of assembler code for function test_function:'
        fmt, fun = asm2cfg.parse_function_header(line)
        self.assertEqual(fmt, asm2cfg.InputFormat.GDB)
        self.assertEqual(fun, 'test_function')
    def test_gdb_stripped(self):
        # Stripped binary: only an address range is available, used as the name.
        line = 'Dump of assembler code from 0x555555555faf to 0x555555557008:'
        fmt, fun = asm2cfg.parse_function_header(line)
        self.assertEqual(fmt, asm2cfg.InputFormat.GDB)
        self.assertEqual(fun, '0x555555555faf-0x555555557008')
    def test_objdump(self):
        # objdump-style header: "<address> <name>:".
        line = '000000000000100b <bar>:'
        fmt, fun = asm2cfg.parse_function_header(line)
        self.assertEqual(fmt, asm2cfg.InputFormat.OBJDUMP)
        self.assertEqual(fun, 'bar')
class ParseAddressTestCase(unittest.TestCase):
    """Tests of the parse_address function."""
    def test_absolute(self):
        # Bare absolute address: no base symbol, no offset.
        line = '0x000055555557259c: XYZ'
        address, rest = asm2cfg.parse_address(line)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x55555557259c)
        self.assertIs(address.base, None)
        self.assertIs(address.offset, None)
        self.assertEqual(rest, ' XYZ')
    def test_relative(self):
        # Address with a "<+offset>" relative annotation.
        line = '0x000055555557259c <+11340>: XYZ'
        address, rest = asm2cfg.parse_address(line)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x55555557259c)
        self.assertIs(address.base, None)
        self.assertEqual(address.offset, 11340)
        self.assertEqual(rest, ' XYZ')
class ParseBodyTestCase(unittest.TestCase):
    """Tests of the asm2cfg.parse_body function."""
    def setUp(self):
        # All cases parse x86 instruction bodies.
        self.target_info = asm2cfg.X86TargetInfo()
    def test_gdb_stripped_known(self):
        # Direct call with a symbolic target annotation.
        line = ' call 0x55555558add0 <_Z19exportDebugifyStats>'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'call 0x55555558add0')
        self.assertEqual(opcode, 'call')
        self.assertEqual(ops, ['0x55555558add0'])
        self.assertEqual(rest, '<_Z19exportDebugifyStats>')
    def test_gdb_stripped_pic(self):
        # RIP-relative indirect call; resolved target appears in the comment.
        line = ' call *0x26a16(%rip) # 0x5555555967a8'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'call *0x26a16(%rip)')
        self.assertEqual(opcode, 'call')
        self.assertEqual(ops, ['*0x26a16(%rip)'])
        self.assertEqual(rest, '# 0x5555555967a8')
    def test_gdb_plt(self):
        # Call through a PLT stub.
        line = ' callq 0x1020 <foo@plt>'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'callq 0x1020')
        self.assertEqual(opcode, 'callq')
        self.assertEqual(ops, ['0x1020'])
        self.assertEqual(rest, '<foo@plt>')
    def test_gdb_stripped_nonpic(self):
        # Direct call with no symbol annotation at all.
        line = ' call 0x555555555542'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'call 0x555555555542')
        self.assertEqual(opcode, 'call')
        self.assertEqual(ops, ['0x555555555542'])
        self.assertEqual(rest, '')
    def test_gdb_indirect_call(self):
        # Register-indirect call; operand keeps the '*' prefix.
        line = ' callq *(%rsi)'
        body, opcode, ops, rest = asm2cfg.parse_body(line, self.target_info)
        self.assertIsNot(body, None)
        self.assertEqual(body, 'callq *(%rsi)')
        self.assertEqual(opcode, 'callq')
        self.assertEqual(ops, ['*(%rsi)'])
        self.assertEqual(rest, '')
class ParseTargetTestCase(unittest.TestCase):
    """Tests of the parse_target function."""
    def test_with_offset(self):
        # "<symbol+offset>" form.
        line = '<_Z19exportDebugifyStats+123>'
        address, rest = asm2cfg.parse_target(line)
        self.assertIsNot(address, None)
        self.assertIs(address.abs, None)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, 123)
        self.assertEqual(rest, '')
    def test_with_neg_offset(self):
        # Negative offsets are parsed too.
        line = '<_Z19exportDebugifyStats-123>'
        address, rest = asm2cfg.parse_target(line)
        self.assertIsNot(address, None)
        self.assertIs(address.abs, None)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, -123)
        self.assertEqual(rest, '')
    def test_without_offset(self):
        # Bare "<symbol>": offset defaults to 0.
        line = '<_Z19exportDebugifyStats>'
        address, rest = asm2cfg.parse_target(line)
        self.assertIsNot(address, None)
        self.assertIs(address.abs, None)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, 0)
        self.assertEqual(rest, '')
class ParseCommentTestCase(unittest.TestCase):
    """Tests of the parse_comment function."""
    def setUp(self):
        self.target_info = asm2cfg.X86TargetInfo()
    def test_absolute(self):
        # Comment with only an absolute address.
        line = '# 0x5555555967a8'
        address, rest = asm2cfg.parse_comment(line, self.target_info)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x5555555967a8)
        self.assertIs(address.base, None)
        self.assertIs(address.offset, None)
        self.assertEqual(rest, '')
    def test_symbolic(self):
        # Address plus bare symbol: offset defaults to 0.
        line = '# 0x5555555967a8 <foo>'
        address, rest = asm2cfg.parse_comment(line, self.target_info)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x5555555967a8)
        self.assertEqual(address.base, 'foo')
        self.assertIs(address.offset, 0)
        self.assertEqual(rest, '')
    def test_complete(self):
        # Address, symbol and hex offset all present.
        line = '# 3ff8 <foo+0x2ff8>'
        address, rest = asm2cfg.parse_comment(line, self.target_info)
        self.assertIsNot(address, None)
        self.assertEqual(address.abs, 0x3ff8)
        self.assertEqual(address.base, 'foo')
        self.assertEqual(address.offset, 0x2ff8)
        self.assertEqual(rest, '')
| true
| true
|
79077b00bc0523eae654fd0035bda44d3a761054
| 402
|
py
|
Python
|
return_practice.py
|
Athenian-ComputerScience-Fall2020/functions-practice-21lsparks
|
dd772a336d18f2c7736a72080111271aed181d48
|
[
"Apache-2.0"
] | null | null | null |
return_practice.py
|
Athenian-ComputerScience-Fall2020/functions-practice-21lsparks
|
dd772a336d18f2c7736a72080111271aed181d48
|
[
"Apache-2.0"
] | 1
|
2020-09-29T03:31:49.000Z
|
2020-09-29T03:31:49.000Z
|
return_practice.py
|
Athenian-ComputerScience-Fall2020/functions-practice-21lsparks
|
dd772a336d18f2c7736a72080111271aed181d48
|
[
"Apache-2.0"
] | null | null | null |
# Add comments to explain what the output from this program will be and how you know.
def math1():
    """Return the sum of 50 and 5, i.e. 55."""
    first, second = 50, 5
    return first + second
def math2():
    """Return the difference of 50 and 5, i.e. 45."""
    first, second = 50, 5
    return first - second
def math3():
    """Return the product of 50 and 5, i.e. 250."""
    first, second = 50, 5
    return first * second
# Call math2() (which computes 50 - 5) and print its result: 45.
output_num = math2()
print(output_num)
'''
Add prediction(s) here:
# I think it will work because i am smart. I predict be 45
'''
| 15.461538
| 85
| 0.639303
|
# Three arithmetic helpers: each combines 50 and 5 with a different operator.
def math1():
    """Return 50 + 5 (55)."""
    num1 = 50
    num2 = 5
    return num1 + num2
def math2():
    """Return 50 - 5 (45)."""
    num1 = 50
    num2 = 5
    return num1 - num2
def math3():
    """Return 50 * 5 (250)."""
    num1 = 50
    num2 = 5
    return num1 * num2
# Only math2 is exercised, so this prints 45.
output_num = math2()
print(output_num)
| true
| true
|
79077b92015bbceb91ab6b6fbe7fc577c35eb1ed
| 5,848
|
py
|
Python
|
radiator_fritz_o365_sync/core.py
|
ykorzikowski/python-fritz-office-365-sync
|
2044aa3d6cbdf9ceddd82f96380336ad2addb6f4
|
[
"Apache-2.0"
] | 1
|
2019-06-10T18:16:15.000Z
|
2019-06-10T18:16:15.000Z
|
radiator_fritz_o365_sync/core.py
|
ykorzikowski/python-fritz-office-365-sync
|
2044aa3d6cbdf9ceddd82f96380336ad2addb6f4
|
[
"Apache-2.0"
] | null | null | null |
radiator_fritz_o365_sync/core.py
|
ykorzikowski/python-fritz-office-365-sync
|
2044aa3d6cbdf9ceddd82f96380336ad2addb6f4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from O365 import Account, Connection, FileSystemTokenBackend
from datetime import datetime as dt
from datetime import timedelta
from conf.conf import CONFIG as conf
from fritzhome import FritzBox
import logging
class Core:
    @staticmethod
    def get_credentials():
        """Return the (client_id, client_secret) tuple read from the project config."""
        return conf['OFFICE_CLIENT_ID'], conf['OFFICE_CLIENT_SECRET']
    @staticmethod
    def get_account():
        """Build an O365 Account from the configured client credentials."""
        return Account(credentials=Core.get_credentials())
@staticmethod
def get_scopes():
return ['offline_access',
'https://graph.microsoft.com/Mail.ReadWrite',
'https://graph.microsoft.com/Mail.Send',
'https://graph.microsoft.com/Calendars.Read',
'https://graph.microsoft.com/Files.ReadWrite',
'https://graph.microsoft.com/User.Read']
    @staticmethod
    def get_con_obj():
        """Create an O365 Connection using a file-system token cache ('o365_token.txt')."""
        credentials = (conf['OFFICE_CLIENT_ID'], conf['OFFICE_CLIENT_SECRET'])
        scopes = Core.get_scopes()
        return Connection(credentials, scopes=scopes, token_backend=FileSystemTokenBackend(token_filename='o365_token.txt'))
    def run(self):
        """Perform one sync pass: read calendar heating entries and drive the thermostats.

        Exits the process (code 1) if no OAuth token file exists yet.
        """
        con = Core.get_con_obj()
        if not con.token_backend.check_token():
            logging.error("You have to generate your token file with python -m radiator_fritz_o365_sync.gen_token first!")
            exit(1)
        con.refresh_token()
        # NOTE(review): query_for_heating_periods is defined elsewhere in this
        # class; presumably returns calendar events with a .subject attribute.
        heating = self.query_for_heating_periods()
        # Cool down if no heating entries found in calendar
        if len(heating) == 0:
            logging.debug('No heating entry in calendar found. Cooling down all thermostats if they are heating. ')
            self.cool_down_all()
        # For each heating entry in calendar heat up
        subjects = []
        for heat in heating:
            logging.info('Found entry "%s"', heat.subject)
            self.heat_up(heat.subject)
            subjects.append(heat.subject)
        # Cool down thermostats if they are not heated
        self.cool_down_unless(subjects)
        # auto reset
        if len(heating) == 0:
            self.auto_reset()
        # Every night refresh the token and cool down to reset manual changes on thermostats
        if dt.now().time().strftime('%H:%M') == '00:00':
            con.refresh_token()
"""
Gets all thermostats from fritzbox
"""
def get_thermostats(self):
if conf['FRITZ_TLS']:
fritzbox = FritzBox(conf['FRITZ_IP'], conf['FRITZ_USER'], conf['FRITZ_PW'], use_tls=conf['FRITZ_TLS'], tls_cert_path='conf/fritz.crt')
else:
fritzbox = FritzBox(conf['FRITZ_IP'], conf['FRITZ_USER'], conf['FRITZ_PW'])
fritzbox.login()
actors = fritzbox.get_actors()
thermostats = []
for actor in actors:
if actor.has_heating_controller:
thermostats.append(actor)
return thermostats
def thermostat_heatup(self, actor):
if actor.target_temperature == conf['HEATING_LOW_TEMP']:
logging.info('Heating up %s ...', actor.name)
actor.set_temperature(conf['HEATING_COMFORT_TEMP'])
"""
Sets the temperature of thermostats with matching subject or all thermostats to comfort temperature
"""
def heat_up(self, sub):
thermostats = self.get_thermostats()
for thermostat in thermostats:
if sub == conf['CALENDAR_HEAT_ALL_SUBJECT']:
self.thermostat_heatup(thermostat)
else:
if thermostat.name == sub:
self.thermostat_heatup(thermostat)
"""
Cool down every thermostat which is not in unless list
"""
def cool_down_unless(self, unless):
# return if wildcard is found in subjects
if conf['CALENDAR_HEAT_ALL_SUBJECT'] in unless:
return
thermostats = self.get_thermostats()
for thermostat in thermostats:
if thermostat.name not in unless:
self.cool_down(thermostat)
"""
Sets the temperature of all thermostats to LOW_TEMP if they are currently set to COMFORT_TEMP
"""
def cool_down_all(self):
thermostats = self.get_thermostats()
for thermostat in thermostats:
self.cool_down(thermostat)
"""
Sets the temperature of thermostat to low temp if it is on comfort temp
"""
def cool_down(self, thermostat):
if thermostat.target_temperature == conf['HEATING_COMFORT_TEMP']:
logging.info('Cooling down %s ...', thermostat.name)
thermostat.set_temperature(conf['HEATING_LOW_TEMP'])
"""
If the temperature has changed manually via app or on the thermostat itself,
this method resets the temperature to the HEATING_LOW_TEMP on a given time
"""
def auto_reset(self):
if conf['HEATING_AUTO_RESET']:
current_time = dt.now().time()
target_time = conf['HEATING_AUTO_RESET_TIME']
if current_time.strftime('%H:%M') == target_time:
logging.info('Resetting temperature on all thermostats now!')
thermostats = self.get_thermostats()
for thermostat in thermostats:
thermostat.set_temperature(conf['HEATING_LOW_TEMP'])
def query_for_heating_periods(self):
account = Core.get_account()
schedule = account.schedule()
calendar = schedule.get_calendar(calendar_name=conf['CALENDAR_NAME'])
if calendar is None:
logging.error("Calendar with name '%s' does not exist!", conf['CALENDAR_NAME'])
exit(1)
q = calendar.new_query('start').greater_equal(dt.now())
q.chain('and').on_attribute('end').less_equal(dt.now() + timedelta(minutes=5))
return list(calendar.get_events(query=q))
if __name__ == "__main__":
Core().run()
| 35.658537
| 146
| 0.632695
|
import os
from O365 import Account, Connection, FileSystemTokenBackend
from datetime import datetime as dt
from datetime import timedelta
from conf.conf import CONFIG as conf
from fritzhome import FritzBox
import logging
class Core:
@staticmethod
def get_credentials():
return conf['OFFICE_CLIENT_ID'], conf['OFFICE_CLIENT_SECRET']
@staticmethod
def get_account():
return Account(credentials=Core.get_credentials())
@staticmethod
def get_scopes():
return ['offline_access',
'https://graph.microsoft.com/Mail.ReadWrite',
'https://graph.microsoft.com/Mail.Send',
'https://graph.microsoft.com/Calendars.Read',
'https://graph.microsoft.com/Files.ReadWrite',
'https://graph.microsoft.com/User.Read']
@staticmethod
def get_con_obj():
credentials = (conf['OFFICE_CLIENT_ID'], conf['OFFICE_CLIENT_SECRET'])
scopes = Core.get_scopes()
return Connection(credentials, scopes=scopes, token_backend=FileSystemTokenBackend(token_filename='o365_token.txt'))
def run(self):
con = Core.get_con_obj()
if not con.token_backend.check_token():
logging.error("You have to generate your token file with python -m radiator_fritz_o365_sync.gen_token first!")
exit(1)
con.refresh_token()
heating = self.query_for_heating_periods()
if len(heating) == 0:
logging.debug('No heating entry in calendar found. Cooling down all thermostats if they are heating. ')
self.cool_down_all()
subjects = []
for heat in heating:
logging.info('Found entry "%s"', heat.subject)
self.heat_up(heat.subject)
subjects.append(heat.subject)
self.cool_down_unless(subjects)
if len(heating) == 0:
self.auto_reset()
if dt.now().time().strftime('%H:%M') == '00:00':
con.refresh_token()
def get_thermostats(self):
if conf['FRITZ_TLS']:
fritzbox = FritzBox(conf['FRITZ_IP'], conf['FRITZ_USER'], conf['FRITZ_PW'], use_tls=conf['FRITZ_TLS'], tls_cert_path='conf/fritz.crt')
else:
fritzbox = FritzBox(conf['FRITZ_IP'], conf['FRITZ_USER'], conf['FRITZ_PW'])
fritzbox.login()
actors = fritzbox.get_actors()
thermostats = []
for actor in actors:
if actor.has_heating_controller:
thermostats.append(actor)
return thermostats
def thermostat_heatup(self, actor):
if actor.target_temperature == conf['HEATING_LOW_TEMP']:
logging.info('Heating up %s ...', actor.name)
actor.set_temperature(conf['HEATING_COMFORT_TEMP'])
def heat_up(self, sub):
thermostats = self.get_thermostats()
for thermostat in thermostats:
if sub == conf['CALENDAR_HEAT_ALL_SUBJECT']:
self.thermostat_heatup(thermostat)
else:
if thermostat.name == sub:
self.thermostat_heatup(thermostat)
def cool_down_unless(self, unless):
if conf['CALENDAR_HEAT_ALL_SUBJECT'] in unless:
return
thermostats = self.get_thermostats()
for thermostat in thermostats:
if thermostat.name not in unless:
self.cool_down(thermostat)
def cool_down_all(self):
thermostats = self.get_thermostats()
for thermostat in thermostats:
self.cool_down(thermostat)
def cool_down(self, thermostat):
if thermostat.target_temperature == conf['HEATING_COMFORT_TEMP']:
logging.info('Cooling down %s ...', thermostat.name)
thermostat.set_temperature(conf['HEATING_LOW_TEMP'])
def auto_reset(self):
if conf['HEATING_AUTO_RESET']:
current_time = dt.now().time()
target_time = conf['HEATING_AUTO_RESET_TIME']
if current_time.strftime('%H:%M') == target_time:
logging.info('Resetting temperature on all thermostats now!')
thermostats = self.get_thermostats()
for thermostat in thermostats:
thermostat.set_temperature(conf['HEATING_LOW_TEMP'])
def query_for_heating_periods(self):
account = Core.get_account()
schedule = account.schedule()
calendar = schedule.get_calendar(calendar_name=conf['CALENDAR_NAME'])
if calendar is None:
logging.error("Calendar with name '%s' does not exist!", conf['CALENDAR_NAME'])
exit(1)
q = calendar.new_query('start').greater_equal(dt.now())
q.chain('and').on_attribute('end').less_equal(dt.now() + timedelta(minutes=5))
return list(calendar.get_events(query=q))
if __name__ == "__main__":
Core().run()
| true
| true
|
79077b96ce06cb88bebec2f7bc5e8bffdea9380b
| 2,581
|
py
|
Python
|
twitoff/app.py
|
kvinne-anc/TwittOff
|
f734323edc9f271f81c217f2cf6e9afdccf964dc
|
[
"MIT"
] | null | null | null |
twitoff/app.py
|
kvinne-anc/TwittOff
|
f734323edc9f271f81c217f2cf6e9afdccf964dc
|
[
"MIT"
] | null | null | null |
twitoff/app.py
|
kvinne-anc/TwittOff
|
f734323edc9f271f81c217f2cf6e9afdccf964dc
|
[
"MIT"
] | null | null | null |
"""Main app/routing file for TwitOff"""
from os import getenv
from flask import Flask, render_template, request
from twitoff.twitter import add_or_update_user
from twitoff.models import DB, User, MIGRATE
from twitoff.predict import predict_user
def create_app():
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
DB.init_app(app)
MIGRATE.init_app(app, DB)
# TODO - make rest of application
@app.route('/')
def root():
# SQL equivalent = "SELECT * FROM user;"
return render_template('base.html', title="Home", users=User.query.all())
@app.route("/compare", methods=["POST"])
def compare():
user0, user1 = sorted(
[request.values["user1"], request.values["user2"]])
# conditinoal that prevents same user comparison
if user0 == user1:
message = "Cannot compare users to themselves!"
else:
hypo_tweet_text = request.values["tweet_text"]
# prediction return zero or one depending upon user
prediction = predict_user(user0, user1, hypo_tweet_text)
message = "'{}' is more likely to be said by {} than {}".format(
hypo_tweet_text, user1 if prediction else user0,
user0 if prediction else user1
)
# returns rendered template with dynamic message
return render_template('prediction.html', title="Prediction:", message=message)
@app.route("/user", methods=["POST"])
@app.route("/user/<name>", methods=["GET"])
def user(name=None, message=""):
name = name or request.values["user_name"]
try:
if request.method == "POST":
add_or_update_user(name)
message = "User {} sucessfully added!".format(name)
tweets = User.query.filter(User.name == name).one().tweets
except Exception as e:
message = "Error handling {}: {}".format(name, e)
tweets = []
return render_template("user.html", title=name, tweets=tweets, message=message)
@app.route("/update")
def update():
users = User.query.all()
for user in users:
add_or_update_user(user.name)
return render_template("base.html", title="Database has been updated!", users=User.query.all())
@app.route("/reset")
def reset():
DB.drop_all()
DB.create_all()
return render_template("base.html", title="Reset Database")
return app
| 33.519481
| 103
| 0.618752
|
from os import getenv
from flask import Flask, render_template, request
from twitoff.twitter import add_or_update_user
from twitoff.models import DB, User, MIGRATE
from twitoff.predict import predict_user
def create_app():
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
DB.init_app(app)
MIGRATE.init_app(app, DB)
@app.route('/')
def root():
return render_template('base.html', title="Home", users=User.query.all())
@app.route("/compare", methods=["POST"])
def compare():
user0, user1 = sorted(
[request.values["user1"], request.values["user2"]])
if user0 == user1:
message = "Cannot compare users to themselves!"
else:
hypo_tweet_text = request.values["tweet_text"]
prediction = predict_user(user0, user1, hypo_tweet_text)
message = "'{}' is more likely to be said by {} than {}".format(
hypo_tweet_text, user1 if prediction else user0,
user0 if prediction else user1
)
return render_template('prediction.html', title="Prediction:", message=message)
@app.route("/user", methods=["POST"])
@app.route("/user/<name>", methods=["GET"])
def user(name=None, message=""):
name = name or request.values["user_name"]
try:
if request.method == "POST":
add_or_update_user(name)
message = "User {} sucessfully added!".format(name)
tweets = User.query.filter(User.name == name).one().tweets
except Exception as e:
message = "Error handling {}: {}".format(name, e)
tweets = []
return render_template("user.html", title=name, tweets=tweets, message=message)
@app.route("/update")
def update():
users = User.query.all()
for user in users:
add_or_update_user(user.name)
return render_template("base.html", title="Database has been updated!", users=User.query.all())
@app.route("/reset")
def reset():
DB.drop_all()
DB.create_all()
return render_template("base.html", title="Reset Database")
return app
| true
| true
|
79077beb055c458d395199ff41e16e906d53d08c
| 1,272
|
py
|
Python
|
hdfshell/cluster.py
|
alingse/hdfshell
|
3da0ff9fd2204fd957f011fe6fd3e21687004c7c
|
[
"Apache-2.0"
] | null | null | null |
hdfshell/cluster.py
|
alingse/hdfshell
|
3da0ff9fd2204fd957f011fe6fd3e21687004c7c
|
[
"Apache-2.0"
] | null | null | null |
hdfshell/cluster.py
|
alingse/hdfshell
|
3da0ff9fd2204fd957f011fe6fd3e21687004c7c
|
[
"Apache-2.0"
] | null | null | null |
#coding=utf-8
#author@alingse
#2016.06.21
hdfs_schema = 'hdfs://'
file_schema = 'file://'
class hdfsCluster(object):
""" 一个hdfs 资源 hdfs uri,path,账户密码认证
"""
def __init__(self,host,port=9000,schema=hdfs_schema):
""" 目前只需要host和port """
self.host = host
self.port = port
self.schema = schema
self._path = '/'
self._status = None
@property
def status(self):
return self._status
@status.setter
def status(self,value):
if value in [None,True,False]:
self._status = value
@property
def path(self):
return self._path
@path.setter
def path(self,value):
if value.startswith('/') and value.endswith('/'):
self._path = value
self._status = None
@property
def uri_head(self):
""" 返回 uri 的 head"""
head = self.schema + '{}:{}'.format(self.host,self.port)
return head
@property
def uri(self):
""" 返回当前路径"""
_uri = self.schema + '{}:{}{}'.format(self.host,self.port,self._path)
return _uri
if __name__ == '__main__':
hdfs = hdfsCluster('localhost','9000')
hdfs.path = '/hive/'
print(hdfs.uri)
print(hdfs.uri_head)
| 20.852459
| 77
| 0.552673
|
hdfs_schema = 'hdfs://'
file_schema = 'file://'
class hdfsCluster(object):
def __init__(self,host,port=9000,schema=hdfs_schema):
self.host = host
self.port = port
self.schema = schema
self._path = '/'
self._status = None
@property
def status(self):
return self._status
@status.setter
def status(self,value):
if value in [None,True,False]:
self._status = value
@property
def path(self):
return self._path
@path.setter
def path(self,value):
if value.startswith('/') and value.endswith('/'):
self._path = value
self._status = None
@property
def uri_head(self):
head = self.schema + '{}:{}'.format(self.host,self.port)
return head
@property
def uri(self):
_uri = self.schema + '{}:{}{}'.format(self.host,self.port,self._path)
return _uri
if __name__ == '__main__':
hdfs = hdfsCluster('localhost','9000')
hdfs.path = '/hive/'
print(hdfs.uri)
print(hdfs.uri_head)
| true
| true
|
79077c49d4fd877106e3bb12a28b786be0f3b587
| 6,831
|
py
|
Python
|
dqn/exercise/dqn_agent.py
|
0xtristan/deep-reinforcement-learning
|
fb943ddb2796d85cc876ea076a26d850b7b87919
|
[
"MIT"
] | 1
|
2019-08-10T04:01:22.000Z
|
2019-08-10T04:01:22.000Z
|
dqn/exercise/dqn_agent.py
|
tfrizza/deep-reinforcement-learning
|
fb943ddb2796d85cc876ea076a26d850b7b87919
|
[
"MIT"
] | null | null | null |
dqn/exercise/dqn_agent.py
|
tfrizza/deep-reinforcement-learning
|
fb943ddb2796d85cc876ea076a26d850b7b87919
|
[
"MIT"
] | 1
|
2021-11-14T17:29:39.000Z
|
2021-11-14T17:29:39.000Z
|
import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
# from_numpy creates tensor without copying numpy array data
# float == to(float), to() can be used for dtype and device conversions
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
# eval mode as opposed to training (ignores dropout, batchnorm)
self.qnetwork_local.eval()
with torch.no_grad():
# call the nn.Module rather than explicitly using nn.Module.forward()
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
"*** YOUR CODE HERE ***"
# Max q value over all next actions given their next states (this is for a whole batch)
# i.e. max_a(Q(s_{j+1}, a, w-)) from the one step look ahead
Q_targets_next = self.qnetwork_local(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
Q_targets = rewards + gamma * Q_targets_next * (1 - dones) # set y_i = r if done
# Get expected Q values from local model - used in gradient update as diff from target
Q_expected = self.qnetwork_local(states).gather(1, actions)
# Compute Loss
loss = F.mse_loss(Q_expected, Q_targets)
# Minimise loss by backprop
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
| 41.652439
| 127
| 0.624799
|
import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5)
BATCH_SIZE = 64
GAMMA = 0.99
TAU = 1e-3
LR = 5e-4
UPDATE_EVERY = 4
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
def __init__(self, state_size, action_size, seed):
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
self.memory.add(state, action, reward, next_state, done)
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
states, actions, rewards, next_states, dones = experiences
next = self.qnetwork_local(next_states).detach().max(1)[0].unsqueeze(1)
Q_targets = rewards + gamma * Q_targets_next * (1 - dones)
Q_expected = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(Q_expected, Q_targets)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
def __init__(self, action_size, buffer_size, batch_size, seed):
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
return len(self.memory)
| true
| true
|
79077d4b3595c987921374a8110da69527eb0df1
| 1,080
|
py
|
Python
|
palindrome_check.py
|
igelfiend/Python.Structures.Deque
|
4d296615ab1a4c5d7fd4af03164228cb877a0d00
|
[
"MIT"
] | null | null | null |
palindrome_check.py
|
igelfiend/Python.Structures.Deque
|
4d296615ab1a4c5d7fd4af03164228cb877a0d00
|
[
"MIT"
] | null | null | null |
palindrome_check.py
|
igelfiend/Python.Structures.Deque
|
4d296615ab1a4c5d7fd4af03164228cb877a0d00
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf8
from copy import deepcopy
class Deque:
def __init__(self):
self.data = []
def addFront(self, item):
self.data.insert(0, item)
def addTail(self, item):
self.data.append(item)
def removeFront(self):
if self.size() == 0:
return None
else:
value = deepcopy(self.data[0])
del self.data[0]
return value
def removeTail(self):
if self.size() == 0:
return None
else:
value = deepcopy(self.data[-1])
del self.data[-1]
return value
def size(self):
return len(self.data)
def check_palindrome(check_value):
deque = Deque()
# Reading data into deque
for c in check_value:
deque.addTail(c)
# Comparing each symbol on both sides, if not equal - not palindrome
while deque.size() > 1:
if deque.removeTail() != deque.removeFront():
return False
# If all check was succeeded, string is a palindrome
return True
| 21.6
| 72
| 0.564815
|
from copy import deepcopy
class Deque:
def __init__(self):
self.data = []
def addFront(self, item):
self.data.insert(0, item)
def addTail(self, item):
self.data.append(item)
def removeFront(self):
if self.size() == 0:
return None
else:
value = deepcopy(self.data[0])
del self.data[0]
return value
def removeTail(self):
if self.size() == 0:
return None
else:
value = deepcopy(self.data[-1])
del self.data[-1]
return value
def size(self):
return len(self.data)
def check_palindrome(check_value):
deque = Deque()
for c in check_value:
deque.addTail(c)
while deque.size() > 1:
if deque.removeTail() != deque.removeFront():
return False
return True
| true
| true
|
79077e267dd746d28e22773af26b462a9124a50a
| 1,459
|
py
|
Python
|
docs/tokenizer.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 8,633
|
2019-03-23T17:51:03.000Z
|
2022-03-31T22:17:42.000Z
|
docs/tokenizer.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 3,903
|
2019-03-23T19:11:21.000Z
|
2022-03-31T23:21:23.000Z
|
docs/tokenizer.py
|
ngriffiths13/prefect
|
7f5613abcb182494b7dc12159277c3bc5f3c9898
|
[
"Apache-2.0"
] | 937
|
2019-03-23T18:49:44.000Z
|
2022-03-31T21:45:13.000Z
|
from pygments.lexers import Python3Lexer
from pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, String
def is_comment(token):
return token in Comment
def is_decorator(token):
return token in Name.Decorator
def is_function(token):
return token in Name.Function
def is_builtin(token):
return token in Name.Builtin
def is_classname(token):
return token in Name.Class
def is_keyword(token):
return token in Keyword
def is_number(token):
return token in Number
def is_operator(token):
return token in Operator
def is_punctuation(token):
return token in Punctuation
def is_string(token):
return token in String
tokenizer_map = {
"keyword": is_keyword,
"builtin": is_builtin,
"class-name": is_classname,
"punctuation": is_punctuation,
"decorator": is_decorator,
"function": is_function,
"operator": is_operator,
"comment": is_comment,
"string": is_string,
"number": is_number,
}
def format_code(code):
pp = Python3Lexer()
tokens = pp.get_tokens(code)
formatted = ""
for token, string in tokens:
updated = False
for span_class, checker in tokenizer_map.items():
if checker(token):
formatted += f'<span class="token {span_class}">{string}</span>'
updated = True
break
if not updated:
formatted += string
return formatted
| 19.986301
| 88
| 0.663468
|
from pygments.lexers import Python3Lexer
from pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, String
def is_comment(token):
return token in Comment
def is_decorator(token):
return token in Name.Decorator
def is_function(token):
return token in Name.Function
def is_builtin(token):
return token in Name.Builtin
def is_classname(token):
return token in Name.Class
def is_keyword(token):
return token in Keyword
def is_number(token):
return token in Number
def is_operator(token):
return token in Operator
def is_punctuation(token):
return token in Punctuation
def is_string(token):
return token in String
tokenizer_map = {
"keyword": is_keyword,
"builtin": is_builtin,
"class-name": is_classname,
"punctuation": is_punctuation,
"decorator": is_decorator,
"function": is_function,
"operator": is_operator,
"comment": is_comment,
"string": is_string,
"number": is_number,
}
def format_code(code):
pp = Python3Lexer()
tokens = pp.get_tokens(code)
formatted = ""
for token, string in tokens:
updated = False
for span_class, checker in tokenizer_map.items():
if checker(token):
formatted += f'<span class="token {span_class}">{string}</span>'
updated = True
break
if not updated:
formatted += string
return formatted
| true
| true
|
79077e9bac0a3bae6b1a07981b053ed053545a65
| 7,146
|
py
|
Python
|
test/test_spatial_interpolation.py
|
rgaensler/gcode
|
c6a6b617a04490dedefb2bae7b596a2e12ab4ab1
|
[
"MIT"
] | null | null | null |
test/test_spatial_interpolation.py
|
rgaensler/gcode
|
c6a6b617a04490dedefb2bae7b596a2e12ab4ab1
|
[
"MIT"
] | 314
|
2020-02-26T12:37:17.000Z
|
2021-08-02T00:32:32.000Z
|
test/test_spatial_interpolation.py
|
rgaensler/gcode
|
c6a6b617a04490dedefb2bae7b596a2e12ab4ab1
|
[
"MIT"
] | 2
|
2020-11-12T16:07:48.000Z
|
2020-11-16T09:14:48.000Z
|
from math import pi, sqrt
from typing import List
import numpy as np
import pytest
from src.kinematics.forward_kinematics import get_tform
from src.prechecks.spatial_interpolation import linear_interpolation, circular_interpolation
@pytest.mark.parametrize("start,end,ds,expected_points",
[
(
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [300, 0, 0]],
50,
7
),
(
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [50, 0, 0]],
50,
2
)
]
)
def test_linear_interpolation(start, end, ds, expected_points):
# Create the start and end point matrices
start = get_tform(*start)
end = get_tform(*end)
# Calculate the interpolated tforms
interpolated_tforms = list(linear_interpolation(start, end, ds=ds))
helper_spatial_interpolation_test(interpolated_tforms, start, end, expected_points)
# Check that the points are equidistant
if expected_points > 2:
for i in range(expected_points - 1):
ds_actual = np.linalg.norm(interpolated_tforms[i + 1][0:3, 3] - interpolated_tforms[i][0:3, 3])
assert pytest.approx(ds, rel=0.1) == ds_actual
@pytest.mark.parametrize("start,end,nvec,cw,ds,expected_points",
[
# XY plane half circle (start, intermediate, end)
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
True,
pi / 2,
3
),
# XY plane half circle (start, end)
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
True,
pi,
2
),
# XY plane half circle (start, end) rounded
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
True,
pi / 2 * 1.1,
2
),
# XY plane half circle (start, end) rounded
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
[0, 0, 1],
False,
pi / 5,
6
),
# XY plane 3/4 circle, five points
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -1, 0]],
[0, 0, 1],
True,
6 / 16 * pi,
5
),
# XY plane full circle, five points
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
[0, 0, 1],
False,
2 / 3 * pi,
4
),
# YZ plane 3/4 circle, five points
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, -1]],
[1, 0, 0],
True,
6 / 16 * pi,
5
),
# XY plane half circle (start, end) rounded
(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -0.5 * sqrt(2), 0.5 * sqrt(2)]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0.5 * sqrt(2), -0.5 * sqrt(2)]],
[0, 1, 1],
False,
pi / 5,
6
)
]
)
def test_circular_interpolation(start, end, nvec, cw, ds, expected_points):
# Create the start and end point matrices
start = get_tform(*start)
end = get_tform(*end)
# Calculate the interpolated tforms
interpolated_tforms = list(circular_interpolation(start, end, [0, 0, 0], nvec, cw, ds=ds))
print(interpolated_tforms)
helper_spatial_interpolation_test(interpolated_tforms, start, end, expected_points)
# Check that the points all have distance of the radius to the center point
r = np.linalg.norm(start[0:3, 3])
for tform in interpolated_tforms:
assert pytest.approx(r, rel=0.01) == np.linalg.norm(tform[0:3, 3])
# Check that the points are equidistant
if expected_points > 3:
ds_straight_line_ref = np.linalg.norm(interpolated_tforms[1][0:3, 3] - interpolated_tforms[0][0:3, 3])
for i in range(1, expected_points - 1):
ds_actual = np.linalg.norm(interpolated_tforms[i + 1][0:3, 3] - interpolated_tforms[i][0:3, 3])
assert pytest.approx(ds_straight_line_ref, rel=0.1) == ds_actual
def helper_spatial_interpolation_test(interpolated_tforms: List[np.ndarray], start, end, expected_points):
    """Shared checks: correct point count and inclusion of the start/end poses."""
    assert len(interpolated_tforms) == expected_points
    np.testing.assert_allclose(interpolated_tforms[0], start)
    np.testing.assert_allclose(interpolated_tforms[-1], end)
| 47.64
| 110
| 0.34047
|
from math import pi, sqrt
from typing import List
import numpy as np
import pytest
from src.kinematics.forward_kinematics import get_tform
from src.prechecks.spatial_interpolation import linear_interpolation, circular_interpolation
@pytest.mark.parametrize("start,end,ds,expected_points",
                         [
                             # Length-300 segment along X at ds=50 -> 7 points
                             (
                                 [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
                                 [[0, 0, 0], [0, 0, 0], [0, 0, 0], [300, 0, 0]],
                                 50,
                                 7
                             ),
                             # Length-50 segment at ds=50 -> only the two end points
                             (
                                 [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
                                 [[0, 0, 0], [0, 0, 0], [0, 0, 0], [50, 0, 0]],
                                 50,
                                 2
                             )
                         ]
                         )
def test_linear_interpolation(start, end, ds, expected_points):
    """Linear interpolation includes both end poses and spaces points ~ds apart."""
    # Build 4x4 transformation matrices from the parametrized columns
    start = get_tform(*start)
    end = get_tform(*end)
    interpolated_tforms = list(linear_interpolation(start, end, ds=ds))
    helper_spatial_interpolation_test(interpolated_tforms, start, end, expected_points)
    # Successive points should be ~ds apart (within 10%)
    if expected_points > 2:
        for i in range(expected_points - 1):
            ds_actual = np.linalg.norm(interpolated_tforms[i + 1][0:3, 3] - interpolated_tforms[i][0:3, 3])
            assert pytest.approx(ds, rel=0.1) == ds_actual
@pytest.mark.parametrize("start,end,nvec,cw,ds,expected_points",
                         [
                             # XY plane half circle, ds = quarter arc -> 3 points
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
                                 [0, 0, 1],
                                 True,
                                 pi / 2,
                                 3
                             ),
                             # XY plane half circle, ds = full arc -> start and end only
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
                                 [0, 0, 1],
                                 True,
                                 pi,
                                 2
                             ),
                             # XY plane half circle, ds slightly over half the arc -> rounds to 2
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
                                 [0, 0, 1],
                                 True,
                                 pi / 2 * 1.1,
                                 2
                             ),
                             # XY plane half circle (start, end) rounded
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]],
                                 [0, 0, 1],
                                 False,
                                 pi / 5,
                                 6
                             ),
                             # XY plane 3/4 circle, five points
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -1, 0]],
                                 [0, 0, 1],
                                 True,
                                 6 / 16 * pi,
                                 5
                             ),
                             # XY plane full circle (identical start/end poses)
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]],
                                 [0, 0, 1],
                                 False,
                                 2 / 3 * pi,
                                 4
                             ),
                             # YZ plane 3/4 circle, five points
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -1, 0]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, -1]],
                                 [1, 0, 0],
                                 True,
                                 6 / 16 * pi,
                                 5
                             ),
                             # Tilted plane (nvec=[0,1,1]) half circle, rounded point count
                             (
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, -0.5 * sqrt(2), 0.5 * sqrt(2)]],
                                 [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0.5 * sqrt(2), -0.5 * sqrt(2)]],
                                 [0, 1, 1],
                                 False,
                                 pi / 5,
                                 6
                             )
                         ]
                         )
def test_circular_interpolation(start, end, nvec, cw, ds, expected_points):
    """Arc interpolation hits both end poses, stays on the circle, and spaces points evenly."""
    # Build 4x4 transformation matrices from the parametrized columns
    start = get_tform(*start)
    end = get_tform(*end)
    # Interpolate along the arc about the origin, then run the shared checks
    interpolated_tforms = list(circular_interpolation(start, end, [0, 0, 0], nvec, cw, ds=ds))
    print(interpolated_tforms)
    helper_spatial_interpolation_test(interpolated_tforms, start, end, expected_points)
    # Every interpolated point must sit at radius distance from the center
    r = np.linalg.norm(start[0:3, 3])
    for tform in interpolated_tforms:
        assert pytest.approx(r, rel=0.01) == np.linalg.norm(tform[0:3, 3])
    # Interior points must be (approximately) equidistant along the chord
    if expected_points > 3:
        ds_straight_line_ref = np.linalg.norm(interpolated_tforms[1][0:3, 3] - interpolated_tforms[0][0:3, 3])
        for i in range(1, expected_points - 1):
            ds_actual = np.linalg.norm(interpolated_tforms[i + 1][0:3, 3] - interpolated_tforms[i][0:3, 3])
            assert pytest.approx(ds_straight_line_ref, rel=0.1) == ds_actual
def helper_spatial_interpolation_test(interpolated_tforms: List[np.ndarray], start, end, expected_points):
    """Shared checks: correct point count and inclusion of the start/end poses."""
    assert len(interpolated_tforms) == expected_points
    np.testing.assert_allclose(interpolated_tforms[0], start)
    np.testing.assert_allclose(interpolated_tforms[-1], end)
| true
| true
|
79077f21d6d7384dc45eda0bee1e5a779573fae2
| 6,865
|
py
|
Python
|
neuroswarms/matrix.py
|
jdmonaco/neuroswarms
|
a2bfaa4e9b84baecdb41e01a32a028665e8886d7
|
[
"MIT"
] | 1
|
2020-11-19T11:37:26.000Z
|
2020-11-19T11:37:26.000Z
|
neuroswarms/matrix.py
|
jdmonaco/neuroswarms
|
a2bfaa4e9b84baecdb41e01a32a028665e8886d7
|
[
"MIT"
] | null | null | null |
neuroswarms/matrix.py
|
jdmonaco/neuroswarms
|
a2bfaa4e9b84baecdb41e01a32a028665e8886d7
|
[
"MIT"
] | 1
|
2020-11-19T11:38:15.000Z
|
2020-11-19T11:38:15.000Z
|
"""
Matrix operations for neuroswarms models.
Author: Joseph Monaco (jmonaco@jhu.edu)
Affiliation: Johns Hopkins University
Created: 2019-05-12
Updated: 2020-11-16
Related paper:
Monaco, J.D., Hwang, G.M., Schultz, K.M. et al. Cognitive swarming in complex
environments with attractor dynamics and oscillatory computing. Biol Cybern
114, 269–284 (2020). https://doi.org/10.1007/s00422-020-00823-z
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php
"""
__all__ = ('tile_index', 'pairwise_tile_index', 'pairwise_distances',
'distances', 'pairwise_phasediffs', 'pairwise_unit_diffs',
'somatic_motion_update', 'reward_motion_update')
from numpy import (empty, zeros, newaxis as AX, swapaxes, hypot, sin, inf,
broadcast_arrays, broadcast_to)
from .utils.types import *
DEBUGGING = False
def _check_ndim(Mstr, M, ndim):
assert M.ndim == ndim, f'{Mstr}.ndim != {ndim}'
def _check_shape(Mstr, M, shape, axis=None):
if axis is None:
assert M.shape == shape, f'{Mstr}.shape != {shape}'
else:
assert M.shape[axis] == shape, f'{Mstr}.shape[{axis}] != {shape}'
def tile_index(A, B):
    """
    Entrywise comparison index of tile index (column) vectors.
    """
    pair = broadcast_arrays(A, B)
    if DEBUGGING:
        want = (max(A.shape[0], B.shape[0]), 1)
        _check_shape('AA', pair[0], want)
        _check_shape('BB', pair[1], want)
    return (pair[0], pair[1])
def pairwise_tile_index(A, B):
    """
    Pairwise comparison index of tile index (column) vectors.
    """
    # Broadcasting A (N_A,1) against B.T (1,N_B) yields (N_A,N_B) index grids
    pair = broadcast_arrays(A, B.T)
    if DEBUGGING:
        want = (len(A), len(B))
        _check_shape('AA', pair[0], want)
        _check_shape('BB', pair[1], want)
    return (pair[0], pair[1])
def pairwise_phasediffs(A, B):
    """
    Compute synchronizing phase differences between phase pairs.

    :A: (N_A,1) column vector of phases
    :B: (N_B,1) column vector of phases
    :returns: (N_A,N_B) matrix of pairwise differences B.T - A
    """
    # The original body computed N_A/N_B/DD_shape but never used them;
    # those dead locals have been removed.
    if DEBUGGING:
        _check_ndim('A', A, 2)
        _check_ndim('B', B, 2)
        _check_shape('A', A, 1, axis=1)
        _check_shape('B', B, 1, axis=1)
    return B.T - A
def distances(A, B):
    """
    Compute distances between points in entrywise order.

    :A: (N,2) matrix of points (broadcastable against B)
    :B: (N,2) matrix of points
    :returns: (N,1) column vector of Euclidean distances
    """
    AA, BB = broadcast_arrays(A, B)
    # NOTE: the original assigned `shape = AA.shape` but never used it;
    # the dead local has been removed.
    if DEBUGGING:
        _check_ndim('AA', AA, 2)
        _check_ndim('BB', BB, 2)
        _check_shape('AA', AA, 2, axis=1)
        _check_shape('BB', BB, 2, axis=1)
    return hypot(AA[:,0] - BB[:,0], AA[:,1] - BB[:,1])[:,AX]
def pairwise_unit_diffs(A, B):
    """
    Compute attracting unit-vector differences between pairs of points.
    """
    deltas = pairwise_position_deltas(A, B)
    norms = hypot(deltas[...,0], deltas[...,1])
    # Normalize in place, skipping zero-length deltas to avoid division by zero
    nonzero = norms.nonzero()
    deltas[nonzero] /= norms[nonzero][...,AX]
    return deltas
def pairwise_distances(A, B):
    """
    Compute distances between pairs of points.
    """
    deltas = pairwise_position_deltas(A, B)
    return hypot(deltas[...,0], deltas[...,1])
def pairwise_position_deltas(A, B):
    """
    Compute attracting component deltas between pairs of points.
    """
    if DEBUGGING:
        _check_ndim('A', A, 2)
        _check_ndim('B', B, 2)
        _check_shape('A', A, 2, axis=1)
        _check_shape('B', B, 2, axis=1)
    # Tile the first position matrix against every point in B
    tiled_A = empty((len(A), len(B), 2), DISTANCE_DTYPE)
    tiled_A[:] = A[:,AX,:]
    return B[AX,...] - tiled_A
def somatic_motion_update(D_up, D_cur, X, V):
    """
    Compute updated positions by averaging pairwise difference vectors for
    mutually visible pairs with equal bidirectional adjustments within each
    pair. The updated distance matrix does not need to be symmetric; it
    represents 'desired' updates based on recurrent learning.

    :D_up: R(N,N)-matrix of updated distances (mutated in place: inf entries zeroed)
    :D_cur: R(N,N)-matrix of current distances (mutated in place: entries at inf positions of D_up zeroed)
    :X: R(N,2)-matrix of current positions
    :V: {0,1}(N,2)-matrix of current agent visibility
    :returns: R(N,2)-matrix of updated positions
    """
    N = len(X)
    D_shape = (N, N)
    if DEBUGGING:
        _check_ndim('X', X, 2)
        _check_shape('X', X, 2, axis=1)
        _check_shape('D_up', D_up, D_shape)
        _check_shape('D_cur', D_cur, D_shape)
        _check_shape('V', V, D_shape)

    # Broadcast field position matrix and its transpose
    XX = empty((N,N,2))
    XX[:] = X[:,AX,:]
    XT = swapaxes(XX, 0, 1)

    # Find visible & valid values (i.e., corresponding to non-zero weights)
    #
    # NOTE: The normalizing factor is divided by 2 because the somatic update
    # represents one half of the change in distance between a pair of units.
    D_inf = D_up == inf
    norm = V * ~D_inf
    # NOTE: `N` is rebound here from the agent count to the per-row count of
    # valid (visible, finite) pairs used for normalization.
    N = norm.sum(axis=1)
    valid = N.nonzero()[0]
    norm[valid] /= 2*N[valid,AX]

    # Zero out the inf elements of the updated distance matrix and corresponding
    # elements in the current distance matrix
    D_up[D_inf] = D_cur[D_inf] = 0.0

    # Construct the agent-agent avoidant unit vectors
    DX = XX - XT
    DX_norm = hypot(DX[...,0], DX[...,1])
    valid = DX_norm.nonzero()
    DX[valid] /= DX_norm[valid][:,AX]

    return (norm[...,AX]*(D_up - D_cur)[...,AX]*DX).sum(axis=1)
def reward_motion_update(D_up, D_cur, X, R, V):
    """
    Compute updated positions by averaging reward-based unit vectors for
    adjustments of the point only. The updated distance matrix represents
    'desired' updates based on reward learning.

    :D_up: R(N,N_R)-matrix of updated distances between points and rewards
           (mutated in place: inf entries zeroed)
    :D_cur: R(N,N_R)-matrix of current distances between points and rewards
            (mutated in place: entries at inf positions of D_up zeroed)
    :X: R(N,2)-matrix of current point positions
    :R: R(N_R,2)-matrix of current reward positions
    :V: {0,1}(N_R,2)-matrix of current agent-reward visibility
    :returns: R(N,2)-matrix of updated positions
    """
    N = len(X)
    N_R = len(R)
    D_shape = (N, N_R)
    if DEBUGGING:
        _check_ndim('X', X, 2)
        _check_ndim('R', R, 2)
        _check_shape('X', X, 2, axis=1)
        _check_shape('R', R, 2, axis=1)
        _check_shape('D_up', D_up, D_shape)
        _check_shape('D_cur', D_cur, D_shape)
        _check_shape('V', V, D_shape)

    # Broadcast field position matrix
    XX = empty((N,N_R,2))
    XX[:] = X[:,AX,:]

    # Find valid values (i.e., corresponding to non-zero weights)
    D_inf = D_up == inf
    norm = V * ~D_inf
    # NOTE: `N` is rebound here from the agent count to the per-row count of
    # valid (visible, finite) rewards used for normalization.
    N = norm.sum(axis=1)
    valid = N.nonzero()[0]
    norm[valid] /= N[valid,AX]

    # Zero out the inf elements of the updated distance matrix and corresponding
    # elements in the current distance matrix
    D_up[D_inf] = D_cur[D_inf] = 0.0

    # Construct the agent-reward avoidant unit vectors
    DR = XX - R[AX]
    DR_norm = hypot(DR[...,0], DR[...,1])
    valid = DR_norm.nonzero()
    DR[valid] /= DR_norm[valid][:,AX]

    return (norm[...,AX]*(D_up - D_cur)[...,AX]*DR).sum(axis=1)
| 30.376106
| 81
| 0.621267
|
__all__ = ('tile_index', 'pairwise_tile_index', 'pairwise_distances',
'distances', 'pairwise_phasediffs', 'pairwise_unit_diffs',
'somatic_motion_update', 'reward_motion_update')
from numpy import (empty, zeros, newaxis as AX, swapaxes, hypot, sin, inf,
broadcast_arrays, broadcast_to)
from .utils.types import *
DEBUGGING = False
def _check_ndim(Mstr, M, ndim):
    # Debug helper: assert that array `M` has exactly `ndim` dimensions.
    assert M.ndim == ndim, f'{Mstr}.ndim != {ndim}'
def _check_shape(Mstr, M, shape, axis=None):
    # Debug helper: assert a full shape, or one axis length when `axis` is given.
    if axis is None:
        assert M.shape == shape, f'{Mstr}.shape != {shape}'
    else:
        assert M.shape[axis] == shape, f'{Mstr}.shape[{axis}] != {shape}'
def tile_index(A, B):
    """Entrywise comparison index of tile index (column) vectors."""
    AA, BB = broadcast_arrays(A, B)
    if DEBUGGING:
        shape = (max(A.shape[0], B.shape[0]), 1)
        _check_shape('AA', AA, shape)
        _check_shape('BB', BB, shape)
    return (AA, BB)
def pairwise_tile_index(A, B):
    """Pairwise comparison index of tile index (column) vectors."""
    # Broadcasting A against B.T yields (len(A), len(B)) index grids
    AA, BB = broadcast_arrays(A, B.T)
    if DEBUGGING:
        shape = (len(A), len(B))
        _check_shape('AA', AA, shape)
        _check_shape('BB', BB, shape)
    return (AA, BB)
def pairwise_phasediffs(A, B):
    """Compute synchronizing phase differences between phase pairs."""
    # NOTE(review): N_A/N_B/DD_shape are computed but never used.
    N_A = len(A)
    N_B = len(B)
    DD_shape = (N_A, N_B)
    if DEBUGGING:
        _check_ndim('A', A, 2)
        _check_ndim('B', B, 2)
        _check_shape('A', A, 1, axis=1)
        _check_shape('B', B, 1, axis=1)
    return B.T - A
def distances(A, B):
    """Compute distances between points in entrywise order."""
    AA, BB = broadcast_arrays(A, B)
    # NOTE(review): `shape` is assigned but never used.
    shape = AA.shape
    if DEBUGGING:
        _check_ndim('AA', AA, 2)
        _check_ndim('BB', BB, 2)
        _check_shape('AA', AA, 2, axis=1)
        _check_shape('BB', BB, 2, axis=1)
    return hypot(AA[:,0] - BB[:,0], AA[:,1] - BB[:,1])[:,AX]
def pairwise_unit_diffs(A, B):
    """Compute attracting unit-vector differences between pairs of points."""
    DD = pairwise_position_deltas(A, B)
    D_norm = hypot(DD[...,0], DD[...,1])
    # Normalize in place, skipping zero-length deltas to avoid division by zero
    nz = D_norm.nonzero()
    DD[nz] /= D_norm[nz][...,AX]
    return DD
def pairwise_distances(A, B):
    """Compute distances between pairs of points."""
    DD = pairwise_position_deltas(A, B)
    return hypot(DD[...,0], DD[...,1])
def pairwise_position_deltas(A, B):
    """Compute attracting component deltas between pairs of points."""
    N_A = len(A)
    N_B = len(B)
    if DEBUGGING:
        _check_ndim('A', A, 2)
        _check_ndim('B', B, 2)
        _check_shape('A', A, 2, axis=1)
        _check_shape('B', B, 2, axis=1)
    # Broadcast the first position matrix against every point in B
    AA = empty((N_A,N_B,2), DISTANCE_DTYPE)
    AA[:] = A[:,AX,:]
    return B[AX,...] - AA
def somatic_motion_update(D_up, D_cur, X, V):
    """Compute updated positions by averaging pairwise difference vectors for
    mutually visible pairs, with equal bidirectional adjustments per pair.

    :D_up: R(N,N)-matrix of updated distances (mutated: inf entries zeroed)
    :D_cur: R(N,N)-matrix of current distances (mutated at inf positions of D_up)
    :X: R(N,2)-matrix of current positions
    :V: visibility mask, broadcastable against D_up
    :returns: R(N,2)-matrix of position updates
    """
    N = len(X)
    D_shape = (N, N)
    if DEBUGGING:
        _check_ndim('X', X, 2)
        _check_shape('X', X, 2, axis=1)
        _check_shape('D_up', D_up, D_shape)
        _check_shape('D_cur', D_cur, D_shape)
        _check_shape('V', V, D_shape)
    # Broadcast position matrix and its transpose
    XX = empty((N,N,2))
    XX[:] = X[:,AX,:]
    XT = swapaxes(XX, 0, 1)
    # Visible & finite entries carry weight; the factor of 2 splits the
    # distance change equally between the two agents of each pair.
    D_inf = D_up == inf
    norm = V * ~D_inf
    # NOTE: `N` is rebound from the agent count to the per-row valid count.
    N = norm.sum(axis=1)
    valid = N.nonzero()[0]
    norm[valid] /= 2*N[valid,AX]
    # Zero out inf entries so they contribute nothing to the update
    D_up[D_inf] = D_cur[D_inf] = 0.0
    # Agent-agent avoidant unit vectors
    DX = XX - XT
    DX_norm = hypot(DX[...,0], DX[...,1])
    valid = DX_norm.nonzero()
    DX[valid] /= DX_norm[valid][:,AX]
    return (norm[...,AX]*(D_up - D_cur)[...,AX]*DX).sum(axis=1)
def reward_motion_update(D_up, D_cur, X, R, V):
    """Compute updated positions by averaging reward-based unit vectors,
    adjusting the points only (rewards are fixed).

    :D_up: R(N,N_R)-matrix of updated point-reward distances (mutated: inf zeroed)
    :D_cur: R(N,N_R)-matrix of current point-reward distances (mutated at inf positions)
    :X: R(N,2)-matrix of current point positions
    :R: R(N_R,2)-matrix of current reward positions
    :V: visibility mask, broadcastable against D_up
    :returns: R(N,2)-matrix of position updates
    """
    N = len(X)
    N_R = len(R)
    D_shape = (N, N_R)
    if DEBUGGING:
        _check_ndim('X', X, 2)
        _check_ndim('R', R, 2)
        _check_shape('X', X, 2, axis=1)
        _check_shape('R', R, 2, axis=1)
        _check_shape('D_up', D_up, D_shape)
        _check_shape('D_cur', D_cur, D_shape)
        _check_shape('V', V, D_shape)
    # Broadcast point position matrix against every reward
    XX = empty((N,N_R,2))
    XX[:] = X[:,AX,:]
    # Visible & finite entries carry weight
    D_inf = D_up == inf
    norm = V * ~D_inf
    # NOTE: `N` is rebound from the agent count to the per-row valid count.
    N = norm.sum(axis=1)
    valid = N.nonzero()[0]
    norm[valid] /= N[valid,AX]
    # Zero out inf entries so they contribute nothing to the update
    D_up[D_inf] = D_cur[D_inf] = 0.0
    # Agent-reward avoidant unit vectors
    DR = XX - R[AX]
    DR_norm = hypot(DR[...,0], DR[...,1])
    valid = DR_norm.nonzero()
    DR[valid] /= DR_norm[valid][:,AX]
    return (norm[...,AX]*(D_up - D_cur)[...,AX]*DR).sum(axis=1)
| true
| true
|
790780152dc5b7c65e4a1d012f8ebe7d99bb2a51
| 7,288
|
py
|
Python
|
azure-ml-pipelines/pytorch/training-folder/pytorch_train.py
|
hudua/azureml
|
51f67380aa773184ef1710a3983ce017c29e68e8
|
[
"MIT"
] | null | null | null |
azure-ml-pipelines/pytorch/training-folder/pytorch_train.py
|
hudua/azureml
|
51f67380aa773184ef1710a3983ce017c29e68e8
|
[
"MIT"
] | null | null | null |
azure-ml-pipelines/pytorch/training-folder/pytorch_train.py
|
hudua/azureml
|
51f67380aa773184ef1710a3983ce017c29e68e8
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import numpy as np
import time
import os
import copy
import argparse
from azureml.core.run import Run
from azureml.core import Dataset, Workspace
from azureml.core.model import Model
# get the Azure ML run object
run = Run.get_context()
ws = run.experiment.workspace
def load_data(data_dir):
    """Load the train/val data."""
    # Train: random crop + horizontal flip augmentation; Val: deterministic
    # resize/center-crop. Both normalize with the same channel statistics.
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
    }

    image_datasets = {}
    dataloaders = {}
    dataset_sizes = {}
    for split in ('train', 'val'):
        image_datasets[split] = datasets.ImageFolder(os.path.join(data_dir, split),
                                                     data_transforms[split])
        dataloaders[split] = torch.utils.data.DataLoader(image_datasets[split], batch_size=4,
                                                         shuffle=True, num_workers=4)
        dataset_sizes[split] = len(image_datasets[split])
    class_names = image_datasets['train'].classes

    return dataloaders, dataset_sizes, class_names
def train_model(model, criterion, optimizer, scheduler, num_epochs, data_dir):
    """Train the model.

    Runs num_epochs of train/val phases over the data under data_dir, logs the
    best validation accuracy to the AML run, and returns the model loaded with
    the best-performing weights.
    """
    # load training/validation data
    dataloaders, dataset_sizes, class_names = load_data(data_dir)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                # NOTE: stepping the scheduler before the optimizer is the
                # pre-1.1 PyTorch ordering; kept as-is to preserve the
                # original LR schedule alignment.
                scheduler.step()
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

                # log the best val accuracy to AML run
                # (np.float was removed in NumPy 1.24; use the builtin float)
                run.log('best_val_acc', float(best_acc))

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def fine_tune_model(num_epochs, data_dir, learning_rate, momentum):
    """Load a pretrained model and reset the final fully connected layer."""
    # log the hyperparameter metrics to the AML run
    # (np.float was removed in NumPy 1.24; use the builtin float)
    run.log('lr', float(learning_rate))
    run.log('momentum', float(momentum))

    model_ft = models.resnet18(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, 2)  # only 2 classes to predict

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model_ft = model_ft.to(device)

    criterion = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model_ft.parameters(),
                             lr=learning_rate, momentum=momentum)

    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(
        optimizer_ft, step_size=7, gamma=0.1)

    model = train_model(model_ft, criterion, optimizer_ft,
                        exp_lr_scheduler, num_epochs, data_dir)

    return model
def download_data():
    """Download the registered 'pytorchdataset' into ./fowl_data and return that path."""
    target = 'fowl_data'
    Dataset.get_by_name(ws, name='pytorchdataset').download(target_path=target, overwrite=True)
    return target
# def download_data():
# """Download and extract the training data."""
# import urllib
# from zipfile import ZipFile
# # download data
# data_file = './fowl_data.zip'
# download_url = 'https://azureopendatastorage.blob.core.windows.net/testpublic/temp/fowl_data.zip'
# urllib.request.urlretrieve(download_url, filename=data_file)
# # extract files
# with ZipFile(data_file, 'r') as zip:
# print('extracting files...')
# zip.extractall()
# print('finished extracting')
# data_dir = zip.namelist()[0]
# # delete zip file
# os.remove(data_file)
# return data_dir
def main():
    """Parse CLI arguments, train the model, and register it with the AML workspace."""
    # torch is already imported at module level; the original's redundant
    # function-local `import torch` has been removed.
    print("Torch version:", torch.__version__)
    print(torch.cuda.is_available())

    # get command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_epochs', type=int, default=25,
                        help='number of epochs to train')
    parser.add_argument('--output_dir', type=str, help='output directory')
    parser.add_argument('--learning_rate', type=float,
                        default=0.001, help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    args = parser.parse_args()

    data_dir = download_data()
    print("data directory is: " + data_dir)
    model = fine_tune_model(args.num_epochs, data_dir,
                            args.learning_rate, args.momentum)
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(model, os.path.join(args.output_dir, 'model.pt'))
    # Register the saved model with the AML workspace
    model = Model.register(model_name='my_model', model_path=os.path.join(args.output_dir, 'model.pt'), workspace = ws)


if __name__ == "__main__":
    main()
| 34.215962
| 119
| 0.612514
|
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import numpy as np
import time
import os
import copy
import argparse
from azureml.core.run import Run
from azureml.core import Dataset, Workspace
from azureml.core.model import Model
run = Run.get_context()
ws = run.experiment.workspace
def load_data(data_dir):
    """Load the train/val data as (dataloaders, dataset_sizes, class_names)."""
    # Train: random crop + flip augmentation; Val: deterministic resize/crop.
    # Both normalize with the same channel statistics.
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    # Expects data_dir/train and data_dir/val in ImageFolder layout
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              data_transforms[x])
                      for x in ['train', 'val']}
    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                                  shuffle=True, num_workers=4)
                   for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    class_names = image_datasets['train'].classes
    return dataloaders, dataset_sizes, class_names
def train_model(model, criterion, optimizer, scheduler, num_epochs, data_dir):
    """Train the model over train/val phases and return it with the best weights.

    Logs the best validation accuracy to the AML run.
    """
    dataloaders, dataset_sizes, class_names = load_data(data_dir)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                # NOTE: pre-1.1 PyTorch scheduler ordering; kept as-is to
                # preserve the original LR schedule alignment.
                scheduler.step()
                model.train()  # training mode
            else:
                model.eval()   # evaluation mode
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                # track gradient history only during training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                # np.float was removed in NumPy 1.24; use the builtin float
                run.log('best_val_acc', float(best_acc))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    model.load_state_dict(best_model_wts)
    return model
def fine_tune_model(num_epochs, data_dir, learning_rate, momentum):
    """Load a pretrained ResNet-18, reset the final FC layer, and train it."""
    # Log hyperparameters to the AML run.
    # (np.float was removed in NumPy 1.24; use the builtin float)
    run.log('lr', float(learning_rate))
    run.log('momentum', float(momentum))
    model_ft = models.resnet18(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, 2)  # only 2 classes to predict
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model_ft = model_ft.to(device)
    criterion = nn.CrossEntropyLoss()
    # All parameters are being optimized
    optimizer_ft = optim.SGD(model_ft.parameters(),
                             lr=learning_rate, momentum=momentum)
    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(
        optimizer_ft, step_size=7, gamma=0.1)
    model = train_model(model_ft, criterion, optimizer_ft,
                        exp_lr_scheduler, num_epochs, data_dir)
    return model
def download_data():
    """Download the registered 'pytorchdataset' into ./fowl_data and return that path."""
    target = 'fowl_data'
    Dataset.get_by_name(ws, name='pytorchdataset').download(target_path=target, overwrite=True)
    return target
def main():
    """Parse CLI arguments, train the model, and register it with the AML workspace."""
    # BUG FIX: this body previously ran at module import time and the trailing
    # `main()` call referenced an undefined name; the `def main():` wrapper
    # (present in the original source) is restored here.
    print("Torch version:", torch.__version__)
    print(torch.cuda.is_available())

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_epochs', type=int, default=25,
                        help='number of epochs to train')
    parser.add_argument('--output_dir', type=str, help='output directory')
    parser.add_argument('--learning_rate', type=float,
                        default=0.001, help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    args = parser.parse_args()

    data_dir = download_data()
    print("data directory is: " + data_dir)
    model = fine_tune_model(args.num_epochs, data_dir,
                            args.learning_rate, args.momentum)
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(model, os.path.join(args.output_dir, 'model.pt'))
    # Register the saved model with the AML workspace
    model = Model.register(model_name='my_model', model_path=os.path.join(args.output_dir, 'model.pt'), workspace = ws)


if __name__ == "__main__":
    main()
| true
| true
|
79078052b865175debedebdc36758824b94e71f2
| 15,667
|
py
|
Python
|
dns_main/src/kinematics.py
|
JevgenijsGalaktionovs/AntBot
|
e89fa811087cce6c4038329c44ffeaf26308c0e9
|
[
"MIT"
] | null | null | null |
dns_main/src/kinematics.py
|
JevgenijsGalaktionovs/AntBot
|
e89fa811087cce6c4038329c44ffeaf26308c0e9
|
[
"MIT"
] | 4
|
2019-09-10T07:14:06.000Z
|
2019-09-10T09:29:51.000Z
|
dns_main/src/kinematics.py
|
eugenegalaxy/DNS
|
e89fa811087cce6c4038329c44ffeaf26308c0e9
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python2
from math import pi, cos, sin, atan2, acos, sqrt, pow, radians, asin
from math_calc import *
from service_router import readPos
class LegConsts(object):
    ''' Class object to store characteristics of each leg '''

    def __init__(self, x_off, y_off, z_off, ang_off, leg_nr):
        # Translation from body origin to the first (coxa) servo, in mm
        self.x_off = x_off
        self.y_off = y_off
        self.z_off = z_off
        # Angular offset from body origin to the first servo
        self.ang_off = ang_off
        # Fixed angular offsets of the femur and tibia links (radians)
        self.f_ang_off = radians(13.33)
        self.t_ang_off = radians(-25.90)
        # Link lengths in mm: coxa, femur, tibia
        self.c_len = 66.50
        self.f_len = 144.40
        self.t_len = 287
        # Human-readable leg identifier, e.g. "Leg 1"
        self.leg_nr = leg_nr
class Kinematics(object):
''' Class object to compute various types of kinematics data for AntBot '''
# Origin to coxa: x_off, y_off, z_off, ang_off, name
leg1 = LegConsts(70.5, 122.225, -14.9, - pi / 3, "Leg 1")
leg2 = LegConsts(-70.5, 122.225, -14.9, -2 * pi / 3, "Leg 2")
leg3 = LegConsts(141.33, 0, -14.9, 0, "Leg 3")
leg4 = LegConsts(-141.33, 0, -14.9, pi, "Leg 4")
leg5 = LegConsts(70.5, -122.225, -14.9, pi / 3, "Leg 5")
leg6 = LegConsts(-70.5, -122.225, -14.9, 2 * pi / 3, "Leg 6")
leg_list = [leg1, leg2, leg3, leg4, leg5, leg6]
################
# Public methods
################
    def doFkine(self, all_positions):
        ''' Function:   computes forward kinematics
            Parameter:  all_positions: list with 18 values of servo positions in steps from ID1 to ID18
            Return:     ee_xyz: list of x,y,z coordinates for all 6 legs
                        servoPos: servo positions in radians
        '''
        # Convert steps to radians, then evaluate each leg's forward kinematics
        servoPos = self.step_to_rad(all_positions)
        ee_xyz = []
        j = 0
        # Three servo angles per leg; six legs -> indices 0,3,...,15
        for i in xrange(0, 16, 3):
            ee_xyz.extend(self.calc_fkine(servoPos[i:i + 3], self.leg_list[j]))
            j += 1
        return ee_xyz, servoPos
def doIkine(self, all_positions, x, y, z, body_orient=None, leg=None, auto=None):
''' Function: computes inverse kinematics
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
x,y,z: desired change in x,y,z coordinates (same for all legs)
body_orient: list of 3 integers meaning alpha,beta,gamma rotation in degrees
leg: list with integers meaning leg numbers to compute inverse for them only
Return: list of 18 integers with servo steps
'''
ee_xyz, servoPos = self.doFkine(all_positions)
thetas = []
j = 0
if isinstance(leg, int):
leg = [leg]
elif isinstance(leg, tuple):
leg = list(leg)
elif isinstance(body_orient, tuple):
body_orient = list(body_orient)
if body_orient:
# Optional parameter. Compute inverse with body orientation
body_orient = [radians(d) for d in body_orient]
alpha_rad, beta_rad, gama_rad = body_orient[0], body_orient[1], body_orient[2]
x = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
* cos(alpha_rad) - sin(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
y = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
* sin(alpha_rad) + cos(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
z = -sin(beta_rad) * x + cos(beta_rad) * sin(gama_rad) * y + cos(beta_rad) * cos(gama_rad) * z
if leg:
# Optional parameter. Compute inverse for a specific leg/s.
for i in range(len(leg)):
j = leg[i] - 1
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[j * 3:j * 3 + 3], self.leg_list[j]))
else:
# Compute inverse for all legs if not leg specified.
for i in xrange(0, 16, 3):
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[i:i + 3], self.leg_list[j]))
j += 1
result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
return result
    def doIkineRotationEuler(self, all_positions, alpha_rad, beta_rad, gama_rad, dist_x, dist_y, dist_z):
        ''' Function: computes inverse kinematics and body rotation (Parallel kinematics)
            Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
                        alpha_rad, beta_rad, gama_rad: desired body rotation about x,y,z axes
                            (radians, per the _rad suffix)
                        dist_x, dist_y, dist_z: desired body translation along x,y,z
            Return: list of 18 integers with servo steps
        '''
        # NOTE: the original docstring here was garbled with pasted code; it has
        # been reconstructed from the signature and doIkine's documentation.
        final_eexyz, ee_xyz = self.calc_rot_matrix(all_positions, alpha_rad, beta_rad, gama_rad)
        thetas = []
        j = 0
        # Subtract the translation from each rotated end-point and solve the
        # leg inverse kinematics per leg (indices 0,3,...,15).
        for i in xrange(0, 16, 3):
            thetas.extend(self.calc_ikine(final_eexyz[i] - dist_x, final_eexyz[i + 1] - dist_y, final_eexyz[i + 2] - dist_z, ee_xyz[i:i + 3], self.leg_list[j]))
            j += 1
        result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
        return result
    def printForward(self, all_positions):
        ''' Function:   Prints x,y,z coordinates of each leg
            Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
        '''
        ee_list, theta_list = self.doFkine(all_positions)
        # Format each coordinate to 4 decimal places for display
        RoundedCoords = ['%.4f' % elem for elem in ee_list]
        print ""
        print "X,Y,Z coordinates of Leg end-points: "
        print "         " + str(["X      ", "  Y    ", "  Z   "])
        print "Leg 1: " + str(RoundedCoords[0:3])
        print "Leg 2: " + str(RoundedCoords[3:6])
        print "Leg 3: " + str(RoundedCoords[6:9])
        print "Leg 4: " + str(RoundedCoords[9:12])
        print "Leg 5: " + str(RoundedCoords[12:15])
        print "Leg 6: " + str(RoundedCoords[15:18])
        print ""
    def printInverse(self, all_positions, x, y, z):
        ''' Function:   Prints servo positions, in radians, needed to reach the position
            Parameters: all_positions: current servo positions in steps (ID1..ID18)
                        x, y, z: desired change in coordinates (applied to all legs)
        '''
        theta_list = self.doIkine(all_positions, x, y, z)
        # Format each angle to 4 decimal places for display
        RoundedThetas = ['%.4f' % elem for elem in theta_list]
        print ""
        print "Theta angles of each servo:"
        print "         " + str(["Coxa  ", "Femur ", "Tibia"])
        print "Leg 1: " + str(RoundedThetas[0:3])
        print "Leg 2: " + str(RoundedThetas[3:6])
        print "Leg 3: " + str(RoundedThetas[6:9])
        print "Leg 4: " + str(RoundedThetas[9:12])
        print "Leg 5: " + str(RoundedThetas[12:15])
        print "Leg 6: " + str(RoundedThetas[15:18])
        print ""
    def printKinematics(self, all_positions, x, y, z):
        ''' Convenience wrapper: print forward then inverse kinematics. '''
        self.printForward(all_positions)
        self.printInverse(all_positions, x, y, z)
#################
# Private methods
#################
    def calc_fkine(self, servoPos, leg):
        ''' Forward kinematics for one leg.
            Parameters: servoPos: [coxa, femur, tibia] angles in radians
                        leg: LegConsts instance for this leg
            Return: [x, y, z] end-point coordinates
        '''
        # Apply each joint's fixed angular offset before evaluating the chain
        theta1 = servoPos[0] - leg.ang_off
        theta2 = servoPos[1] + leg.f_ang_off
        theta3 = servoPos[2] + leg.t_ang_off
        # Planar 2-link (femur+tibia) height, plus the leg's z offset
        ee_z = leg.f_len * sin(theta2) + leg.t_len * sin(theta3 + theta2) + leg.z_off
        # Project the leg's reach through the coxa rotation into x/y
        ee_x = leg.x_off + cos(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
        ee_y = leg.y_off + sin(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
        return [ee_x, ee_y, ee_z]
    def calc_ikine(self, x, y, z, ee_xyz, leg, auto=None):
        ''' Function: inverse kinematics for one leg.
            Parameters: x, y, z: desired displacement of the foot from its current position
                        ee_xyz: current foot position [x, y, z] of this leg
                        leg: LegConsts instance of the leg
                        auto: if not None, return -1 instead of clamping when the
                              target is unreachable or violates joint limits
            Return: [theta1, theta2, theta3] joint angles in radians, or -1 (see auto)
        '''
        init_X = ee_xyz[0]
        init_Y = ee_xyz[1]
        init_Z = ee_xyz[2]
        # Target position relative to the leg's mounting point.
        X = init_X + (x) - leg.x_off
        Y = init_Y + (y) - leg.y_off
        Z = init_Z + (z) - leg.z_off
        theta1 = atan2(Y, X) + leg.ang_off
        # Normalise the hip angle into (-pi, pi].
        if theta1 < -pi:
            theta1 += 2 * pi
        if theta1 > pi:
            theta1 -= 2 * pi
        # Rotate into the leg's own frame, then project onto the hip direction.
        new_x = cos(leg.ang_off) * X - sin(leg.ang_off) * Y
        new_y = sin(leg.ang_off) * X + cos(leg.ang_off) * Y
        final_x = cos(theta1) * new_x + sin(theta1) * new_y - leg.c_len
        # s: straight-line distance from the femur joint to the target.
        s = sqrt(pow(final_x, 2) + pow(Z, 2))
        try:
            # Law of cosines for the knee (tibia) angle.
            t3_term = (-pow(s, 2) + pow(leg.f_len, 2) + pow(leg.t_len, 2)) / (2 * leg.f_len * leg.t_len)
            t3 = pi - acos(t3_term)
        except ValueError:
            # Target out of reach (|t3_term| > 1): clamp the knee near its
            # extreme, or report failure when auto mode is requested.
            print "Cannot compute acos(", t3_term, ") for ", leg.leg_nr
            if auto is None:
                if t3_term < 0:
                    t3 = pi - acos(-0.99)
                else:
                    t3 = pi - acos(0.99)
            else:
                return -1
        theta3 = -t3 - leg.t_ang_off
        theta2 = -(-atan2(Z, final_x) - atan2(leg.t_len * sin(t3), leg.f_len + leg.t_len * cos(t3)) + leg.f_ang_off)
        if auto is not None:
            # Joint-limit check (radians); only enforced in auto mode.
            if (theta2 > 1.8 or theta2 < -1.8) or (theta3 < -2.2 or theta3 > 2.2):
                return -1
        return [theta1, theta2, theta3]
def calc_rot_displacement(self, alpha_rad, beta_rad, gama_rad, ee_xyz):
pre_x = ee_xyz[0]
pre_y = ee_xyz[1]
pre_z = ee_xyz[2]
r_term1 = (cos(gama_rad) * sin(beta_rad) * pre_z + sin(gama_rad) * sin(beta_rad) * pre_y + pre_x * cos(beta_rad))
r_term2 = (cos(gama_rad) * pre_y - sin(gama_rad) * pre_z)
r_x = r_term1 * cos(alpha_rad) - r_term2 * sin(alpha_rad) - pre_x
r_y = r_term1 * sin(alpha_rad) + r_term2 * cos(alpha_rad) - pre_y
r_z = - sin(beta_rad) * pre_x + cos(beta_rad) * sin(gama_rad) * pre_y + cos(beta_rad) * cos(gama_rad) * pre_z - pre_z
return [r_x, r_y, r_z]
    def calc_rot_matrix(self, all_positions, alpha_rad, beta_rad, gama_rad):
        ''' Function: rotation displacement for every foot given a body rotation.
            Parameters: all_positions: 18 servo positions in steps (ID1..ID18)
                        alpha_rad, beta_rad, gama_rad: body rotation angles in radians
            Return: (rot_val_list, ee_xyz): flat per-foot displacements
                    [dx, dy, dz, ...] and the current foot positions
        '''
        ee_xyz, servoPos = self.doFkine(all_positions)
        rot_val_list = []
        # One displacement triple per leg (xrange(0, 16, 3) yields six indices).
        for i in xrange(0, 16, 3):
            rot_val_list.extend(self.calc_rot_displacement(alpha_rad, beta_rad, gama_rad, ee_xyz[i:i + 3]))
        return rot_val_list, ee_xyz
def rad_to_step(self, pos_rads):
return [i / pi * 2048 + 2048 for i in pos_rads]
def step_to_rad(self, pos_steps):
return [(((x / 2047.5) - 1) * pi) for x in pos_steps]
def make_poligonCorners(self, all_positions, leg_list):
if leg_list is int:
leg_list = [leg_list]
xyz_polygon = []
ee_xyz, servoPos = self.doFkine(all_positions)
newEe_xyz = [ee_xyz[0], ee_xyz[1], ee_xyz[2], ee_xyz[3], ee_xyz[4], ee_xyz[5],
ee_xyz[9], ee_xyz[10], ee_xyz[11], ee_xyz[15], ee_xyz[16], ee_xyz[17],
ee_xyz[12], ee_xyz[13], ee_xyz[14], ee_xyz[6], ee_xyz[7], ee_xyz[8]]
for i in range(len(leg_list)):
j = leg_list[i] - 1
xyz_polygon.extend((newEe_xyz[j * 3:j * 3 + 3]))
return xyz_polygon
def make_polygonLines(self, leg_list, ee_xyz):
print("leglistLins", leg_list)
line = []
for i in range(len(ee_xyz / 3)):
j = i - 1
line.extend = [ee_xyz[3 * j + 3] - ee_xyz[3 * j],
ee_xyz[3 * j + 4] - ee_xyz[3 * j + 1],
ee_xyz[3 * j + 5] - ee_xyz[3 * j + 2]]
return line
    def check_stabilty(self, t_poly=None):
        ''' Function: estimates whether the body's centre lies inside the support
            triangle of the grounded legs and its margin to the triangle edges.
            Parameters: t_poly: unused.  # NOTE(review): parameter is never read
            Return: (stability_margin, inside)
            NOTE(review): this function looks unfinished -- see the inline notes
            about hard-coded contact flags and unbound variables.
        '''
        ee_xyz, servoPos = self.doFkine(readPos())
        # NOTE(review): ground-contact flags are hard-coded rather than read from
        # tactile sensors; with these values neither P1/P2/P3 branch below fires,
        # so the lambda_* lines would raise NameError -- confirm work in progress.
        tac = [False, True, False, True, True, False]
        leg_list = []
        for i in range(len(tac)):
            if tac[i] is True:
                leg_list.extend([i + 1])
        # NOTE(review): make_polygonLines returns a single list, so this two-way
        # unpacking raises -- confirm the intended return signature.
        poly_lines, poly_points = self.make_polygonLines(leg_list, ee_xyz)
        print("lines", poly_lines)
        if tac[1] is True and tac[2] is True and tac[5]is True:
            # gamma, beta = 10,20 #self.get_orientation(tac)
            # n = [0,-sin(beta),cos(beta)]
            print("im not here")
            # Support-triangle corners (x, y, 1) taken from legs 2, 3 and 6.
            P1 = [ee_xyz[3], ee_xyz[4], 1]
            P2 = [ee_xyz[6], ee_xyz[7], 1]
            P3 = [ee_xyz[15], ee_xyz[16], 1]
            print(P1, P2, P3)
        elif tac[0] is True and tac[3] is True and tac[4] is True:
            print("im here")
            # Support-triangle corners (x, y, 1) taken from legs 1, 4 and 5.
            P1 = [ee_xyz[0], ee_xyz[1], 1]
            P3 = [ee_xyz[9], ee_xyz[10], 1]
            P2 = [ee_xyz[12], ee_xyz[13], 1]
            print(P1, P2, P3)
        k = 1  # dotProduct(n,P1)
        x = 0
        y = 1
        z = 2
        # Barycentric coordinates of the body centre w.r.t. triangle P1-P2-P3.
        lambda_1 = ((P2[x] * P3[y] - P2[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        lambda_2 = -((P1[x] * P3[y] - P1[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        lambda_3 = ((P1[x] * P2[y] - P1[y] * P2[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        # NOTE(review): exact float equality (== 1) will almost never hold, and
        # `inside` is only bound when all three conditions pass, so the return
        # below can raise NameError -- confirm intended logic.
        if lambda_1 > 0.1 and lambda_2 > 0.1 and lambda_3 > 0.1 and lambda_3 > 0.1:
            if lambda_1 < 0.9 and lambda_2 < 0.9 and lambda_3 < 0.9:
                if lambda_1 + lambda_2 + lambda_3 == 1:
                    inside = True
        side1 = subtract(P1, P2)
        side2 = subtract(P3, P2)
        side3 = subtract(P1, P3)
        G = [0, 0, 1]
        P2_G = subtract(G, P2)
        P3_G = subtract(G, P3)
        # Distance from the body centre G to each triangle edge.
        margin_s1 = sqrt(pow(dotProduct(P2_G, unit_vec(side1)), 2) + dotProduct(P2_G, P2_G))
        margin_s2 = sqrt(pow(dotProduct(P2_G, unit_vec(side2)), 2) + dotProduct(P2_G, P2_G))
        margin_s3 = sqrt(pow(dotProduct(P3_G, unit_vec(side3)), 2) + dotProduct(P3_G, P3_G))
        stability_margin = min(margin_s1, margin_s2, margin_s3)
        print(stability_margin, inside)
        return stability_margin, inside
def get_orientation(self, leg_list):
ee_xyz, servoPos = self.doFkine(readPos())
p1 = ee_xyz[3 * (leg_list[0] - 1):3 * (leg_list[0] - 1) + 3]
p2 = ee_xyz[3 * (leg_list[1] - 1):3 * (leg_list[1] - 1) + 3]
p3 = ee_xyz[3 * (leg_list[2] - 1):3 * (leg_list[2] - 1) + 3]
p21 = subtract(p2, p1)
p23 = subtract(p2, p3)
normz = crossProduct(p21, p23)
beta = atan2(normz[0], normz[2]) * 180 / pi
gamma = -atan2(normz[1], normz[2]) * 180 / pi
return gamma, beta
    def calc_translationStairs(self, riser, climbed_stairs_front, climbed_stairs_rear):
        ''' Function: computes the body translation needed while the front and
            rear legs stand on different stair steps.
            Parameters: riser: height of one stair riser
                        climbed_stairs_front: steps already climbed by the front legs
                        climbed_stairs_rear: steps already climbed by the rear legs
            Return: [translation_z, translation_y] (zeros when no correction needed)
        '''
        # gamma, beta = self.get_orientation([1,5,6])
        ee_xyz, servopos = self.doFkine(readPos())
        # Longitudinal distance between the leg-1 and leg-5 feet (y coordinates).
        dist_y = abs(ee_xyz[1] - ee_xyz[13])
        # Height difference between front and rear contact points.
        riser_diff = (climbed_stairs_front - climbed_stairs_rear) * riser
        omega = asin(riser_diff / dist_y) * 180 / pi  # body tilt, in degrees
        AB = -ee_xyz[14] + 30  # NOTE(review): 30 and +10 below look like fixed clearances -- confirm units
        AC = AB / cos(omega * pi / 180)
        BC = AC * sin(omega * pi / 180)
        # 141.33 matches the x offset of legs 3/4 in LegConsts.
        # NOTE(review): this mixes index 12 (leg-5 x) with index 11 (leg-4 z) -- confirm indices.
        BE = sqrt(pow(ee_xyz[12], 2) + pow(ee_xyz[11], 2)) - 141.33
        CE = BE - BC
        CD = BC * CE / AC
        if AC + CD <= riser_diff:
            trans_z_g = riser_diff - AC - CD + 10
            translation_z = trans_z_g * cos(omega * pi / 180)
            translation_y = trans_z_g * sin(omega * pi / 180)
        else:
            translation_z = 0
            translation_y = 0
        return [translation_z, translation_y]
| 47.048048
| 203
| 0.547456
|
from math import pi, cos, sin, atan2, acos, sqrt, pow, radians, asin
from math_calc import *
from service_router import readPos
class LegConsts(object):
    ''' Class object to store characteristics of each leg '''
    def __init__(self, x_off, y_off, z_off, ang_off, leg_nr):
        self.x_off = x_off                # x offset of the hip joint from the body centre
        self.y_off = y_off                # y offset of the hip joint from the body centre
        self.z_off = z_off                # z offset of the hip joint from the body centre
        self.ang_off = ang_off            # angular offset of the leg's mounting direction (rad)
        self.f_ang_off = radians(13.33)   # femur angle offset (rad)
        self.t_ang_off = radians(-25.90)  # tibia angle offset (rad)
        self.c_len = 66.50                # coxa link length (presumably mm -- same units as foot coords)
        self.f_len = 144.40               # femur link length
        self.t_len = 287                  # tibia link length
        self.leg_nr = leg_nr              # human-readable leg name, e.g. "Leg 1"
class Kinematics(object):
''' Class object to compute various types of kinematics data for AntBot '''
leg1 = LegConsts(70.5, 122.225, -14.9, - pi / 3, "Leg 1")
leg2 = LegConsts(-70.5, 122.225, -14.9, -2 * pi / 3, "Leg 2")
leg3 = LegConsts(141.33, 0, -14.9, 0, "Leg 3")
leg4 = LegConsts(-141.33, 0, -14.9, pi, "Leg 4")
leg5 = LegConsts(70.5, -122.225, -14.9, pi / 3, "Leg 5")
leg6 = LegConsts(-70.5, -122.225, -14.9, 2 * pi / 3, "Leg 6")
leg_list = [leg1, leg2, leg3, leg4, leg5, leg6]
of x,y,z coordinates for all 6 legs
servoPos: servo positions in radians
'''
servoPos = self.step_to_rad(all_positions)
ee_xyz = []
j = 0
for i in xrange(0, 16, 3):
ee_xyz.extend(self.calc_fkine(servoPos[i:i + 3], self.leg_list[j]))
j += 1
return ee_xyz, servoPos
def doIkine(self, all_positions, x, y, z, body_orient=None, leg=None, auto=None):
''' Function: computes inverse kinematics
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
x,y,z: desired change in x,y,z coordinates (same for all legs)
body_orient: list of 3 integers meaning alpha,beta,gamma rotation in degrees
leg: list with integers meaning leg numbers to compute inverse for them only
Return: list of 18 integers with servo steps
'''
ee_xyz, servoPos = self.doFkine(all_positions)
thetas = []
j = 0
if isinstance(leg, int):
leg = [leg]
elif isinstance(leg, tuple):
leg = list(leg)
elif isinstance(body_orient, tuple):
body_orient = list(body_orient)
if body_orient:
body_orient = [radians(d) for d in body_orient]
alpha_rad, beta_rad, gama_rad = body_orient[0], body_orient[1], body_orient[2]
x = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
* cos(alpha_rad) - sin(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
y = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
* sin(alpha_rad) + cos(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
z = -sin(beta_rad) * x + cos(beta_rad) * sin(gama_rad) * y + cos(beta_rad) * cos(gama_rad) * z
if leg:
for i in range(len(leg)):
j = leg[i] - 1
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[j * 3:j * 3 + 3], self.leg_list[j]))
else:
for i in xrange(0, 16, 3):
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[i:i + 3], self.leg_list[j]))
j += 1
result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
return result
    def doIkineRotationEuler(self, all_positions, alpha_rad, beta_rad, gama_rad, dist_x, dist_y, dist_z):
        ''' Function: computes inverse kinematics and body rotation (Parallel kinematics)
            Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
                        alpha_rad, beta_rad, gama_rad: body rotation angles in radians
                        dist_x, dist_y, dist_z: desired body translation in x, y, z
            Return: list of 18 integers with servo steps
        '''
        final_eexyz, ee_xyz = self.calc_rot_matrix(all_positions, alpha_rad, beta_rad, gama_rad)
        thetas = []
        j = 0
        # One inverse-kinematics solve per leg, combining the rotation
        # displacement with the requested body translation.
        for i in xrange(0, 16, 3):
            thetas.extend(self.calc_ikine(final_eexyz[i] - dist_x, final_eexyz[i + 1] - dist_y, final_eexyz[i + 2] - dist_z, ee_xyz[i:i + 3], self.leg_list[j]))
            j += 1
        result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
        return result
def printForward(self, all_positions):
''' Function: Prints x,y,z coordinates of each leg
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
'''
ee_list, theta_list = self.doFkine(all_positions)
RoundedCoords = ['%.4f' % elem for elem in ee_list]
print ""
print "X,Y,Z coordinates of Leg end-points: "
print " " + str(["X ", " Y ", " Z "])
print "Leg 1: " + str(RoundedCoords[0:3])
print "Leg 2: " + str(RoundedCoords[3:6])
print "Leg 3: " + str(RoundedCoords[6:9])
print "Leg 4: " + str(RoundedCoords[9:12])
print "Leg 5: " + str(RoundedCoords[12:15])
print "Leg 6: " + str(RoundedCoords[15:18])
print ""
def printInverse(self, all_positions, x, y, z):
''' Function: Prints servo positions, in radians, needed to reach the position
Parameters: theta_list: 18 servo positions in radians.
'''
theta_list = self.doIkine(all_positions, x, y, z)
RoundedThetas = ['%.4f' % elem for elem in theta_list]
print ""
print "Theta angles of each servo:"
print " " + str(["Coxa ", "Femur ", "Tibia"])
print "Leg 1: " + str(RoundedThetas[0:3])
print "Leg 2: " + str(RoundedThetas[3:6])
print "Leg 3: " + str(RoundedThetas[6:9])
print "Leg 4: " + str(RoundedThetas[9:12])
print "Leg 5: " + str(RoundedThetas[12:15])
print "Leg 6: " + str(RoundedThetas[15:18])
print ""
def printKinematics(self, all_positions, x, y, z):
self.printForward(all_positions)
self.printInverse(all_positions, x, y, z)
ee_x = leg.x_off + cos(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
ee_y = leg.y_off + sin(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
return [ee_x, ee_y, ee_z]
def calc_ikine(self, x, y, z, ee_xyz, leg, auto=None):
init_X = ee_xyz[0]
init_Y = ee_xyz[1]
init_Z = ee_xyz[2]
X = init_X + (x) - leg.x_off
Y = init_Y + (y) - leg.y_off
Z = init_Z + (z) - leg.z_off
theta1 = atan2(Y, X) + leg.ang_off
if theta1 < -pi:
theta1 += 2 * pi
if theta1 > pi:
theta1 -= 2 * pi
new_x = cos(leg.ang_off) * X - sin(leg.ang_off) * Y
new_y = sin(leg.ang_off) * X + cos(leg.ang_off) * Y
final_x = cos(theta1) * new_x + sin(theta1) * new_y - leg.c_len
s = sqrt(pow(final_x, 2) + pow(Z, 2))
try:
t3_term = (-pow(s, 2) + pow(leg.f_len, 2) + pow(leg.t_len, 2)) / (2 * leg.f_len * leg.t_len)
t3 = pi - acos(t3_term)
except ValueError:
print "Cannot compute acos(", t3_term, ") for ", leg.leg_nr
if auto is None:
if t3_term < 0:
t3 = pi - acos(-0.99)
else:
t3 = pi - acos(0.99)
else:
return -1
theta3 = -t3 - leg.t_ang_off
theta2 = -(-atan2(Z, final_x) - atan2(leg.t_len * sin(t3), leg.f_len + leg.t_len * cos(t3)) + leg.f_ang_off)
if auto is not None:
if (theta2 > 1.8 or theta2 < -1.8) or (theta3 < -2.2 or theta3 > 2.2):
return -1
return [theta1, theta2, theta3]
def calc_rot_displacement(self, alpha_rad, beta_rad, gama_rad, ee_xyz):
pre_x = ee_xyz[0]
pre_y = ee_xyz[1]
pre_z = ee_xyz[2]
r_term1 = (cos(gama_rad) * sin(beta_rad) * pre_z + sin(gama_rad) * sin(beta_rad) * pre_y + pre_x * cos(beta_rad))
r_term2 = (cos(gama_rad) * pre_y - sin(gama_rad) * pre_z)
r_x = r_term1 * cos(alpha_rad) - r_term2 * sin(alpha_rad) - pre_x
r_y = r_term1 * sin(alpha_rad) + r_term2 * cos(alpha_rad) - pre_y
r_z = - sin(beta_rad) * pre_x + cos(beta_rad) * sin(gama_rad) * pre_y + cos(beta_rad) * cos(gama_rad) * pre_z - pre_z
return [r_x, r_y, r_z]
def calc_rot_matrix(self, all_positions, alpha_rad, beta_rad, gama_rad):
ee_xyz, servoPos = self.doFkine(all_positions)
rot_val_list = []
for i in xrange(0, 16, 3):
rot_val_list.extend(self.calc_rot_displacement(alpha_rad, beta_rad, gama_rad, ee_xyz[i:i + 3]))
return rot_val_list, ee_xyz
def rad_to_step(self, pos_rads):
return [i / pi * 2048 + 2048 for i in pos_rads]
def step_to_rad(self, pos_steps):
return [(((x / 2047.5) - 1) * pi) for x in pos_steps]
def make_poligonCorners(self, all_positions, leg_list):
if leg_list is int:
leg_list = [leg_list]
xyz_polygon = []
ee_xyz, servoPos = self.doFkine(all_positions)
newEe_xyz = [ee_xyz[0], ee_xyz[1], ee_xyz[2], ee_xyz[3], ee_xyz[4], ee_xyz[5],
ee_xyz[9], ee_xyz[10], ee_xyz[11], ee_xyz[15], ee_xyz[16], ee_xyz[17],
ee_xyz[12], ee_xyz[13], ee_xyz[14], ee_xyz[6], ee_xyz[7], ee_xyz[8]]
for i in range(len(leg_list)):
j = leg_list[i] - 1
xyz_polygon.extend((newEe_xyz[j * 3:j * 3 + 3]))
return xyz_polygon
def make_polygonLines(self, leg_list, ee_xyz):
print("leglistLins", leg_list)
line = []
for i in range(len(ee_xyz / 3)):
j = i - 1
line.extend = [ee_xyz[3 * j + 3] - ee_xyz[3 * j],
ee_xyz[3 * j + 4] - ee_xyz[3 * j + 1],
ee_xyz[3 * j + 5] - ee_xyz[3 * j + 2]]
return line
def check_stabilty(self, t_poly=None):
ee_xyz, servoPos = self.doFkine(readPos())
tac = [False, True, False, True, True, False]
leg_list = []
for i in range(len(tac)):
if tac[i] is True:
leg_list.extend([i + 1])
poly_lines, poly_points = self.make_polygonLines(leg_list, ee_xyz)
print("lines", poly_lines)
if tac[1] is True and tac[2] is True and tac[5]is True:
print("im not here")
P1 = [ee_xyz[3], ee_xyz[4], 1]
P2 = [ee_xyz[6], ee_xyz[7], 1]
P3 = [ee_xyz[15], ee_xyz[16], 1]
print(P1, P2, P3)
elif tac[0] is True and tac[3] is True and tac[4] is True:
print("im here")
P1 = [ee_xyz[0], ee_xyz[1], 1]
P3 = [ee_xyz[9], ee_xyz[10], 1]
P2 = [ee_xyz[12], ee_xyz[13], 1]
print(P1, P2, P3)
k = 1
x = 0
y = 1
z = 2
lambda_1 = ((P2[x] * P3[y] - P2[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
lambda_2 = -((P1[x] * P3[y] - P1[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
lambda_3 = ((P1[x] * P2[y] - P1[y] * P2[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
if lambda_1 > 0.1 and lambda_2 > 0.1 and lambda_3 > 0.1 and lambda_3 > 0.1:
if lambda_1 < 0.9 and lambda_2 < 0.9 and lambda_3 < 0.9:
if lambda_1 + lambda_2 + lambda_3 == 1:
inside = True
side1 = subtract(P1, P2)
side2 = subtract(P3, P2)
side3 = subtract(P1, P3)
G = [0, 0, 1]
P2_G = subtract(G, P2)
P3_G = subtract(G, P3)
margin_s1 = sqrt(pow(dotProduct(P2_G, unit_vec(side1)), 2) + dotProduct(P2_G, P2_G))
margin_s2 = sqrt(pow(dotProduct(P2_G, unit_vec(side2)), 2) + dotProduct(P2_G, P2_G))
margin_s3 = sqrt(pow(dotProduct(P3_G, unit_vec(side3)), 2) + dotProduct(P3_G, P3_G))
stability_margin = min(margin_s1, margin_s2, margin_s3)
print(stability_margin, inside)
return stability_margin, inside
def get_orientation(self, leg_list):
ee_xyz, servoPos = self.doFkine(readPos())
p1 = ee_xyz[3 * (leg_list[0] - 1):3 * (leg_list[0] - 1) + 3]
p2 = ee_xyz[3 * (leg_list[1] - 1):3 * (leg_list[1] - 1) + 3]
p3 = ee_xyz[3 * (leg_list[2] - 1):3 * (leg_list[2] - 1) + 3]
p21 = subtract(p2, p1)
p23 = subtract(p2, p3)
normz = crossProduct(p21, p23)
beta = atan2(normz[0], normz[2]) * 180 / pi
gamma = -atan2(normz[1], normz[2]) * 180 / pi
return gamma, beta
def calc_translationStairs(self, riser, climbed_stairs_front, climbed_stairs_rear):
ee_xyz, servopos = self.doFkine(readPos())
dist_y = abs(ee_xyz[1] - ee_xyz[13])
riser_diff = (climbed_stairs_front - climbed_stairs_rear) * riser
omega = asin(riser_diff / dist_y) * 180 / pi
AB = -ee_xyz[14] + 30
AC = AB / cos(omega * pi / 180)
BC = AC * sin(omega * pi / 180)
BE = sqrt(pow(ee_xyz[12], 2) + pow(ee_xyz[11], 2)) - 141.33
CE = BE - BC
CD = BC * CE / AC
if AC + CD <= riser_diff:
trans_z_g = riser_diff - AC - CD + 10
translation_z = trans_z_g * cos(omega * pi / 180)
translation_y = trans_z_g * sin(omega * pi / 180)
else:
translation_z = 0
translation_y = 0
return [translation_z, translation_y]
| false
| true
|
79078066b6af069c242f862f323f0ed337449d6a
| 7,787
|
py
|
Python
|
rasa/shared/importers/multi_project.py
|
mukulbalodi/rasa
|
3126ef1148c165f2402f3c7203138d429e46c68c
|
[
"Apache-2.0"
] | null | null | null |
rasa/shared/importers/multi_project.py
|
mukulbalodi/rasa
|
3126ef1148c165f2402f3c7203138d429e46c68c
|
[
"Apache-2.0"
] | null | null | null |
rasa/shared/importers/multi_project.py
|
mukulbalodi/rasa
|
3126ef1148c165f2402f3c7203138d429e46c68c
|
[
"Apache-2.0"
] | 1
|
2022-02-22T12:35:19.000Z
|
2022-02-22T12:35:19.000Z
|
import logging
from functools import reduce
from typing import Text, Set, Dict, Optional, List, Union, Any
import os
import rasa.shared.data
import rasa.shared.utils.io
from rasa.shared.core.domain import Domain
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.importers import utils
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.core.training_data.structures import StoryGraph
from rasa.shared.utils.common import mark_as_experimental_feature
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
logger = logging.getLogger(__name__)
class MultiProjectImporter(TrainingDataImporter):
    """Importer that aggregates training data from multiple imported projects.

    The root config's ``imports`` entries name other projects (directories or
    config files); their domains, NLU data and stories are collected and merged.
    """

    def __init__(
        self,
        config_file: Text,
        domain_path: Optional[Text] = None,
        training_data_paths: Optional[Union[List[Text], Text]] = None,
        project_directory: Optional[Text] = None,
    ):
        """Collects training-data paths from the root project and its imports.

        Args:
            config_file: Path of the root model configuration file.
            domain_path: Optional explicit domain file or directory.
            training_data_paths: Extra training data files/directories.
            project_directory: Root directory; defaults to the config file's directory.
        """
        self.config = rasa.shared.utils.io.read_model_configuration(config_file)
        if domain_path:
            self._domain_paths = [domain_path]
        else:
            self._domain_paths = []
        self._story_paths = []
        self._e2e_story_paths = []
        self._nlu_paths = []
        self._imports = []
        # NOTE(review): may be a plain string (see the type hint); membership
        # tests in _is_in_additional_paths then become substring checks -- confirm.
        self._additional_paths = training_data_paths or []
        self._project_directory = project_directory or os.path.dirname(config_file)
        # Recursively walk the "imports" graph starting from the root config.
        self._init_from_dict(self.config, self._project_directory)
        extra_nlu_files = rasa.shared.data.get_data_files(
            training_data_paths, rasa.shared.data.is_nlu_file
        )
        extra_story_files = rasa.shared.data.get_data_files(
            training_data_paths, YAMLStoryReader.is_stories_file
        )
        self._story_paths += extra_story_files
        self._nlu_paths += extra_nlu_files
        logger.debug(
            "Selected projects: {}".format("".join([f"\n-{i}" for i in self._imports]))
        )
        mark_as_experimental_feature(feature_name="MultiProjectImporter")

    def get_config_file_for_auto_config(self) -> Optional[Text]:
        """Returns config file path for auto-config only if there is a single one."""
        return None

    def _init_from_path(self, path: Text) -> None:
        # Dispatch on whether the import entry is a config file or a directory.
        if os.path.isfile(path):
            self._init_from_file(path)
        elif os.path.isdir(path):
            self._init_from_directory(path)

    def _init_from_file(self, path: Text) -> None:
        path = os.path.abspath(path)
        if os.path.exists(path) and rasa.shared.data.is_config_file(path):
            config = rasa.shared.utils.io.read_config_file(path)
            parent_directory = os.path.dirname(path)
            self._init_from_dict(config, parent_directory)
        else:
            rasa.shared.utils.io.raise_warning(
                f"'{path}' does not exist or is not a valid config file."
            )

    def _init_from_dict(self, _dict: Dict[Text, Any], parent_directory: Text) -> None:
        # Collect this config's "imports", resolved relative to its directory.
        imports = _dict.get("imports") or []
        imports = [os.path.join(parent_directory, i) for i in imports]
        # clean out relative paths
        imports = [os.path.abspath(i) for i in imports]
        # remove duplication
        import_candidates = []
        for i in imports:
            if i not in import_candidates and not self._is_explicitly_imported(i):
                import_candidates.append(i)
        self._imports.extend(import_candidates)
        # import config files from paths which have not been processed so far
        for p in import_candidates:
            self._init_from_path(p)

    def _is_explicitly_imported(self, path: Text) -> bool:
        # Guards against re-processing a path an earlier config already imported.
        return not self.no_skills_selected() and self.is_imported(path)

    def _init_from_directory(self, path: Text) -> None:
        # Classify every file below the imported directory by its content type.
        for parent, _, files in os.walk(path, followlinks=True):
            for file in files:
                full_path = os.path.join(parent, file)
                if not self.is_imported(full_path):
                    # Check next file
                    continue
                if YAMLStoryReader.is_test_stories_file(full_path):
                    self._e2e_story_paths.append(full_path)
                elif Domain.is_domain_file(full_path):
                    self._domain_paths.append(full_path)
                elif rasa.shared.data.is_nlu_file(full_path):
                    self._nlu_paths.append(full_path)
                elif YAMLStoryReader.is_stories_file(full_path):
                    self._story_paths.append(full_path)
                elif rasa.shared.data.is_config_file(full_path):
                    self._init_from_file(full_path)

    def no_skills_selected(self) -> bool:
        # True when no config declared any "imports"; everything is then imported.
        return not self._imports

    def training_paths(self) -> Set[Text]:
        """Returns the paths which should be searched for training data."""
        # only include extra paths if they are not part of the current project directory
        training_paths = {
            i
            for i in self._imports
            if not self._project_directory or self._project_directory not in i
        }
        if self._project_directory:
            training_paths.add(self._project_directory)
        return training_paths

    def is_imported(self, path: Text) -> bool:
        """
        Checks whether a path is imported by a skill.
        Args:
            path: File or directory path which should be checked.
        Returns:
            `True` if path is imported by a skill, `False` if not.
        """
        absolute_path = os.path.abspath(path)
        return (
            self.no_skills_selected()
            or self._is_in_project_directory(absolute_path)
            or self._is_in_additional_paths(absolute_path)
            or self._is_in_imported_paths(absolute_path)
        )

    def _is_in_project_directory(self, path: Text) -> bool:
        # A file belongs to the project only when it sits directly in the
        # project directory (not in a subdirectory).
        if os.path.isfile(path):
            parent_directory = os.path.abspath(os.path.dirname(path))
            return parent_directory == self._project_directory
        else:
            return path == self._project_directory

    def _is_in_additional_paths(self, path: Text) -> bool:
        included = path in self._additional_paths
        if not included and os.path.isfile(path):
            parent_directory = os.path.abspath(os.path.dirname(path))
            included = parent_directory in self._additional_paths
        return included

    def _is_in_imported_paths(self, path: Text) -> bool:
        return any(
            [rasa.shared.utils.io.is_subdirectory(path, i) for i in self._imports]
        )

    def get_domain(self) -> Domain:
        """Retrieves model domain (see parent class for full docstring)."""
        domains = [Domain.load(path) for path in self._domain_paths]
        return reduce(
            lambda merged, other: merged.merge(other), domains, Domain.empty()
        )

    def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph:
        """Retrieves training stories / rules (see parent class for full docstring)."""
        return utils.story_graph_from_paths(
            self._story_paths, self.get_domain(), exclusion_percentage
        )

    def get_conversation_tests(self) -> StoryGraph:
        """Retrieves conversation test stories (see parent class for full docstring)."""
        return utils.story_graph_from_paths(self._e2e_story_paths, self.get_domain())

    def get_config(self) -> Dict:
        """Retrieves model config (see parent class for full docstring)."""
        return self.config

    def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData:
        """Retrieves NLU training data (see parent class for full docstring)."""
        return utils.training_data_from_paths(self._nlu_paths, language)
| 38.549505
| 88
| 0.651856
|
import logging
from functools import reduce
from typing import Text, Set, Dict, Optional, List, Union, Any
import os
import rasa.shared.data
import rasa.shared.utils.io
from rasa.shared.core.domain import Domain
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.importers import utils
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.core.training_data.structures import StoryGraph
from rasa.shared.utils.common import mark_as_experimental_feature
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
logger = logging.getLogger(__name__)
class MultiProjectImporter(TrainingDataImporter):
def __init__(
self,
config_file: Text,
domain_path: Optional[Text] = None,
training_data_paths: Optional[Union[List[Text], Text]] = None,
project_directory: Optional[Text] = None,
):
self.config = rasa.shared.utils.io.read_model_configuration(config_file)
if domain_path:
self._domain_paths = [domain_path]
else:
self._domain_paths = []
self._story_paths = []
self._e2e_story_paths = []
self._nlu_paths = []
self._imports = []
self._additional_paths = training_data_paths or []
self._project_directory = project_directory or os.path.dirname(config_file)
self._init_from_dict(self.config, self._project_directory)
extra_nlu_files = rasa.shared.data.get_data_files(
training_data_paths, rasa.shared.data.is_nlu_file
)
extra_story_files = rasa.shared.data.get_data_files(
training_data_paths, YAMLStoryReader.is_stories_file
)
self._story_paths += extra_story_files
self._nlu_paths += extra_nlu_files
logger.debug(
"Selected projects: {}".format("".join([f"\n-{i}" for i in self._imports]))
)
mark_as_experimental_feature(feature_name="MultiProjectImporter")
def get_config_file_for_auto_config(self) -> Optional[Text]:
return None
def _init_from_path(self, path: Text) -> None:
if os.path.isfile(path):
self._init_from_file(path)
elif os.path.isdir(path):
self._init_from_directory(path)
def _init_from_file(self, path: Text) -> None:
path = os.path.abspath(path)
if os.path.exists(path) and rasa.shared.data.is_config_file(path):
config = rasa.shared.utils.io.read_config_file(path)
parent_directory = os.path.dirname(path)
self._init_from_dict(config, parent_directory)
else:
rasa.shared.utils.io.raise_warning(
f"'{path}' does not exist or is not a valid config file."
)
def _init_from_dict(self, _dict: Dict[Text, Any], parent_directory: Text) -> None:
imports = _dict.get("imports") or []
imports = [os.path.join(parent_directory, i) for i in imports]
imports = [os.path.abspath(i) for i in imports]
import_candidates = []
for i in imports:
if i not in import_candidates and not self._is_explicitly_imported(i):
import_candidates.append(i)
self._imports.extend(import_candidates)
for p in import_candidates:
self._init_from_path(p)
def _is_explicitly_imported(self, path: Text) -> bool:
return not self.no_skills_selected() and self.is_imported(path)
def _init_from_directory(self, path: Text) -> None:
for parent, _, files in os.walk(path, followlinks=True):
for file in files:
full_path = os.path.join(parent, file)
if not self.is_imported(full_path):
continue
if YAMLStoryReader.is_test_stories_file(full_path):
self._e2e_story_paths.append(full_path)
elif Domain.is_domain_file(full_path):
self._domain_paths.append(full_path)
elif rasa.shared.data.is_nlu_file(full_path):
self._nlu_paths.append(full_path)
elif YAMLStoryReader.is_stories_file(full_path):
self._story_paths.append(full_path)
elif rasa.shared.data.is_config_file(full_path):
self._init_from_file(full_path)
def no_skills_selected(self) -> bool:
return not self._imports
def training_paths(self) -> Set[Text]:
training_paths = {
i
for i in self._imports
if not self._project_directory or self._project_directory not in i
}
if self._project_directory:
training_paths.add(self._project_directory)
return training_paths
def is_imported(self, path: Text) -> bool:
absolute_path = os.path.abspath(path)
return (
self.no_skills_selected()
or self._is_in_project_directory(absolute_path)
or self._is_in_additional_paths(absolute_path)
or self._is_in_imported_paths(absolute_path)
)
def _is_in_project_directory(self, path: Text) -> bool:
if os.path.isfile(path):
parent_directory = os.path.abspath(os.path.dirname(path))
return parent_directory == self._project_directory
else:
return path == self._project_directory
def _is_in_additional_paths(self, path: Text) -> bool:
included = path in self._additional_paths
if not included and os.path.isfile(path):
parent_directory = os.path.abspath(os.path.dirname(path))
included = parent_directory in self._additional_paths
return included
def _is_in_imported_paths(self, path: Text) -> bool:
return any(
[rasa.shared.utils.io.is_subdirectory(path, i) for i in self._imports]
)
def get_domain(self) -> Domain:
domains = [Domain.load(path) for path in self._domain_paths]
return reduce(
lambda merged, other: merged.merge(other), domains, Domain.empty()
)
def get_stories(self, exclusion_percentage: Optional[int] = None) -> StoryGraph:
return utils.story_graph_from_paths(
self._story_paths, self.get_domain(), exclusion_percentage
)
def get_conversation_tests(self) -> StoryGraph:
return utils.story_graph_from_paths(self._e2e_story_paths, self.get_domain())
def get_config(self) -> Dict:
return self.config
def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData:
return utils.training_data_from_paths(self._nlu_paths, language)
| true
| true
|
7907806716c8beba0182bfd72e0f656cf99a49b1
| 346
|
py
|
Python
|
chat_app/routing.py
|
aanu1143/chat-app
|
20ce2d08ba1efea8951fb9db920014589789a2d9
|
[
"MIT"
] | null | null | null |
chat_app/routing.py
|
aanu1143/chat-app
|
20ce2d08ba1efea8951fb9db920014589789a2d9
|
[
"MIT"
] | null | null | null |
chat_app/routing.py
|
aanu1143/chat-app
|
20ce2d08ba1efea8951fb9db920014589789a2d9
|
[
"MIT"
] | null | null | null |
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import chat.routing
# Channels ASGI entry point: websocket connections pass through the auth
# middleware stack before reaching the URL patterns declared in chat.routing.
application = ProtocolTypeRouter({
    # Empty for now (http->django views is added by default)
    'websocket': AuthMiddlewareStack(
        URLRouter(
            chat.routing.websocket_urlpatterns
        )
    ),
})
| 26.615385
| 60
| 0.722543
|
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import chat.routing
application = ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(
URLRouter(
chat.routing.websocket_urlpatterns
)
),
})
| true
| true
|
790780cefe04d8b106ea68eb55ca71ae2365469e
| 372
|
py
|
Python
|
example/save_and_load_model.py
|
wingedsheep/music-generation-tools
|
02656eb75781925451f51d4ead7d8b6003bdeb29
|
[
"MIT"
] | 12
|
2021-07-22T12:13:27.000Z
|
2022-02-13T09:09:08.000Z
|
example/save_and_load_model.py
|
wingedsheep/music-generation-tools
|
02656eb75781925451f51d4ead7d8b6003bdeb29
|
[
"MIT"
] | 9
|
2021-06-26T10:43:16.000Z
|
2021-12-03T17:25:10.000Z
|
example/save_and_load_model.py
|
wingedsheep/music-generation-tools
|
02656eb75781925451f51d4ead7d8b6003bdeb29
|
[
"MIT"
] | null | null | null |
from mgt.datamanagers.remi.dictionary_generator import DictionaryGenerator
from mgt.models.transformer_model import TransformerModel
"""
Example showing how to save and load a model.
"""
# Build the default REMI token dictionary and a transformer model on top of it.
dictionary = DictionaryGenerator.create_dictionary();
model = TransformerModel(dictionary)
# Persist the model to disk, then restore it into a fresh instance.
model.save_checkpoint("test_model")
model2 = TransformerModel.load_checkpoint("test_model")
| 31
| 74
| 0.833333
|
from mgt.datamanagers.remi.dictionary_generator import DictionaryGenerator
from mgt.models.transformer_model import TransformerModel
dictionary = DictionaryGenerator.create_dictionary();
model = TransformerModel(dictionary)
model.save_checkpoint("test_model")
model2 = TransformerModel.load_checkpoint("test_model")
| true
| true
|
7907817491ff57d46ad146b73c73dd0cd9632333
| 2,481
|
py
|
Python
|
test_geo.py
|
negsrahimi/monke
|
ec2c953c6f10103eb2b45dc68160246a6ee5a473
|
[
"MIT"
] | null | null | null |
test_geo.py
|
negsrahimi/monke
|
ec2c953c6f10103eb2b45dc68160246a6ee5a473
|
[
"MIT"
] | null | null | null |
test_geo.py
|
negsrahimi/monke
|
ec2c953c6f10103eb2b45dc68160246a6ee5a473
|
[
"MIT"
] | null | null | null |
"""Tests for functions defined in the floodsystem/geo module
"""
from floodsystem import geo
from floodsystem.station import MonitoringStation
from floodsystem.stationdata import build_station_list
stations = build_station_list()
# define arbitrary stations for the tests
station_id1 = "test station id 1"
measure_id1 = "test measure id 1"
label1 = "TS1"
coord1 = (1.0, 4.0)
typical_range1 = (-2, 5)
river1 = "River Cam"
town1 = "Town 1"
TestStation1 = MonitoringStation(station_id1, measure_id1, label1, coord1, typical_range1, river1, town1)
station_id2 = "test station id 2"
measure_id2 = "test measure id 2"
label2 = "TS2"
coord2 = (0.0, 1.0)
typical_range2 = (-2, 2)
river2 = "River Cam"
town2 = "Town 2"
TestStation2 = MonitoringStation(station_id2, measure_id2, label2, coord2, typical_range2, river2, town2)
station_id3 = "test station id 3"
measure_id3 = "test measure id 3"
label3 = "TS3"
coord3 = (1.0, 1.0)
typical_range3 = (-2, 3)
river3 = "River Thames"
town3 = "Town 3"
TestStation3 = MonitoringStation(station_id3, measure_id3, label3, coord3, typical_range3, river3, town3)
test_stations = [TestStation1, TestStation2, TestStation3]
def test_stations_within_radius():
centre = (52.2053, 0.1218)
# check that no stations are at a negative distance from the centre
assert geo.stations_within_radius(stations, centre, 0) == []
# check that all stations are within 10000km of the centre
assert len(geo.stations_within_radius(stations, centre, 10000)) == len(stations)
def test_rivers_by_station_number():
lst = geo.rivers_by_station_number(stations, 2)
# check that the number of stations is greater (or equal to the second one) for the first river.
assert lst[0][1] >= lst[1][1]
def test_stations_by_distance():
test = geo.stations_by_distance(test_stations, (0,0))
# check that the results are in the right order based on the test stations provided above
assert (test[0][0], test[1][0], test[2][0]) == (TestStation2, TestStation3, TestStation1)
def test_rivers_with_station():
# check that the results are River Cam and River Thames as per the test stations provided above
assert geo.rivers_with_station(test_stations) == ['River Cam', 'River Thames']
def test_stations_by_river():
# check that the two stations on the River Cam are TestStation1 and TestStation2
assert sorted([x.name for x in geo.stations_by_river(test_stations)['River Cam']]) == [TestStation1.name, TestStation2.name]
| 34.943662
| 128
| 0.742846
|
from floodsystem import geo
from floodsystem.station import MonitoringStation
from floodsystem.stationdata import build_station_list
stations = build_station_list()
station_id1 = "test station id 1"
measure_id1 = "test measure id 1"
label1 = "TS1"
coord1 = (1.0, 4.0)
typical_range1 = (-2, 5)
river1 = "River Cam"
town1 = "Town 1"
TestStation1 = MonitoringStation(station_id1, measure_id1, label1, coord1, typical_range1, river1, town1)
station_id2 = "test station id 2"
measure_id2 = "test measure id 2"
label2 = "TS2"
coord2 = (0.0, 1.0)
typical_range2 = (-2, 2)
river2 = "River Cam"
town2 = "Town 2"
TestStation2 = MonitoringStation(station_id2, measure_id2, label2, coord2, typical_range2, river2, town2)
station_id3 = "test station id 3"
measure_id3 = "test measure id 3"
label3 = "TS3"
coord3 = (1.0, 1.0)
typical_range3 = (-2, 3)
river3 = "River Thames"
town3 = "Town 3"
TestStation3 = MonitoringStation(station_id3, measure_id3, label3, coord3, typical_range3, river3, town3)
test_stations = [TestStation1, TestStation2, TestStation3]
def test_stations_within_radius():
centre = (52.2053, 0.1218)
assert geo.stations_within_radius(stations, centre, 0) == []
assert len(geo.stations_within_radius(stations, centre, 10000)) == len(stations)
def test_rivers_by_station_number():
lst = geo.rivers_by_station_number(stations, 2)
assert lst[0][1] >= lst[1][1]
def test_stations_by_distance():
test = geo.stations_by_distance(test_stations, (0,0))
assert (test[0][0], test[1][0], test[2][0]) == (TestStation2, TestStation3, TestStation1)
def test_rivers_with_station():
assert geo.rivers_with_station(test_stations) == ['River Cam', 'River Thames']
def test_stations_by_river():
assert sorted([x.name for x in geo.stations_by_river(test_stations)['River Cam']]) == [TestStation1.name, TestStation2.name]
| true
| true
|
7907817eaace07c51f5c20363c1fda57e0c57fc3
| 1,191
|
py
|
Python
|
src/utils/console_functions.py
|
MariusDgr/AudioMining
|
ef74567fcc1d9034777bde45bc4a4ead20e8aa75
|
[
"Apache-2.0"
] | null | null | null |
src/utils/console_functions.py
|
MariusDgr/AudioMining
|
ef74567fcc1d9034777bde45bc4a4ead20e8aa75
|
[
"Apache-2.0"
] | null | null | null |
src/utils/console_functions.py
|
MariusDgr/AudioMining
|
ef74567fcc1d9034777bde45bc4a4ead20e8aa75
|
[
"Apache-2.0"
] | null | null | null |
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
From: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
| 47.64
| 123
| 0.592779
|
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
if iteration == total:
print()
| true
| true
|
790781e3fabc371efb85b5a430d9dced823cd5d5
| 922
|
py
|
Python
|
data_loader.py
|
abhishek1907/transformer
|
49693c47c6e2550bd85d60604dd8319cd761d816
|
[
"MIT"
] | null | null | null |
data_loader.py
|
abhishek1907/transformer
|
49693c47c6e2550bd85d60604dd8319cd761d816
|
[
"MIT"
] | null | null | null |
data_loader.py
|
abhishek1907/transformer
|
49693c47c6e2550bd85d60604dd8319cd761d816
|
[
"MIT"
] | null | null | null |
from torchtext import data
import spacy
import dill
BOS_WORD = '<s>'
EOS_WORD = '</s>'
BLANK_WORD = "<blank>"
spacy_en = spacy.load('en')
spacy_de = spacy.load('de')
def tokenizer_en(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
def tokenizer_de(text):
return [tok.text for tok in spacy_de.tokenizer(text)]
SRC = data.Field(tokenize=tokenizer_de, pad_token=BLANK_WORD)
TGT = data.Field(tokenize=tokenizer_en, init_token = BOS_WORD, eos_token = EOS_WORD, pad_token=BLANK_WORD)
data_fields = [('German', SRC), ('English', TGT)]
train, val, test = data.TabularDataset.splits(path='./data', train='train.csv', validation='val.csv', test='test.csv', format='csv', fields=data_fields, skip_header=True)
SRC.build_vocab(train.German)
TGT.build_vocab(train.English)
with open("./data/src_vocab.pt", "wb")as f:
dill.dump(SRC, f)
with open("./data/tgt_vocab.pt", "wb")as f:
dill.dump(TGT, f)
| 28.8125
| 170
| 0.715835
|
from torchtext import data
import spacy
import dill
BOS_WORD = '<s>'
EOS_WORD = '</s>'
BLANK_WORD = "<blank>"
spacy_en = spacy.load('en')
spacy_de = spacy.load('de')
def tokenizer_en(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
def tokenizer_de(text):
return [tok.text for tok in spacy_de.tokenizer(text)]
SRC = data.Field(tokenize=tokenizer_de, pad_token=BLANK_WORD)
TGT = data.Field(tokenize=tokenizer_en, init_token = BOS_WORD, eos_token = EOS_WORD, pad_token=BLANK_WORD)
data_fields = [('German', SRC), ('English', TGT)]
train, val, test = data.TabularDataset.splits(path='./data', train='train.csv', validation='val.csv', test='test.csv', format='csv', fields=data_fields, skip_header=True)
SRC.build_vocab(train.German)
TGT.build_vocab(train.English)
with open("./data/src_vocab.pt", "wb")as f:
dill.dump(SRC, f)
with open("./data/tgt_vocab.pt", "wb")as f:
dill.dump(TGT, f)
| true
| true
|
790782d1c21f4f039013903d7f863d124c97cdc5
| 472
|
py
|
Python
|
misc/configuration/config.py
|
gotitinc/code-samples
|
78f4a42b7ea3826d84b91d7303c41da3458d75de
|
[
"Apache-2.0"
] | null | null | null |
misc/configuration/config.py
|
gotitinc/code-samples
|
78f4a42b7ea3826d84b91d7303c41da3458d75de
|
[
"Apache-2.0"
] | null | null | null |
misc/configuration/config.py
|
gotitinc/code-samples
|
78f4a42b7ea3826d84b91d7303c41da3458d75de
|
[
"Apache-2.0"
] | null | null | null |
import os.path
from importlib import import_module
basedir = os.path.abspath(os.path.dirname(__file__))
env = os.getenv('ENVIRONMENT', 'local')
if not env in ['local', 'test']:
config_file = '/path/to/config/directory/' + env + '.py'
if not os.path.isfile(config_file):
env = 'local'
config_name = 'path.to.config.directory.' + env
module = import_module(config_name)
config = module.config
config.MIGRATIONS_PATH = os.path.join(basedir, 'migrations')
| 27.764706
| 60
| 0.709746
|
import os.path
from importlib import import_module
basedir = os.path.abspath(os.path.dirname(__file__))
env = os.getenv('ENVIRONMENT', 'local')
if not env in ['local', 'test']:
config_file = '/path/to/config/directory/' + env + '.py'
if not os.path.isfile(config_file):
env = 'local'
config_name = 'path.to.config.directory.' + env
module = import_module(config_name)
config = module.config
config.MIGRATIONS_PATH = os.path.join(basedir, 'migrations')
| true
| true
|
7907843f53aebe2d388e8a228811273446d764aa
| 2,292
|
py
|
Python
|
tms/breakrule.py
|
marmstr93ng/TimeManagementSystem
|
2f81ea33d9bd9415151215143e7f9ad55704dd95
|
[
"MIT"
] | null | null | null |
tms/breakrule.py
|
marmstr93ng/TimeManagementSystem
|
2f81ea33d9bd9415151215143e7f9ad55704dd95
|
[
"MIT"
] | 12
|
2018-09-27T09:47:21.000Z
|
2021-06-01T22:34:22.000Z
|
tms/breakrule.py
|
marmstr93ng/TimeManagementSystemEmulator
|
2f81ea33d9bd9415151215143e7f9ad55704dd95
|
[
"MIT"
] | null | null | null |
import logging
import configparser
import os
from utils import bool_query
class BreakRule(object):
def __init__(self, settings):
self.settings = settings
self.rules_record = configparser.ConfigParser()
self.rules_record.read("{}/tms/breakrules.ini".format(os.getcwd()))
self.rules = {}
for rule_id in self.rules_record.sections():
self.rules[rule_id] = self.rules_record.get(rule_id, "Description")
def _check_rule_exists(self, rule_id):
if self.rules.get(rule_id, None) is None:
logging.warning("Rule {} doesn't exist".format(rule_id))
return False
else:
logging.debug("Rule {} exists".format(rule_id))
return True
def _update_break_rule(self, rule_id):
self.settings.set("Settings", "BreakRule", rule_id)
with open("{}/tms/settings.ini".format(os.getcwd()), 'w') as configfile:
self.settings.write(configfile)
logging.info("Break rule changed to rule {}".format(self.settings.get("Settings", "BreakRule")))
def print_rules(self):
logging.info("Break Rules: ")
for rule_id in self.rules:
logging.info(' [{}] {}'.format(rule_id, self.rules[rule_id]))
def get_break_rule(self, desired_rule_id=None):
if not desired_rule_id: desired_rule_id = self.settings.get("Settings", "BreakRule")
if self._check_rule_exists(desired_rule_id):
for rule_id in self.rules:
if rule_id == desired_rule_id:
logging.info(' [{}] {}'.format(rule_id, self.rules[desired_rule_id]))
def cmd_update_break_rule(self):
self.print_rules()
selection_query = None
while selection_query is None:
logging.info('Please enter the ID of the rule to be used...')
selection = input()
try:
int(selection)
except ValueError:
logging.warning('WARNING: Please enter a numeric value corresponding to a rule ID.')
else:
if self._check_rule_exists(selection):
selection_query = bool_query('Select Rule "{}" for use?'.format(selection, default="y"))
self._update_break_rule(selection)
| 37.57377
| 108
| 0.616492
|
import logging
import configparser
import os
from utils import bool_query
class BreakRule(object):
def __init__(self, settings):
self.settings = settings
self.rules_record = configparser.ConfigParser()
self.rules_record.read("{}/tms/breakrules.ini".format(os.getcwd()))
self.rules = {}
for rule_id in self.rules_record.sections():
self.rules[rule_id] = self.rules_record.get(rule_id, "Description")
def _check_rule_exists(self, rule_id):
if self.rules.get(rule_id, None) is None:
logging.warning("Rule {} doesn't exist".format(rule_id))
return False
else:
logging.debug("Rule {} exists".format(rule_id))
return True
def _update_break_rule(self, rule_id):
self.settings.set("Settings", "BreakRule", rule_id)
with open("{}/tms/settings.ini".format(os.getcwd()), 'w') as configfile:
self.settings.write(configfile)
logging.info("Break rule changed to rule {}".format(self.settings.get("Settings", "BreakRule")))
def print_rules(self):
logging.info("Break Rules: ")
for rule_id in self.rules:
logging.info(' [{}] {}'.format(rule_id, self.rules[rule_id]))
def get_break_rule(self, desired_rule_id=None):
if not desired_rule_id: desired_rule_id = self.settings.get("Settings", "BreakRule")
if self._check_rule_exists(desired_rule_id):
for rule_id in self.rules:
if rule_id == desired_rule_id:
logging.info(' [{}] {}'.format(rule_id, self.rules[desired_rule_id]))
def cmd_update_break_rule(self):
self.print_rules()
selection_query = None
while selection_query is None:
logging.info('Please enter the ID of the rule to be used...')
selection = input()
try:
int(selection)
except ValueError:
logging.warning('WARNING: Please enter a numeric value corresponding to a rule ID.')
else:
if self._check_rule_exists(selection):
selection_query = bool_query('Select Rule "{}" for use?'.format(selection, default="y"))
self._update_break_rule(selection)
| true
| true
|
7907846247f0f03b6ff0972b2b828280e46f807c
| 928
|
py
|
Python
|
src/client_py/olist.py
|
epmcj/nextflix
|
de15f0a63fe8906a0417da675b9a1c408f71bc79
|
[
"MIT"
] | null | null | null |
src/client_py/olist.py
|
epmcj/nextflix
|
de15f0a63fe8906a0417da675b9a1c408f71bc79
|
[
"MIT"
] | null | null | null |
src/client_py/olist.py
|
epmcj/nextflix
|
de15f0a63fe8906a0417da675b9a1c408f71bc79
|
[
"MIT"
] | null | null | null |
class OrderedList:
def __init__(self, unique=False):
self.list = []
self.__unique = unique
def add(self, value):
i = 0
while (i < len(self.list)) and (self.list[i] < value):
i += 1
if self.__unique:
if len(self.list) == i or self.list[i] != value:
self.list.insert(i, value)
else:
self.list.insert(i, value)
def is_empty(self):
return (len(self.list) == 0)
def remove_min(self):
if len(self.list) == 0:
return None
return self.list.pop(0)
def remove_max(self):
if len(self.list) == 0:
return None
return self.list.pop()
def get_min(self):
if len(self.list) == 0:
return None
return self.list[0]
def get_max(self):
if len(self.list) == 0:
return None
return self.list[-1]
| 25.081081
| 62
| 0.501078
|
class OrderedList:
def __init__(self, unique=False):
self.list = []
self.__unique = unique
def add(self, value):
i = 0
while (i < len(self.list)) and (self.list[i] < value):
i += 1
if self.__unique:
if len(self.list) == i or self.list[i] != value:
self.list.insert(i, value)
else:
self.list.insert(i, value)
def is_empty(self):
return (len(self.list) == 0)
def remove_min(self):
if len(self.list) == 0:
return None
return self.list.pop(0)
def remove_max(self):
if len(self.list) == 0:
return None
return self.list.pop()
def get_min(self):
if len(self.list) == 0:
return None
return self.list[0]
def get_max(self):
if len(self.list) == 0:
return None
return self.list[-1]
| true
| true
|
7907846fe820f0323c6f7d08edf0ea83ee22e584
| 3,020
|
py
|
Python
|
vendor-local/src/django-extensions/django_extensions/db/fields/json.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 22
|
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
vendor-local/src/django-extensions/django_extensions/db/fields/json.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 267
|
2015-01-01T00:18:57.000Z
|
2015-10-14T00:01:13.000Z
|
vendor-local/src/django-extensions/django_extensions/db/fields/json.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 13
|
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
"""
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
import datetime
from decimal import Decimal
from django.db import models
from django.conf import settings
from django.utils import simplejson
from django.utils.encoding import smart_unicode
class JSONEncoder(simplejson.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime.datetime):
assert settings.TIME_ZONE == 'UTC'
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return simplejson.JSONEncoder.default(self, obj)
def dumps(value):
return JSONEncoder().encode(value)
def loads(txt):
value = simplejson.loads(
txt,
parse_float=Decimal,
encoding=settings.DEFAULT_CHARSET
)
return value
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
class JSONList(list):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONField(models.TextField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
# Used so to_python() is called
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = '{}'
models.TextField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if value is None or value == '':
return {}
elif isinstance(value, basestring):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
else:
return JSONList(res)
else:
return value
def get_db_prep_save(self, value, connection):
"""Convert our JSON object to a string before we save"""
if not isinstance(value, (list, dict)):
return super(JSONField, self).get_db_prep_save("", connection=connection)
else:
return super(JSONField, self).get_db_prep_save(dumps(value),
connection=connection)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
| 29.607843
| 85
| 0.636755
|
import datetime
from decimal import Decimal
from django.db import models
from django.conf import settings
from django.utils import simplejson
from django.utils.encoding import smart_unicode
class JSONEncoder(simplejson.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime.datetime):
assert settings.TIME_ZONE == 'UTC'
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return simplejson.JSONEncoder.default(self, obj)
def dumps(value):
return JSONEncoder().encode(value)
def loads(txt):
value = simplejson.loads(
txt,
parse_float=Decimal,
encoding=settings.DEFAULT_CHARSET
)
return value
class JSONDict(dict):
def __repr__(self):
return dumps(self)
class JSONList(list):
def __repr__(self):
return dumps(self)
class JSONField(models.TextField):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = '{}'
models.TextField.__init__(self, *args, **kwargs)
def to_python(self, value):
if value is None or value == '':
return {}
elif isinstance(value, basestring):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
else:
return JSONList(res)
else:
return value
def get_db_prep_save(self, value, connection):
if not isinstance(value, (list, dict)):
return super(JSONField, self).get_db_prep_save("", connection=connection)
else:
return super(JSONField, self).get_db_prep_save(dumps(value),
connection=connection)
def south_field_triple(self):
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
| true
| true
|
790784fd50a217bc4c8ef7e4d9578ad17a1edc59
| 4,328
|
py
|
Python
|
show/plugins/mlnx.py
|
chaoskao/sonic-utilities
|
47a9a0f56db95265c15c74c4c8dc6a3998bfd2d3
|
[
"Apache-2.0"
] | 1
|
2021-02-03T06:28:38.000Z
|
2021-02-03T06:28:38.000Z
|
show/plugins/mlnx.py
|
chaoskao/sonic-utilities
|
47a9a0f56db95265c15c74c4c8dc6a3998bfd2d3
|
[
"Apache-2.0"
] | 5
|
2020-02-27T09:19:52.000Z
|
2021-05-24T16:04:51.000Z
|
show/plugins/mlnx.py
|
chaoskao/sonic-utilities
|
47a9a0f56db95265c15c74c4c8dc6a3998bfd2d3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# main.py
#
# Specific command-line utility for Mellanox platform
#
try:
import sys
import subprocess
import click
import xml.etree.ElementTree as ET
from sonic_py_common import device_info
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
ENV_VARIABLE_SX_SNIFFER = 'SX_SNIFFER_ENABLE'
CONTAINER_NAME = 'syncd'
SNIFFER_CONF_FILE = '/etc/supervisor/conf.d/mlnx_sniffer.conf'
SNIFFER_CONF_FILE_IN_CONTAINER = CONTAINER_NAME + ':' + SNIFFER_CONF_FILE
TMP_SNIFFER_CONF_FILE = '/tmp/tmp.conf'
HWSKU_PATH = '/usr/share/sonic/hwsku/'
SAI_PROFILE_DELIMITER = '='
# run command
def run_command(command, display_cmd=False, ignore_error=False, print_to_console=True):
"""Run bash command and print output to stdout
"""
if display_cmd == True:
click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green'))
proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
(out, err) = proc.communicate()
if len(out) > 0 and print_to_console:
click.echo(out)
if proc.returncode != 0 and not ignore_error:
sys.exit(proc.returncode)
return out, err
# 'mlnx' group
@click.group()
def mlnx():
""" Show Mellanox platform information """
pass
# get current status of sniffer from conf file
def sniffer_status_get(env_variable_name):
enabled = False
command = "docker exec {} bash -c 'touch {}'".format(CONTAINER_NAME, SNIFFER_CONF_FILE)
run_command(command)
command = 'docker cp {} {}'.format(SNIFFER_CONF_FILE_IN_CONTAINER, TMP_SNIFFER_CONF_FILE)
run_command(command)
conf_file = open(TMP_SNIFFER_CONF_FILE, 'r')
for env_variable_string in conf_file:
if env_variable_string.find(env_variable_name) >= 0:
enabled = True
break
conf_file.close()
command = 'rm -rf {}'.format(TMP_SNIFFER_CONF_FILE)
run_command(command)
return enabled
def is_issu_status_enabled():
""" This function parses the SAI XML profile used for mlnx to
get whether ISSU is enabled or disabled
@return: True/False
"""
# ISSU disabled if node in XML config wasn't found
issu_enabled = False
# Get the SAI XML path from sai.profile
sai_profile_path = '/{}/sai.profile'.format(HWSKU_PATH)
DOCKER_CAT_COMMAND = 'docker exec {container_name} cat {path}'
command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_profile_path)
sai_profile_content, _ = run_command(command, print_to_console=False)
sai_profile_kvs = {}
for line in sai_profile_content.split('\n'):
if not SAI_PROFILE_DELIMITER in line:
continue
key, value = line.split(SAI_PROFILE_DELIMITER)
sai_profile_kvs[key] = value.strip()
try:
sai_xml_path = sai_profile_kvs['SAI_INIT_CONFIG_FILE']
except KeyError:
click.echo("Failed to get SAI XML from sai profile", err=True)
sys.exit(1)
# Get ISSU from SAI XML
command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_xml_path)
sai_xml_content, _ = run_command(command, print_to_console=False)
try:
root = ET.fromstring(sai_xml_content)
except ET.ParseError:
click.echo("Failed to parse SAI xml", err=True)
sys.exit(1)
el = root.find('platform_info').find('issu-enabled')
if el is not None:
issu_enabled = int(el.text) == 1
return issu_enabled
@mlnx.command('sniffer')
def sniffer_status():
""" Show sniffer status """
components = ['sdk']
env_variable_strings = [ENV_VARIABLE_SX_SNIFFER]
for index in range(len(components)):
enabled = sniffer_status_get(env_variable_strings[index])
if enabled is True:
click.echo(components[index] + " sniffer is enabled")
else:
click.echo(components[index] + " sniffer is disabled")
@mlnx.command('issu')
def issu_status():
""" Show ISSU status """
res = is_issu_status_enabled()
click.echo('ISSU is enabled' if res else 'ISSU is disabled')
def register(cli):
version_info = device_info.get_sonic_version_info()
if (version_info and version_info.get('asic_type') == 'mellanox'):
cli.commands['platform'].add_command(mlnx)
| 29.643836
| 98
| 0.69085
|
try:
import sys
import subprocess
import click
import xml.etree.ElementTree as ET
from sonic_py_common import device_info
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
ENV_VARIABLE_SX_SNIFFER = 'SX_SNIFFER_ENABLE'
CONTAINER_NAME = 'syncd'
SNIFFER_CONF_FILE = '/etc/supervisor/conf.d/mlnx_sniffer.conf'
SNIFFER_CONF_FILE_IN_CONTAINER = CONTAINER_NAME + ':' + SNIFFER_CONF_FILE
TMP_SNIFFER_CONF_FILE = '/tmp/tmp.conf'
HWSKU_PATH = '/usr/share/sonic/hwsku/'
SAI_PROFILE_DELIMITER = '='
def run_command(command, display_cmd=False, ignore_error=False, print_to_console=True):
if display_cmd == True:
click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green'))
proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
(out, err) = proc.communicate()
if len(out) > 0 and print_to_console:
click.echo(out)
if proc.returncode != 0 and not ignore_error:
sys.exit(proc.returncode)
return out, err
@click.group()
def mlnx():
pass
def sniffer_status_get(env_variable_name):
enabled = False
command = "docker exec {} bash -c 'touch {}'".format(CONTAINER_NAME, SNIFFER_CONF_FILE)
run_command(command)
command = 'docker cp {} {}'.format(SNIFFER_CONF_FILE_IN_CONTAINER, TMP_SNIFFER_CONF_FILE)
run_command(command)
conf_file = open(TMP_SNIFFER_CONF_FILE, 'r')
for env_variable_string in conf_file:
if env_variable_string.find(env_variable_name) >= 0:
enabled = True
break
conf_file.close()
command = 'rm -rf {}'.format(TMP_SNIFFER_CONF_FILE)
run_command(command)
return enabled
def is_issu_status_enabled():
issu_enabled = False
# Get the SAI XML path from sai.profile
sai_profile_path = '/{}/sai.profile'.format(HWSKU_PATH)
DOCKER_CAT_COMMAND = 'docker exec {container_name} cat {path}'
command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_profile_path)
sai_profile_content, _ = run_command(command, print_to_console=False)
sai_profile_kvs = {}
for line in sai_profile_content.split('\n'):
if not SAI_PROFILE_DELIMITER in line:
continue
key, value = line.split(SAI_PROFILE_DELIMITER)
sai_profile_kvs[key] = value.strip()
try:
sai_xml_path = sai_profile_kvs['SAI_INIT_CONFIG_FILE']
except KeyError:
click.echo("Failed to get SAI XML from sai profile", err=True)
sys.exit(1)
# Get ISSU from SAI XML
command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_xml_path)
sai_xml_content, _ = run_command(command, print_to_console=False)
try:
root = ET.fromstring(sai_xml_content)
except ET.ParseError:
click.echo("Failed to parse SAI xml", err=True)
sys.exit(1)
el = root.find('platform_info').find('issu-enabled')
if el is not None:
issu_enabled = int(el.text) == 1
return issu_enabled
@mlnx.command('sniffer')
def sniffer_status():
components = ['sdk']
env_variable_strings = [ENV_VARIABLE_SX_SNIFFER]
for index in range(len(components)):
enabled = sniffer_status_get(env_variable_strings[index])
if enabled is True:
click.echo(components[index] + " sniffer is enabled")
else:
click.echo(components[index] + " sniffer is disabled")
@mlnx.command('issu')
def issu_status():
res = is_issu_status_enabled()
click.echo('ISSU is enabled' if res else 'ISSU is disabled')
def register(cli):
version_info = device_info.get_sonic_version_info()
if (version_info and version_info.get('asic_type') == 'mellanox'):
cli.commands['platform'].add_command(mlnx)
| true
| true
|
79078562da997314e044513a049f2fa405083a7b
| 1,216
|
py
|
Python
|
test/test_del_contact_from_group.py
|
vatanov/python_training
|
884a6fc08a7d2130e45dcf7850b2ff3a30f50bf7
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_contact_from_group.py
|
vatanov/python_training
|
884a6fc08a7d2130e45dcf7850b2ff3a30f50bf7
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_contact_from_group.py
|
vatanov/python_training
|
884a6fc08a7d2130e45dcf7850b2ff3a30f50bf7
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
from model.group import Group
import random
def test_add_contact_in_group(app, db):
if app.contact.count() == 0:
app.contact.create_new(Contact(firstname="Contact for deletion", middlename="some middlename", lastname="some last name"))
if len(app.group.get_group_list()) == 0:
app.group.create(Group(name="Group for deletion"))
group_id = app.group.get_random_group_id()
contacts_in_group = app.contact.get_contacts_in_group(group_id)
if len(contacts_in_group) > 0:
contact = random.choice(contacts_in_group)
app.contact.remove_from_group(contact.id, group_id)
contact_ui = app.contact.get_contacts_in_group(group_id)
contact_db = db.get_contacts_in_group(group_id)
print()
print(contact_db)
print(contact_ui)
assert contact_db == contact_ui
else:
True
#
# contact = app.contact.get_contacts_in_group(group_id)
#
# contacts = db.get_contact_list()
#
# contact = random.choice(contacts)
# app.contact.add_contact_to_group(contact.id, group_id)
#
# contact_db = db.get_contacts_in_group(group_id)
# assert contact_db == contact_ui
| 36.848485
| 130
| 0.697368
|
from model.contact import Contact
from model.group import Group
import random
def test_add_contact_in_group(app, db):
if app.contact.count() == 0:
app.contact.create_new(Contact(firstname="Contact for deletion", middlename="some middlename", lastname="some last name"))
if len(app.group.get_group_list()) == 0:
app.group.create(Group(name="Group for deletion"))
group_id = app.group.get_random_group_id()
contacts_in_group = app.contact.get_contacts_in_group(group_id)
if len(contacts_in_group) > 0:
contact = random.choice(contacts_in_group)
app.contact.remove_from_group(contact.id, group_id)
contact_ui = app.contact.get_contacts_in_group(group_id)
contact_db = db.get_contacts_in_group(group_id)
print()
print(contact_db)
print(contact_ui)
assert contact_db == contact_ui
else:
True
| true
| true
|
7907866e030247e6434c8bb6a162224af0e779c6
| 289
|
py
|
Python
|
efax/_src/samplable.py
|
NeilGirdhar/efax
|
3a0f1ea3fafb456b024137dc5a20a9e7f9806a9f
|
[
"MIT"
] | 34
|
2020-03-24T06:21:08.000Z
|
2022-03-19T04:48:17.000Z
|
efax/_src/samplable.py
|
NeilGirdhar/efax
|
3a0f1ea3fafb456b024137dc5a20a9e7f9806a9f
|
[
"MIT"
] | 8
|
2020-03-30T11:27:48.000Z
|
2021-07-05T06:10:06.000Z
|
efax/_src/samplable.py
|
NeilGirdhar/efax
|
3a0f1ea3fafb456b024137dc5a20a9e7f9806a9f
|
[
"MIT"
] | 1
|
2022-03-17T01:34:07.000Z
|
2022-03-17T01:34:07.000Z
|
from typing import Optional
from tjax import Array, Generator, Shape
from .parametrization import Parametrization
__all__ = ['Samplable']
class Samplable(Parametrization):
def sample(self, rng: Generator, shape: Optional[Shape] = None) -> Array:
raise NotImplementedError
| 22.230769
| 77
| 0.754325
|
from typing import Optional
from tjax import Array, Generator, Shape
from .parametrization import Parametrization
__all__ = ['Samplable']
class Samplable(Parametrization):
def sample(self, rng: Generator, shape: Optional[Shape] = None) -> Array:
raise NotImplementedError
| true
| true
|
790786c0bb0eddc0e2979ef07022a6b74817db71
| 2,443
|
py
|
Python
|
classiPi.py
|
yagyapandeya/Sound-classification-on-Raspberry-Pi-with-Tensorflow
|
47450ade902c3d7127901565cc2d74d5e5490854
|
[
"MIT"
] | 89
|
2017-11-14T16:02:10.000Z
|
2022-01-31T03:55:48.000Z
|
classiPi.py
|
yagyapandeya/Sound-classification-on-Raspberry-Pi-with-Tensorflow
|
47450ade902c3d7127901565cc2d74d5e5490854
|
[
"MIT"
] | 7
|
2018-06-24T12:36:16.000Z
|
2021-08-18T07:35:58.000Z
|
classiPi.py
|
yagyapandeya/Sound-classification-on-Raspberry-Pi-with-Tensorflow
|
47450ade902c3d7127901565cc2d74d5e5490854
|
[
"MIT"
] | 33
|
2017-11-17T18:52:48.000Z
|
2022-01-05T12:53:41.000Z
|
import glob
import os
import librosa
import numpy as np
import tensorflow as tf
import sounddevice
from sklearn.preprocessing import StandardScaler
duration = 0.1 # seconds
sample_rate=44100
'''0 = air_conditioner
1 = car_horn
2 = children_playing
3 = dog_bark
4 = drilling
5 = engine_idling
6 = gun_shot
7 = jackhammer
8 = siren
9 = street_music'''
def extract_features():
X = sounddevice.rec(int(duration * sample_rate), samplerate=sample_rate, channels=1)
sounddevice.wait()
X= np.squeeze(X)
stft = np.abs(librosa.stft(X))
mfccs = np.array(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=8).T)
chroma = np.array(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T)
mel = np.array(librosa.feature.melspectrogram(X, sr=sample_rate).T)
contrast = np.array(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T)
tonnetz = np.array(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T)
ext_features = np.hstack([mfccs,chroma,mel,contrast,tonnetz])
features = np.vstack([features,ext_features])
return features
model_path = "model"
fit_params = np.load('fit_params.npy')
sc = StandardScaler()
sc.fit(fit_params)
n_dim = 161
n_classes = 10
n_hidden_units_one = 256
n_hidden_units_two = 256
sd = 1 / np.sqrt(n_dim)
learning_rate = 0.01
X = tf.placeholder(tf.float32,[None,n_dim])
Y = tf.placeholder(tf.float32,[None,n_classes])
W_1 = tf.Variable(tf.random_normal([n_dim,n_hidden_units_one], mean = 0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean = 0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X,W_1) + b_1)
W_2 = tf.Variable(tf.random_normal([n_hidden_units_one,n_hidden_units_two], mean = 0, stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean = 0, stddev=sd))
h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2)
W = tf.Variable(tf.random_normal([n_hidden_units_two,n_classes], mean = 0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))
y_ = tf.nn.softmax(tf.matmul(h_2,W) + b)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
y_true, y_pred = None, None
with tf.Session() as sess:
saver.restore(sess, model_path)
print "Model loaded"
sess.run(tf.global_variables())
while 1:
feat = extract_features()
feat = sc.transform(feat)
y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: feat})
print y_pred
| 28.08046
| 97
| 0.715104
|
import glob
import os
import librosa
import numpy as np
import tensorflow as tf
import sounddevice
from sklearn.preprocessing import StandardScaler
duration = 0.1
sample_rate=44100
'''0 = air_conditioner
1 = car_horn
2 = children_playing
3 = dog_bark
4 = drilling
5 = engine_idling
6 = gun_shot
7 = jackhammer
8 = siren
9 = street_music'''
def extract_features():
X = sounddevice.rec(int(duration * sample_rate), samplerate=sample_rate, channels=1)
sounddevice.wait()
X= np.squeeze(X)
stft = np.abs(librosa.stft(X))
mfccs = np.array(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=8).T)
chroma = np.array(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T)
mel = np.array(librosa.feature.melspectrogram(X, sr=sample_rate).T)
contrast = np.array(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T)
tonnetz = np.array(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T)
ext_features = np.hstack([mfccs,chroma,mel,contrast,tonnetz])
features = np.vstack([features,ext_features])
return features
model_path = "model"
fit_params = np.load('fit_params.npy')
sc = StandardScaler()
sc.fit(fit_params)
n_dim = 161
n_classes = 10
n_hidden_units_one = 256
n_hidden_units_two = 256
sd = 1 / np.sqrt(n_dim)
learning_rate = 0.01
X = tf.placeholder(tf.float32,[None,n_dim])
Y = tf.placeholder(tf.float32,[None,n_classes])
W_1 = tf.Variable(tf.random_normal([n_dim,n_hidden_units_one], mean = 0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean = 0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X,W_1) + b_1)
W_2 = tf.Variable(tf.random_normal([n_hidden_units_one,n_hidden_units_two], mean = 0, stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean = 0, stddev=sd))
h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2)
W = tf.Variable(tf.random_normal([n_hidden_units_two,n_classes], mean = 0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))
y_ = tf.nn.softmax(tf.matmul(h_2,W) + b)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
y_true, y_pred = None, None
with tf.Session() as sess:
saver.restore(sess, model_path)
print "Model loaded"
sess.run(tf.global_variables())
while 1:
feat = extract_features()
feat = sc.transform(feat)
y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: feat})
print y_pred
| false
| true
|
7907873f6c2d0369175c3b733e8fd18def3435ce
| 2,320
|
py
|
Python
|
integration_tests/emukit/quadrature/test_vanilla_bq_loop.py
|
alexgessner/emukit
|
355e26bb30edd772a81af2a1267c569d7f446d42
|
[
"Apache-2.0"
] | 6
|
2019-06-02T21:23:27.000Z
|
2020-02-17T09:46:30.000Z
|
integration_tests/emukit/quadrature/test_vanilla_bq_loop.py
|
Tony-Chiong/emukit
|
a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a
|
[
"Apache-2.0"
] | 4
|
2019-05-17T13:30:21.000Z
|
2019-06-21T13:49:19.000Z
|
integration_tests/emukit/quadrature/test_vanilla_bq_loop.py
|
Tony-Chiong/emukit
|
a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a
|
[
"Apache-2.0"
] | 1
|
2020-01-12T19:50:44.000Z
|
2020-01-12T19:50:44.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import GPy
from emukit.quadrature.methods.vanilla_bq import VanillaBayesianQuadrature
from emukit.quadrature.loop.quadrature_loop import VanillaBayesianQuadratureLoop
from emukit.core.loop.user_function import UserFunctionWrapper
from emukit.model_wrappers.gpy_quadrature_wrappers import QuadratureRBF, RBFGPy, BaseGaussianProcessGPy
from numpy.testing import assert_array_equal
def func(x):
return np.ones((x.shape[0], 1))
def test_vanilla_bq_loop():
init_size = 5
x_init = np.random.rand(init_size, 2)
y_init = np.random.rand(init_size, 1)
bounds = [(-1, 1), (0, 1)]
gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=GPy.kern.RBF(input_dim=x_init.shape[1],
lengthscale=1., variance=1.))
emukit_qrbf = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=bounds)
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
num_iter = 5
emukit_loop.run_loop(user_function=UserFunctionWrapper(func), stopping_condition=num_iter)
assert emukit_loop.loop_state.X.shape[0] == num_iter + init_size
assert emukit_loop.loop_state.Y.shape[0] == num_iter + init_size
def test_vanilla_bq_loop_initial_state():
x_init = np.random.rand(5, 2)
y_init = np.random.rand(5, 1)
bounds = [(-1, 1), (0, 1)]
gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=GPy.kern.RBF(input_dim=x_init.shape[1],
lengthscale=1., variance=1.))
emukit_qrbf = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=bounds)
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
assert_array_equal(emukit_loop.loop_state.X, x_init)
assert_array_equal(emukit_loop.loop_state.Y, y_init)
assert emukit_loop.loop_state.iteration == 0
| 41.428571
| 109
| 0.721121
|
import numpy as np
import GPy
from emukit.quadrature.methods.vanilla_bq import VanillaBayesianQuadrature
from emukit.quadrature.loop.quadrature_loop import VanillaBayesianQuadratureLoop
from emukit.core.loop.user_function import UserFunctionWrapper
from emukit.model_wrappers.gpy_quadrature_wrappers import QuadratureRBF, RBFGPy, BaseGaussianProcessGPy
from numpy.testing import assert_array_equal
def func(x):
return np.ones((x.shape[0], 1))
def test_vanilla_bq_loop():
init_size = 5
x_init = np.random.rand(init_size, 2)
y_init = np.random.rand(init_size, 1)
bounds = [(-1, 1), (0, 1)]
gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=GPy.kern.RBF(input_dim=x_init.shape[1],
lengthscale=1., variance=1.))
emukit_qrbf = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=bounds)
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
num_iter = 5
emukit_loop.run_loop(user_function=UserFunctionWrapper(func), stopping_condition=num_iter)
assert emukit_loop.loop_state.X.shape[0] == num_iter + init_size
assert emukit_loop.loop_state.Y.shape[0] == num_iter + init_size
def test_vanilla_bq_loop_initial_state():
x_init = np.random.rand(5, 2)
y_init = np.random.rand(5, 1)
bounds = [(-1, 1), (0, 1)]
gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=GPy.kern.RBF(input_dim=x_init.shape[1],
lengthscale=1., variance=1.))
emukit_qrbf = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=bounds)
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
assert_array_equal(emukit_loop.loop_state.X, x_init)
assert_array_equal(emukit_loop.loop_state.Y, y_init)
assert emukit_loop.loop_state.iteration == 0
| true
| true
|
79078834c7c05e76f0619d7c05cadcd8e87dba2a
| 6,127
|
py
|
Python
|
awrams/config/system/default.py
|
kaamilah/awra_cms
|
bbbb85ad8864e2c835926439acc1e6dabb137a97
|
[
"NetCDF"
] | 20
|
2016-12-01T03:13:50.000Z
|
2021-12-02T23:43:38.000Z
|
awrams/config/system/default.py
|
kaamilah/awra_cms
|
bbbb85ad8864e2c835926439acc1e6dabb137a97
|
[
"NetCDF"
] | 2
|
2018-02-05T03:42:11.000Z
|
2018-04-27T05:49:44.000Z
|
awrams/config/system/default.py
|
kaamilah/awra_cms
|
bbbb85ad8864e2c835926439acc1e6dabb137a97
|
[
"NetCDF"
] | 22
|
2016-12-13T19:57:43.000Z
|
2021-12-08T02:52:19.000Z
|
from os.path import join
from awrams.utils.metatypes import objectify
import os
from logging import FATAL,CRITICAL,ERROR,WARNING,INFO,DEBUG
from awrams.utils.awrams_log import APPEND_FILE,TIMESTAMPED_FILE,ROTATED_SIZED_FILE,DAILY_ROTATED_FILE
from awrams.utils import config_manager
AWRAMS_BASE_PATH = str(config_manager.get_awrams_base_path())
BASE_DATA_PATH = str(config_manager.get_awrams_data_path())
# Mapping for /data/cwd_awra_data/awra_test_inputs/climate*
FORCING_MAP_AWAP = {
'tmin': ('temp_min_day/temp_min_day*.nc', 'temp_min_day'),
'tmax': ('temp_max_day/temp_max_day*.nc', 'temp_max_day'),
'precip': ('rain_day/rain_day*.nc', 'rain_day'),
'solar': ('solar_exposure_day/solar_exposure_day*.nc', 'solar_exposure_day'),
'wind': ('wind/wind*.nc', 'wind')
}
config_options = {
'CHUNKSIZES': {
'TIME': 32,
'SPATIAL': 32
},
'LOGGER_SETTINGS': {
'APP_NAME': 'awrams',
'LOG_FORMAT': '%(asctime)s %(levelname)s %(message)s',
'LOG_TO_STDOUT': True,
'LOG_TO_STDERR': False,
'LOG_TO_FILE': False,
# File logging options
'FILE_LOGGING_MODE': APPEND_FILE,
'LOGFILE_BASE': os.path.join(AWRAMS_BASE_PATH,'awrams'),
#
'LOG_LEVEL': INFO,
'DEBUG_MODULES': [],
# The following are the default values which affect DAILY_ROTATED_FILE and
# ROTATED_SIZED_FILE modes only
# If you select one of these FILE_LOGGING_MODEs you can then customise how
# many or what size the files are
# ROTATED_SIZED_FILE mode is affected by these params:
# How many files to rotate:
'ROTATED_SIZED_FILES': 10,
#Sze of the file before it rotates:
'ROTATED_SIZED_BYTES': 20000,
# DAILY_ROTATED_FILE mode is affected by:
# How many files to rotate(on a daily basis) so 7 is a week's worth of daily
# files
'DAILY_ROTATED_FILES': 7
}
}
config_options = objectify(config_options)
def get_settings():
TEST_DATA_PATH = join(BASE_DATA_PATH, 'test_data')
TRAINING_DATA_PATH = join(BASE_DATA_PATH, 'training')
benchmark_sites_file = join(BASE_DATA_PATH, 'benchmarking/SiteLocationsWithUniqueID.csv')
SHAPEFILES = join(BASE_DATA_PATH, 'spatial/shapefiles')
CLIMATOLOGIES = {
'AWAP_DAILY': {
'solar': (join(BASE_DATA_PATH, 'climatology/climatology_daily_solar_exposure_day.nc'), 'solar_exposure_day')
}
}
if os.name == 'nt':
COMPILER = 'CLANG_WINDOWS'
else:
COMPILER = 'GCC_DEFAULT'
settings = {
'DATA_PATHS': {
'AWRAMS_BASE': AWRAMS_BASE_PATH,
'BASE_DATA': BASE_DATA_PATH,
'MASKS': join(BASE_DATA_PATH, 'spatial/masks'),
'SHAPEFILES': SHAPEFILES,
'CATCHMENT_SHAPEFILE': join(SHAPEFILES,'Final_list_all_attributes.shp'),
'TEST_DATA': TEST_DATA_PATH,
'TRAINING_DATA': TRAINING_DATA_PATH,
'MODEL_DATA': join(BASE_DATA_PATH, 'model_data'),
'CODE': join(AWRAMS_BASE_PATH, 'code'),
'ASCAT': {
'TRAINING': join(TRAINING_DATA_PATH, 'benchmarking/ascat/'),
'TEST': join(TEST_DATA_PATH, 'benchmarking/ascat/')
},
'BUILD_CACHE': join(AWRAMS_BASE_PATH, 'build_cache')
},
'SIMULATION': {
'SPATIAL_CHUNK': 128,
'TIME_CHUNK': 32,
'MIN_CELLS_PER_WORKER': 32,
'TASK_BUFFERS': 3
},
# +++ Should move to external file so datasets can be shared between profiles
'CLIMATE_DATASETS': {
'TRAINING': {
'FORCING': {
'PATH': join(TRAINING_DATA_PATH, 'climate/bom_awap'),
'MAPPING': FORCING_MAP_AWAP
},
'CLIMATOLOGY': CLIMATOLOGIES['AWAP_DAILY']
},
'TESTING': {
'FORCING': {
'PATH': join(TEST_DATA_PATH, 'simulation/climate'),
'MAPPING': FORCING_MAP_AWAP
},
'CLIMATOLOGY': CLIMATOLOGIES['AWAP_DAILY']
}
},
'BENCHMARKING': {
'BENCHMARK_SITES': benchmark_sites_file,
'MONTHLY_REJECTION_THRESHOLD': 15,
'ANNUAL_REJECTION_THRESHOLD': 6,
'SM_MODEL_VARNAMES': ['s0_avg', 'ss_avg', 'sd_avg'],
'SM_MODEL_LAYERS': {'s0_avg': 100., 'ss_avg': 900., 'sd_avg':
5000.},
'SM_OBSERVED_LAYERS': ('profile','top','shallow','middle','deep'),
'FIG_SIZE': (14,6),
'CELLSIZE': 0.05,
'LANDSCAPE_VERSION_EQUIVALENCE': {"5":"45","5R":"45","5Q":"45"}
},
# Preferred compiler; referenced in model settings
'COMPILER': COMPILER,
'IO_SETTINGS' : {
'CHUNKSIZES': config_options['CHUNKSIZES'],
'DEFAULT_CHUNKSIZE': (config_options['CHUNKSIZES']['TIME'], \
config_options['CHUNKSIZES']['SPATIAL'], \
config_options['CHUNKSIZES']['SPATIAL']),
'VAR_CHUNK_CACHE_SIZE': 2**20, # =1048576 ie 1Mb
'VAR_CHUNK_CACHE_NELEMS': 1009, # prime number
'VAR_CHUNK_CACHE_PREEMPTION': 0.75, # 1 for read or write only
# '_fallthrough' will attempt to use _h5py, then netCDF4 if that fails
'DB_OPEN_WITH': '_fallthrough', #"_h5py" OR "_nc"
'MAX_FILES_PER_SFM': 32, # Maximum files allowed open in each SplitFileManager.
# Maximum chunksize to read during extraction (in bytes)
'MAX_EXTRACT_CHUNK': 2**24
},
'LOGGER_SETTINGS': config_options['LOGGER_SETTINGS'],
# Used in extents.get_default_extent
# Consider creating extents objects explicitly from files rather than using this method.
# It exists for backwards compatibility, and will be deprecated
'DEFAULT_MASK_FILE': 'web_mask_v5.h5'
}
return objectify(settings)
| 36.254438
| 120
| 0.593602
|
from os.path import join
from awrams.utils.metatypes import objectify
import os
from logging import FATAL,CRITICAL,ERROR,WARNING,INFO,DEBUG
from awrams.utils.awrams_log import APPEND_FILE,TIMESTAMPED_FILE,ROTATED_SIZED_FILE,DAILY_ROTATED_FILE
from awrams.utils import config_manager
AWRAMS_BASE_PATH = str(config_manager.get_awrams_base_path())
BASE_DATA_PATH = str(config_manager.get_awrams_data_path())
FORCING_MAP_AWAP = {
'tmin': ('temp_min_day/temp_min_day*.nc', 'temp_min_day'),
'tmax': ('temp_max_day/temp_max_day*.nc', 'temp_max_day'),
'precip': ('rain_day/rain_day*.nc', 'rain_day'),
'solar': ('solar_exposure_day/solar_exposure_day*.nc', 'solar_exposure_day'),
'wind': ('wind/wind*.nc', 'wind')
}
config_options = {
'CHUNKSIZES': {
'TIME': 32,
'SPATIAL': 32
},
'LOGGER_SETTINGS': {
'APP_NAME': 'awrams',
'LOG_FORMAT': '%(asctime)s %(levelname)s %(message)s',
'LOG_TO_STDOUT': True,
'LOG_TO_STDERR': False,
'LOG_TO_FILE': False,
'FILE_LOGGING_MODE': APPEND_FILE,
'LOGFILE_BASE': os.path.join(AWRAMS_BASE_PATH,'awrams'),
'LOG_LEVEL': INFO,
'DEBUG_MODULES': [],
'ROTATED_SIZED_FILES': 10,
'ROTATED_SIZED_BYTES': 20000,
# files
'DAILY_ROTATED_FILES': 7
}
}
config_options = objectify(config_options)
def get_settings():
TEST_DATA_PATH = join(BASE_DATA_PATH, 'test_data')
TRAINING_DATA_PATH = join(BASE_DATA_PATH, 'training')
benchmark_sites_file = join(BASE_DATA_PATH, 'benchmarking/SiteLocationsWithUniqueID.csv')
SHAPEFILES = join(BASE_DATA_PATH, 'spatial/shapefiles')
CLIMATOLOGIES = {
'AWAP_DAILY': {
'solar': (join(BASE_DATA_PATH, 'climatology/climatology_daily_solar_exposure_day.nc'), 'solar_exposure_day')
}
}
if os.name == 'nt':
COMPILER = 'CLANG_WINDOWS'
else:
COMPILER = 'GCC_DEFAULT'
settings = {
'DATA_PATHS': {
'AWRAMS_BASE': AWRAMS_BASE_PATH,
'BASE_DATA': BASE_DATA_PATH,
'MASKS': join(BASE_DATA_PATH, 'spatial/masks'),
'SHAPEFILES': SHAPEFILES,
'CATCHMENT_SHAPEFILE': join(SHAPEFILES,'Final_list_all_attributes.shp'),
'TEST_DATA': TEST_DATA_PATH,
'TRAINING_DATA': TRAINING_DATA_PATH,
'MODEL_DATA': join(BASE_DATA_PATH, 'model_data'),
'CODE': join(AWRAMS_BASE_PATH, 'code'),
'ASCAT': {
'TRAINING': join(TRAINING_DATA_PATH, 'benchmarking/ascat/'),
'TEST': join(TEST_DATA_PATH, 'benchmarking/ascat/')
},
'BUILD_CACHE': join(AWRAMS_BASE_PATH, 'build_cache')
},
'SIMULATION': {
'SPATIAL_CHUNK': 128,
'TIME_CHUNK': 32,
'MIN_CELLS_PER_WORKER': 32,
'TASK_BUFFERS': 3
},
# +++ Should move to external file so datasets can be shared between profiles
'CLIMATE_DATASETS': {
'TRAINING': {
'FORCING': {
'PATH': join(TRAINING_DATA_PATH, 'climate/bom_awap'),
'MAPPING': FORCING_MAP_AWAP
},
'CLIMATOLOGY': CLIMATOLOGIES['AWAP_DAILY']
},
'TESTING': {
'FORCING': {
'PATH': join(TEST_DATA_PATH, 'simulation/climate'),
'MAPPING': FORCING_MAP_AWAP
},
'CLIMATOLOGY': CLIMATOLOGIES['AWAP_DAILY']
}
},
'BENCHMARKING': {
'BENCHMARK_SITES': benchmark_sites_file,
'MONTHLY_REJECTION_THRESHOLD': 15,
'ANNUAL_REJECTION_THRESHOLD': 6,
'SM_MODEL_VARNAMES': ['s0_avg', 'ss_avg', 'sd_avg'],
'SM_MODEL_LAYERS': {'s0_avg': 100., 'ss_avg': 900., 'sd_avg':
5000.},
'SM_OBSERVED_LAYERS': ('profile','top','shallow','middle','deep'),
'FIG_SIZE': (14,6),
'CELLSIZE': 0.05,
'LANDSCAPE_VERSION_EQUIVALENCE': {"5":"45","5R":"45","5Q":"45"}
},
# Preferred compiler; referenced in model settings
'COMPILER': COMPILER,
'IO_SETTINGS' : {
'CHUNKSIZES': config_options['CHUNKSIZES'],
'DEFAULT_CHUNKSIZE': (config_options['CHUNKSIZES']['TIME'], \
config_options['CHUNKSIZES']['SPATIAL'], \
config_options['CHUNKSIZES']['SPATIAL']),
'VAR_CHUNK_CACHE_SIZE': 2**20, # =1048576 ie 1Mb
'VAR_CHUNK_CACHE_NELEMS': 1009, # prime number
'VAR_CHUNK_CACHE_PREEMPTION': 0.75, # 1 for read or write only
# '_fallthrough' will attempt to use _h5py, then netCDF4 if that fails
'DB_OPEN_WITH': '_fallthrough', #"_h5py" OR "_nc"
'MAX_FILES_PER_SFM': 32, # Maximum files allowed open in each SplitFileManager.
# Maximum chunksize to read during extraction (in bytes)
'MAX_EXTRACT_CHUNK': 2**24
},
'LOGGER_SETTINGS': config_options['LOGGER_SETTINGS'],
# Used in extents.get_default_extent
# Consider creating extents objects explicitly from files rather than using this method.
# It exists for backwards compatibility, and will be deprecated
'DEFAULT_MASK_FILE': 'web_mask_v5.h5'
}
return objectify(settings)
| true
| true
|
790788bf4117aba521c9c8636f84c4045f0d6178
| 915
|
py
|
Python
|
OracleWebLogic/samples/12213-domain/container-scripts/add-machine.py
|
PfizerRD/oracle-docker
|
348b8584aa53335601caded4f654f3722c591495
|
[
"UPL-1.0"
] | null | null | null |
OracleWebLogic/samples/12213-domain/container-scripts/add-machine.py
|
PfizerRD/oracle-docker
|
348b8584aa53335601caded4f654f3722c591495
|
[
"UPL-1.0"
] | null | null | null |
OracleWebLogic/samples/12213-domain/container-scripts/add-machine.py
|
PfizerRD/oracle-docker
|
348b8584aa53335601caded4f654f3722c591495
|
[
"UPL-1.0"
] | null | null | null |
#Copyright (c) 2014-2017 Oracle and/or its affiliates. All rights reserved.
#
#Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl.
#
# Script to create and add NodeManager automatically to the domain's AdminServer running on '$ADMIN_HOST'.
#
# Since: October, 2014
# Author: bruno.borges@oracle.com
#
# =============================
import os
import socket
execfile('/u01/oracle/commonfuncs.py')
# NodeManager details
nmhost = os.environ.get('NM_HOST', socket.gethostbyname(hostname))
nmport = os.environ.get('NM_PORT', '5556')
# Connect to the AdminServer
# ==========================
connectToAdmin()
# Create a Machine
# ================
editMode()
cd('/')
cmo.createMachine(nmname)
cd('/Machines/%s/NodeManager/%s' % (nmname, nmname))
cmo.setListenPort(int(nmport))
cmo.setListenAddress(nmhost)
cmo.setNMType('Plain')
saveActivate()
# Exit
# ====
exit()
| 22.875
| 106
| 0.679781
|
#
# Since: October, 2014
# Author: bruno.borges@oracle.com
#
# =============================
import os
import socket
execfile('/u01/oracle/commonfuncs.py')
# NodeManager details
nmhost = os.environ.get('NM_HOST', socket.gethostbyname(hostname))
nmport = os.environ.get('NM_PORT', '5556')
# Connect to the AdminServer
# ==========================
connectToAdmin()
# Create a Machine
# ================
editMode()
cd('/')
cmo.createMachine(nmname)
cd('/Machines/%s/NodeManager/%s' % (nmname, nmname))
cmo.setListenPort(int(nmport))
cmo.setListenAddress(nmhost)
cmo.setNMType('Plain')
saveActivate()
# Exit
# ====
exit()
| true
| true
|
79078927593b897acca8b25027f41844a3d328ad
| 691
|
py
|
Python
|
ginit.py
|
ghlmtz/airline-sim
|
5899e0390aaa5792e0bc6b1673ad2f0b3dd11d1d
|
[
"MIT"
] | null | null | null |
ginit.py
|
ghlmtz/airline-sim
|
5899e0390aaa5792e0bc6b1673ad2f0b3dd11d1d
|
[
"MIT"
] | null | null | null |
ginit.py
|
ghlmtz/airline-sim
|
5899e0390aaa5792e0bc6b1673ad2f0b3dd11d1d
|
[
"MIT"
] | null | null | null |
import timeit
mapx = 512
mapy = 512
# Good seeds:
# 772855 Spaced out continents
# 15213 Tight continents
# 1238 What I've been working with, for the most part
# 374539 Sparse continents
# 99999
seed = 773202
sea_level = 0.6
DEBUG = 0
GFXDEBUG = 0
setup_time = timeit.default_timer()
tiles = [[None] * mapx for _ in range(mapy)]
lands = []
towns = []
countries = []
have_savefile = False
class Clock():
def __init__(self,t):
self.time_minutes = t
def inc(self,t):
self.time_minutes += t
self.time_minutes = self.time_minutes % (60*24)
def fmt_time(self):
m = self.time_minutes % 60
h = self.time_minutes // 60
return ("%02d%02dZ" % (h, m))
clock = Clock(9*60) # 9 AM
| 18.184211
| 53
| 0.677279
|
import timeit
mapx = 512
mapy = 512
# 374539 Sparse continents
# 99999
seed = 773202
sea_level = 0.6
DEBUG = 0
GFXDEBUG = 0
setup_time = timeit.default_timer()
tiles = [[None] * mapx for _ in range(mapy)]
lands = []
towns = []
countries = []
have_savefile = False
class Clock():
def __init__(self,t):
self.time_minutes = t
def inc(self,t):
self.time_minutes += t
self.time_minutes = self.time_minutes % (60*24)
def fmt_time(self):
m = self.time_minutes % 60
h = self.time_minutes // 60
return ("%02d%02dZ" % (h, m))
clock = Clock(9*60) # 9 AM
| true
| true
|
79078a1053c43f07eeaae1b829b4c69ed59c38c1
| 2,736
|
py
|
Python
|
pipeline/filter.py
|
hadyelsahar/RE-NLG-Dataset
|
460d52d50e5dc302cdd879f1435bda45a4946202
|
[
"MIT"
] | 44
|
2018-03-05T00:40:30.000Z
|
2022-03-21T04:44:09.000Z
|
pipeline/filter.py
|
hadyelsahar/RE-NLG-Dataset
|
460d52d50e5dc302cdd879f1435bda45a4946202
|
[
"MIT"
] | 4
|
2018-11-08T15:32:46.000Z
|
2020-10-24T14:32:10.000Z
|
pipeline/filter.py
|
hadyelsahar/RE-NLG-Dataset
|
460d52d50e5dc302cdd879f1435bda45a4946202
|
[
"MIT"
] | 10
|
2018-01-23T00:30:39.000Z
|
2021-11-08T03:24:25.000Z
|
from pipeline import *
class SentenceLimiter:
"""
Limit the text, word boundaries and
sentence boundaries of a given document
to the number of sentences given
"""
def run(self, document, number_sentences):
"""
:param: number_sentences, starts with 0 for the fist sentence
"""
boundaries = (document.sentences_boundaries[0][0], document.sentences_boundaries[:number_sentences+1][-1][1])
document.text = document.text[boundaries[0]:boundaries[1]]
document.sentences_boundaries = self._limitSenteceBoundaries(document.sentences_boundaries, boundaries[1])
document.words_boundaries = self._limitWordBoundaries(document.words_boundaries, boundaries[1])
document.entities = self._limitEntities(document.entities, boundaries[1])
document.triples = self._limitTriples(document.triples, boundaries[1])
return document
def _limitSenteceBoundaries(self, sentences_boundaries, maxi):
sentences_boundaries_new = []
for sent in sentences_boundaries:
if sent[1] <= maxi:
sentences_boundaries_new.append(sent)
return sentences_boundaries_new
def _limitEntities(self, entities, maxi):
entities_new = []
for e in entities:
if e.boundaries[1] <= maxi:
entities_new.append(e)
return entities_new
def _limitTriples(self, triples, maxi):
triples_new = []
for t in triples:
if t.sentence_id == 0:
triples_new.append(t)
return triples_new
def _limitWordBoundaries(self, words_boundaries, maxi):
words_boundaries_new = []
for word in words_boundaries:
if word[1] <= maxi:
words_boundaries_new.append(word)
return words_boundaries_new
class MainEntityLimiter:
"""
Remove a document's content if the main entity is not aligned
"""
def run(self, document):
if not document.uri in [i.uri for i in document.entities]:
document = None
return document
class EntityTypeFilter:
"""
Remove all documents that are of a certain type
"""
def __init__(self, all_triples, entities):
"""
:param: input TripleReaderTriples object
:param: a list of entity that should be filtered
"""
self.wikidata_triples = all_triples
self.entities = entities
def run(self, document):
# P31: instance of
prop_id = 'http://www.wikidata.org/prop/direct/P31'
if any([i for i in self.wikidata_triples.get(document.docid) if i[1] == prop_id and i[2] in self.entities]):
document = None
return document
| 35.076923
| 117
| 0.645468
|
from pipeline import *
class SentenceLimiter:
def run(self, document, number_sentences):
boundaries = (document.sentences_boundaries[0][0], document.sentences_boundaries[:number_sentences+1][-1][1])
document.text = document.text[boundaries[0]:boundaries[1]]
document.sentences_boundaries = self._limitSenteceBoundaries(document.sentences_boundaries, boundaries[1])
document.words_boundaries = self._limitWordBoundaries(document.words_boundaries, boundaries[1])
document.entities = self._limitEntities(document.entities, boundaries[1])
document.triples = self._limitTriples(document.triples, boundaries[1])
return document
def _limitSenteceBoundaries(self, sentences_boundaries, maxi):
sentences_boundaries_new = []
for sent in sentences_boundaries:
if sent[1] <= maxi:
sentences_boundaries_new.append(sent)
return sentences_boundaries_new
def _limitEntities(self, entities, maxi):
entities_new = []
for e in entities:
if e.boundaries[1] <= maxi:
entities_new.append(e)
return entities_new
def _limitTriples(self, triples, maxi):
triples_new = []
for t in triples:
if t.sentence_id == 0:
triples_new.append(t)
return triples_new
def _limitWordBoundaries(self, words_boundaries, maxi):
words_boundaries_new = []
for word in words_boundaries:
if word[1] <= maxi:
words_boundaries_new.append(word)
return words_boundaries_new
class MainEntityLimiter:
def run(self, document):
if not document.uri in [i.uri for i in document.entities]:
document = None
return document
class EntityTypeFilter:
def __init__(self, all_triples, entities):
self.wikidata_triples = all_triples
self.entities = entities
def run(self, document):
prop_id = 'http://www.wikidata.org/prop/direct/P31'
if any([i for i in self.wikidata_triples.get(document.docid) if i[1] == prop_id and i[2] in self.entities]):
document = None
return document
| true
| true
|
79078a8aa5b23cba43055803cf62956b0b6ca3bb
| 25,006
|
py
|
Python
|
app.py
|
SantiLJ/strategy-template
|
28ec389a7ebac93e85e07b5310976bb08445f230
|
[
"MIT"
] | null | null | null |
app.py
|
SantiLJ/strategy-template
|
28ec389a7ebac93e85e07b5310976bb08445f230
|
[
"MIT"
] | null | null | null |
app.py
|
SantiLJ/strategy-template
|
28ec389a7ebac93e85e07b5310976bb08445f230
|
[
"MIT"
] | null | null | null |
# Fetches and displays a basic candlestick app.
import dash
import plotly.graph_objects as go
import plotly.express as px
import dash_core_components as dcc
import dash_html_components as html
from dash_table import DataTable, FormatTemplate
from utils import *
from datetime import date, timedelta
from math import ceil
from backtest import *
from bloomberg_functions import req_historical_data
import numpy as np
from sklearn import linear_model
from statistics import mean
# Create a Dash app
app = dash.Dash(__name__)
# Create the page layout
app.layout = html.Div([
    # Page title
    html.H1(
        'Trading Strategy Example Template',
        style={'display': 'block', 'text-align': 'center'}
    ),
    # Left column: prose description of the strategy's steps
    html.Div([
        html.H2('Strategy'),
        html.P('This app explores a simple strategy that works as follows:'),
        html.Ol([
            html.Li([
                "While the market is not open, retrieve the past N days' " + \
                "worth of data for:",
                html.Ul([
                    html.Li("IVV: daily open, high, low, & close prices"),
                    html.Li(
                        "US Treasury CMT Rates for 1 mo, 2 mo, 3 mo, 6 mo, " + \
                        "1 yr and 2 yr maturities."
                    )
                ])
            ]),
            html.Li([
                'Fit a linear trend line through the yield curve defined ' + \
                'by the CMT rates and record in a dataframe:',
                html.Ul([
                    html.Li('the y-intercept ("a")'),
                    html.Li('the slope ("b")'),
                    html.Li('the coefficient of determination ("R^2")')
                ]),
                '...for the fitted line.'
            ]),
            html.Li(
                'Repeat 2. for past CMT data to create a FEATURES ' + \
                'dataframe containing historical values of a, b, and R^2 '
            ),
            html.Li(
                'Add volatility of day-over-day log returns of IVV ' + \
                'closing prices -- observed over the past N days -- to ' + \
                'each historical data row in the FEATURES dataframe.'
            ),
            html.Li(
                'Add RESPONSE data to the historical FEATURES dataframe.' + \
                'The RESPONSE data includes information that communicates ' + \
                'whether when, and how a limit order to SELL IVV at a ' + \
                'price equal to (IVV Open Price of Next Trading Day) * ' + \
                '(1 + alpha) would have filled over the next n trading days.'
            ),
            html.Li(
                'Using the features a, b, R^2, and IVV vol alongside the ' + \
                'RESPONSE data for the past N observed trading days, ' + \
                'train a logistic regression. Use it to predict whether a ' + \
                'limit order to SELL IVV at a price equal to (IVV Open ' + \
                'Price of Next Trading Day) * (1 + alpha) would have ' + \
                'filled over the next n trading days.'
            ),
            html.Li(
                'If the regression in 6. predicts TRUE, submit two trades:'),
            html.Ul([
                html.Li(
                    'A market order to BUY lot_size shares of IVV, which ' + \
                    'fills at open price the next trading day.'
                ),
                html.Li(
                    'A limit order to SELL lot_size shares of IVV at ' + \
                    '(next day\'s opening price * (1+alpha)'
                )
            ]),
            html.Li(
                'If the limit order does not fill after n days, issue a ' + \
                'market order to sell lot_size shares of IVV at close of ' + \
                'the nth day.'
            )
        ])
    ],
        style={'display': 'inline-block', 'width': '50%'}
    ),
    # Right column: data disclaimer, parameter list, and backtest controls
    html.Div([
        html.H2('Data Note & Disclaimer'),
        html.P(
            'This Dash app makes use of Bloomberg\'s Python API to append ' + \
            'the latest historical data to what\'s already provided in the ' + \
            '.csv files in the directory \'bbg_data\'. These initial data ' + \
            'files were compiled using publicly available information on ' + \
            'the Internet and do not contain historical stock market data ' + \
            'from Bloomberg. This app does NOT need a Bloomberg ' + \
            'subscription to work -- only to update data. Always know and ' + \
            'obey your data stewardship obligations!'
        ),
        html.H2('Parameters'),
        html.Ol([
            html.Li(
                "n: number of days a limit order to exit a position is " + \
                "kept open"
            ),
            html.Li(
                "N: number of observed historical trading days to use in " + \
                "training the logistic regression model."
            ),
            html.Li(
                'alpha: a percentage in numeric form ' + \
                '(e.g., "0.02" == "2%") that defines the profit sought by ' + \
                'entering a trade; for example, if IVV is bought at ' + \
                'price X, then a limit order to sell the shares will be put' + \
                ' in place at a price = X*(1+alpha)'
            ),
            html.Li(
                'lot_size: number of shares traded in each round-trip ' + \
                'trade. Kept constant for simplicity.'
            ),
            html.Li(
                'date_range: Date range over which to perform the backtest.'
            )
        ]),
        # Backtest trigger button, summary-stats table, and input controls
        html.Div(
            [
                html.Div(
                    [
                        html.Button(
                            "RUN BACKTEST", id='run-backtest', n_clicks=0
                        ),
                        # Summary statistics filled in by
                        # update_performance_metrics()
                        html.Table(
                            [html.Tr([
                                html.Th('Alpha'), html.Th('Beta'),
                                html.Th('Geometric Mean Return'),
                                html.Th('Average Trades per Year'),
                                html.Th('Volatility'), html.Th('Sharpe')
                            ])] + [html.Tr([
                                html.Td(html.Div(id='strategy-alpha')),
                                html.Td(html.Div(id='strategy-beta')),
                                html.Td(html.Div(id='strategy-gmrr')),
                                html.Td(html.Div(id='strategy-trades-per-yr')),
                                html.Td(html.Div(id='strategy-vol')),
                                html.Td(html.Div(id='strategy-sharpe'))
                            ])],
                            className='main-summary-table'
                        ),
                        html.Table(
                            # Header
                            [html.Tr([
                                html.Th('Date Range'),
                                html.Th('Bloomberg Identifier'),
                                html.Th('n'), html.Th('N'), html.Th('alpha'),
                                html.Th('Lot Size'),
                                html.Th('Starting Cash')
                            ])] +
                            # Body
                            [html.Tr([
                                html.Td(
                                    dcc.DatePickerRange(
                                        id='hist-data-range',
                                        min_date_allowed=date(2015, 1, 1),
                                        max_date_allowed=date.today(),
                                        initial_visible_month=date.today(),
                                        start_date=date(2019, 3, 16),
                                        end_date=date(2021, 4, 12)
                                    )
                                ),
                                html.Td(dcc.Input(
                                    id='bbg-identifier-1', type="text",
                                    value="IVV US Equity",
                                    style={'text-align': 'center'}
                                )),
                                html.Td(
                                    dcc.Input(
                                        id='lil-n', type="number", value=5,
                                        style={'text-align': 'center',
                                               'width': '30px'}
                                    )
                                ),
                                html.Td(
                                    dcc.Input(
                                        id='big-N', type="number", value=10,
                                        style={'text-align': 'center',
                                               'width': '50px'}
                                    )
                                ),
                                html.Td(
                                    dcc.Input(
                                        id="alpha", type="number", value=0.02,
                                        style={'text-align': 'center',
                                               'width': '50px'}
                                    )
                                ),
                                html.Td(
                                    dcc.Input(
                                        id="lot-size", type="number", value=100,
                                        style={'text-align': 'center',
                                               'width': '50px'}
                                    )
                                ),
                                html.Td(
                                    dcc.Input(
                                        id="starting-cash", type="number",
                                        value=50000,
                                        style={'text-align': 'center',
                                               'width': '100px'}
                                    )
                                )
                            ])]
                        )
                    ],
                    style={'display': 'inline-block', 'width': '50%'}
                )
            ],
            style={'display': 'block'}
        )
    ],
        style={
            'display': 'inline-block', 'width': '50%', 'vertical-align': 'top'
        }
    ),
    ##### Intermediate Variables (hidden in divs as JSON) ######################
    ############################################################################
    # Hidden div inside the app that stores IVV historical data
    html.Div(id='ivv-hist', style={'display': 'none'}),
    # Hidden div inside the app that stores bonds historical data
    html.Div(id='bonds-hist', style={'display': 'none'}),
    ############################################################################
    ############################################################################
    # Alpha/beta scatter produced by update_performance_metrics()
    html.Div(
        [dcc.Graph(id='alpha-beta')],
        style={'display': 'inline-block', 'width': '50%'}
    ),
    # Display the current selected date range
    html.Div(id='date-range-output'),
    # Result tables filled in by calculate_backtest()
    html.Div([
        html.H2(
            'Trade Ledger',
            style={
                'display': 'inline-block', 'text-align': 'center',
                'width': '100%'
            }
        ),
        DataTable(
            id='trade-ledger',
            fixed_rows={'headers': True},
            style_cell={'textAlign': 'center'},
            style_table={'height': '300px', 'overflowY': 'auto'}
        )
    ]),
    html.Div([
        html.Div([
            html.H2(
                'Calendar Ledger',
                style={
                    'display': 'inline-block', 'width': '45%',
                    'text-align': 'center'
                }
            ),
            html.H2(
                'Trade Blotter',
                style={
                    'display': 'inline-block', 'width': '55%',
                    'text-align': 'center'
                }
            )
        ]),
        html.Div(
            DataTable(
                id='calendar-ledger',
                fixed_rows={'headers': True},
                style_cell={'textAlign': 'center'},
                style_table={'height': '300px', 'overflowY': 'auto'}
            ),
            style={'display': 'inline-block', 'width': '45%'}
        ),
        html.Div(
            DataTable(
                id='blotter',
                fixed_rows={'headers': True},
                style_cell={'textAlign': 'center'},
                style_table={'height': '300px', 'overflowY': 'auto'}
            ),
            style={'display': 'inline-block', 'width': '55%'}
        )
    ]),
    html.Div([
        html.H2(
            'Features and Responses',
            style={
                'display': 'inline-block', 'text-align': 'center',
                'width': '100%'
            }
        ),
        DataTable(
            id='features-and-responses',
            fixed_rows={'headers': True},
            style_cell={'textAlign': 'center'},
            style_table={'height': '300px', 'overflowY': 'auto'}
        )
    ]),
    # Yield-curve surface and candlestick chart; unhidden by their callbacks
    html.Div([
        html.Div(
            dcc.Graph(id='bonds-3d-graph', style={'display': 'none'}),
            style={'display': 'inline-block', 'width': '50%'}
        ),
        html.Div(
            dcc.Graph(id='candlestick', style={'display': 'none'}),
            style={'display': 'inline-block', 'width': '50%'}
        )
    ]),
    html.Div(id='proposed-trade'),
    ############################################################################
    ############################################################################
])
@app.callback(
    #### Update Historical Bloomberg Data
    [dash.dependencies.Output('ivv-hist', 'children'),
     dash.dependencies.Output('date-range-output', 'children'),
     dash.dependencies.Output('candlestick', 'figure'),
     dash.dependencies.Output('candlestick', 'style')],
    dash.dependencies.Input("run-backtest", 'n_clicks'),
    [dash.dependencies.State("bbg-identifier-1", "value"),
     dash.dependencies.State("big-N", "value"),
     dash.dependencies.State("lil-n", "value"),
     dash.dependencies.State('hist-data-range', 'start_date'),
     dash.dependencies.State('hist-data-range', 'end_date')],
    prevent_initial_call=True
)
def update_bbg_data(nclicks, bbg_id_1, N, n, start_date, end_date):
    """Fetch IVV price history and build the candlestick chart.

    Returns the four callback outputs: (history as JSON, the displayed
    date-range message, the candlestick figure, a style dict that makes
    the chart visible).
    """
    # Pad the query window backwards so every backtested date still has
    # N + n prior *trading* days of data (365/252 converts trading days
    # to calendar days).
    start_date = pd.to_datetime(start_date).date() - timedelta(
        days=ceil((N + n) * (365 / 252))
    )
    start_date = start_date.strftime("%Y-%m-%d")
    historical_data = req_historical_data(bbg_id_1, start_date, end_date)
    # Build the human-readable "Backtesting from ..." message.
    base_msg = 'Backtesting from '
    date_output_msg = base_msg
    if start_date is not None:
        start_date_object = date.fromisoformat(start_date)
        start_date_string = start_date_object.strftime('%B %d, %Y')
        date_output_msg = date_output_msg + 'Start Date: ' + \
                          start_date_string + ' to '
    if end_date is not None:
        end_date_object = date.fromisoformat(end_date)
        end_date_string = end_date_object.strftime('%B %d, %Y')
        date_output_msg = date_output_msg + 'End Date: ' + end_date_string
    # BUG FIX: the fallback used to compare the message length against the
    # unrelated literal 'You have selected: ', so it could never match the
    # actual 'Backtesting from ' prefix. Compare against the untouched
    # base message instead.
    if date_output_msg == base_msg:
        date_output_msg = 'Select a date to see it displayed here'
    fig = go.Figure(
        data=[
            go.Candlestick(
                x=historical_data['Date'],
                open=historical_data['Open'],
                high=historical_data['High'],
                low=historical_data['Low'],
                close=historical_data['Close']
            )
        ]
    )
    return historical_data.to_json(), date_output_msg, fig, {'display': 'block'}
@app.callback(
    [dash.dependencies.Output('bonds-hist', 'children'),
     dash.dependencies.Output('bonds-3d-graph', 'figure'),
     dash.dependencies.Output('bonds-3d-graph', 'style')],
    dash.dependencies.Input("run-backtest", 'n_clicks'),
    [dash.dependencies.State('hist-data-range', 'start_date'),
     dash.dependencies.State('hist-data-range', 'end_date'),
     dash.dependencies.State('big-N', 'value'),
     dash.dependencies.State('lil-n', 'value')
     ],
    prevent_initial_call=True
)
def update_bonds_hist(n_clicks, startDate, endDate, N, n):
    """Fetch US Treasury CMT rates for the backtest window and build the
    3-D yield-curve surface figure.

    Returns the three callback outputs: (bonds data as JSON, the surface
    figure, a style dict that unhides the graph).
    """
    # Need to query enough days to run the backtest on every date in the
    # range start_date to end_date
    startDate = pd.to_datetime(startDate).date() - timedelta(
        days=ceil((N + n) * (365 / 252))
    )
    startDate = startDate.strftime("%Y-%m-%d")
    # fetch_usdt_rates() works one calendar year at a time: collect every
    # year the (padded) window touches and concatenate.
    data_years = list(
        range(pd.to_datetime(startDate).date().year,
              pd.to_datetime(endDate).date().year + 1, 1)
    )
    bonds_data = fetch_usdt_rates(data_years[0])
    if len(data_years) > 1:
        for year in data_years[1:]:
            bonds_data = pd.concat([bonds_data, fetch_usdt_rates(year)],
                                   axis=0, ignore_index=True)
    # How to filter a dataframe for rows that you want
    bonds_data = bonds_data[bonds_data.Date >= pd.to_datetime(startDate)]
    bonds_data = bonds_data[bonds_data.Date <= pd.to_datetime(endDate)]
    # Surface: x = maturity in years (parsed from the "1 mo"/"2 yr"-style
    # column names), y = date, z = rate.
    fig = go.Figure(
        data=[
            go.Surface(
                z=bonds_data,
                y=bonds_data.Date,
                x=[
                    to_years(cmt_colname) for cmt_colname in list(
                        filter(lambda x: ' ' in x, bonds_data.columns.values)
                    )
                ]
            )
        ]
    )
    fig.update_layout(
        scene=dict(
            xaxis_title='Maturity (years)',
            yaxis_title='Date',
            zaxis_title='APR (%)',
            zaxis=dict(ticksuffix='%')
        )
    )
    bonds_data.reset_index(drop=True, inplace=True)
    return bonds_data.to_json(), fig, {'display': 'block'}
@app.callback(
    [
        dash.dependencies.Output('features-and-responses', 'data'),
        dash.dependencies.Output('features-and-responses', 'columns'),
        dash.dependencies.Output('blotter', 'data'),
        dash.dependencies.Output('blotter', 'columns'),
        dash.dependencies.Output('calendar-ledger', 'data'),
        dash.dependencies.Output('calendar-ledger', 'columns'),
        dash.dependencies.Output('trade-ledger', 'data'),
        dash.dependencies.Output('trade-ledger', 'columns')
    ],
    [dash.dependencies.Input('ivv-hist', 'children'),
     dash.dependencies.Input('bonds-hist', 'children'),
     dash.dependencies.Input('lil-n', 'value'),
     dash.dependencies.Input('big-N', 'value'),
     dash.dependencies.Input('alpha', 'value'),
     dash.dependencies.Input('lot-size', 'value'),
     dash.dependencies.Input('starting-cash', 'value'),
     # NOTE(review): these two State objects sit inside the Input list —
     # confirm dash registers them as states in this position.
     dash.dependencies.State('hist-data-range', 'start_date'),
     dash.dependencies.State('hist-data-range', 'end_date')],
    prevent_initial_call=True
)
def calculate_backtest(ivv_hist, bonds_hist, n, N, alpha, lot_size,
                       starting_cash, start_date, end_date):
    """Run backtest() on the cached IVV/bonds JSON and format its four
    result tables (features & responses, blotter, calendar ledger, trade
    ledger) as DataTable ``data``/``columns`` pairs for the UI.
    """
    features_and_responses, blotter, calendar_ledger, trade_ledger = backtest(
        ivv_hist, bonds_hist, n, N, alpha, lot_size, start_date, end_date,
        starting_cash
    )
    # Columns for the features table are auto-generated; the other three
    # tables get hand-written headers with money/percent formatting.
    features_and_responses_columns = [
        {"name": i, "id": i} for i in features_and_responses.columns
    ]
    features_and_responses = features_and_responses.to_dict('records')
    blotter = blotter.to_dict('records')
    blotter_columns = [
        dict(id='ID', name='ID'),
        dict(id='ls', name='long/short'),
        dict(id='submitted', name='Created'),
        dict(id='action', name='Action'),
        dict(id='size', name='Size'),
        dict(id='symbol', name='Symb'),
        dict(
            id='price', name='Order Price', type='numeric',
            format=FormatTemplate.money(2)
        ),
        dict(id='type', name='Type'),
        dict(id='status', name='Status'),
        dict(id='fill_price', name='Fill Price', type='numeric',
             format=FormatTemplate.money(2)
             ),
        dict(id='filled_or_cancelled', name='Filled/Cancelled')
    ]
    calendar_ledger = calendar_ledger.to_dict('records')
    calendar_ledger_columns = [
        dict(id='Date', name='Date'),
        dict(id='position', name='position'),
        dict(id='ivv_close', name='IVV Close', type='numeric',
             format=FormatTemplate.money(2)),
        dict(id='cash', name='Cash', type='numeric',
             format=FormatTemplate.money(2)),
        dict(id='stock_value', name='Stock Value', type='numeric',
             format=FormatTemplate.money(2)),
        dict(id='total_value', name='Total Value', type='numeric',
             format=FormatTemplate.money(2))
    ]
    trade_ledger = trade_ledger.to_dict('records')
    trade_ledger_columns = [
        dict(id='trade_id', name="ID"),
        dict(id='open_dt', name='Trade Opened'),
        dict(id='close_dt', name='Trade Closed'),
        dict(id='trading_days_open', name='Trading Days Open'),
        dict(id='buy_price', name='Entry Price', type='numeric',
             format=FormatTemplate.money(2)),
        dict(id='sell_price', name='Exit Price', type='numeric',
             format=FormatTemplate.money(2)),
        dict(id='benchmark_buy_price', name='Benchmark Buy Price',
             type='numeric', format=FormatTemplate.money(2)),
        dict(id='benchmark_sell_price', name='Benchmark sell Price',
             type='numeric', format=FormatTemplate.money(2)),
        dict(id='trade_rtn', name='Return on Trade', type='numeric',
             format=FormatTemplate.percentage(3)),
        dict(id='benchmark_rtn', name='Benchmark Return', type='numeric',
             format=FormatTemplate.percentage(3)),
        dict(id='trade_rtn_per_trading_day', name='Trade Rtn / trd day',
             type='numeric', format=FormatTemplate.percentage(3)),
        dict(id='benchmark_rtn_per_trading_day', name='Benchmark Rtn / trd day',
             type='numeric', format=FormatTemplate.percentage(3))
    ]
    return features_and_responses, features_and_responses_columns, blotter, \
           blotter_columns, calendar_ledger, calendar_ledger_columns, \
           trade_ledger, trade_ledger_columns
@app.callback(
    [
        dash.dependencies.Output('alpha-beta', 'figure'),
        dash.dependencies.Output('strategy-alpha', 'children'),
        dash.dependencies.Output('strategy-beta', 'children'),
        dash.dependencies.Output('strategy-gmrr', 'children'),
        dash.dependencies.Output('strategy-trades-per-yr', 'children'),
        dash.dependencies.Output('strategy-vol', 'children'),
        dash.dependencies.Output('strategy-sharpe', 'children')
    ],
    dash.dependencies.Input('trade-ledger', 'data'),
    prevent_initial_call=True
)
def update_performance_metrics(trade_ledger):
    """Summarize strategy performance against its benchmark.

    Fits an OLS regression of per-trading-day strategy returns on
    benchmark returns (alpha = intercept, beta = slope) and reports
    geometric mean return, trade frequency, volatility and a
    Sharpe-style ratio.
    """
    trade_ledger = pd.DataFrame(trade_ledger)
    # Drop the first row — presumably a seed/initialization record rather
    # than a real round-trip trade; TODO confirm against backtest().
    trade_ledger = trade_ledger[1:]
    X = trade_ledger['benchmark_rtn_per_trading_day'].values.reshape(-1, 1)
    linreg_model = linear_model.LinearRegression()
    linreg_model.fit(X, trade_ledger['trade_rtn_per_trading_day'])
    x_range = np.linspace(X.min(), X.max(), 100)
    y_range = linreg_model.predict(x_range.reshape(-1, 1))
    fig = px.scatter(
        trade_ledger,
        title="Performance against Benchmark",
        x='benchmark_rtn_per_trading_day',
        y='trade_rtn_per_trading_day'
    )
    fig.add_traces(go.Scatter(x=x_range, y=y_range, name='OLS Fit'))
    # Intercept and slope of the fit: per-trade alpha (shown as %) & beta.
    alpha = str(round(linreg_model.intercept_ * 100, 3)) + "% / trade"
    beta = round(linreg_model.coef_[0], 3)
    # Geometric mean of per-trading-day returns (a fraction, not a %).
    gmrr = (trade_ledger['trade_rtn_per_trading_day'] + 1).product() ** (
        1 / len(
            trade_ledger)) - 1
    avg_trades_per_yr = round(
        trade_ledger['open_dt'].groupby(
            pd.DatetimeIndex(trade_ledger['open_dt']).year
        ).agg('count').mean(),
        0
    )
    # NOTE(review): only `mean` is imported from statistics at the top of
    # this file; `stdev` must come from a star import — verify, or add
    # `from statistics import stdev`.
    vol = stdev(trade_ledger['trade_rtn_per_trading_day'])
    sharpe = round(gmrr / vol, 3)
    # BUG FIX: gmrr and vol are fractions; scale by 100 (as `alpha` does
    # above) so the "%" suffix in the displayed strings is accurate.
    gmrr_str = str(round(gmrr * 100, 3)) + "% / trade"
    vol_str = str(round(vol * 100, 3)) + "% / trade"
    return fig, alpha, beta, gmrr_str, avg_trades_per_yr, vol_str, sharpe
# Run it!
if __name__ == '__main__':
    # Start the Dash development server (debug=True enables hot reload).
    app.run_server(debug=True)
| 41.332231
| 81
| 0.477765
|
import dash
import plotly.graph_objects as go
import plotly.express as px
import dash_core_components as dcc
import dash_html_components as html
from dash_table import DataTable, FormatTemplate
from utils import *
from datetime import date, timedelta
from math import ceil
from backtest import *
from bloomberg_functions import req_historical_data
import numpy as np
from sklearn import linear_model
from statistics import mean
app = dash.Dash(__name__)
app.layout = html.Div([
html.H1(
'Trading Strategy Example Template',
style={'display': 'block', 'text-align': 'center'}
),
html.Div([
html.H2('Strategy'),
html.P('This app explores a simple strategy that works as follows:'),
html.Ol([
html.Li([
"While the market is not open, retrieve the past N days' " + \
"worth of data for:",
html.Ul([
html.Li("IVV: daily open, high, low, & close prices"),
html.Li(
"US Treasury CMT Rates for 1 mo, 2 mo, 3 mo, 6 mo, " + \
"1 yr and 2 yr maturities."
)
])
]),
html.Li([
'Fit a linear trend line through the yield curve defined ' + \
'by the CMT rates and record in a dataframe:',
html.Ul([
html.Li('the y-intercept ("a")'),
html.Li('the slope ("b")'),
html.Li('the coefficient of determination ("R^2")')
]),
'...for the fitted line.'
]),
html.Li(
'Repeat 2. for past CMT data to create a FEATURES ' + \
'dataframe containing historical values of a, b, and R^2 '
),
html.Li(
'Add volatility of day-over-day log returns of IVV ' + \
'closing prices -- observed over the past N days -- to ' + \
'each historical data row in the FEATURES dataframe.'
),
html.Li(
'Add RESPONSE data to the historical FEATURES dataframe.' + \
'The RESPONSE data includes information that communicates ' + \
'whether when, and how a limit order to SELL IVV at a ' + \
'price equal to (IVV Open Price of Next Trading Day) * ' + \
'(1 + alpha) would have filled over the next n trading days.'
),
html.Li(
'Using the features a, b, R^2, and IVV vol alongside the ' + \
'RESPONSE data for the past N observed trading days, ' + \
'train a logistic regression. Use it to predict whether a ' + \
'limit order to SELL IVV at a price equal to (IVV Open ' + \
'Price of Next Trading Day) * (1 + alpha) would have ' + \
'filled over the next n trading days.'
),
html.Li(
'If the regression in 6. predicts TRUE, submit two trades:'),
html.Ul([
html.Li(
'A market order to BUY lot_size shares of IVV, which ' + \
'fills at open price the next trading day.'
),
html.Li(
'A limit order to SELL lot_size shares of IVV at ' + \
'(next day\'s opening price * (1+alpha)'
)
]),
html.Li(
'If the limit order does not fill after n days, issue a ' + \
'market order to sell lot_size shares of IVV at close of ' + \
'the nth day.'
)
])
],
style={'display': 'inline-block', 'width': '50%'}
),
html.Div([
html.H2('Data Note & Disclaimer'),
html.P(
'This Dash app makes use of Bloomberg\'s Python API to append ' + \
'the latest historical data to what\'s already provided in the ' + \
'.csv files in the directory \'bbg_data\'. These initial data ' + \
'files were compiled using publicly available information on ' + \
'the Internet and do not contain historical stock market data ' + \
'from Bloomberg. This app does NOT need a Bloomberg ' + \
'subscription to work -- only to update data. Always know and ' + \
'obey your data stewardship obligations!'
),
html.H2('Parameters'),
html.Ol([
html.Li(
"n: number of days a limit order to exit a position is " + \
"kept open"
),
html.Li(
"N: number of observed historical trading days to use in " + \
"training the logistic regression model."
),
html.Li(
'alpha: a percentage in numeric form ' + \
'(e.g., "0.02" == "2%") that defines the profit sought by ' + \
'entering a trade; for example, if IVV is bought at ' + \
'price X, then a limit order to sell the shares will be put' + \
' in place at a price = X*(1+alpha)'
),
html.Li(
'lot_size: number of shares traded in each round-trip ' + \
'trade. Kept constant for simplicity.'
),
html.Li(
'date_range: Date range over which to perform the backtest.'
)
]),
html.Div(
[
html.Div(
[
html.Button(
"RUN BACKTEST", id='run-backtest', n_clicks=0
),
html.Table(
[html.Tr([
html.Th('Alpha'), html.Th('Beta'),
html.Th('Geometric Mean Return'),
html.Th('Average Trades per Year'),
html.Th('Volatility'), html.Th('Sharpe')
])] + [html.Tr([
html.Td(html.Div(id='strategy-alpha')),
html.Td(html.Div(id='strategy-beta')),
html.Td(html.Div(id='strategy-gmrr')),
html.Td(html.Div(id='strategy-trades-per-yr')),
html.Td(html.Div(id='strategy-vol')),
html.Td(html.Div(id='strategy-sharpe'))
])],
className='main-summary-table'
),
html.Table(
[html.Tr([
html.Th('Date Range'),
html.Th('Bloomberg Identifier'),
html.Th('n'), html.Th('N'), html.Th('alpha'),
html.Th('Lot Size'),
html.Th('Starting Cash')
])] +
[html.Tr([
html.Td(
dcc.DatePickerRange(
id='hist-data-range',
min_date_allowed=date(2015, 1, 1),
max_date_allowed=date.today(),
initial_visible_month=date.today(),
start_date=date(2019, 3, 16),
end_date=date(2021, 4, 12)
)
),
html.Td(dcc.Input(
id='bbg-identifier-1', type="text",
value="IVV US Equity",
style={'text-align': 'center'}
)),
html.Td(
dcc.Input(
id='lil-n', type="number", value=5,
style={'text-align': 'center',
'width': '30px'}
)
),
html.Td(
dcc.Input(
id='big-N', type="number", value=10,
style={'text-align': 'center',
'width': '50px'}
)
),
html.Td(
dcc.Input(
id="alpha", type="number", value=0.02,
style={'text-align': 'center',
'width': '50px'}
)
),
html.Td(
dcc.Input(
id="lot-size", type="number", value=100,
style={'text-align': 'center',
'width': '50px'}
)
),
html.Td(
dcc.Input(
id="starting-cash", type="number",
value=50000,
style={'text-align': 'center',
'width': '100px'}
)
)
])]
)
],
style={'display': 'inline-block', 'width': '50%'}
)
],
style={'display': 'block'}
)
],
style={
'display': 'inline-block', 'width': '50%', 'vertical-align': 'top'
}
),
| true
| true
|
79078b662dee357e0c099b4fd95ccd17c1e54069
| 7,215
|
py
|
Python
|
wandb/vendor/pygments/lexers/smalltalk.py
|
dreamflasher/client
|
c8267f1c6b8b6970172d622bb8fbf7cc773d78b2
|
[
"MIT"
] | 3,968
|
2017-08-23T21:27:19.000Z
|
2022-03-31T22:00:19.000Z
|
wandb/vendor/pygments/lexers/smalltalk.py
|
dreamflasher/client
|
c8267f1c6b8b6970172d622bb8fbf7cc773d78b2
|
[
"MIT"
] | 2,725
|
2017-04-17T00:29:15.000Z
|
2022-03-31T21:01:53.000Z
|
wandb/vendor/pygments/lexers/smalltalk.py
|
dreamflasher/client
|
c8267f1c6b8b6970172d622bb8fbf7cc773d78b2
|
[
"MIT"
] | 351
|
2018-04-08T19:39:34.000Z
|
2022-03-30T19:38:08.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.smalltalk
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Smalltalk and related languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SmalltalkLexer', 'NewspeakLexer']
class SmalltalkLexer(RegexLexer):
    """
    For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
    Contributed by Stefan Matthias Aust.
    Rewritten by Nils Winter.
    .. versionadded:: 0.10
    """
    name = 'Smalltalk'
    filenames = ['*.st']
    aliases = ['smalltalk', 'squeak', 'st']
    mimetypes = ['text/x-smalltalk']
    # Lexer states: each maps a state name to a list of
    # (regex, token[, next-state]) rules, per pygments' RegexLexer protocol.
    tokens = {
        'root': [
            (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
            include('squeak fileout'),
            include('whitespaces'),
            include('method definition'),
            (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
            include('objects'),
            (r'\^|\:=|\_', Operator),
            # temporaries
            (r'[\]({}.;!]', Text),
        ],
        'method definition': [
            # Not perfect can't allow whitespaces at the beginning and the
            # without breaking everything
            (r'([a-zA-Z]+\w*:)(\s*)(\w+)',
             bygroups(Name.Function, Text, Name.Variable)),
            (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
            (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
             bygroups(Name.Function, Text, Name.Variable, Text)),
        ],
        'blockvariables': [
            include('whitespaces'),
            (r'(:)(\s*)(\w+)',
             bygroups(Operator, Text, Name.Variable)),
            (r'\|', Operator, '#pop'),
            default('#pop'),  # else pop
        ],
        'literals': [
            (r"'(''|[^'])*'", String, 'afterobject'),
            (r'\$.', String.Char, 'afterobject'),
            (r'#\(', String.Symbol, 'parenth'),
            (r'\)', Text, 'afterobject'),
            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
        ],
        '_parenth_helper': [
            include('whitespaces'),
            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
            (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
            # literals
            (r"'(''|[^'])*'", String),
            (r'\$.', String.Char),
            (r'#*\(', String.Symbol, 'inner_parenth'),
        ],
        'parenth': [
            # This state is a bit tricky since
            # we can't just pop this state
            (r'\)', String.Symbol, ('root', 'afterobject')),
            include('_parenth_helper'),
        ],
        'inner_parenth': [
            (r'\)', String.Symbol, '#pop'),
            include('_parenth_helper'),
        ],
        'whitespaces': [
            # skip whitespace and comments
            (r'\s+', Text),
            (r'"(""|[^"])*"', Comment),
        ],
        'objects': [
            (r'\[', Text, 'blockvariables'),
            (r'\]', Text, 'afterobject'),
            (r'\b(self|super|true|false|nil|thisContext)\b',
             Name.Builtin.Pseudo, 'afterobject'),
            (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
            (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
            (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
             String.Symbol, 'afterobject'),
            include('literals'),
        ],
        'afterobject': [
            (r'! !$', Keyword, '#pop'),  # squeak chunk delimiter
            include('whitespaces'),
            (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
             Name.Builtin, '#pop'),
            (r'\b(new\b(?!:))', Name.Builtin),
            (r'\:=|\_', Operator, '#pop'),
            (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
            (r'\b[a-zA-Z]+\w*', Name.Function),
            (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
            (r'\.', Punctuation, '#pop'),
            (r';', Punctuation),
            (r'[\])}]', Text),
            (r'[\[({]', Text, '#pop'),
        ],
        'squeak fileout': [
            # Squeak fileout format (optional)
            (r'^"(""|[^"])*"!', Keyword),
            (r"^'(''|[^'])*'!", Keyword),
            (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
             bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
            (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
             bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
            (r'^(\w+)( subclass: )(#\w+)'
             r'(\s+instanceVariableNames: )(.*?)'
             r'(\s+classVariableNames: )(.*?)'
             r'(\s+poolDictionaries: )(.*?)'
             r'(\s+category: )(.*?)(!)',
             bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
                      String, Keyword, String, Keyword, String, Keyword)),
            (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
             bygroups(Name.Class, Keyword, String, Keyword)),
            (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
            (r'! !$', Keyword),
        ],
    }
class NewspeakLexer(RegexLexer):
    """
    For `Newspeak <http://newspeaklanguage.org/>` syntax.
    .. versionadded:: 1.1
    """
    name = 'Newspeak'
    filenames = ['*.ns2']
    aliases = ['newspeak', ]
    mimetypes = ['text/x-newspeak']
    # Lexer states: each maps a state name to a list of
    # (regex, token) rules, per pygments' RegexLexer protocol.
    tokens = {
        'root': [
            (r'\b(Newsqueak2)\b', Keyword.Declaration),
            (r"'[^']*'", String),
            (r'\b(class)(\s+)(\w+)(\s*)',
             bygroups(Keyword.Declaration, Text, Name.Class, Text)),
            (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
             Keyword),
            (r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
             bygroups(Name.Function, Text, Name.Variable)),
            (r'(\w+)(\s*)(=)',
             bygroups(Name.Attribute, Text, Operator)),
            (r'<\w+>', Comment.Special),
            include('expressionstat'),
            include('whitespace')
        ],
        'expressionstat': [
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'\d+', Number.Integer),
            (r':\w+', Name.Variable),
            (r'(\w+)(::)', bygroups(Name.Variable, Operator)),
            (r'\w+:', Name.Function),
            (r'\w+', Name.Variable),
            (r'\(|\)', Punctuation),
            (r'\[|\]', Punctuation),
            (r'\{|\}', Punctuation),
            (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
            (r'\.|;', Punctuation),
            include('whitespace'),
            include('literals'),
        ],
        'literals': [
            (r'\$.', String),
            (r"'[^']*'", String),
            (r"#'[^']*'", String.Symbol),
            (r"#\w+:?", String.Symbol),
            (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
        ],
        'whitespace': [
            # skip whitespace and double-quoted comments
            (r'\s+', Text),
            (r'"[^"]*"', Comment)
        ],
    }
| 36.811224
| 88
| 0.427859
|
from pygments.lexer import RegexLexer, include, bygroups, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SmalltalkLexer', 'NewspeakLexer']
class SmalltalkLexer(RegexLexer):
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak', 'st']
mimetypes = ['text/x-smalltalk']
tokens = {
'root': [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
(r'[\]({}.;!]', Text),
],
'method definition': [
# without breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables': [
include('whitespaces'),
(r'(:)(\s*)(\w+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '
default('
],
'literals': [
(r"'(''|[^'])*'", String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper': [
include('whitespaces'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
(r"'(''|[^'])*'", String),
(r'\$.', String.Char),
(r'
],
'parenth': [
# This state is a bit tricky since
# we can't just pop this state
(r'\)', String.Symbol, ('root', 'afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces': [
(r'\s+', Text),
(r'"(""|[^"])*"', Comment),
],
'objects': [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject': [
(r'! !$', Keyword, '#pop'),
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout': [
(r'^"(""|[^"])*"!', Keyword),
(r"^'(''|[^'])*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
class NewspeakLexer(RegexLexer):
name = 'Newspeak'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root': [
(r'\b(Newsqueak2)\b', Keyword.Declaration),
(r"'[^']*'", String),
(r'\b(class)(\s+)(\w+)(\s*)',
bygroups(Keyword.Declaration, Text, Name.Class, Text)),
(r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
Keyword),
(r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'<\w+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
(r':\w+', Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace': [
(r'\s+', Text),
(r'"[^"]*"', Comment)
],
}
| true
| true
|
79078d2d6cb76a38c500d4b3243655c644efa6ad
| 1,036
|
py
|
Python
|
fixture/application.py
|
oksanacps/python_for_testing
|
6b358e1900518c02ea0732d95fff2cedb24272e1
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
oksanacps/python_for_testing
|
6b358e1900518c02ea0732d95fff2cedb24272e1
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
oksanacps/python_for_testing
|
6b358e1900518c02ea0732d95fff2cedb24272e1
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox(capabilities={"marionette": False}, firefox_binary="C:/Program Files/Mozilla Firefox/firefox.exe")
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper (self)
self.group = GroupHelper (self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid (self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy (self):
self.wd.quit()
| 30.470588
| 138
| 0.619691
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox(capabilities={"marionette": False}, firefox_binary="C:/Program Files/Mozilla Firefox/firefox.exe")
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper (self)
self.group = GroupHelper (self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid (self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy (self):
self.wd.quit()
| true
| true
|
79078d3a91fd1b326df6198be67c254dfc19289c
| 81
|
py
|
Python
|
HacoWeb/haco/events/apps.py
|
DeanORourke1996/haco
|
fc04d763735ca376c51e82e1f1be20b092ce751c
|
[
"MIT"
] | null | null | null |
HacoWeb/haco/events/apps.py
|
DeanORourke1996/haco
|
fc04d763735ca376c51e82e1f1be20b092ce751c
|
[
"MIT"
] | null | null | null |
HacoWeb/haco/events/apps.py
|
DeanORourke1996/haco
|
fc04d763735ca376c51e82e1f1be20b092ce751c
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class Events(AppConfig):
name = 'events'
| 13.5
| 33
| 0.728395
|
from django.apps import AppConfig
class Events(AppConfig):
name = 'events'
| true
| true
|
79078d42eeb06c659b02123ebd5b46621e1ddf48
| 3,909
|
py
|
Python
|
Modules/Scripted/DMRIInstall/DMRIInstall.py
|
forfullstack/slicersources-src
|
91bcecf037a27f3fad4c0ab57e8286fc258bb0f5
|
[
"Apache-2.0"
] | null | null | null |
Modules/Scripted/DMRIInstall/DMRIInstall.py
|
forfullstack/slicersources-src
|
91bcecf037a27f3fad4c0ab57e8286fc258bb0f5
|
[
"Apache-2.0"
] | null | null | null |
Modules/Scripted/DMRIInstall/DMRIInstall.py
|
forfullstack/slicersources-src
|
91bcecf037a27f3fad4c0ab57e8286fc258bb0f5
|
[
"Apache-2.0"
] | null | null | null |
import os
import string
import textwrap
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# DMRIInstall
#
class DMRIInstall(ScriptedLoadableModule):
"""
"""
helpText = textwrap.dedent(
"""
The SlicerDMRI extension provides diffusion-related tools including:
<ul>
<li> Diffusion Tensor Estimation</li>
<li>Tractography Display</li>
<li>Tractography Seeding</li>
<li>Fiber Tract Measurement</li>
</ul>
<br>
<br>
For more information, please visit:
<br>
<br>
<a href="http://dmri.slicer.org">http://dmri.slicer.org</a>
<br>
<br>
Questions are welcome on the Slicer forum:
<br>
<br>
<a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>
""")
errorText = textwrap.dedent(
"""
<h5 style="color:red">The SlicerDMRI extension is currently unavailable.</h5><br>
Please try a manual installation via the Extension Manager,
and contact the Slicer forum at:<br><br>
<a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>
With the following information:<br>
Slicer version: {builddate}<br>
Slicer revision: {revision}<br>
Platform: {platform}
""").format(builddate=slicer.app.applicationVersion,
revision = slicer.app.repositoryRevision,
platform = slicer.app.platform)
def __init__(self, parent):
# Hide this module if SlicerDMRI is already installed
model = slicer.app.extensionsManagerModel()
if model.isExtensionInstalled("SlicerDMRI"):
parent.hidden = True
ScriptedLoadableModule.__init__(self, parent)
self.parent.categories = ["Diffusion"]
self.parent.title = "Install Slicer Diffusion Tools (SlicerDMRI)"
self.parent.dependencies = []
self.parent.contributors = ["Isaiah Norton (BWH), Lauren O'Donnell (BWH)"]
self.parent.helpText = DMRIInstall.helpText
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = textwrap.dedent(
"""
SlicerDMRI supported by NIH NCI ITCR U01CA199459 (Open Source Diffusion MRI
Technology For Brain Cancer Research), and made possible by NA-MIC, NAC,
BIRN, NCIGT, and the Slicer Community.
""")
class DMRIInstallWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
self.textBox = ctk.ctkFittedTextBrowser()
self.textBox.setOpenExternalLinks(True) # Open links in default browser
self.textBox.setHtml(DMRIInstall.helpText)
self.parent.layout().addWidget(self.textBox)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Install SlicerDMRI")
self.applyButton.toolTip = 'Installs the "SlicerDMRI" extension from the Diffusion category.'
self.applyButton.icon = qt.QIcon(":/Icons/ExtensionDefaultIcon.png")
self.applyButton.enabled = True
self.applyButton.connect('clicked()', self.onApply)
self.parent.layout().addWidget(self.applyButton)
self.parent.layout().addStretch(1)
def onError(self):
self.applyButton.enabled = False
self.textBox.setHtml(DMRIInstall.errorText)
return
def onApply(self):
emm = slicer.app.extensionsManagerModel()
if emm.isExtensionInstalled("SlicerDMRI"):
self.textBox.setHtml("<h4>SlicerDMRI is already installed.<h4>")
self.applyButton.enabled = False
return
md = emm.retrieveExtensionMetadataByName("SlicerDMRI")
if not md or 'extension_id' not in md:
return self.onError()
if emm.downloadAndInstallExtension(md['extension_id']):
slicer.app.confirmRestart("Restart to complete SlicerDMRI installation?")
else:
self.onError()
| 30.539063
| 97
| 0.712714
|
import os
import string
import textwrap
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
class DMRIInstall(ScriptedLoadableModule):
helpText = textwrap.dedent(
"""
The SlicerDMRI extension provides diffusion-related tools including:
<ul>
<li> Diffusion Tensor Estimation</li>
<li>Tractography Display</li>
<li>Tractography Seeding</li>
<li>Fiber Tract Measurement</li>
</ul>
<br>
<br>
For more information, please visit:
<br>
<br>
<a href="http://dmri.slicer.org">http://dmri.slicer.org</a>
<br>
<br>
Questions are welcome on the Slicer forum:
<br>
<br>
<a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>
""")
errorText = textwrap.dedent(
"""
<h5 style="color:red">The SlicerDMRI extension is currently unavailable.</h5><br>
Please try a manual installation via the Extension Manager,
and contact the Slicer forum at:<br><br>
<a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>
With the following information:<br>
Slicer version: {builddate}<br>
Slicer revision: {revision}<br>
Platform: {platform}
""").format(builddate=slicer.app.applicationVersion,
revision = slicer.app.repositoryRevision,
platform = slicer.app.platform)
def __init__(self, parent):
model = slicer.app.extensionsManagerModel()
if model.isExtensionInstalled("SlicerDMRI"):
parent.hidden = True
ScriptedLoadableModule.__init__(self, parent)
self.parent.categories = ["Diffusion"]
self.parent.title = "Install Slicer Diffusion Tools (SlicerDMRI)"
self.parent.dependencies = []
self.parent.contributors = ["Isaiah Norton (BWH), Lauren O'Donnell (BWH)"]
self.parent.helpText = DMRIInstall.helpText
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = textwrap.dedent(
"""
SlicerDMRI supported by NIH NCI ITCR U01CA199459 (Open Source Diffusion MRI
Technology For Brain Cancer Research), and made possible by NA-MIC, NAC,
BIRN, NCIGT, and the Slicer Community.
""")
class DMRIInstallWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
self.textBox = ctk.ctkFittedTextBrowser()
self.textBox.setOpenExternalLinks(True) # Open links in default browser
self.textBox.setHtml(DMRIInstall.helpText)
self.parent.layout().addWidget(self.textBox)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Install SlicerDMRI")
self.applyButton.toolTip = 'Installs the "SlicerDMRI" extension from the Diffusion category.'
self.applyButton.icon = qt.QIcon(":/Icons/ExtensionDefaultIcon.png")
self.applyButton.enabled = True
self.applyButton.connect('clicked()', self.onApply)
self.parent.layout().addWidget(self.applyButton)
self.parent.layout().addStretch(1)
def onError(self):
self.applyButton.enabled = False
self.textBox.setHtml(DMRIInstall.errorText)
return
def onApply(self):
emm = slicer.app.extensionsManagerModel()
if emm.isExtensionInstalled("SlicerDMRI"):
self.textBox.setHtml("<h4>SlicerDMRI is already installed.<h4>")
self.applyButton.enabled = False
return
md = emm.retrieveExtensionMetadataByName("SlicerDMRI")
if not md or 'extension_id' not in md:
return self.onError()
if emm.downloadAndInstallExtension(md['extension_id']):
slicer.app.confirmRestart("Restart to complete SlicerDMRI installation?")
else:
self.onError()
| true
| true
|
79078e60f336ac659b3e4be78384693a30f6d379
| 1,280
|
py
|
Python
|
nodes/teleop_joy.py
|
Lovestarni/asv_simulator
|
824c832f071c51212367569a07f67e2dadfc1401
|
[
"MIT"
] | 7
|
2016-10-07T14:46:19.000Z
|
2021-05-14T03:18:04.000Z
|
nodes/teleop_joy.py
|
Lovestarni/asv_simulator
|
824c832f071c51212367569a07f67e2dadfc1401
|
[
"MIT"
] | 2
|
2015-03-18T10:16:04.000Z
|
2015-03-23T12:00:00.000Z
|
nodes/teleop_joy.py
|
Lovestarni/asv_simulator
|
824c832f071c51212367569a07f67e2dadfc1401
|
[
"MIT"
] | 1
|
2021-05-14T03:17:57.000Z
|
2021-05-14T03:17:57.000Z
|
#!/usr/bin/env python
## @package teleop_joy A node for controlling the P3DX with an XBox controller
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Joy
import numpy as np
def quat2yaw(q):
return np.arctan2(2*(q.y*q.z + q.w*q.x), 1 - 2*(q.z**2 + q.w**2))
def joyCallback(msg):
global cmd_vel_pub
global linear_axis
global linear_scale
global rotation_axis
global rotation_scale
global yaw
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = msg.axes[linear_axis] * linear_scale
cmd_vel_msg.angular.z = msg.axes[rotation_axis] * rotation_scale
cmd_vel_msg.angular.y = np.inf
cmd_vel_pub.publish(cmd_vel_msg)
if __name__ == '__main__':
rospy.init_node('teleop_joy')
global cmd_vel_pub
global linear_axis
global linear_scale
global rotation_axis
global rotation_scale
global yaw
linear_axis = rospy.get_param('linear_axis' , 1)
linear_scale = rospy.get_param('linear_scale' , 5)
rotation_axis = rospy.get_param('rotation_axis' , 3)
rotation_scale = rospy.get_param('rotation_scale', 1)
cmd_vel_pub = rospy.Publisher("/asv/cmd_vel", Twist, queue_size=1)
rospy.Subscriber("joy", Joy, joyCallback)
rospy.spin()
| 24.615385
| 78
| 0.715625
|
Odometry
from sensor_msgs.msg import Joy
import numpy as np
def quat2yaw(q):
return np.arctan2(2*(q.y*q.z + q.w*q.x), 1 - 2*(q.z**2 + q.w**2))
def joyCallback(msg):
global cmd_vel_pub
global linear_axis
global linear_scale
global rotation_axis
global rotation_scale
global yaw
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = msg.axes[linear_axis] * linear_scale
cmd_vel_msg.angular.z = msg.axes[rotation_axis] * rotation_scale
cmd_vel_msg.angular.y = np.inf
cmd_vel_pub.publish(cmd_vel_msg)
if __name__ == '__main__':
rospy.init_node('teleop_joy')
global cmd_vel_pub
global linear_axis
global linear_scale
global rotation_axis
global rotation_scale
global yaw
linear_axis = rospy.get_param('linear_axis' , 1)
linear_scale = rospy.get_param('linear_scale' , 5)
rotation_axis = rospy.get_param('rotation_axis' , 3)
rotation_scale = rospy.get_param('rotation_scale', 1)
cmd_vel_pub = rospy.Publisher("/asv/cmd_vel", Twist, queue_size=1)
rospy.Subscriber("joy", Joy, joyCallback)
rospy.spin()
| true
| true
|
79078f4776eebf20cf5d78387beeb983ccfe4a12
| 3,963
|
py
|
Python
|
benchmark/startQiskit_noisy1996.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1996.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1996.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=32
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
prog.x(input_qubit[3]) # number=14
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.cx(input_qubit[2],input_qubit[3]) # number=22
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=24
prog.cz(input_qubit[3],input_qubit[2]) # number=25
prog.h(input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.x(input_qubit[2]) # number=23
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[1]) # number=20
prog.cx(input_qubit[0],input_qubit[1]) # number=29
prog.x(input_qubit[1]) # number=30
prog.cx(input_qubit[0],input_qubit[1]) # number=31
prog.x(input_qubit[3]) # number=27
prog.x(input_qubit[3]) # number=28
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1996.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 33.871795
| 140
| 0.651527
|
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.y(input_qubit[3])
prog.cx(input_qubit[2],input_qubit[3])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.cz(input_qubit[3],input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.x(input_qubit[2])
prog.h(input_qubit[0])
prog.y(input_qubit[2])
prog.y(input_qubit[2])
prog.x(input_qubit[1])
prog.cx(input_qubit[0],input_qubit[1])
prog.x(input_qubit[1])
prog.cx(input_qubit[0],input_qubit[1])
prog.x(input_qubit[3])
prog.x(input_qubit[3])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1996.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true
| true
|
79078f88f1002751c1f78d94c189c53568202ab0
| 1,743
|
py
|
Python
|
002_Particle_Filter/Particle_Filter.py
|
zhyongquan/Automotive-Software-Blog
|
c35bed037190fd6181f20c55d1621fd11f01480b
|
[
"MIT"
] | 4
|
2018-08-12T01:40:39.000Z
|
2021-03-19T23:58:25.000Z
|
002_Particle_Filter/Particle_Filter.py
|
zhyongquan/Automotive-Software-Blog
|
c35bed037190fd6181f20c55d1621fd11f01480b
|
[
"MIT"
] | null | null | null |
002_Particle_Filter/Particle_Filter.py
|
zhyongquan/Automotive-Software-Blog
|
c35bed037190fd6181f20c55d1621fd11f01480b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
def estimate(particles, weights):
"""returns mean and variance of the weighted particles"""
pos = particles
mean = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mean)**2, weights=weights, axis=0)
return mean, var
def simple_resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, np.random.rand(N))
# resample according to indexes
particles[:] = particles[indexes]
weights.fill(1.0 / N)
return particles,weights
x=0.1#初始真实状态
x_N=1#系统过程噪声的协方差(由于是一维的,这里就是方差)
x_R=1#测量的协方差
T=75#共进行75次
N=100#粒子数,越大效果越好,计算量也越大
V=2#初始分布的方差
x_P=x+np.random.randn(N)*np.sqrt(V)
#plt.hist(x_P,N, normed=True)
z_out=[x**2/20+np.random.randn(1)*np.sqrt(x_R)]#实际测量值
x_out=[x]#测量值的输出向量
x_est=x#估计值
x_est_out=[x_est]
#print(x_out)
for t in range(1,T):
x=0.5*x+25*x/(1+x**2)+8*np.cos(1.2*(t-1))+np.random.randn()*np.sqrt(x_N)
z=x**2/20+np.random.randn()*np.sqrt(x_R)
#更新粒子
x_P_update=0.5*x_P+25*x_P/(1+x_P**2)+8*np.cos(1.2*(t-1))+np.random.randn(N)*np.sqrt(x_N)
z_update=x_P_update**2/20+np.random.randn(N)*np.sqrt(x_R)
#print(z_update)
#计算权重
P_w=(1/np.sqrt(2*np.pi*x_R))*np.exp(-(z-z_update)**2/(2*x_R))
#估计
x_est,var=estimate(z_update,P_w)
#重采样
x_P,P_w=simple_resample(x_P,P_w)
#保存数据
x_out.append(x)
z_out.append(z)
x_est_out.append(x_est)
#print(x_out)
t=np.arange(0,T)
plt.plot(t,x_out,color='blue',label='true value')
plt.plot(t,x_est_out,color='red',label='estimate value')
plt.legend()
plt.show()
| 29.05
| 93
| 0.650602
|
import numpy as np
import matplotlib.pyplot as plt
def estimate(particles, weights):
pos = particles
mean = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mean)**2, weights=weights, axis=0)
return mean, var
def simple_resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1.
indexes = np.searchsorted(cumulative_sum, np.random.rand(N))
particles[:] = particles[indexes]
weights.fill(1.0 / N)
return particles,weights
x=0.1
x_N=1
x_R=1
T=75
N=100
V=2
x_P=x+np.random.randn(N)*np.sqrt(V)
z_out=[x**2/20+np.random.randn(1)*np.sqrt(x_R)]
x_out=[x]
x_est=x
x_est_out=[x_est]
for t in range(1,T):
x=0.5*x+25*x/(1+x**2)+8*np.cos(1.2*(t-1))+np.random.randn()*np.sqrt(x_N)
z=x**2/20+np.random.randn()*np.sqrt(x_R)
x_P_update=0.5*x_P+25*x_P/(1+x_P**2)+8*np.cos(1.2*(t-1))+np.random.randn(N)*np.sqrt(x_N)
z_update=x_P_update**2/20+np.random.randn(N)*np.sqrt(x_R)
P_w=(1/np.sqrt(2*np.pi*x_R))*np.exp(-(z-z_update)**2/(2*x_R))
x_est,var=estimate(z_update,P_w)
x_P,P_w=simple_resample(x_P,P_w)
x_out.append(x)
z_out.append(z)
x_est_out.append(x_est)
t=np.arange(0,T)
plt.plot(t,x_out,color='blue',label='true value')
plt.plot(t,x_est_out,color='red',label='estimate value')
plt.legend()
plt.show()
| true
| true
|
7907903afbdf9bf8e217b08c3df28f2e7b310fd6
| 48
|
py
|
Python
|
resources_crawler/__init__.py
|
ruzhnikov/resources-crawler
|
700d316588d54ad142ce6ae48e5d1d62477e3e5e
|
[
"MIT"
] | null | null | null |
resources_crawler/__init__.py
|
ruzhnikov/resources-crawler
|
700d316588d54ad142ce6ae48e5d1d62477e3e5e
|
[
"MIT"
] | null | null | null |
resources_crawler/__init__.py
|
ruzhnikov/resources-crawler
|
700d316588d54ad142ce6ae48e5d1d62477e3e5e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__version__ = "0.1.0"
| 9.6
| 23
| 0.5
|
__version__ = "0.1.0"
| true
| true
|
790790f4f5ffb85003d5e29b48b026613b25aacf
| 1,881
|
py
|
Python
|
euca2ools/commands/iam/deleteaccount.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 30
|
2015-02-10T05:47:38.000Z
|
2022-01-20T08:48:43.000Z
|
euca2ools/commands/iam/deleteaccount.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 16
|
2015-01-08T23:24:34.000Z
|
2018-07-18T07:15:40.000Z
|
euca2ools/commands/iam/deleteaccount.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 19
|
2015-05-07T05:34:42.000Z
|
2020-12-13T10:50:14.000Z
|
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, arg_account_name
class DeleteAccount(IAMRequest):
DESCRIPTION = '[Eucalyptus cloud admin only] Delete an account'
ARGS = [arg_account_name(
help='name or ID of the account to delete (required)'),
Arg('-r', '--recursive', dest='Recursive', action='store_const',
const='true', help='''delete all users, groups, and policies
associated with the account as well''')]
| 49.5
| 78
| 0.743222
|
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, arg_account_name
class DeleteAccount(IAMRequest):
DESCRIPTION = '[Eucalyptus cloud admin only] Delete an account'
ARGS = [arg_account_name(
help='name or ID of the account to delete (required)'),
Arg('-r', '--recursive', dest='Recursive', action='store_const',
const='true', help='''delete all users, groups, and policies
associated with the account as well''')]
| true
| true
|
790791c317e80f011fe64c0acfdb9d4842f95ca4
| 3,629
|
py
|
Python
|
userbot/modules/gitcommit.py
|
fhmyngrh/ZELDA-UBOT
|
c75bb37f6cd952e429a869fb524c061c530b6046
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2021-12-27T02:23:24.000Z
|
2021-12-28T06:25:39.000Z
|
userbot/modules/gitcommit.py
|
Ditomaheswara/Dito-Ubot
|
c75bb37f6cd952e429a869fb524c061c530b6046
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/gitcommit.py
|
Ditomaheswara/Dito-Ubot
|
c75bb37f6cd952e429a869fb524c061c530b6046
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5
|
2021-12-27T02:23:06.000Z
|
2022-02-05T08:33:06.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
# Ported to UserBot by @Mayur_Karaniya
import os
import time
from datetime import datetime
from github import Github
# from sample_config import Config
# from uniborg.util import admin_cmd, humanbytes, progress, time_formatter
# from userbot.events import humanbytes, progress, time_formatter
from userbot import CMD_HELP, GIT_REPO_NAME, GITHUB_ACCESS_TOKEN, bot
from userbot.events import zelda_cmd
GIT_TEMP_DIR = "./userbot/temp/"
@bot.on(zelda_cmd(outgoing=True, pattern=r"gcommit(?: |$)(.*)"))
async def download(event):
if event.fwd_from:
return
if GITHUB_ACCESS_TOKEN is None:
await event.edit("`Please ADD Proper Access Token from github.com`")
return
if GIT_REPO_NAME is None:
await event.edit("`Please ADD Proper Github Repo Name of your userbot`")
return
mone = await event.reply("Processing ...")
if not os.path.isdir(GIT_TEMP_DIR):
os.makedirs(GIT_TEMP_DIR)
start = datetime.now()
reply_message = await event.get_reply_message()
try:
time.time()
print("Downloading to TEMP directory")
downloaded_file_name = await bot.download_media(
reply_message.media, GIT_TEMP_DIR
)
except Exception as e:
await mone.edit(str(e))
else:
end = datetime.now()
ms = (end - start).seconds
await event.delete()
await mone.edit(
"Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms)
)
await mone.edit("Committing to Github....")
await git_commit(downloaded_file_name, mone)
async def git_commit(file_name, mone):
content_list = []
access_token = GITHUB_ACCESS_TOKEN
g = Github(access_token)
file = open(file_name, "r", encoding="utf-8")
commit_data = file.read()
repo = g.get_repo(GIT_REPO_NAME)
print(repo.name)
create_file = True
contents = repo.get_contents("")
for content_file in contents:
content_list.append(str(content_file))
print(content_file)
for i in content_list:
create_file = True
if i == 'ContentFile(path="' + file_name + '")':
return await mone.edit("`File Already Exists`")
file_name = "userbot/modules/" + file_name
if create_file:
file_name = file_name.replace("./userbot/temp/", "")
print(file_name)
try:
repo.create_file(
file_name, "Uploaded New Plugin", commit_data, branch="master"
)
print("Committed File")
ccess = GIT_REPO_NAME
ccess = ccess.strip()
await mone.edit(
f"`Commited On Your Github Repo`\n\n[Your Modules](https://github.com/{ccess}/tree/sql-extended/userbot/modules/)"
)
except BaseException:
print("Cannot Create Plugin")
await mone.edit("Cannot Upload Plugin")
else:
return await mone.edit("`Committed Suicide`")
CMD_HELP.update(
{
"gcommit": "**Plugin : **`gcommit`\
\n\n • **Syntax :** `.gcommit`\
\n • **Function : **Plugin Pengunggah File GITHUB untuk userbot. Otomatisasi Heroku harus Diaktifkan. Untuk orang pemalas\
\n\n • **Instructions:-** Pertama Atur variabel GITHUB_ACCESS_TOKEN dan GIT_REPO_NAME di Heroku vars.\n.commit reply_to_any_plugin bisa menjadi tipe berkas apapun juga. tetapi untuk plugin harus di .py\
"
}
)
| 35.23301
| 212
| 0.647837
|
import os
import time
from datetime import datetime
from github import Github
from userbot import CMD_HELP, GIT_REPO_NAME, GITHUB_ACCESS_TOKEN, bot
from userbot.events import zelda_cmd
GIT_TEMP_DIR = "./userbot/temp/"
@bot.on(zelda_cmd(outgoing=True, pattern=r"gcommit(?: |$)(.*)"))
async def download(event):
if event.fwd_from:
return
if GITHUB_ACCESS_TOKEN is None:
await event.edit("`Please ADD Proper Access Token from github.com`")
return
if GIT_REPO_NAME is None:
await event.edit("`Please ADD Proper Github Repo Name of your userbot`")
return
mone = await event.reply("Processing ...")
if not os.path.isdir(GIT_TEMP_DIR):
os.makedirs(GIT_TEMP_DIR)
start = datetime.now()
reply_message = await event.get_reply_message()
try:
time.time()
print("Downloading to TEMP directory")
downloaded_file_name = await bot.download_media(
reply_message.media, GIT_TEMP_DIR
)
except Exception as e:
await mone.edit(str(e))
else:
end = datetime.now()
ms = (end - start).seconds
await event.delete()
await mone.edit(
"Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms)
)
await mone.edit("Committing to Github....")
await git_commit(downloaded_file_name, mone)
async def git_commit(file_name, mone):
content_list = []
access_token = GITHUB_ACCESS_TOKEN
g = Github(access_token)
file = open(file_name, "r", encoding="utf-8")
commit_data = file.read()
repo = g.get_repo(GIT_REPO_NAME)
print(repo.name)
create_file = True
contents = repo.get_contents("")
for content_file in contents:
content_list.append(str(content_file))
print(content_file)
for i in content_list:
create_file = True
if i == 'ContentFile(path="' + file_name + '")':
return await mone.edit("`File Already Exists`")
file_name = "userbot/modules/" + file_name
if create_file:
file_name = file_name.replace("./userbot/temp/", "")
print(file_name)
try:
repo.create_file(
file_name, "Uploaded New Plugin", commit_data, branch="master"
)
print("Committed File")
ccess = GIT_REPO_NAME
ccess = ccess.strip()
await mone.edit(
f"`Commited On Your Github Repo`\n\n[Your Modules](https://github.com/{ccess}/tree/sql-extended/userbot/modules/)"
)
except BaseException:
print("Cannot Create Plugin")
await mone.edit("Cannot Upload Plugin")
else:
return await mone.edit("`Committed Suicide`")
CMD_HELP.update(
{
"gcommit": "**Plugin : **`gcommit`\
\n\n • **Syntax :** `.gcommit`\
\n • **Function : **Plugin Pengunggah File GITHUB untuk userbot. Otomatisasi Heroku harus Diaktifkan. Untuk orang pemalas\
\n\n • **Instructions:-** Pertama Atur variabel GITHUB_ACCESS_TOKEN dan GIT_REPO_NAME di Heroku vars.\n.commit reply_to_any_plugin bisa menjadi tipe berkas apapun juga. tetapi untuk plugin harus di .py\
"
}
)
| true
| true
|
790792a6a2d63a7e7be39c406e91d706802cb210
| 261
|
gyp
|
Python
|
binding.gyp
|
kvantetore/function-info
|
d0a23cd8b641b8f724c15ddde44c78014150a4f5
|
[
"MIT"
] | null | null | null |
binding.gyp
|
kvantetore/function-info
|
d0a23cd8b641b8f724c15ddde44c78014150a4f5
|
[
"MIT"
] | null | null | null |
binding.gyp
|
kvantetore/function-info
|
d0a23cd8b641b8f724c15ddde44c78014150a4f5
|
[
"MIT"
] | null | null | null |
{
"targets": [
{
"target_name": "functionInfo",
"sources": [
"src/functionInfo.cc"
],
"include_dirs": [
"<!(node -e \"require('nan')\")"
]
}
]
}
| 18.642857
| 48
| 0.310345
|
{
"targets": [
{
"target_name": "functionInfo",
"sources": [
"src/functionInfo.cc"
],
"include_dirs": [
"<!(node -e \"require('nan')\")"
]
}
]
}
| true
| true
|
790792f77ef2f199b0c0e36b5d65248374cfbf35
| 11,771
|
py
|
Python
|
nrekit/rl.py
|
qingdujun/manual-nre
|
c32ecc9397e2533dfd2cb8d7e5b9e748293028f8
|
[
"MIT"
] | null | null | null |
nrekit/rl.py
|
qingdujun/manual-nre
|
c32ecc9397e2533dfd2cb8d7e5b9e748293028f8
|
[
"MIT"
] | null | null | null |
nrekit/rl.py
|
qingdujun/manual-nre
|
c32ecc9397e2533dfd2cb8d7e5b9e748293028f8
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import os
import sklearn.metrics
import numpy as np
import sys
import math
import time
from . import framework
import network
class policy_agent(framework.re_model):
def __init__(self, train_data_loader, batch_size, max_length=120):
framework.re_model.__init__(self, train_data_loader, batch_size, max_length)
self.weights = tf.placeholder(tf.float32, shape=(), name="weights_scalar")
x = network.embedding.word_position_embedding(self.word, self.word_vec_mat, self.pos1, self.pos2)
x_train = network.encoder.cnn(x, keep_prob=0.5)
x_test = network.encoder.cnn(x, keep_prob=1.0)
self._train_logit = network.selector.instance(x_train, 2, keep_prob=0.5)
self._test_logit = network.selector.instance(x_test, 2, keep_prob=1.0)
self._loss = network.classifier.softmax_cross_entropy(self._train_logit, self.ins_label, 2, weights=self.weights)
def loss(self):
return self._loss
def train_logit(self):
return self._train_logit
def test_logit(self):
return self._test_logit
class rl_re_framework(framework.re_framework):
def __init__(self, train_data_loader, test_data_loader, max_length=120, batch_size=160):
framework.re_framework.__init__(self, train_data_loader, test_data_loader, max_length, batch_size)
def agent_one_step(self, sess, agent_model, batch_data, run_array, weights=1):
feed_dict = {
agent_model.word: batch_data['word'],
agent_model.pos1: batch_data['pos1'],
agent_model.pos2: batch_data['pos2'],
agent_model.ins_label: batch_data['agent_label'],
agent_model.length: batch_data['length'],
agent_model.weights: weights
}
if 'mask' in batch_data and hasattr(agent_model, "mask"):
feed_dict.update({agent_model.mask: batch_data['mask']})
result = sess.run(run_array, feed_dict)
return result
def pretrain_main_model(self, max_epoch):
for epoch in range(max_epoch):
print('###### Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
i = 0
time_sum = 0
for i, batch_data in enumerate(self.train_data_loader):
time_start = time.time()
iter_loss, iter_logit, _train_op = self.one_step(self.sess, self.model, batch_data, [self.model.loss(), self.model.train_logit(), self.train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
iter_label = batch_data['rel']
iter_correct = (iter_output == iter_label).sum()
iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += iter_label.shape[0]
tot_not_na += (iter_label != 0).sum()
if tot_not_na > 0:
sys.stdout.write("[pretrain main model] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
print("\nAverage iteration time: %f" % (time_sum / i))
def pretrain_agent_model(self, max_epoch):
# Pre-train policy agent
for epoch in range(max_epoch):
print('###### [Pre-train Policy Agent] Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
time_sum = 0
for i, batch_data in enumerate(self.train_data_loader):
time_start = time.time()
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
iter_loss, iter_logit, _train_op = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.loss(), self.agent_model.train_logit(), self.agent_train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
iter_label = batch_data['ins_rel']
iter_correct = (iter_output == iter_label).sum()
iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += iter_label.shape[0]
tot_not_na += (iter_label != 0).sum()
if tot_not_na > 0:
sys.stdout.write("[pretrain policy agent] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
def train(self,
model, # The main model
agent_model, # The model of policy agent
model_name,
ckpt_dir='./checkpoint',
summary_dir='./summary',
test_result_dir='./test_result',
learning_rate=0.5,
max_epoch=60,
pretrain_agent_epoch=1,
pretrain_model=None,
test_epoch=1,
optimizer=tf.train.GradientDescentOptimizer):
print("Start training...")
# Init
self.model = model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
model_optimizer = optimizer(learning_rate)
grads = model_optimizer.compute_gradients(self.model.loss())
self.train_op = model_optimizer.apply_gradients(grads)
# Init policy agent
self.agent_model = agent_model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
agent_optimizer = optimizer(learning_rate)
agent_grads = agent_optimizer.compute_gradients(self.agent_model.loss())
self.agent_train_op = agent_optimizer.apply_gradients(agent_grads)
# Session, writer and saver
self.sess = tf.Session()
summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
saver = tf.train.Saver(max_to_keep=None)
if pretrain_model is None:
self.sess.run(tf.global_variables_initializer())
else:
saver.restore(self.sess, pretrain_model)
self.pretrain_main_model(max_epoch=5) # Pre-train main model
self.pretrain_agent_model(max_epoch=1) # Pre-train policy agent
# Train
tot_delete = 0
batch_count = 0
instance_count = 0
reward = 0.0
best_metric = 0
best_prec = None
best_recall = None
not_best_count = 0 # Stop training after several epochs without improvement.
for epoch in range(max_epoch):
print('###### Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
i = 0
time_sum = 0
batch_stack = []
# Update policy agent
for i, batch_data in enumerate(self.train_data_loader):
# Make action
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
batch_stack.append(batch_data)
iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
action_result = iter_logit.argmax(-1)
# Calculate reward
batch_delete = np.sum(np.logical_and(batch_data['ins_rel'] != 0, action_result == 0))
batch_data['ins_rel'][action_result == 0] = 0
iter_loss = self.one_step(self.sess, self.model, batch_data, [self.model.loss()])[0]
reward += iter_loss
tot_delete += batch_delete
batch_count += 1
# Update parameters of policy agent
alpha = 0.1
if batch_count == 100:
reward = reward / float(batch_count)
average_loss = reward
reward = - math.log(1 - math.e ** (-reward))
sys.stdout.write('tot delete : %f | reward : %f | average loss : %f\r' % (tot_delete, reward, average_loss))
sys.stdout.flush()
for batch_data in batch_stack:
self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_train_op], weights=reward * alpha)
batch_count = 0
reward = 0
tot_delete = 0
batch_stack = []
i += 1
# Train the main model
for i, batch_data in enumerate(self.train_data_loader):
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
time_start = time.time()
# Make actions
iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
action_result = iter_logit.argmax(-1)
batch_data['ins_rel'][action_result == 0] = 0
# Real training
iter_loss, iter_logit, _train_op = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.loss(), self.agent_model.train_logit(), self.agent_train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
if tot_not_na > 0:
sys.stdout.write("epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
print("\nAverage iteration time: %f" % (time_sum / i))
if (epoch + 1) % test_epoch == 0:
metric = self.test(model)
if metric > best_metric:
best_metric = metric
best_prec = self.cur_prec
best_recall = self.cur_recall
print("Best model, storing...")
if not os.path.isdir(ckpt_dir):
os.mkdir(ckpt_dir)
path = saver.save(self.sess, os.path.join(ckpt_dir, model_name))
print("Finish storing")
not_best_count = 0
else:
not_best_count += 1
if not_best_count >= 20:
break
print("######")
print("Finish training " + model_name)
print("Best epoch auc = %f" % (best_metric))
if (not best_prec is None) and (not best_recall is None):
if not os.path.isdir(test_result_dir):
os.mkdir(test_result_dir)
np.save(os.path.join(test_result_dir, model_name + "_x.npy"), best_recall)
np.save(os.path.join(test_result_dir, model_name + "_y.npy"), best_prec)
| 46.525692
| 231
| 0.572084
|
import tensorflow as tf
import os
import sklearn.metrics
import numpy as np
import sys
import math
import time
from . import framework
import network
class policy_agent(framework.re_model):
def __init__(self, train_data_loader, batch_size, max_length=120):
framework.re_model.__init__(self, train_data_loader, batch_size, max_length)
self.weights = tf.placeholder(tf.float32, shape=(), name="weights_scalar")
x = network.embedding.word_position_embedding(self.word, self.word_vec_mat, self.pos1, self.pos2)
x_train = network.encoder.cnn(x, keep_prob=0.5)
x_test = network.encoder.cnn(x, keep_prob=1.0)
self._train_logit = network.selector.instance(x_train, 2, keep_prob=0.5)
self._test_logit = network.selector.instance(x_test, 2, keep_prob=1.0)
self._loss = network.classifier.softmax_cross_entropy(self._train_logit, self.ins_label, 2, weights=self.weights)
def loss(self):
return self._loss
def train_logit(self):
return self._train_logit
def test_logit(self):
return self._test_logit
class rl_re_framework(framework.re_framework):
def __init__(self, train_data_loader, test_data_loader, max_length=120, batch_size=160):
framework.re_framework.__init__(self, train_data_loader, test_data_loader, max_length, batch_size)
def agent_one_step(self, sess, agent_model, batch_data, run_array, weights=1):
feed_dict = {
agent_model.word: batch_data['word'],
agent_model.pos1: batch_data['pos1'],
agent_model.pos2: batch_data['pos2'],
agent_model.ins_label: batch_data['agent_label'],
agent_model.length: batch_data['length'],
agent_model.weights: weights
}
if 'mask' in batch_data and hasattr(agent_model, "mask"):
feed_dict.update({agent_model.mask: batch_data['mask']})
result = sess.run(run_array, feed_dict)
return result
def pretrain_main_model(self, max_epoch):
for epoch in range(max_epoch):
print('###### Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
i = 0
time_sum = 0
for i, batch_data in enumerate(self.train_data_loader):
time_start = time.time()
iter_loss, iter_logit, _train_op = self.one_step(self.sess, self.model, batch_data, [self.model.loss(), self.model.train_logit(), self.train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
iter_label = batch_data['rel']
iter_correct = (iter_output == iter_label).sum()
iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += iter_label.shape[0]
tot_not_na += (iter_label != 0).sum()
if tot_not_na > 0:
sys.stdout.write("[pretrain main model] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
print("\nAverage iteration time: %f" % (time_sum / i))
def pretrain_agent_model(self, max_epoch):
for epoch in range(max_epoch):
print('###### [Pre-train Policy Agent] Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
time_sum = 0
for i, batch_data in enumerate(self.train_data_loader):
time_start = time.time()
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
iter_loss, iter_logit, _train_op = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.loss(), self.agent_model.train_logit(), self.agent_train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
iter_label = batch_data['ins_rel']
iter_correct = (iter_output == iter_label).sum()
iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += iter_label.shape[0]
tot_not_na += (iter_label != 0).sum()
if tot_not_na > 0:
sys.stdout.write("[pretrain policy agent] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
def train(self,
model,
agent_model,
model_name,
ckpt_dir='./checkpoint',
summary_dir='./summary',
test_result_dir='./test_result',
learning_rate=0.5,
max_epoch=60,
pretrain_agent_epoch=1,
pretrain_model=None,
test_epoch=1,
optimizer=tf.train.GradientDescentOptimizer):
print("Start training...")
self.model = model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
model_optimizer = optimizer(learning_rate)
grads = model_optimizer.compute_gradients(self.model.loss())
self.train_op = model_optimizer.apply_gradients(grads)
self.agent_model = agent_model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
agent_optimizer = optimizer(learning_rate)
agent_grads = agent_optimizer.compute_gradients(self.agent_model.loss())
self.agent_train_op = agent_optimizer.apply_gradients(agent_grads)
self.sess = tf.Session()
summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
saver = tf.train.Saver(max_to_keep=None)
if pretrain_model is None:
self.sess.run(tf.global_variables_initializer())
else:
saver.restore(self.sess, pretrain_model)
self.pretrain_main_model(max_epoch=5)
self.pretrain_agent_model(max_epoch=1)
tot_delete = 0
batch_count = 0
instance_count = 0
reward = 0.0
best_metric = 0
best_prec = None
best_recall = None
not_best_count = 0
for epoch in range(max_epoch):
print('###### Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
i = 0
time_sum = 0
batch_stack = []
for i, batch_data in enumerate(self.train_data_loader):
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
batch_stack.append(batch_data)
iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
action_result = iter_logit.argmax(-1)
batch_delete = np.sum(np.logical_and(batch_data['ins_rel'] != 0, action_result == 0))
batch_data['ins_rel'][action_result == 0] = 0
iter_loss = self.one_step(self.sess, self.model, batch_data, [self.model.loss()])[0]
reward += iter_loss
tot_delete += batch_delete
batch_count += 1
alpha = 0.1
if batch_count == 100:
reward = reward / float(batch_count)
average_loss = reward
reward = - math.log(1 - math.e ** (-reward))
sys.stdout.write('tot delete : %f | reward : %f | average loss : %f\r' % (tot_delete, reward, average_loss))
sys.stdout.flush()
for batch_data in batch_stack:
self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_train_op], weights=reward * alpha)
batch_count = 0
reward = 0
tot_delete = 0
batch_stack = []
i += 1
for i, batch_data in enumerate(self.train_data_loader):
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
time_start = time.time()
iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
action_result = iter_logit.argmax(-1)
batch_data['ins_rel'][action_result == 0] = 0
iter_loss, iter_logit, _train_op = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.loss(), self.agent_model.train_logit(), self.agent_train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
if tot_not_na > 0:
sys.stdout.write("epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
print("\nAverage iteration time: %f" % (time_sum / i))
if (epoch + 1) % test_epoch == 0:
metric = self.test(model)
if metric > best_metric:
best_metric = metric
best_prec = self.cur_prec
best_recall = self.cur_recall
print("Best model, storing...")
if not os.path.isdir(ckpt_dir):
os.mkdir(ckpt_dir)
path = saver.save(self.sess, os.path.join(ckpt_dir, model_name))
print("Finish storing")
not_best_count = 0
else:
not_best_count += 1
if not_best_count >= 20:
break
print("######")
print("Finish training " + model_name)
print("Best epoch auc = %f" % (best_metric))
if (not best_prec is None) and (not best_recall is None):
if not os.path.isdir(test_result_dir):
os.mkdir(test_result_dir)
np.save(os.path.join(test_result_dir, model_name + "_x.npy"), best_recall)
np.save(os.path.join(test_result_dir, model_name + "_y.npy"), best_prec)
| true
| true
|
79079306448d7aa431a61b49583653c5dd895108
| 2,289
|
py
|
Python
|
src/attic/attic-python/test/test-secondorder.py
|
K0414/metaos
|
be36c88d3c22fd2f0968edd1fba03c2f2353e4e8
|
[
"MIT"
] | 3
|
2017-04-10T16:23:32.000Z
|
2020-07-04T07:59:25.000Z
|
src/attic/attic-python/test/test-secondorder.py
|
K0414/metaos
|
be36c88d3c22fd2f0968edd1fba03c2f2353e4e8
|
[
"MIT"
] | null | null | null |
src/attic/attic-python/test/test-secondorder.py
|
K0414/metaos
|
be36c88d3c22fd2f0968edd1fba03c2f2353e4e8
|
[
"MIT"
] | 6
|
2017-10-25T10:12:27.000Z
|
2020-07-04T07:59:27.000Z
|
symbols = [ '1288.HK', '3988.HK', '0883.HK', '0939.HK', '2628.HK', '3968.HK', '0941.HK', '0688.HK', '0386.HK', '1088.HK', '0728.HK', '0762.HK', '1398.HK', '0857.HK', '2318.HK', '0700.HK', 'GAZPq.L', 'LKOHyq.L', 'NKELyq.L', 'NVTKq.L', 'RELIq.L', 'ROSNq.L', 'SNGSyq.L', 'TATNxq.L', 'BSBR.N', 'BBD.N', 'ABV.N', 'CIG.N', 'SID.N', 'GGB.N', 'HDB.N', 'IBN.N', 'ITUB.N', 'MBT.N', 'PBR.N', 'TNE.N', 'VALE.N', 'VIP.N', 'BIDU.OQ', 'INFY.OQ']
#lineProcessor = CSVReutersAdaptative('BRIC_1min.csv')
textFormat = MessageFormat("{0}")
dateFormat = SimpleDateFormat('dd-MMM-yyyy')
timeFormat = SimpleDateFormat('HH:mm:ss.SSS')
doubleFormat = DecimalFormat('#.##')
lineProcessor = CSVSourceLineProcessor([textFormat,dateFormat,timeFormat,None,None,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat],[None,None,None,None,None,OPEN(PRICE),HIGH(PRICE),LOW(PRICE),CLOSE(PRICE),VOLUME(PRICE),Field.EXTENDED(PRICE,"Ave. Price"),Field.EXTENDED(PRICE,"VWAP"),Field.EXTENDED(PRICE,"No. Trades")],0,[1,2])
source = SecondOrderSource('BRIC40_1min.csv', symbols, lineProcessor)
print "Ready"
class MyObserver(PricesListener):
def update(self, ss, when):
strLine = Long.toString(when.getTimeInMillis()).encode('utf-8')
strLine = strLine + when.toString().encode('utf-8')
for s in symbols:
if s in ss:
strLine = strLine + ',' \
+ str(market.getLastPrice(0,s+'-OPEN')) + ','\
+ str(market.getLastPrice(0,s+'-HIGH')) + ','\
+ str(market.getLastPrice(0,s+'-LOW')) + ','\
+ str(market.getLastPrice(0,s+'-CLOSE')) + ','\
+ str(market.getLastPrice(0,s+'-VOLUME')) + ','\
else:
strLine = strLine + ',-,-,-,-,-'
print strLine
market = RandomAccessMarket(0.0, 5000)
lineProcessor.addMarketListener(market)
lineProcessor.addPricesListener(MyObserver())
print "Go!"
strLine = 'milliseconds'
for s in symbols:
strLine = strLine + ',' + s + '-OPEN'
strLine = strLine + ',' + s + '-HIGH'
strLine = strLine + ',' + s + '-LOW'
strLine = strLine + ',' + s + '-CLOSE'
strLine = strLine + ',' + s + '-Volume'
print strLine
source.run()
| 44.882353
| 430
| 0.596767
|
symbols = [ '1288.HK', '3988.HK', '0883.HK', '0939.HK', '2628.HK', '3968.HK', '0941.HK', '0688.HK', '0386.HK', '1088.HK', '0728.HK', '0762.HK', '1398.HK', '0857.HK', '2318.HK', '0700.HK', 'GAZPq.L', 'LKOHyq.L', 'NKELyq.L', 'NVTKq.L', 'RELIq.L', 'ROSNq.L', 'SNGSyq.L', 'TATNxq.L', 'BSBR.N', 'BBD.N', 'ABV.N', 'CIG.N', 'SID.N', 'GGB.N', 'HDB.N', 'IBN.N', 'ITUB.N', 'MBT.N', 'PBR.N', 'TNE.N', 'VALE.N', 'VIP.N', 'BIDU.OQ', 'INFY.OQ']
textFormat = MessageFormat("{0}")
dateFormat = SimpleDateFormat('dd-MMM-yyyy')
timeFormat = SimpleDateFormat('HH:mm:ss.SSS')
doubleFormat = DecimalFormat('#.##')
lineProcessor = CSVSourceLineProcessor([textFormat,dateFormat,timeFormat,None,None,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat],[None,None,None,None,None,OPEN(PRICE),HIGH(PRICE),LOW(PRICE),CLOSE(PRICE),VOLUME(PRICE),Field.EXTENDED(PRICE,"Ave. Price"),Field.EXTENDED(PRICE,"VWAP"),Field.EXTENDED(PRICE,"No. Trades")],0,[1,2])
source = SecondOrderSource('BRIC40_1min.csv', symbols, lineProcessor)
print "Ready"
class MyObserver(PricesListener):
def update(self, ss, when):
strLine = Long.toString(when.getTimeInMillis()).encode('utf-8')
strLine = strLine + when.toString().encode('utf-8')
for s in symbols:
if s in ss:
strLine = strLine + ',' \
+ str(market.getLastPrice(0,s+'-OPEN')) + ','\
+ str(market.getLastPrice(0,s+'-HIGH')) + ','\
+ str(market.getLastPrice(0,s+'-LOW')) + ','\
+ str(market.getLastPrice(0,s+'-CLOSE')) + ','\
+ str(market.getLastPrice(0,s+'-VOLUME')) + ','\
else:
strLine = strLine + ',-,-,-,-,-'
print strLine
market = RandomAccessMarket(0.0, 5000)
lineProcessor.addMarketListener(market)
lineProcessor.addPricesListener(MyObserver())
print "Go!"
strLine = 'milliseconds'
for s in symbols:
strLine = strLine + ',' + s + '-OPEN'
strLine = strLine + ',' + s + '-HIGH'
strLine = strLine + ',' + s + '-LOW'
strLine = strLine + ',' + s + '-CLOSE'
strLine = strLine + ',' + s + '-Volume'
print strLine
source.run()
| false
| true
|
790793a63d8014c617fe74429ff161c2065931eb
| 2,403
|
py
|
Python
|
feature_importance_v4.py
|
terryli710/MPS_regression
|
d8f9c94ad315734ff9376a53e6be3f508b4da742
|
[
"MIT"
] | null | null | null |
feature_importance_v4.py
|
terryli710/MPS_regression
|
d8f9c94ad315734ff9376a53e6be3f508b4da742
|
[
"MIT"
] | null | null | null |
feature_importance_v4.py
|
terryli710/MPS_regression
|
d8f9c94ad315734ff9376a53e6be3f508b4da742
|
[
"MIT"
] | null | null | null |
## Calculate feature importance, but focus on "meta-features" which are categorized by
## rules from different perspectives: orders, directions, powers.
## for "comprehensive methods"
from util_relaimpo import *
from util_ca import *
from util import loadNpy
def mainCA(x_name, y_name, divided_by = "", feature_names = []):
X = loadNpy(['data', 'X', x_name])
Y = loadNpy(['data', 'Y', y_name])
# INFO
print("Dataset", x_name, y_name)
print("Method: ", "CA")
print("Divided by", divided_by)
# make dataframe
if feature_names: xdf = pd.DataFrame(data=X, columns=feature_names)
else: xdf = pd.DataFrame(data=X)
# divide X
x_list, feature_names = dvdX(xdf, divided_by=divided_by)
# if power, only use the first four terms
if divided_by=='power': x_list, feature_names = x_list[0:4], feature_names[0:4]
print("bootstrapping ...")
coef_boot, comb_feature = bootstrappingCA(x_list, Y)
result_df = caResultDf(coef_boot, comb_feature)
printBootResultCA(result_df)
def mainDA(x_name, y_name, divided_by = "", feature_names = []):
X = loadNpy(['data', 'X', x_name])
Y = loadNpy(['data', 'Y', y_name])
# INFO
print("Dataset", x_name, y_name)
print("Method: ", "DA")
print("Divided by", divided_by)
# make dataframe
if feature_names:
xdf = pd.DataFrame(data=X, columns=feature_names)
else:
xdf = pd.DataFrame(data=X)
# divide X
x_list, feature_names = dvdX(xdf, divided_by=divided_by)
# if power, only use the first four terms
if divided_by=='power': x_list, feature_names = x_list[0:4], feature_names[0:4]
print("bootstrapping ...")
coef_boot, comb_feature, r2_mean, r2_ci, da_data, ave_data = bootstrappingDA(x_list, Y)
da_df = daResultDf(da_data, ave_data, r2_mean, comb_feature, feature_name=feature_names)
printBootResultCA(da_df)
if __name__ == '__main__':
# da or ca
x_prefix = ["HM", "MMA"]
y_suffix = ["MPS95", "MPSCC95", "CSDM"]
x_main = "{}_X_ang_vel.npy"
y_main = "{}_{}.npy"
divided_list = ["order", "direction", "power"]
for ys in y_suffix:
for xp in x_prefix:
for divide in divided_list:
x_name = x_main.format(xp)
y_name = y_main.format(xp, ys)
mainCA(x_name,y_name,divide,feature_names)
mainDA(x_name,y_name,divide,feature_names)
| 37.546875
| 92
| 0.651685
|
_name])
Y = loadNpy(['data', 'Y', y_name])
print("Dataset", x_name, y_name)
print("Method: ", "CA")
print("Divided by", divided_by)
if feature_names: xdf = pd.DataFrame(data=X, columns=feature_names)
else: xdf = pd.DataFrame(data=X)
x_list, feature_names = dvdX(xdf, divided_by=divided_by)
if divided_by=='power': x_list, feature_names = x_list[0:4], feature_names[0:4]
print("bootstrapping ...")
coef_boot, comb_feature = bootstrappingCA(x_list, Y)
result_df = caResultDf(coef_boot, comb_feature)
printBootResultCA(result_df)
def mainDA(x_name, y_name, divided_by = "", feature_names = []):
X = loadNpy(['data', 'X', x_name])
Y = loadNpy(['data', 'Y', y_name])
print("Dataset", x_name, y_name)
print("Method: ", "DA")
print("Divided by", divided_by)
if feature_names:
xdf = pd.DataFrame(data=X, columns=feature_names)
else:
xdf = pd.DataFrame(data=X)
x_list, feature_names = dvdX(xdf, divided_by=divided_by)
if divided_by=='power': x_list, feature_names = x_list[0:4], feature_names[0:4]
print("bootstrapping ...")
coef_boot, comb_feature, r2_mean, r2_ci, da_data, ave_data = bootstrappingDA(x_list, Y)
da_df = daResultDf(da_data, ave_data, r2_mean, comb_feature, feature_name=feature_names)
printBootResultCA(da_df)
if __name__ == '__main__':
x_prefix = ["HM", "MMA"]
y_suffix = ["MPS95", "MPSCC95", "CSDM"]
x_main = "{}_X_ang_vel.npy"
y_main = "{}_{}.npy"
divided_list = ["order", "direction", "power"]
for ys in y_suffix:
for xp in x_prefix:
for divide in divided_list:
x_name = x_main.format(xp)
y_name = y_main.format(xp, ys)
mainCA(x_name,y_name,divide,feature_names)
mainDA(x_name,y_name,divide,feature_names)
| true
| true
|
790793bc7b97f53cfb8310db981becae753e3b91
| 10,758
|
py
|
Python
|
pydatview/tools/signal.py
|
cdrtm/pyDatView
|
fe1acacde27d4eafda0b54e455fadfb2d6199cd1
|
[
"MIT"
] | null | null | null |
pydatview/tools/signal.py
|
cdrtm/pyDatView
|
fe1acacde27d4eafda0b54e455fadfb2d6199cd1
|
[
"MIT"
] | null | null | null |
pydatview/tools/signal.py
|
cdrtm/pyDatView
|
fe1acacde27d4eafda0b54e455fadfb2d6199cd1
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
from numpy.random import rand
import pandas as pd
# --- List of available filters
FILTERS=[
{'name':'Moving average','param':100,'paramName':'Window Size','paramRange':[0,100000],'increment':1},
{'name':'Low pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},
{'name':'High pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},
]
SAMPLERS=[
{'name':'Replace', 'param':[], 'paramName':'New x'},
{'name':'Insert', 'param':[], 'paramName':'Insert list'},
{'name':'Remove', 'param':[], 'paramName':'Remove list'},
{'name':'Every n', 'param':2 , 'paramName':'n'},
{'name':'Delta x', 'param':0.1, 'paramName':'dx'},
]
def reject_outliers(y, x=None, m = 2., replaceNaN=True):
""" Reject outliers:
If replaceNaN is true: they are replaced by NaN
Otherwise they are removed
"""
if m==0:
# No rejection...
pass
else:
dd = np.abs(y - np.nanmedian(y))
mdev = np.nanmedian(dd)
if mdev:
ss = dd/mdev
b=ss<m
if replaceNaN:
y=y.copy()
y[~b]=np.nan
else:
y=y[b]
if x is not None:
x= x[b]
if x is None:
return y
else:
return x, y
# --------------------------------------------------------------------------------}
# --- Resampling
# --------------------------------------------------------------------------------{
def multiInterp(x, xp, fp, extrap='bounded'):
j = np.searchsorted(xp, x) - 1
dd = np.zeros(len(x))
bOK = np.logical_and(j>=0, j< len(xp)-1)
bLower =j<0
bUpper =j>=len(xp)-1
jOK = j[bOK]
#import pdb; pdb.set_trace()
dd[bOK] = (x[bOK] - xp[jOK]) / (xp[jOK + 1] - xp[jOK])
jBef=j
jAft=j+1
#
# Use first and last values for anything beyond xp
jAft[bUpper] = len(xp)-1
jBef[bUpper] = len(xp)-1
jAft[bLower] = 0
jBef[bLower] = 0
if extrap=='bounded':
pass
# OK
elif extrap=='nan':
dd[~bOK] = np.nan
else:
raise NotImplementedError()
return (1 - dd) * fp[:,jBef] + fp[:,jAft] * dd
def resample_interp(x_old, x_new, y_old=None, df_old=None):
#x_new=np.sort(x_new)
if df_old is not None:
# --- Method 1 (pandas)
#df_new = df_old.copy()
#df_new = df_new.set_index(x_old)
#df_new = df_new.reindex(df_new.index | x_new)
#df_new = df_new.interpolate().loc[x_new]
#df_new = df_new.reset_index()
# --- Method 2 interp storing dx
data_new=multiInterp(x_new, x_old, df_old.values.T)
df_new = pd.DataFrame(data=data_new.T, columns=df_old.columns.values)
return x_new, df_new
if y_old is not None:
return x_new, np.interp(x_new, x_old, y_old)
def applySamplerDF(df_old, x_col, sampDict):
x_old=df_old[x_col].values
x_new, df_new =applySampler(x_old, y_old=None, sampDict=sampDict, df_old=df_old)
df_new[x_col]=x_new
return df_new
def applySampler(x_old, y_old, sampDict, df_old=None):
param = np.asarray(sampDict['param']).ravel()
if sampDict['name']=='Replace':
if len(param)==0:
raise Exception('Error: At least one value is required to resample the x values with')
x_new = param
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Insert':
if len(param)==0:
raise Exception('Error: provide a list of values to insert')
x_new = np.sort(np.concatenate((x_old.ravel(),param)))
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Remove':
I=[]
if len(param)==0:
raise Exception('Error: provide a list of values to remove')
for d in param:
Ifound= np.where(np.abs(x_old-d)<1e-3)[0]
if len(Ifound)>0:
I+=list(Ifound.ravel())
x_new=np.delete(x_old,I)
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Delta x':
if len(param)==0:
raise Exception('Error: provide value for dx')
dx = param[0]
x_new = np.arange(x_old[0], x_old[-1]+dx/2, dx)
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Every n':
if len(param)==0:
raise Exception('Error: provide value for n')
n = int(param[0])
if n==0:
raise Exception('Error: |n| should be at least 1')
x_new=x_old[::n]
if df_old is not None:
return x_new, (df_old.copy()).iloc[::n,:]
if y_old is not None:
return x_new, y_old[::n]
else:
raise NotImplementedError('{}'.format(sampDict))
pass
# --------------------------------------------------------------------------------}
# --- Filters
# --------------------------------------------------------------------------------{
# def moving_average(x, w):
# #t_new = np.arange(0,Tmax,dt)
# #nt = len(t_new)
# #nw=400
# #u_new = moving_average(np.floor(np.linspace(0,3,nt+nw-1))*3+3.5, nw)
# return np.convolve(x, np.ones(w), 'valid') / w
# def moving_average(x,N,mode='same'):
# y=np.convolve(x, np.ones((N,))/N, mode=mode)
# return y
def moving_average(a, n=3) :
"""
perform moving average, return a vector of same length as input
NOTE: also in kalman.filters
"""
a = a.ravel()
a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values
ret = np.cumsum(a, dtype = float)
ret[n:] = ret[n:] - ret[:-n]
ret=ret[n - 1:] / n
return ret
def lowpass1(y, dt, fc=3) :
"""
1st order low pass filter
"""
tau=1/(2*np.pi*fc)
alpha=dt/(tau+dt)
y_filt=np.zeros(y.shape)
y_filt[0]=y[0]
for i in np.arange(1,len(y)):
y_filt[i]=alpha*y[i] + (1-alpha)*y_filt[i-1]
return y_filt
def highpass1(y, dt, fc=3) :
"""
1st order high pass filter
"""
tau=1/(2*np.pi*fc)
alpha=tau/(tau+dt)
y_filt=np.zeros(y.shape)
y_filt[0]=0
for i in np.arange(1,len(y)):
y_filt[i]=alpha*y_filt[i-1] + alpha*(y[i]-y[i-1])
m0=np.mean(y)
m1=np.mean(y_filt)
y_filt+=m0-m1
return y_filt
def applyFilter(x, y,filtDict):
if filtDict['name']=='Moving average':
return moving_average(y, n=np.round(filtDict['param']).astype(int))
elif filtDict['name']=='Low pass 1st order':
dt = x[1]-x[0]
return lowpass1(y, dt=dt, fc=filtDict['param'])
elif filtDict['name']=='High pass 1st order':
dt = x[1]-x[0]
return highpass1(y, dt=dt, fc=filtDict['param'])
else:
raise NotImplementedError('{}'.format(filtDict))
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
def zero_crossings(y,x=None,direction=None):
"""
Find zero-crossing points in a discrete vector, using linear interpolation.
direction: 'up' or 'down', to select only up-crossings or down-crossings
returns:
x values xzc such that y(yzc)==0
indexes izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included)
if direction is not provided, also returns:
sign, equal to 1 for up crossing
"""
if x is None:
x=np.arange(len(y))
if np.any((x[1:] - x[0:-1]) <= 0.0):
raise Exception('x values need to be in ascending order')
# Indices before zero-crossing
iBef = np.where(y[1:]*y[0:-1] < 0.0)[0]
# Find the zero crossing by linear interpolation
xzc = x[iBef] - y[iBef] * (x[iBef+1] - x[iBef]) / (y[iBef+1] - y[iBef])
# Selecting points that are exactly 0 and where neighbor change sign
iZero = np.where(y == 0.0)[0]
iZero = iZero[np.where((iZero > 0) & (iZero < x.size-1))]
iZero = iZero[np.where(y[iZero-1]*y[iZero+1] < 0.0)]
# Concatenate
xzc = np.concatenate((xzc, x[iZero]))
iBef = np.concatenate((iBef, iZero))
# Sort
iSort = np.argsort(xzc)
xzc, iBef = xzc[iSort], iBef[iSort]
# Return up-crossing, down crossing or both
sign = np.sign(y[iBef+1]-y[iBef])
if direction == 'up':
I= np.where(sign==1)[0]
return xzc[I],iBef[I]
elif direction == 'down':
I= np.where(sign==-1)[0]
return xzc[I],iBef[I]
elif direction is not None:
raise Exception('Direction should be either `up` or `down`')
return xzc, iBef, sign
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
def correlation(x, nMax=80, dt=1, method='manual'):
"""
Compute auto correlation of a signal
"""
nvec = np.arange(0,nMax)
sigma2 = np.var(x)
R = np.zeros(nMax)
R[0] =1
for i,nDelay in enumerate(nvec[1:]):
R[i+1] = np.mean( x[0:-nDelay] * x[nDelay:] ) / sigma2
tau = nvec*dt
return R, tau
def correlated_signal(coeff, n=1000):
"""
Create a correlated random signal of length `n` based on the correlation coefficient `coeff`
value[t] = coeff * value[t-1] + (1-coeff) * random
"""
if coeff<0 or coeff>1:
raise Exception('Correlation coefficient should be between 0 and 1')
x = np.zeros(n)
rvec = rand(n)
x[0] = rvec[0]
for m in np.arange(1,n):
x[m] = coeff*x[m-1] + (1-coeff)*rvec[m]
x-=np.mean(x)
return x
if __name__=='__main__':
import numpy as np
import matplotlib.pyplot as plt
# Input
dt = 1
n = 10000
coeff = 0.95 # 1:full corr, 00-corr
nMax = 180
# Create a correlated time series
tvec = np.arange(0,n)*dt
ts = correlated_signal(coeff, n)
# --- Compute correlation coefficient
R, tau = correlation(x, nMax=nMax)
fig,axes = plt.subplots(2, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
ax=axes[0]
# Plot time series
ax.plot(tvec,ts)
ax.set_xlabel('t [s]')
ax.set_ylabel('u [m/s]')
ax.tick_params(direction='in')
# Plot correlation
ax=axes[1]
ax.plot(tau, R ,'b-o', label='computed')
ax.plot(tau, coeff**(tau/dt) , 'r--' ,label='coeff^{tau/dt}') # analytical coeff^n trend
ax.set_xlabel(r'$\tau$ [s]')
ax.set_ylabel(r'$R(\tau)$ [-]')
ax.legend()
plt.show()
| 31.002882
| 119
| 0.533278
|
from __future__ import division
import numpy as np
from numpy.random import rand
import pandas as pd
FILTERS=[
{'name':'Moving average','param':100,'paramName':'Window Size','paramRange':[0,100000],'increment':1},
{'name':'Low pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},
{'name':'High pass 1st order','param':1.0,'paramName':'Cutoff Freq.','paramRange':[0.0001,100000],'increment':0.1},
]
SAMPLERS=[
{'name':'Replace', 'param':[], 'paramName':'New x'},
{'name':'Insert', 'param':[], 'paramName':'Insert list'},
{'name':'Remove', 'param':[], 'paramName':'Remove list'},
{'name':'Every n', 'param':2 , 'paramName':'n'},
{'name':'Delta x', 'param':0.1, 'paramName':'dx'},
]
def reject_outliers(y, x=None, m = 2., replaceNaN=True):
if m==0:
pass
else:
dd = np.abs(y - np.nanmedian(y))
mdev = np.nanmedian(dd)
if mdev:
ss = dd/mdev
b=ss<m
if replaceNaN:
y=y.copy()
y[~b]=np.nan
else:
y=y[b]
if x is not None:
x= x[b]
if x is None:
return y
else:
return x, y
def multiInterp(x, xp, fp, extrap='bounded'):
j = np.searchsorted(xp, x) - 1
dd = np.zeros(len(x))
bOK = np.logical_and(j>=0, j< len(xp)-1)
bLower =j<0
bUpper =j>=len(xp)-1
jOK = j[bOK]
dd[bOK] = (x[bOK] - xp[jOK]) / (xp[jOK + 1] - xp[jOK])
jBef=j
jAft=j+1
jAft[bUpper] = len(xp)-1
jBef[bUpper] = len(xp)-1
jAft[bLower] = 0
jBef[bLower] = 0
if extrap=='bounded':
pass
elif extrap=='nan':
dd[~bOK] = np.nan
else:
raise NotImplementedError()
return (1 - dd) * fp[:,jBef] + fp[:,jAft] * dd
def resample_interp(x_old, x_new, y_old=None, df_old=None):
if df_old is not None:
data_new=multiInterp(x_new, x_old, df_old.values.T)
df_new = pd.DataFrame(data=data_new.T, columns=df_old.columns.values)
return x_new, df_new
if y_old is not None:
return x_new, np.interp(x_new, x_old, y_old)
def applySamplerDF(df_old, x_col, sampDict):
x_old=df_old[x_col].values
x_new, df_new =applySampler(x_old, y_old=None, sampDict=sampDict, df_old=df_old)
df_new[x_col]=x_new
return df_new
def applySampler(x_old, y_old, sampDict, df_old=None):
param = np.asarray(sampDict['param']).ravel()
if sampDict['name']=='Replace':
if len(param)==0:
raise Exception('Error: At least one value is required to resample the x values with')
x_new = param
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Insert':
if len(param)==0:
raise Exception('Error: provide a list of values to insert')
x_new = np.sort(np.concatenate((x_old.ravel(),param)))
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Remove':
I=[]
if len(param)==0:
raise Exception('Error: provide a list of values to remove')
for d in param:
Ifound= np.where(np.abs(x_old-d)<1e-3)[0]
if len(Ifound)>0:
I+=list(Ifound.ravel())
x_new=np.delete(x_old,I)
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Delta x':
if len(param)==0:
raise Exception('Error: provide value for dx')
dx = param[0]
x_new = np.arange(x_old[0], x_old[-1]+dx/2, dx)
return resample_interp(x_old, x_new, y_old, df_old)
elif sampDict['name']=='Every n':
if len(param)==0:
raise Exception('Error: provide value for n')
n = int(param[0])
if n==0:
raise Exception('Error: |n| should be at least 1')
x_new=x_old[::n]
if df_old is not None:
return x_new, (df_old.copy()).iloc[::n,:]
if y_old is not None:
return x_new, y_old[::n]
else:
raise NotImplementedError('{}'.format(sampDict))
pass
= float)
ret[n:] = ret[n:] - ret[:-n]
ret=ret[n - 1:] / n
return ret
def lowpass1(y, dt, fc=3) :
tau=1/(2*np.pi*fc)
alpha=dt/(tau+dt)
y_filt=np.zeros(y.shape)
y_filt[0]=y[0]
for i in np.arange(1,len(y)):
y_filt[i]=alpha*y[i] + (1-alpha)*y_filt[i-1]
return y_filt
def highpass1(y, dt, fc=3) :
tau=1/(2*np.pi*fc)
alpha=tau/(tau+dt)
y_filt=np.zeros(y.shape)
y_filt[0]=0
for i in np.arange(1,len(y)):
y_filt[i]=alpha*y_filt[i-1] + alpha*(y[i]-y[i-1])
m0=np.mean(y)
m1=np.mean(y_filt)
y_filt+=m0-m1
return y_filt
def applyFilter(x, y,filtDict):
if filtDict['name']=='Moving average':
return moving_average(y, n=np.round(filtDict['param']).astype(int))
elif filtDict['name']=='Low pass 1st order':
dt = x[1]-x[0]
return lowpass1(y, dt=dt, fc=filtDict['param'])
elif filtDict['name']=='High pass 1st order':
dt = x[1]-x[0]
return highpass1(y, dt=dt, fc=filtDict['param'])
else:
raise NotImplementedError('{}'.format(filtDict))
def zero_crossings(y,x=None,direction=None):
if x is None:
x=np.arange(len(y))
if np.any((x[1:] - x[0:-1]) <= 0.0):
raise Exception('x values need to be in ascending order')
iBef = np.where(y[1:]*y[0:-1] < 0.0)[0]
xzc = x[iBef] - y[iBef] * (x[iBef+1] - x[iBef]) / (y[iBef+1] - y[iBef])
iZero = np.where(y == 0.0)[0]
iZero = iZero[np.where((iZero > 0) & (iZero < x.size-1))]
iZero = iZero[np.where(y[iZero-1]*y[iZero+1] < 0.0)]
xzc = np.concatenate((xzc, x[iZero]))
iBef = np.concatenate((iBef, iZero))
iSort = np.argsort(xzc)
xzc, iBef = xzc[iSort], iBef[iSort]
sign = np.sign(y[iBef+1]-y[iBef])
if direction == 'up':
I= np.where(sign==1)[0]
return xzc[I],iBef[I]
elif direction == 'down':
I= np.where(sign==-1)[0]
return xzc[I],iBef[I]
elif direction is not None:
raise Exception('Direction should be either `up` or `down`')
return xzc, iBef, sign
def correlation(x, nMax=80, dt=1, method='manual'):
nvec = np.arange(0,nMax)
sigma2 = np.var(x)
R = np.zeros(nMax)
R[0] =1
for i,nDelay in enumerate(nvec[1:]):
R[i+1] = np.mean( x[0:-nDelay] * x[nDelay:] ) / sigma2
tau = nvec*dt
return R, tau
def correlated_signal(coeff, n=1000):
if coeff<0 or coeff>1:
raise Exception('Correlation coefficient should be between 0 and 1')
x = np.zeros(n)
rvec = rand(n)
x[0] = rvec[0]
for m in np.arange(1,n):
x[m] = coeff*x[m-1] + (1-coeff)*rvec[m]
x-=np.mean(x)
return x
if __name__=='__main__':
import numpy as np
import matplotlib.pyplot as plt
dt = 1
n = 10000
coeff = 0.95
nMax = 180
tvec = np.arange(0,n)*dt
ts = correlated_signal(coeff, n)
R, tau = correlation(x, nMax=nMax)
fig,axes = plt.subplots(2, 1, sharey=False, figsize=(6.4,4.8))
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
ax=axes[0]
ax.plot(tvec,ts)
ax.set_xlabel('t [s]')
ax.set_ylabel('u [m/s]')
ax.tick_params(direction='in')
ax=axes[1]
ax.plot(tau, R ,'b-o', label='computed')
ax.plot(tau, coeff**(tau/dt) , 'r--' ,label='coeff^{tau/dt}')
ax.set_xlabel(r'$\tau$ [s]')
ax.set_ylabel(r'$R(\tau)$ [-]')
ax.legend()
plt.show()
| true
| true
|
790793fb6467579fd3b8eb4be805a081a2315aa9
| 65
|
py
|
Python
|
Chapter 05/Chap05_Example5.33.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 05/Chap05_Example5.33.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 05/Chap05_Example5.33.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
#games module
import Kabaddi.raider
Kabaddi.raider.name_raider()
| 16.25
| 28
| 0.830769
|
import Kabaddi.raider
Kabaddi.raider.name_raider()
| true
| true
|
790794546c498205a96e392889ad6c516d954937
| 31,068
|
py
|
Python
|
variation/methodological_experiment.py
|
tedunderwood/fiction
|
33e2986fecaa3d154b5fdd609146b65d97974275
|
[
"MIT"
] | 21
|
2016-05-25T00:02:19.000Z
|
2021-11-23T06:51:07.000Z
|
variation/methodological_experiment.py
|
tedunderwood/fiction
|
33e2986fecaa3d154b5fdd609146b65d97974275
|
[
"MIT"
] | null | null | null |
variation/methodological_experiment.py
|
tedunderwood/fiction
|
33e2986fecaa3d154b5fdd609146b65d97974275
|
[
"MIT"
] | 6
|
2016-10-18T12:56:18.000Z
|
2020-09-01T01:36:02.000Z
|
#!/usr/bin/env python3
# methodological_experiment.py
import sys, os, csv
import numpy as np
import pandas as pd
import versatiletrainer2
import metaselector
import matplotlib.pyplot as plt
from scipy import stats
def first_experiment():
sourcefolder = '../data/'
metadatapath = '../metadata/mastermetadata.csv'
vocabpath = '../modeloutput/experimentalvocab.txt'
tags4positive = {'fantasy_loc', 'fantasy_oclc'}
tags4negative = {'sf_loc', 'sf_oclc'}
sizecap = 200
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap)
c_range = [.004, .012, 0.3, 0.8, 2]
featurestart = 3000
featureend = 4400
featurestep = 100
modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, 'first_experiment', '../modeloutput/first_experiment.csv')
plt.rcParams["figure.figsize"] = [9.0, 6.0]
plt.matshow(matrix, origin = 'lower', cmap = plt.cm.YlOrRd)
plt.show()
def get_ratio_data(vocabpath, sizecap, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000):
''' Loads metadata, selects instances for the positive
and negative classes (using a ratio to dilute the positive
class with negative instances), creates a lexicon if one doesn't
already exist, and creates a pandas dataframe storing
texts as rows and words/features as columns. A refactored
and simplified version of get_data_for_model().
'''
holdout_authors = True
freqs_already_normalized = True
verbose = False
datecols = ['firstpub']
indexcol = ['docid']
extension = '.tsv'
genrecol = 'tags'
numfeatures = 8000
sourcefolder = '../data/'
metadatapath = '../metadata/mastermetadata.csv'
# Get a list of files.
allthefiles = os.listdir(sourcefolder)
volumeIDsinfolder = list()
volumepaths = list()
numchars2trim = len(extension)
for filename in allthefiles:
if filename.endswith(extension):
volID = filename[0 : -numchars2trim]
# The volume ID is basically the filename minus its extension.
volumeIDsinfolder.append(volID)
metadata = metaselector.load_metadata(metadatapath, volumeIDsinfolder, excludebelow, excludeabove, indexcol = indexcol, datecols = datecols, genrecol = genrecol)
# That function returns a pandas dataframe which is guaranteed to be indexed by indexcol,
# and to contain a numeric column 'std_date' as well as a column 'tagset' which contains
# sets of genre tags for each row. It has also been filtered so it only contains volumes
# in the folder, and none whose date is below excludebelow or above excludeabove.
orderedIDs, classdictionary = metaselector.dilute_positive_class(metadata, sizecap, tags4positive, tags4negative, ratio)
metadata = metadata.loc[orderedIDs]
# Limits the metadata data frame to rows we are actually using
# (those selected in select_instances).
# We now create an ordered list of id-path tuples.
volspresent = [(x, sourcefolder + x + extension) for x in orderedIDs]
print(len(volspresent))
print('Building vocabulary.')
vocablist = versatiletrainer2.get_vocablist(vocabpath, volspresent, n = numfeatures)
numfeatures = len(vocablist)
print()
print("Number of features: " + str(numfeatures))
# For each volume, we're going to create a list of volumes that should be
# excluded from the training set when it is to be predicted. More precisely,
# we're going to create a list of their *indexes*, so that we can easily
# remove rows from the training matrix.
authormatches = [ [] for x in orderedIDs]
# Now we proceed to enlarge that list by identifying, for each volume,
# a set of indexes that have the same author. Obvs, there will always be at least one.
# We exclude a vol from it's own training set.
if holdout_authors:
for idx1, anid in enumerate(orderedIDs):
thisauthor = metadata.loc[anid, 'author']
authormatches[idx1] = list(np.flatnonzero(metadata['author'] == thisauthor))
for alist in authormatches:
alist.sort(reverse = True)
print()
print('Authors matched.')
print()
# I am reversing the order of indexes so that I can delete them from
# back to front, without changing indexes yet to be deleted.
# This will become important in the modelingprocess module.
masterdata, classvector = versatiletrainer2.get_dataframe(volspresent, classdictionary, vocablist, freqs_already_normalized)
return metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist
def vary_sf_ratio_against_random():
if not os.path.isfile('../measuredivergence/modeldata.tsv'):
with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
f.write(outline)
size = 80
for iteration in [5, 6, 7]:
ceiling = 105
if iteration == 7:
ceiling = 5
for pct in range(0, ceiling, 5):
ratio = pct / 100
name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
tags4positive = {'sf_loc', 'sf_oclc'}
tags4negative = {'random'}
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8]
featurestart = 1000
featureend = 6000
featurestep = 300
modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
# It's important not to write fullmodel if you want the csvs
# to accurately reflect terrible accuracy on diluted datasets.
# write_fullmodel = False forces crossvalidation.
with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
f.write(outline)
def vary_fantasy_ratio_against_sf():
if not os.path.isfile('../measuredivergence/modeldata.tsv'):
with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
f.write(outline)
size = 80
for iteration in [8, 9, 10]:
ceiling = 105
if iteration == 10:
ceiling = 5
for pct in range(0, ceiling, 5):
ratio = pct / 100
name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
tags4positive = {'fantasy_loc', 'fantasy_oclc'}
tags4negative = {'sf_loc', 'sf_oclc'}
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
featurestart = 2000
featureend = 7500
featurestep = 400
modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
# write_fullmodel = False forces crossvalidation.
with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
f.write(outline)
def vary_fantasy_ratio_against_random():
if not os.path.isfile('../measuredivergence/modeldata.tsv'):
with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
f.write(outline)
size = 80
for iteration in [11, 12, 13]:
ceiling = 105
if iteration == 13:
ceiling = 5
for pct in range(0, ceiling, 5):
ratio = pct / 100
name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
tags4positive = {'fantasy_loc', 'fantasy_oclc'}
tags4negative = {'random'}
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
featurestart = 1600
featureend = 6400
featurestep = 400
modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
# write_fullmodel = False forces crossvalidation.
with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
f.write(outline)
def accuracy(df, column):
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df[column] > 0.5))
tn = sum((df.realclass <= 0.5) & (df[column] <= 0.5))
fp = sum((df.realclass <= 0.5) & (df[column] > 0.5))
fn = sum((df.realclass > 0.5) & (df[column] <= 0.5))
assert totalcount == (tp + fp + tn + fn)
return (tp + tn) / totalcount
def accuracy_loss(df):
return accuracy(df, 'probability') - accuracy(df, 'alien_model')
def kldivergence(p, q):
"""Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions.
"""
p = np.asarray(p, dtype=np.float)
q = np.asarray(q, dtype=np.float)
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def averagecorr(r1, r2):
z1 = np.arctanh(r1)
z2 = np.arctanh(r2)
themean = (z1 + z2) / 2
return np.tanh(themean)
def get_divergences(gold, testname, itera, size, pct):
'''
This function gets several possible measures of divergence
between two models.
'''
# We start by constructing the paths to the gold
# standard model criteria (.pkl) and
# model output (.csv) on the examples
# originally used to train it.
# We're going to try applying the gold standard
# criteria to another model's output, and vice-
# versa.
model1 = '../measuredivergence/modeloutput/' + gold + '.pkl'
meta1 = '../measuredivergence/modeloutput/' + gold + '.csv'
# Now we construct paths to the test model
# criteria (.pkl) and output (.csv).
testpath = '../measuredivergence/modeloutput/' + testname
model2 = testpath + '.pkl'
meta2 = testpath + '.csv'
model1on2 = versatiletrainer2.apply_pickled_model(model1, '../data/', '.tsv', meta2)
model2on1 = versatiletrainer2.apply_pickled_model(model2, '../data/', '.tsv', meta1)
pearson1on2 = stats.pearsonr(model1on2.probability, model1on2.alien_model)[0]
pearson2on1 = stats.pearsonr(model2on1.probability, model2on1.alien_model)[0]
pearson = averagecorr(pearson1on2, pearson2on1)
spearman1on2 = stats.spearmanr(model1on2.probability, model1on2.alien_model)[0]
spearman2on1 = stats.spearmanr(model2on1.probability, model2on1.alien_model)[0]
spearman = averagecorr(spearman1on2, spearman2on1)
loss1on2 = accuracy_loss(model1on2)
loss2on1 = accuracy_loss(model2on1)
loss = (loss1on2 + loss2on1) / 2
kl1on2 = kldivergence(model1on2.probability, model1on2.alien_model)
kl2on1 = kldivergence(model2on1.probability, model2on1.alien_model)
kl = (kl1on2 + kl2on1) / 2
return pearson, spearman, loss, kl, spearman1on2, spearman2on1, loss1on2, loss2on1
def measure_sf_divergences():
columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
if not os.path.isfile('../measuredivergence/sf_divergences.tsv'):
with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
goldstandards = ['iter5_size80_ratio0', 'iter6_size80_ratio0', 'iter7_size80_ratio0']
size = 80
modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
for gold in goldstandards:
for itera in [5, 6]:
for pct in range(0, 105, 5):
ratio = pct / 100
testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)
if testname == gold:
continue
# we don't test a model against itself
else:
row = dict()
row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
row['name1'] = gold
row['name2'] = testname
row['size'] = size
row['acc1'] = modeldata.loc[gold, 'accuracy']
row['acc2'] = modeldata.loc[testname, 'accuracy']
row['ratiodiff'] = ratio
with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writerow(row)
def measure_fsf_divergences():
columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
if not os.path.isfile('../measuredivergence/fsf_divergences.tsv'):
with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
goldstandards = ['iter8_size80_ratio0', 'iter9_size80_ratio0', 'iter10_size80_ratio0']
size = 80
modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
for gold in goldstandards:
for itera in [8, 9]:
for pct in range(0, 105, 5):
ratio = pct / 100
testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)
if testname == gold:
continue
# we don't test a model against itself
else:
row = dict()
row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
row['name1'] = gold
row['name2'] = testname
row['size'] = size
row['acc1'] = modeldata.loc[gold, 'accuracy']
row['acc2'] = modeldata.loc[testname, 'accuracy']
row['ratiodiff'] = ratio
with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writerow(row)
def measure_fantasy_divergences():
columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
if not os.path.isfile('../measuredivergence/fantasy_divergences.tsv'):
with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
goldstandards = ['iter11_size80_ratio0', 'iter12_size80_ratio0', 'iter13_size80_ratio0']
size = 80
modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
for gold in goldstandards:
for itera in [11, 12]:
for pct in range(0, 105, 5):
ratio = pct / 100
testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)
if testname == gold:
continue
# we don't test a model against itself
else:
row = dict()
row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
row['name1'] = gold
row['name2'] = testname
row['size'] = size
row['acc1'] = modeldata.loc[gold, 'accuracy']
row['acc2'] = modeldata.loc[testname, 'accuracy']
row['ratiodiff'] = ratio
with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writerow(row)
def new_experiment():
    """Train mixed-genre models plus gold fantasy and detective models.

    For iterations 3-5: trains one fantasy/detective "mixed" model per
    dilution ratio, then a pure ('gold') fantasy model and a pure detective
    model on partition 2.  Summary statistics for every model are appended
    to newexperimentmodels.csv; per-model predictions go to
    ../measuredivergence/newmodeloutput/.
    """

    # The first time I ran this, I used partition 2 to build the
    # mixed data, and partition 1 as a gold standard. Now reversing.

    outmodelpath = '../measuredivergence/results/newexperimentmodels.csv'
    columns = ['name', 'size', 'ratio', 'iteration', 'meandate', 'maxaccuracy', 'features', 'regularization']
    if not os.path.isfile(outmodelpath):
        with open(outmodelpath, mode = 'w', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writeheader()

    # Grid-search settings shared by every model trained below.
    c_range = [.00001, .0001, .001, .01, 0.1, 1, 10, 100]
    featurestart = 1500
    featureend = 6000
    featurestep = 300
    modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range
    sizecap = 75

    for i in range(3, 6):

        # Mixed-genre models: one per dilution ratio.
        for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
            sourcefolder = '../measuredivergence/mix/' + str(ratio) + '/'
            metadatapath = '../measuredivergence/partitionmeta/meta' + str(ratio) + '.csv'
            name = 'mixeddata_' + str(i) + '_' + str(ratio)
            vocabpath = '../lexica/' + name + '.txt'
            tags4positive = {'fantasy', 'detective'}
            tags4negative = {'random'}
            floor = 1800
            ceiling = 1930

            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

            # Mean first-publication date of the volumes actually used.
            meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

            row = dict()
            row['name'] = name
            row['size'] = sizecap
            row['ratio'] = ratio
            row['iteration'] = i
            row['meandate'] = meandate
            row['maxaccuracy'] = maxaccuracy
            row['features'] = features4max
            row['regularization'] = best_regularization_coef

            with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
                scribe = csv.DictWriter(f, fieldnames = columns)
                scribe.writerow(row)

            # The per-model vocabulary is only needed during training.
            os.remove(vocabpath)

        # Gold fantasy model for this iteration.
        sourcefolder = '../data/'
        metadatapath = '../measuredivergence/partitionmeta/part2.csv'
        # note that this is changed if you create mix data with
        # partition 2
        name = 'goldfantasy_' + str(i)
        vocabpath = '../lexica/' + name + '.txt'
        tags4positive = {'fantasy'}
        tags4negative = {'random', 'randomB'}
        floor = 1800
        ceiling = 1930

        metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

        matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

        meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

        row = dict()
        row['name'] = name
        row['size'] = sizecap
        # NOTE(review): `ratio` here is left over from the loop above (always
        # the last value, 100); it doesn't describe the gold model — confirm intended.
        row['ratio'] = ratio
        row['iteration'] = i
        row['meandate'] = meandate
        row['maxaccuracy'] = maxaccuracy
        row['features'] = features4max
        row['regularization'] = best_regularization_coef

        with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writerow(row)

        os.remove(vocabpath)

        # Gold detective model for this iteration.
        sourcefolder = '../data/'
        metadatapath = '../measuredivergence/partitionmeta/part2.csv'
        # depending on which partition you used to create mix data;
        # this will be the other one
        name = 'golddetective_' + str(i)
        vocabpath = '../lexica/' + name + '.txt'
        tags4positive = {'detective'}
        tags4negative = {'random', 'randomB'}
        floor = 1800
        ceiling = 1930

        metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

        matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

        meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

        row = dict()
        row['name'] = name
        row['size'] = sizecap
        # NOTE(review): same leftover `ratio` as above — confirm intended.
        row['ratio'] = ratio
        row['iteration'] = i
        row['meandate'] = meandate
        row['maxaccuracy'] = maxaccuracy
        row['features'] = features4max
        row['regularization'] = best_regularization_coef

        with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writerow(row)

        os.remove(vocabpath)
def accuracy(df, column):
    """Fraction of rows where the thresholded prediction in `column`
    matches `realclass` (both thresholded at 0.5)."""
    n = len(df.realclass)
    real_pos = df.realclass > 0.5
    real_neg = df.realclass <= 0.5
    pred_pos = df[column] > 0.5
    pred_neg = df[column] <= 0.5
    tp = sum(real_pos & pred_pos)
    tn = sum(real_neg & pred_neg)
    fp = sum(real_neg & pred_pos)
    fn = sum(real_pos & pred_neg)
    # Sanity check: the confusion-matrix cells must partition the rows.
    assert n == (tp + fp + tn + fn)
    return (tp + tn) / n
def accuracy_loss(df):
    """Accuracy drop when the alien model's scores replace the model's own."""
    own_acc = accuracy(df, 'probability')
    alien_acc = accuracy(df, 'alien_model')
    return own_acc - alien_acc
def get_divergence(sampleA, sampleB, twodatafolder = '../data/', onedatafolder = '../data/'):
    '''
    This function applies model A to B's corpus, and vice versa, and returns
    several measures of divergence: notably lost accuracy and
    z-transformed Spearman correlation between the two probability rankings.
    '''

    # We start by constructing the paths to the sampleA
    # standard model criteria (.pkl) and
    # model output (.csv) on the examples
    # originally used to train it.

    # We're going to try applying the sampleA standard
    # criteria to another model's output, and vice-
    # versa.

    model1 = '../measuredivergence/newmodeloutput/' + sampleA + '.pkl'
    meta1 = '../measuredivergence/newmodeloutput/' + sampleA + '.csv'

    # Now we construct paths to the test model
    # criteria (.pkl) and output (.csv).

    model2 = '../measuredivergence/newmodeloutput/' + sampleB + '.pkl'
    meta2 = '../measuredivergence/newmodeloutput/' + sampleB + '.csv'

    model1on2 = versatiletrainer2.apply_pickled_model(model1, twodatafolder, '.tsv', meta2)
    model2on1 = versatiletrainer2.apply_pickled_model(model2, onedatafolder, '.tsv', meta1)

    # Fisher z-transform (arctanh) each Spearman r before averaging.
    spearman1on2 = np.arctanh(stats.spearmanr(model1on2.probability, model1on2.alien_model)[0])
    spearman2on1 = np.arctanh(stats.spearmanr(model2on1.probability, model2on1.alien_model)[0])
    spearman = (spearman1on2 + spearman2on1) / 2

    loss1on2 = accuracy_loss(model1on2)
    loss2on1 = accuracy_loss(model2on1)
    loss = (loss1on2 + loss2on1) / 2

    # Accuracy of each model on the other's corpus ('alien_model' column)
    # and on its own ('probability' column).
    alienacc2 = accuracy(model1on2, 'alien_model')
    alienacc1 = accuracy(model2on1, 'alien_model')

    acc2 = accuracy(model1on2, 'probability')
    acc1 = accuracy(model2on1, 'probability')

    meandate2 = np.mean(model1on2.std_date)
    meandate1 = np.mean(model2on1.std_date)

    return spearman, loss, spearman1on2, spearman2on1, loss1on2, loss2on1, acc1, acc2, alienacc1, alienacc2, meandate1, meandate2
def write_a_row(r, outfile, columns):
with open(outfile, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns, delimiter = '\t')
scribe.writerow(r)
def new_divergences():
    """Compare gold fantasy and detective models to every mixed-ratio model.

    For each (gold model i, mixed model j, ratio) triple, appends one
    'fantasy2mixed' row and one 'detective2mixed' row of divergence measures
    to new_comparisons.tsv.
    """

    outcomparisons = '../measuredivergence/results/new_comparisons.tsv'
    columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']

    # Write the header only if the output file doesn't exist yet.
    if not os.path.isfile(outcomparisons):
        with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
            scribe.writeheader()

    # I originally ran this with i and j
    # iterating through range(3). Now trying
    # on models generated with the partitions
    # reversed.

    for i in range(3, 6):
        for j in range(3, 6):
            for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:

                r = dict()
                r['testype'] = 'fantasy2mixed'
                r['name1'] = 'goldfantasy_' + str(i)
                r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
                r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')
                r['ratio'] = ratio

                write_a_row(r, outcomparisons, columns)

                r = dict()
                r['testype'] = 'detective2mixed'
                r['name1'] = 'golddetective_' + str(i)
                r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
                r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')
                r['ratio'] = 100 - ratio
                # note that distance from detective is the complement
                # of distance from fantasy

                write_a_row(r, outcomparisons, columns)
def new_self_comparisons ():
    """Compare mixed-data models trained on different data at the same ratio.

    Pairs models i in 0-2 (data under 'altmix') with models j in 3-5 (data
    under 'mix') at each dilution ratio, appending divergence rows to
    self_comparisons.tsv.
    """

    outcomparisons = '../measuredivergence/results/self_comparisons.tsv'
    columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']

    # Write the header only if the output file doesn't exist yet.
    if not os.path.isfile(outcomparisons):
        with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
            scribe.writeheader()

    for i in range(0, 3):
        for j in range(3, 6):
            for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:

                r = dict()
                r['testype'] = 'selfmixed'
                r['name1'] = 'mixeddata_' + str(i) + '_' + str(ratio)
                r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
                # i-models read from 'altmix', j-models from 'mix' —
                # presumably opposite data partitions; verify against the
                # code that generated these folders.
                r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/', onedatafolder = '../measuredivergence/altmix/' + str(ratio) + '/')
                r['ratio'] = ratio

                write_a_row(r, outcomparisons, columns)
# Module entry point: running this file executes the self-comparison experiment.
new_self_comparisons()
# (dataset-extraction residue, commented out so the file parses:
#  44.895954 / 360 / 0.629168 — stray corpus statistics, not part of the script)
import sys, os, csv
import numpy as np
import pandas as pd
import versatiletrainer2
import metaselector
import matplotlib.pyplot as plt
from scipy import stats
def first_experiment():
    """Train a single fantasy-vs-SF model and visualize its tuning matrix.

    Grid-searches feature counts and regularization constants, writes model
    output to ../modeloutput/first_experiment.csv, and shows `matrix`
    (presumably accuracy across the grid — confirm in versatiletrainer2)
    as a heat map.
    """

    sourcefolder = '../data/'
    metadatapath = '../metadata/mastermetadata.csv'
    vocabpath = '../modeloutput/experimentalvocab.txt'
    tags4positive = {'fantasy_loc', 'fantasy_oclc'}
    tags4negative = {'sf_loc', 'sf_oclc'}
    sizecap = 200

    metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap)

    # Grid-search ranges: regularization constants and feature-count steps.
    c_range = [.004, .012, 0.3, 0.8, 2]
    featurestart = 3000
    featureend = 4400
    featurestep = 100
    modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range

    matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, 'first_experiment', '../modeloutput/first_experiment.csv')

    plt.rcParams["figure.figsize"] = [9.0, 6.0]
    plt.matshow(matrix, origin = 'lower', cmap = plt.cm.YlOrRd)
    plt.show()
def get_ratio_data(vocabpath, sizecap, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000):
    """Load metadata and features for a positive class diluted with negatives.

    Selects up to `sizecap` positive instances (mixing in negative instances
    in proportion `ratio`), builds or loads a vocabulary at `vocabpath`, and
    returns the metadata, feature dataframe, class labels, volume ordering,
    author-holdout index lists, and vocabulary.
    """

    # Fixed configuration for this experiment.
    holdout_authors = True
    freqs_already_normalized = True
    verbose = False
    datecols = ['firstpub']
    indexcol = ['docid']
    extension = '.tsv'
    genrecol = 'tags'
    numfeatures = 8000

    sourcefolder = '../data/'
    metadatapath = '../metadata/mastermetadata.csv'

    # Gather the IDs of all volumes present in the source folder.
    allthefiles = os.listdir(sourcefolder)

    volumeIDsinfolder = list()
    volumepaths = list()  # unused here; retained for interface parity
    numchars2trim = len(extension)

    for filename in allthefiles:
        if filename.endswith(extension):
            volID = filename[0 : -numchars2trim]
            volumeIDsinfolder.append(volID)

    metadata = metaselector.load_metadata(metadatapath, volumeIDsinfolder, excludebelow, excludeabove, indexcol = indexcol, datecols = datecols, genrecol = genrecol)

    # Choose positive and negative instances, diluting the positive class
    # with negatives in the requested proportion.
    orderedIDs, classdictionary = metaselector.dilute_positive_class(metadata, sizecap, tags4positive, tags4negative, ratio)

    # Limit metadata to the selected volumes, in selection order.
    metadata = metadata.loc[orderedIDs]

    volspresent = [(x, sourcefolder + x + extension) for x in orderedIDs]

    print(len(volspresent))

    print('Building vocabulary.')

    vocablist = versatiletrainer2.get_vocablist(vocabpath, volspresent, n = numfeatures)

    # May come back smaller than requested if the corpus is small.
    numfeatures = len(vocablist)

    print()
    print("Number of features: " + str(numfeatures))

    # For each volume, record the indexes of all volumes by the same author,
    # so that an author's other books can be excluded from the training set
    # when one of that author's books is to be predicted.

    authormatches = [ [] for x in orderedIDs]

    if holdout_authors:
        for idx1, anid in enumerate(orderedIDs):
            thisauthor = metadata.loc[anid, 'author']
            authormatches[idx1] = list(np.flatnonzero(metadata['author'] == thisauthor))

    for alist in authormatches:
        alist.sort(reverse = True)

    # I am reversing the order of indexes so that I can delete them from
    # back to front, without changing indexes yet to be deleted.
    # This will become important in the modelingprocess module.

    print()
    print('Authors matched.')
    print()

    masterdata, classvector = versatiletrainer2.get_dataframe(volspresent, classdictionary, vocablist, freqs_already_normalized)

    return metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist
def vary_sf_ratio_against_random():
    """Train SF-vs-random models while diluting the SF class from 0% to 100%.

    Iterations 5-7 each train one model per dilution percentage (iteration 7
    produces only the undiluted model) and append summary statistics to
    modeldata.tsv.
    """
    # Create modeldata.tsv with a header on first run only.
    if not os.path.isfile('../measuredivergence/modeldata.tsv'):
        with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
            outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
            f.write(outline)

    size = 80

    for iteration in [5, 6, 7]:

        ceiling = 105
        # Iteration 7 stops after pct 0: it serves only as a gold standard.
        if iteration == 7:
            ceiling = 5

        for pct in range(0, ceiling, 5):
            ratio = pct / 100

            name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
            vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
            tags4positive = {'sf_loc', 'sf_oclc'}
            tags4negative = {'random'}

            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)

            # Grid-search settings for this family of models.
            c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8]
            featurestart = 1000
            featureend = 6000
            featurestep = 300
            modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range

            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
            # It's important not to write fullmodel if you want the csvs

            # Append one summary line per model.
            with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
                outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
                f.write(outline)
def vary_fantasy_ratio_against_sf():
    """Train fantasy-vs-SF models while diluting the fantasy class 0%-100%.

    Iterations 8-10 each train one model per dilution percentage (iteration
    10 produces only the undiluted model) and append summary statistics to
    modeldata.tsv.
    """
    # Create modeldata.tsv with a header on first run only.
    if not os.path.isfile('../measuredivergence/modeldata.tsv'):
        with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
            outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
            f.write(outline)

    size = 80

    for iteration in [8, 9, 10]:

        ceiling = 105
        # Iteration 10 stops after pct 0: it serves only as a gold standard.
        if iteration == 10:
            ceiling = 5

        for pct in range(0, ceiling, 5):
            ratio = pct / 100

            name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
            vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
            tags4positive = {'fantasy_loc', 'fantasy_oclc'}
            tags4negative = {'sf_loc', 'sf_oclc'}

            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)

            # Grid-search settings for this family of models.
            c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
            featurestart = 2000
            featureend = 7500
            featurestep = 400
            modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range

            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)

            # Append one summary line per model.
            with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
                outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
                f.write(outline)
def vary_fantasy_ratio_against_random():
    """Train fantasy-vs-random models while diluting the fantasy class 0%-100%.

    Iterations 11-13 each train one model per dilution percentage (iteration
    13 produces only the undiluted model) and append summary statistics to
    modeldata.tsv.
    """
    # Create modeldata.tsv with a header on first run only.
    if not os.path.isfile('../measuredivergence/modeldata.tsv'):
        with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
            outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
            f.write(outline)

    size = 80

    for iteration in [11, 12, 13]:

        ceiling = 105
        # Iteration 13 stops after pct 0: it serves only as a gold standard.
        if iteration == 13:
            ceiling = 5

        for pct in range(0, ceiling, 5):
            ratio = pct / 100

            name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
            vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
            tags4positive = {'fantasy_loc', 'fantasy_oclc'}
            tags4negative = {'random'}

            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)

            # Grid-search settings for this family of models.
            c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
            featurestart = 1600
            featureend = 6400
            featurestep = 400
            modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range

            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)

            # Append one summary line per model.
            with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
                outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
                f.write(outline)
def accuracy(df, column):
    """Fraction of rows where the thresholded prediction in `column`
    matches `realclass` (both thresholded at 0.5)."""
    n = len(df.realclass)
    real_pos = df.realclass > 0.5
    real_neg = df.realclass <= 0.5
    pred_pos = df[column] > 0.5
    pred_neg = df[column] <= 0.5
    tp = sum(real_pos & pred_pos)
    tn = sum(real_neg & pred_neg)
    fp = sum(real_neg & pred_pos)
    fn = sum(real_pos & pred_neg)
    # Sanity check: the confusion-matrix cells must partition the rows.
    assert n == (tp + fp + tn + fn)
    return (tp + tn) / n
def accuracy_loss(df):
    """Accuracy drop when the alien model's scores replace the model's own."""
    own_acc = accuracy(df, 'probability')
    alien_acc = accuracy(df, 'alien_model')
    return own_acc - alien_acc
def kldivergence(p, q):
    """Kullback-Leibler divergence KL(p || q) over two discrete distributions.

    Terms where p is zero contribute nothing (by the 0*log(0) = 0 convention).
    Assumes p and q are same-length sequences and q is nonzero wherever p is.
    """
    # BUGFIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; using the builtin keeps behavior identical.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)

    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def averagecorr(r1, r2):
    """Average two correlation coefficients via Fisher z-transformation:
    arctanh each r, take the mean, and map back with tanh."""
    mean_z = (np.arctanh(r1) + np.arctanh(r2)) / 2
    return np.tanh(mean_z)
def get_divergences(gold, testname, itera, size, pct):
    """Apply the gold model and the test model to each other's predictions.

    Loads each model's pickled criteria (.pkl) and training-time output
    (.csv), scores each on the other's volumes, and returns averaged Pearson,
    Spearman, accuracy-loss, and KL statistics plus the directional values.
    (`itera`, `size`, and `pct` are accepted but unused here.)
    """

    # We're going to apply the gold standard model's
    # criteria to another model's output, and vice-versa.

    model1 = '../measuredivergence/modeloutput/' + gold + '.pkl'
    meta1 = '../measuredivergence/modeloutput/' + gold + '.csv'

    testpath = '../measuredivergence/modeloutput/' + testname
    model2 = testpath + '.pkl'
    meta2 = testpath + '.csv'

    model1on2 = versatiletrainer2.apply_pickled_model(model1, '../data/', '.tsv', meta2)
    model2on1 = versatiletrainer2.apply_pickled_model(model2, '../data/', '.tsv', meta1)

    # Correlations are averaged through a Fisher z-transform (averagecorr).
    pearson1on2 = stats.pearsonr(model1on2.probability, model1on2.alien_model)[0]
    pearson2on1 = stats.pearsonr(model2on1.probability, model2on1.alien_model)[0]
    pearson = averagecorr(pearson1on2, pearson2on1)

    spearman1on2 = stats.spearmanr(model1on2.probability, model1on2.alien_model)[0]
    spearman2on1 = stats.spearmanr(model2on1.probability, model2on1.alien_model)[0]
    spearman = averagecorr(spearman1on2, spearman2on1)

    loss1on2 = accuracy_loss(model1on2)
    loss2on1 = accuracy_loss(model2on1)
    loss = (loss1on2 + loss2on1) / 2

    kl1on2 = kldivergence(model1on2.probability, model1on2.alien_model)
    kl2on1 = kldivergence(model2on1.probability, model2on1.alien_model)
    kl = (kl1on2 + kl2on1) / 2

    return pearson, spearman, loss, kl, spearman1on2, spearman2on1, loss1on2, loss2on1
def measure_sf_divergences():
    """Measure divergences between gold-standard SF models and diluted ones.

    Each gold model (trained at dilution ratio 0) is compared to the models
    from iterations 5-6 at every dilution percentage; one row per pair is
    appended to sf_divergences.tsv.
    """
    columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']

    # Write the header only on first run; later runs append rows.
    if not os.path.isfile('../measuredivergence/sf_divergences.tsv'):
        with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
            scribe.writeheader()

    goldstandards = ['iter5_size80_ratio0', 'iter6_size80_ratio0', 'iter7_size80_ratio0']
    size = 80

    # Registry of previously trained models, indexed by name; supplies accuracies.
    modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')

    for gold in goldstandards:
        for itera in [5, 6]:
            for pct in range(0, 105, 5):
                ratio = pct / 100
                testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)

                # We don't test a model against itself.
                if testname == gold:
                    continue
                else:
                    row = dict()
                    row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
                    row['name1'] = gold
                    row['name2'] = testname
                    row['size'] = size
                    row['acc1'] = modeldata.loc[gold, 'accuracy']
                    row['acc2'] = modeldata.loc[testname, 'accuracy']
                    row['ratiodiff'] = ratio

                    # Append each row immediately so an interrupted run keeps its results.
                    with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
                        scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
                        scribe.writerow(row)
def measure_fsf_divergences():
    """Measure divergences between gold fantasy-vs-SF models and diluted ones.

    Each gold model (trained at dilution ratio 0) is compared to the models
    from iterations 8-9 at every dilution percentage; one row per pair is
    appended to fsf_divergences.tsv.
    """
    columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']

    # Write the header only on first run; later runs append rows.
    if not os.path.isfile('../measuredivergence/fsf_divergences.tsv'):
        with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
            scribe.writeheader()

    goldstandards = ['iter8_size80_ratio0', 'iter9_size80_ratio0', 'iter10_size80_ratio0']
    size = 80

    # Registry of previously trained models, indexed by name; supplies accuracies.
    modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')

    for gold in goldstandards:
        for itera in [8, 9]:
            for pct in range(0, 105, 5):
                ratio = pct / 100
                testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)

                if testname == gold:
                    continue
                    # we don't test a model against itself
                else:
                    row = dict()
                    row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
                    row['name1'] = gold
                    row['name2'] = testname
                    row['size'] = size
                    row['acc1'] = modeldata.loc[gold, 'accuracy']
                    row['acc2'] = modeldata.loc[testname, 'accuracy']
                    row['ratiodiff'] = ratio

                    # Append each row immediately so an interrupted run keeps its results.
                    with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
                        scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
                        scribe.writerow(row)
def measure_fantasy_divergences():
    """Measure divergences between gold-standard fantasy models and diluted ones.

    Each gold model (trained at dilution ratio 0) is compared to the models
    from iterations 11-12 at every dilution percentage; one row per pair is
    appended to fantasy_divergences.tsv.
    """
    columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']

    # Write the header only on first run; later runs append rows.
    if not os.path.isfile('../measuredivergence/fantasy_divergences.tsv'):
        with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
            scribe.writeheader()

    goldstandards = ['iter11_size80_ratio0', 'iter12_size80_ratio0', 'iter13_size80_ratio0']
    size = 80

    # Registry of previously trained models, indexed by name; supplies accuracies.
    modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')

    for gold in goldstandards:
        for itera in [11, 12]:
            for pct in range(0, 105, 5):
                ratio = pct / 100
                testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)

                # We don't test a model against itself.
                if testname == gold:
                    continue
                else:
                    row = dict()
                    row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
                    row['name1'] = gold
                    row['name2'] = testname
                    row['size'] = size
                    row['acc1'] = modeldata.loc[gold, 'accuracy']
                    row['acc2'] = modeldata.loc[testname, 'accuracy']
                    row['ratiodiff'] = ratio

                    # Append each row immediately so an interrupted run keeps its results.
                    with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
                        scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
                        scribe.writerow(row)
def new_experiment():
    """Train mixed-genre models plus gold fantasy and detective models.

    For iterations 3-5: trains one fantasy/detective "mixed" model per
    dilution ratio, then a pure ('gold') fantasy model and a pure detective
    model on partition 2.  Summary statistics for every model are appended
    to newexperimentmodels.csv; per-model predictions go to
    ../measuredivergence/newmodeloutput/.
    """

    # The first time I ran this, I used partition 2 to build the
    # mixed data, and partition 1 as a gold standard. Now reversing.

    outmodelpath = '../measuredivergence/results/newexperimentmodels.csv'
    columns = ['name', 'size', 'ratio', 'iteration', 'meandate', 'maxaccuracy', 'features', 'regularization']
    if not os.path.isfile(outmodelpath):
        with open(outmodelpath, mode = 'w', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writeheader()

    # Grid-search settings shared by every model trained below.
    c_range = [.00001, .0001, .001, .01, 0.1, 1, 10, 100]
    featurestart = 1500
    featureend = 6000
    featurestep = 300
    modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range
    sizecap = 75

    for i in range(3, 6):

        # Mixed-genre models: one per dilution ratio.
        for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
            sourcefolder = '../measuredivergence/mix/' + str(ratio) + '/'
            metadatapath = '../measuredivergence/partitionmeta/meta' + str(ratio) + '.csv'
            name = 'mixeddata_' + str(i) + '_' + str(ratio)
            vocabpath = '../lexica/' + name + '.txt'
            tags4positive = {'fantasy', 'detective'}
            tags4negative = {'random'}
            floor = 1800
            ceiling = 1930

            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

            # Mean first-publication date of the volumes actually used.
            meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

            row = dict()
            row['name'] = name
            row['size'] = sizecap
            row['ratio'] = ratio
            row['iteration'] = i
            row['meandate'] = meandate
            row['maxaccuracy'] = maxaccuracy
            row['features'] = features4max
            row['regularization'] = best_regularization_coef

            with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
                scribe = csv.DictWriter(f, fieldnames = columns)
                scribe.writerow(row)

            # The per-model vocabulary is only needed during training.
            os.remove(vocabpath)

        # Gold fantasy model for this iteration.
        sourcefolder = '../data/'
        metadatapath = '../measuredivergence/partitionmeta/part2.csv'
        # note that this is changed if you create mix data with
        # partition 2
        name = 'goldfantasy_' + str(i)
        vocabpath = '../lexica/' + name + '.txt'
        tags4positive = {'fantasy'}
        tags4negative = {'random', 'randomB'}
        floor = 1800
        ceiling = 1930

        metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

        matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

        meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

        row = dict()
        row['name'] = name
        row['size'] = sizecap
        # NOTE(review): `ratio` here is left over from the loop above (always
        # the last value, 100); it doesn't describe the gold model — confirm intended.
        row['ratio'] = ratio
        row['iteration'] = i
        row['meandate'] = meandate
        row['maxaccuracy'] = maxaccuracy
        row['features'] = features4max
        row['regularization'] = best_regularization_coef

        with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writerow(row)

        os.remove(vocabpath)

        # Gold detective model for this iteration.
        sourcefolder = '../data/'
        metadatapath = '../measuredivergence/partitionmeta/part2.csv'
        # depending on which partition you used to create mix data;
        # this will be the other one
        name = 'golddetective_' + str(i)
        vocabpath = '../lexica/' + name + '.txt'
        tags4positive = {'detective'}
        tags4negative = {'random', 'randomB'}
        floor = 1800
        ceiling = 1930

        metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

        matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

        meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

        row = dict()
        row['name'] = name
        row['size'] = sizecap
        # NOTE(review): same leftover `ratio` as above — confirm intended.
        row['ratio'] = ratio
        row['iteration'] = i
        row['meandate'] = meandate
        row['maxaccuracy'] = maxaccuracy
        row['features'] = features4max
        row['regularization'] = best_regularization_coef

        with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writerow(row)

        os.remove(vocabpath)
def accuracy(df, column):
    """Fraction of rows where the thresholded prediction in `column`
    matches `realclass` (both thresholded at 0.5)."""
    n = len(df.realclass)
    real_pos = df.realclass > 0.5
    real_neg = df.realclass <= 0.5
    pred_pos = df[column] > 0.5
    pred_neg = df[column] <= 0.5
    tp = sum(real_pos & pred_pos)
    tn = sum(real_neg & pred_neg)
    fp = sum(real_neg & pred_pos)
    fn = sum(real_pos & pred_neg)
    # Sanity check: the confusion-matrix cells must partition the rows.
    assert n == (tp + fp + tn + fn)
    return (tp + tn) / n
def accuracy_loss(df):
    """Accuracy drop when the alien model's scores replace the model's own."""
    own_acc = accuracy(df, 'probability')
    alien_acc = accuracy(df, 'alien_model')
    return own_acc - alien_acc
def get_divergence(sampleA, sampleB, twodatafolder = '../data/', onedatafolder = '../data/'):
    """Apply model A to B's corpus, and vice versa; return divergence measures.

    Returns averaged z-transformed Spearman correlation and accuracy loss,
    plus directional values, own/alien accuracies, and mean standardized dates.
    """
    # We start by constructing the paths to the sampleA
    # standard model criteria (.pkl) and
    # model output (.csv) on the examples
    # originally used to train it.
    # We're going to try applying the sampleA standard
    # criteria to the other model's data, and vice-
    # versa.
    model1 = '../measuredivergence/newmodeloutput/' + sampleA + '.pkl'
    meta1 = '../measuredivergence/newmodeloutput/' + sampleA + '.csv'

    # Now we construct paths to the test model
    # criteria (.pkl) and output (.csv).
    model2 = '../measuredivergence/newmodeloutput/' + sampleB + '.pkl'
    meta2 = '../measuredivergence/newmodeloutput/' + sampleB + '.csv'

    model1on2 = versatiletrainer2.apply_pickled_model(model1, twodatafolder, '.tsv', meta2)
    model2on1 = versatiletrainer2.apply_pickled_model(model2, onedatafolder, '.tsv', meta1)

    # Fisher z-transform (arctanh) each Spearman r before averaging.
    spearman1on2 = np.arctanh(stats.spearmanr(model1on2.probability, model1on2.alien_model)[0])
    spearman2on1 = np.arctanh(stats.spearmanr(model2on1.probability, model2on1.alien_model)[0])
    spearman = (spearman1on2 + spearman2on1) / 2

    loss1on2 = accuracy_loss(model1on2)
    loss2on1 = accuracy_loss(model2on1)
    loss = (loss1on2 + loss2on1) / 2

    # Accuracy of each model on the other's corpus ('alien_model' column)
    # and on its own ('probability' column).
    alienacc2 = accuracy(model1on2, 'alien_model')
    alienacc1 = accuracy(model2on1, 'alien_model')

    acc2 = accuracy(model1on2, 'probability')
    acc1 = accuracy(model2on1, 'probability')

    meandate2 = np.mean(model1on2.std_date)
    meandate1 = np.mean(model2on1.std_date)

    return spearman, loss, spearman1on2, spearman2on1, loss1on2, loss2on1, acc1, acc2, alienacc1, alienacc2, meandate1, meandate2
def write_a_row(r, outfile, columns):
with open(outfile, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns, delimiter = '\t')
scribe.writerow(r)
def new_divergences():
    # Compare the "gold" single-genre models (fantasy and detective) to
    # models trained on mixed data at each mixing ratio, appending one row
    # per model pair to a cumulative tsv of comparisons.
    outcomparisons = '../measuredivergence/results/new_comparisons.tsv'
    columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']
    # Write a header only if the output file doesn't exist yet; otherwise
    # rows are simply appended to the existing file.
    if not os.path.isfile(outcomparisons):
        with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
            scribe.writeheader()
    # I originally ran this with i and j
    # iterating through range(3). Now trying
    # on models generated with the partitions
    # reversed.
    for i in range(3, 6):
        for j in range(3, 6):
            # ratio appears to be the percentage of one genre in the mixed
            # training set (see the 100 - ratio complement below) —
            # TODO confirm against the script that built the mixes.
            for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
                # Fantasy model vs. mixed-data model.
                r = dict()
                r['testype'] = 'fantasy2mixed'
                r['name1'] = 'goldfantasy_' + str(i)
                r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
                r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')
                r['ratio'] = ratio
                write_a_row(r, outcomparisons, columns)
                # Detective model vs. the same mixed-data model.
                r = dict()
                r['testype'] = 'detective2mixed'
                r['name1'] = 'golddetective_' + str(i)
                r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
                r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')
                r['ratio'] = 100 - ratio
                # note that distance from detective is the complement
                # of distance from fantasy
                write_a_row(r, outcomparisons, columns)
def new_self_comparisons ():
    # Baseline: compare mixed-data models to *other* mixed-data models built
    # at the same ratio but from different partitions (i from the original
    # runs, j from the reversed-partition runs), appending one row per pair.
    outcomparisons = '../measuredivergence/results/self_comparisons.tsv'
    columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']
    # Write a header only on first creation; afterwards rows are appended.
    if not os.path.isfile(outcomparisons):
        with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
            scribe.writeheader()
    for i in range(0, 3):
        for j in range(3, 6):
            for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
                r = dict()
                r['testype'] = 'selfmixed'
                r['name1'] = 'mixeddata_' + str(i) + '_' + str(ratio)
                r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
                # The two samples live in different data folders (mix vs
                # altmix), one per partition direction.
                r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/', onedatafolder = '../measuredivergence/altmix/' + str(ratio) + '/')
                r['ratio'] = ratio
                write_a_row(r, outcomparisons, columns)
new_self_comparisons()
| true
| true
|
79079492aae6cd19fd5eaa852608c3016f5e6061
| 6,841
|
py
|
Python
|
seleniumbase/plugins/db_reporting_plugin.py
|
Mu-L/SeleniumBase
|
8387e89cfa3bd62a564246c0c00a94b8199b0792
|
[
"MIT"
] | null | null | null |
seleniumbase/plugins/db_reporting_plugin.py
|
Mu-L/SeleniumBase
|
8387e89cfa3bd62a564246c0c00a94b8199b0792
|
[
"MIT"
] | null | null | null |
seleniumbase/plugins/db_reporting_plugin.py
|
Mu-L/SeleniumBase
|
8387e89cfa3bd62a564246c0c00a94b8199b0792
|
[
"MIT"
] | null | null | null |
"""
This plugin is for recording test results in the Testcase Database.
"""
import getpass
import time
import uuid
from nose.plugins import Plugin
from nose.exc import SkipTest
from seleniumbase.core.application_manager import ApplicationManager
from seleniumbase.core.testcase_manager import ExecutionQueryPayload
from seleniumbase.core.testcase_manager import TestcaseDataPayload
from seleniumbase.core.testcase_manager import TestcaseManager
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import errors
class DBReporting(Plugin):
    """
    This plugin records test results in the Testcase Database.
    One row is written for the whole run (the "execution") and one row
    per individual testcase.
    """

    name = "db_reporting"  # Usage: --with-db_reporting

    def __init__(self):
        Plugin.__init__(self)
        # A fresh guid identifies this whole run in the database.
        self.execution_guid = str(uuid.uuid4())
        self.testcase_guid = None
        self.execution_start_time = 0
        self.case_start_time = 0
        self.testcase_manager = None
        self._result_set = False  # True once a result row was written for the current test
        self._test = None  # the test currently being run

    def options(self, parser, env):
        """Register the --database_env / --database-env command-line option."""
        super(DBReporting, self).options(parser, env=env)
        parser.add_option(
            "--database_env",
            "--database-env",
            action="store",
            dest="database_env",
            choices=(
                constants.Environment.QA,
                constants.Environment.STAGING,
                constants.Environment.DEVELOP,
                constants.Environment.PRODUCTION,
                constants.Environment.MASTER,
                constants.Environment.REMOTE,
                constants.Environment.LOCAL,
                constants.Environment.ALPHA,
                constants.Environment.BETA,
                constants.Environment.MAIN,
                constants.Environment.TEST,
            ),
            default=constants.Environment.TEST,
            help="The database environment to run the tests in.",
        )

    def configure(self, options, conf):
        """Capture the parsed options and connect the testcase manager."""
        super(DBReporting, self).configure(options, conf)
        # NOTE: this intentionally shadows the options() method on the
        # instance with the parsed options object (common nose idiom).
        self.options = options
        self.testcase_manager = TestcaseManager(self.options.database_env)

    def begin(self):
        """At the start of the run, we want to record the test
        execution information in the database."""
        exec_payload = ExecutionQueryPayload()
        exec_payload.execution_start_time = int(time.time() * 1000)
        self.execution_start_time = exec_payload.execution_start_time
        exec_payload.guid = self.execution_guid
        exec_payload.username = getpass.getuser()
        self.testcase_manager.insert_execution_data(exec_payload)

    def startTest(self, test):
        """At the start of the test, set the testcase details."""
        data_payload = TestcaseDataPayload()
        self.testcase_guid = str(uuid.uuid4())
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        if hasattr(test, "browser"):
            data_payload.browser = test.browser
        else:
            data_payload.browser = "N/A"
        data_payload.test_address = test.id()
        application = ApplicationManager.generate_application_string(test)
        data_payload.env = application.split(".")[0]
        data_payload.start_time = application.split(".")[1]
        data_payload.state = constants.State.UNTESTED
        self.testcase_manager.insert_testcase_data(data_payload)
        self.case_start_time = int(time.time() * 1000)
        # Make the testcase guid available to other plugins
        test.testcase_guid = self.testcase_guid
        # BUGFIX: reset the per-test result flag here. Previously it was set
        # only in __init__, so after the first test recorded a result, every
        # later skipped test went unrecorded by afterTest().
        self._result_set = False
        self._test = test
        self._test._nose_skip_reason = None

    def finalize(self, result):
        """At the end of the test run, we want to
        update the DB row with the total execution time."""
        runtime = int(time.time() * 1000) - self.execution_start_time
        self.testcase_manager.update_execution_data(
            self.execution_guid, runtime
        )

    def afterTest(self, test):
        """If no pass/fail/error result was recorded, the test was skipped:
        write a SKIPPED row, using the skip reason when one is known."""
        if not self._result_set:
            err = None
            try:
                err = self._test._nose_skip_reason
                if err:
                    err = "Skipped: " + str(err)
                    err = (err, err)
            except Exception:
                pass
            if not err:
                err = "Skipped: (no reason given)"
                err = (err, err)
            self.__insert_test_result(constants.State.SKIPPED, self._test, err)

    def addSuccess(self, test, capt):
        """
        After each test success, record testcase run information.
        """
        self.__insert_test_result(constants.State.PASSED, test)
        self._result_set = True

    def addFailure(self, test, err, capt=None, tbinfo=None):
        """
        After each test failure, record testcase run information.
        """
        self.__insert_test_result(constants.State.FAILED, test, err)
        self._result_set = True

    def addError(self, test, err, capt=None):
        """
        After each test error, record testcase run information.
        (Test errors should be treated the same as test failures.)
        """
        self.__insert_test_result(constants.State.FAILED, test, err)
        self._result_set = True

    def handleError(self, test, err, capt=None):
        """
        After each test error, record testcase run information.
        "Error" also encompasses any states other than Pass or Fail, so we
        check for those first.  (The unreachable `return True` statements
        that used to follow each `raise` have been removed; raising
        SkipTest is what hands control back to nose.)
        """
        if err[0] == errors.BlockedTest:
            self.__insert_test_result(constants.State.BLOCKED, test, err)
            self._result_set = True
            raise SkipTest(err[1])
        elif err[0] == errors.DeprecatedTest:
            self.__insert_test_result(constants.State.DEPRECATED, test, err)
            self._result_set = True
            raise SkipTest(err[1])
        elif err[0] == errors.SkipTest:
            self.__insert_test_result(constants.State.SKIPPED, test, err)
            self._result_set = True
            raise SkipTest(err[1])

    def __insert_test_result(self, state, test, err=None):
        """Write one testcase row with its final state and runtime; `err`
        (when given) is a (type, value) pair whose message is stored after
        stripping nose's captured-logging footer."""
        data_payload = TestcaseDataPayload()
        data_payload.runtime = int(time.time() * 1000) - self.case_start_time
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        data_payload.state = state
        if err is not None:
            data_payload.message = (
                err[1]
                .__str__()
                .split(
                    """-------------------- >> """
                    """begin captured logging"""
                    """ << --------------------""",
                    1,
                )[0]
            )
        self.testcase_manager.update_testcase_data(data_payload)
| 37.382514
| 79
| 0.612045
|
import getpass
import time
import uuid
from nose.plugins import Plugin
from nose.exc import SkipTest
from seleniumbase.core.application_manager import ApplicationManager
from seleniumbase.core.testcase_manager import ExecutionQueryPayload
from seleniumbase.core.testcase_manager import TestcaseDataPayload
from seleniumbase.core.testcase_manager import TestcaseManager
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import errors
class DBReporting(Plugin):
name = "db_reporting"
def __init__(self):
Plugin.__init__(self)
self.execution_guid = str(uuid.uuid4())
self.testcase_guid = None
self.execution_start_time = 0
self.case_start_time = 0
self.testcase_manager = None
self._result_set = False
self._test = None
def options(self, parser, env):
super(DBReporting, self).options(parser, env=env)
parser.add_option(
"--database_env",
"--database-env",
action="store",
dest="database_env",
choices=(
constants.Environment.QA,
constants.Environment.STAGING,
constants.Environment.DEVELOP,
constants.Environment.PRODUCTION,
constants.Environment.MASTER,
constants.Environment.REMOTE,
constants.Environment.LOCAL,
constants.Environment.ALPHA,
constants.Environment.BETA,
constants.Environment.MAIN,
constants.Environment.TEST,
),
default=constants.Environment.TEST,
help="The database environment to run the tests in.",
)
def configure(self, options, conf):
super(DBReporting, self).configure(options, conf)
self.options = options
self.testcase_manager = TestcaseManager(self.options.database_env)
def begin(self):
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int(time.time() * 1000)
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload)
def startTest(self, test):
data_payload = TestcaseDataPayload()
self.testcase_guid = str(uuid.uuid4())
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
if hasattr(test, "browser"):
data_payload.browser = test.browser
else:
data_payload.browser = "N/A"
data_payload.test_address = test.id()
application = ApplicationManager.generate_application_string(test)
data_payload.env = application.split(".")[0]
data_payload.start_time = application.split(".")[1]
data_payload.state = constants.State.UNTESTED
self.testcase_manager.insert_testcase_data(data_payload)
self.case_start_time = int(time.time() * 1000)
test.testcase_guid = self.testcase_guid
self._test = test
self._test._nose_skip_reason = None
def finalize(self, result):
runtime = int(time.time() * 1000) - self.execution_start_time
self.testcase_manager.update_execution_data(
self.execution_guid, runtime
)
def afterTest(self, test):
if not self._result_set:
err = None
try:
err = self._test._nose_skip_reason
if err:
err = "Skipped: " + str(err)
err = (err, err)
except Exception:
pass
if not err:
err = "Skipped: (no reason given)"
err = (err, err)
self.__insert_test_result(constants.State.SKIPPED, self._test, err)
def addSuccess(self, test, capt):
self.__insert_test_result(constants.State.PASSED, test)
self._result_set = True
def addFailure(self, test, err, capt=None, tbinfo=None):
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True
def addError(self, test, err, capt=None):
self.__insert_test_result(constants.State.FAILED, test, err)
self._result_set = True
def handleError(self, test, err, capt=None):
if err[0] == errors.BlockedTest:
self.__insert_test_result(constants.State.BLOCKED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif err[0] == errors.DeprecatedTest:
self.__insert_test_result(constants.State.DEPRECATED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
elif err[0] == errors.SkipTest:
self.__insert_test_result(constants.State.SKIPPED, test, err)
self._result_set = True
raise SkipTest(err[1])
return True
def __insert_test_result(self, state, test, err=None):
data_payload = TestcaseDataPayload()
data_payload.runtime = int(time.time() * 1000) - self.case_start_time
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
data_payload.state = state
if err is not None:
data_payload.message = (
err[1]
.__str__()
.split(
"""-------------------- >> """
"""begin captured logging"""
""" << --------------------""",
1,
)[0]
)
self.testcase_manager.update_testcase_data(data_payload)
| true
| true
|
79079494201782699c4a87242adcde37ce225f93
| 1,577
|
py
|
Python
|
slowfast/datasets/epickitchens_record.py
|
dylan-campbell/Motionformer
|
6c860614a3b252c6163971ba20e61ea3184d5291
|
[
"Apache-2.0"
] | 153
|
2021-06-10T14:00:22.000Z
|
2022-03-31T04:12:54.000Z
|
slowfast/datasets/epickitchens_record.py
|
dylan-campbell/Motionformer
|
6c860614a3b252c6163971ba20e61ea3184d5291
|
[
"Apache-2.0"
] | 10
|
2021-06-30T04:48:50.000Z
|
2022-03-11T15:51:05.000Z
|
slowfast/datasets/epickitchens_record.py
|
dylan-campbell/Motionformer
|
6c860614a3b252c6163971ba20e61ea3184d5291
|
[
"Apache-2.0"
] | 22
|
2021-06-11T13:10:05.000Z
|
2022-03-28T03:42:39.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .video_record import VideoRecord
from datetime import timedelta
import time
def timestamp_to_sec(timestamp):
    """Convert an 'HH:MM:SS.frac' timestamp string to seconds as a float.

    The fractional part is scaled by the number of digits it contains, so
    '.5', '.50', and '.500' all mean half a second.  The previous version
    always divided by 100, which was only correct when the fraction had
    exactly two digits ('.5' became 0.05 s and '.125' became 1.25 s).
    """
    # strptime validates the format; struct_time carries no sub-second
    # field, so the fraction is re-read from the string below.
    x = time.strptime(timestamp, '%H:%M:%S.%f')
    whole = timedelta(hours=x.tm_hour,
                      minutes=x.tm_min,
                      seconds=x.tm_sec).total_seconds()
    frac = timestamp.split('.')[-1]
    return float(whole) + float(frac) / 10 ** len(frac)
class EpicKitchensVideoRecord(VideoRecord):
    """One annotated action segment from an EPIC-Kitchens annotation row."""

    def __init__(self, tup):
        # tup is an (index, row) pair, e.g. from DataFrame.iterrows().
        index, row = tup
        self._index = str(index)
        self._series = row

    def _timestamp_to_frame(self, key):
        # Convert the stored timestamp under `key` to a frame number.
        return int(round(timestamp_to_sec(self._series[key]) * self.fps))

    @property
    def participant(self):
        return self._series['participant_id']

    @property
    def untrimmed_video_name(self):
        return self._series['video_id']

    @property
    def start_frame(self):
        return self._timestamp_to_frame('start_timestamp')

    @property
    def end_frame(self):
        return self._timestamp_to_frame('stop_timestamp')

    @property
    def fps(self):
        # A three-character video suffix (e.g. P01_101) marks an EPIC-100
        # video at 50 fps; otherwise assume 60 fps.
        suffix = self.untrimmed_video_name.split('_')[1]
        return 50 if len(suffix) == 3 else 60

    @property
    def num_frames(self):
        return self.end_frame - self.start_frame

    @property
    def label(self):
        # Class ids default to -1 when the row carries no annotation.
        verb = self._series['verb_class'] if 'verb_class' in self._series else -1
        noun = self._series['noun_class'] if 'noun_class' in self._series else -1
        return {'verb': verb, 'noun': noun}

    @property
    def metadata(self):
        return {'narration_id': self._index}
| 28.672727
| 91
| 0.637286
|
from .video_record import VideoRecord
from datetime import timedelta
import time
def timestamp_to_sec(timestamp):
x = time.strptime(timestamp, '%H:%M:%S.%f')
sec = float(timedelta(hours=x.tm_hour,
minutes=x.tm_min,
seconds=x.tm_sec).total_seconds()) + float(
timestamp.split('.')[-1]) / 100
return sec
class EpicKitchensVideoRecord(VideoRecord):
def __init__(self, tup):
self._index = str(tup[0])
self._series = tup[1]
@property
def participant(self):
return self._series['participant_id']
@property
def untrimmed_video_name(self):
return self._series['video_id']
@property
def start_frame(self):
return int(round(timestamp_to_sec(self._series['start_timestamp']) * self.fps))
@property
def end_frame(self):
return int(round(timestamp_to_sec(self._series['stop_timestamp']) * self.fps))
@property
def fps(self):
is_100 = len(self.untrimmed_video_name.split('_')[1]) == 3
return 50 if is_100 else 60
@property
def num_frames(self):
return self.end_frame - self.start_frame
@property
def label(self):
return {'verb': self._series['verb_class'] if 'verb_class' in self._series else -1,
'noun': self._series['noun_class'] if 'noun_class' in self._series else -1}
@property
def metadata(self):
return {'narration_id': self._index}
| true
| true
|
79079694c4688c982a416d7263117a1dc63c26a7
| 5,668
|
py
|
Python
|
docs/tests/adhoc_requests.py
|
Siyanda-Mzam/grassroot-platform
|
7130145e7ce46cece31e33cf85748d1777bcf566
|
[
"BSD-3-Clause"
] | 1
|
2020-07-15T23:08:09.000Z
|
2020-07-15T23:08:09.000Z
|
docs/tests/adhoc_requests.py
|
Siyanda-Mzam/grassroot-platform
|
7130145e7ce46cece31e33cf85748d1777bcf566
|
[
"BSD-3-Clause"
] | null | null | null |
docs/tests/adhoc_requests.py
|
Siyanda-Mzam/grassroot-platform
|
7130145e7ce46cece31e33cf85748d1777bcf566
|
[
"BSD-3-Clause"
] | null | null | null |
# Ad-hoc manual test script for the Grassroot REST API (Python 2 syntax:
# the print statements at the bottom of the file are 2.x-only).  Each helper
# wraps a single endpoint with requests; verify=False accepts the local
# self-signed TLS certificate.
__author__ = 'aakilomar'
import requests, json, time
from timeit import default_timer as timer
# Silence the InsecureRequestWarning triggered by verify=False.
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
def cancel_event(eventid):
    post_url = host + "/api/event/cancel/" + str(eventid)
    return requests.post(post_url,None, verify=False).json()
def add_user(phone):
    post_url = host + "/api/user/add/" + str(phone)
    return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
    post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
    return requests.post(post_url,None, verify=False).json()
# NOTE(review): rsvpRequired and voteRequired are redefined identically near
# the bottom of the file; the later definitions shadow these.
def rsvpRequired(userid):
    post_url = host + "/api/event/rsvprequired/" + str(userid)
    return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
    post_url = host + "/api/event/voterequired/" + str(userid)
    return requests.get(post_url,None, verify=False).json()
def upcomingVotes(groupid):
    post_url = host + "/api/event/upcoming/vote/" + str(groupid)
    return requests.get(post_url,None, verify=False).json()
def upcomingMeeting(groupid):
    post_url = host + "/api/event/upcoming/meeting/" + str(groupid)
    return requests.get(post_url,None, verify=False).json()
def votesPerGroupForEvent(groupid, eventid):
    post_url = host + "/api/event/rsvp/totalspergroup/" + str(groupid) + "/" + str(eventid)
    return requests.post(post_url,None, verify=False).json()
# NOTE(review): this 3-argument addLogBook is shadowed by the 4-argument
# version defined below -- any call with three arguments will raise a
# TypeError at runtime.
def addLogBook(userid, groupid, message):
    post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message
    return requests.post(post_url,None, verify=False).json()
def addLogBookWithDate(userid, groupid, message, actionbydate):
    post_url = host + "/api/logbook/addwithdate/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate
    return requests.post(post_url,None, verify=False).json()
def addLogBookWithDateAndAssign(userid, groupid, message, actionbydate, assigntouserid):
    post_url = host + "/api/logbook/addwithdateandassign/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate + "/" + str(assigntouserid)
    return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message, replicate):
    post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + str(replicate)
    return requests.post(post_url,None, verify=False).json()
# NOTE(review): the 1-argument listReplicated is likewise shadowed by the
# 2-argument version that follows it.
def listReplicated(groupid):
    post_url = host + "/api/logbook/listreplicated/" + str(groupid)
    return requests.get(post_url,None, verify=False).json()
def listReplicated(groupid, completed):
    post_url = host + "/api/logbook/listreplicated/" + str(groupid) + "/" + str(completed)
    return requests.get(post_url,None, verify=False).json()
def setInitiatedSession(userid):
    post_url = host + "/api/user/setinitiatedsession/" + str(userid)
    return requests.post(post_url,None, verify=False).json()
def listReplicatedMessage(groupid, message):
    post_url = host + "/api/logbook/listreplicatedbymessage/" + str(groupid) + "/" + message
    return requests.get(post_url,None, verify=False).json()
def createAccount(userid,groupid,accountname):
    post_url = host + "/api/account/add/" + str(userid) + "/" + str(groupid) + "/" + str(accountname)
    return requests.post(post_url,None, verify=False).json()
def ussdStart(phonenumber,enteredUssd):
    # enteredUssd is accepted but never used in the URL -- presumably a
    # leftover parameter; verify against the USSD controller.
    post_url = host + "/ussd/start?msisdn=" + str(phonenumber)
    return requests.get(post_url,None, verify=False)
def add_user_to_group(userid,groupid):
    post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
    return requests.post(post_url,None, verify=False).json()
def remove_user_from_group(userid,groupid):
    post_url = host + "/api/group/remove/userfromgroup/" + str(userid) + "/" + str(groupid)
    return requests.post(post_url,None, verify=False).json()
def get_user_join_group(userid,groupid):
    # Returns raw response content, not parsed JSON like the other helpers.
    post_url = host + "/api/group/get/userjoingroup/" + str(userid) + "/" + str(groupid)
    return requests.post(post_url,None, verify=False).content
# Duplicate definitions (identical to the ones above); these are the ones
# actually in effect at module level.
def rsvpRequired(userid):
    post_url = host + "/api/event/rsvprequired/" + str(userid)
    return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
    post_url = host + "/api/event/voterequired/" + str(userid)
    return requests.get(post_url,None, verify=False).json()
def add_event(userid,groupid, name):
    post_url = host + "/api/event/add/" + str(userid) + "/" + str(groupid) + "/" + name
    return requests.post(post_url,None, verify=False).json()
#print cancel_event(5166)
#user = add_user("0823333332")
#user = add_user("0821111111")
#print "user-->" + str(user)
#print rsvp(5167,user['id'],"no")
#print rsvpRequired(user['id'])
#print voteRequired(user['id'])
#print upcomingVotes(231)
#print votesPerGroupForEvent(194,5103)
#print addLogBook(1,85,"X must do Y")
#print addLogBook(1,88,"Somebody must Y",True) # has sub groups
#print addLogBook(1,85,"Somebody must do X",True) # no subgroups
#print listReplicated(88,False)
#print addLogBookWithDateAndAssign(1,21,"aakil must do Y","2015-12-13 08:45:00",588)
#print addLogBookWithDate(1,21,"someone must do Y","2015-12-13 08:45:00")
#print setInitiatedSession(588)
#print(listReplicatedMessage(88,"Somebody must X"))
#print(createAccount(1,21,"acc 21"))
#for i in range(1,7,1):
## start = timer()
# print ussdStart("0826607134","")
# end = timer()
# print(end - start)
#print add_user_to_group(588,82)
#print remove_user_from_group(588,82)
#print get_user_join_group(588,82)
#print voteRequired(817)
print rsvpRequired(817)
print "klaarie"
| 40.485714
| 159
| 0.702541
|
__author__ = 'aakilomar'
import requests, json, time
from timeit import default_timer as timer
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
def cancel_event(eventid):
post_url = host + "/api/event/cancel/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def upcomingVotes(groupid):
post_url = host + "/api/event/upcoming/vote/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def upcomingMeeting(groupid):
post_url = host + "/api/event/upcoming/meeting/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def votesPerGroupForEvent(groupid, eventid):
post_url = host + "/api/event/rsvp/totalspergroup/" + str(groupid) + "/" + str(eventid)
return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDate(userid, groupid, message, actionbydate):
post_url = host + "/api/logbook/addwithdate/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate
return requests.post(post_url,None, verify=False).json()
def addLogBookWithDateAndAssign(userid, groupid, message, actionbydate, assigntouserid):
post_url = host + "/api/logbook/addwithdateandassign/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + actionbydate + "/" + str(assigntouserid)
return requests.post(post_url,None, verify=False).json()
def addLogBook(userid, groupid, message, replicate):
post_url = host + "/api/logbook/add/" + str(userid) + "/" + str(groupid) + "/" + message + "/" + str(replicate)
return requests.post(post_url,None, verify=False).json()
def listReplicated(groupid):
post_url = host + "/api/logbook/listreplicated/" + str(groupid)
return requests.get(post_url,None, verify=False).json()
def listReplicated(groupid, completed):
post_url = host + "/api/logbook/listreplicated/" + str(groupid) + "/" + str(completed)
return requests.get(post_url,None, verify=False).json()
def setInitiatedSession(userid):
post_url = host + "/api/user/setinitiatedsession/" + str(userid)
return requests.post(post_url,None, verify=False).json()
def listReplicatedMessage(groupid, message):
post_url = host + "/api/logbook/listreplicatedbymessage/" + str(groupid) + "/" + message
return requests.get(post_url,None, verify=False).json()
def createAccount(userid,groupid,accountname):
post_url = host + "/api/account/add/" + str(userid) + "/" + str(groupid) + "/" + str(accountname)
return requests.post(post_url,None, verify=False).json()
def ussdStart(phonenumber,enteredUssd):
post_url = host + "/ussd/start?msisdn=" + str(phonenumber)
return requests.get(post_url,None, verify=False)
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def remove_user_from_group(userid,groupid):
post_url = host + "/api/group/remove/userfromgroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def get_user_join_group(userid,groupid):
post_url = host + "/api/group/get/userjoingroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).content
def rsvpRequired(userid):
post_url = host + "/api/event/rsvprequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def voteRequired(userid):
post_url = host + "/api/event/voterequired/" + str(userid)
return requests.get(post_url,None, verify=False).json()
def add_event(userid,groupid, name):
post_url = host + "/api/event/add/" + str(userid) + "/" + str(groupid) + "/" + name
return requests.post(post_url,None, verify=False).json()
"klaarie"
| false
| true
|
7907975e3a6d3bcdb286b19767c81ff2bf531b32
| 549
|
py
|
Python
|
Server/videoProcessServer/mysqlTools.py
|
kalenforn/video-context-analyze
|
a28c80b861664cfae73568845d753f3efc79c35a
|
[
"MIT"
] | 3
|
2021-05-08T10:28:41.000Z
|
2021-06-23T14:33:07.000Z
|
Server/videoProcessServer/mysqlTools.py
|
kalenforn/video-context-analyze
|
a28c80b861664cfae73568845d753f3efc79c35a
|
[
"MIT"
] | null | null | null |
Server/videoProcessServer/mysqlTools.py
|
kalenforn/video-context-analyze
|
a28c80b861664cfae73568845d753f3efc79c35a
|
[
"MIT"
] | 1
|
2021-05-08T10:28:43.000Z
|
2021-05-08T10:28:43.000Z
|
import pymysql
class SQLHold():
    """Thin convenience wrapper pairing a pymysql connection with a cursor."""

    def __init__(self, host: str, user: str, password: str, database: str, port=3306):
        # Open the connection immediately and keep one shared cursor.
        self.db = pymysql.connect(host=host, user=user, port=port, database=database, password=password)
        self.cursor = self.db.cursor()

    def execute_command(self, command: str):
        # Run one SQL statement and commit right away.
        self.cursor.execute(command)
        self.cursor.connection.commit()

    def fetchall(self):
        # All rows produced by the most recently executed statement.
        return self.cursor.fetchall()

    def close(self):
        # Release the cursor before closing the underlying connection.
        self.cursor.close()
        self.db.close()
| 27.45
| 104
| 0.642987
|
import pymysql
class SQLHold():
def __init__(self, host: str, user: str, password: str, database: str, port=3306):
self.db = pymysql.connect(host=host, user=user, port=port, database=database, password=password)
self.cursor = self.db.cursor()
def execute_command(self, command: str):
self.cursor.execute(command)
self.cursor.connection.commit()
def fetchall(self):
result = self.cursor.fetchall()
return result
def close(self):
self.cursor.close()
self.db.close()
| true
| true
|
7907979633bd22bf762087c7749c83e21c751284
| 6,641
|
py
|
Python
|
Project_Plagiarism_Detection/source_pytorch/train.py
|
ngocpc/Project_Plagiarism_Detection
|
d06216d2aafa71e52c528f3ae451a49638e9785d
|
[
"MIT"
] | null | null | null |
Project_Plagiarism_Detection/source_pytorch/train.py
|
ngocpc/Project_Plagiarism_Detection
|
d06216d2aafa71e52c528f3ae451a49638e9785d
|
[
"MIT"
] | null | null | null |
Project_Plagiarism_Detection/source_pytorch/train.py
|
ngocpc/Project_Plagiarism_Detection
|
d06216d2aafa71e52c528f3ae451a49638e9785d
|
[
"MIT"
] | null | null | null |
import argparse
import json
import os
import pandas as pd
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
# imports the model in model.py by name
from model import BinaryClassifier
def model_fn(model_dir):
    """Load the PyTorch model from the `model_dir` directory."""
    print("Loading model.")
    # Recreate the network from the hyperparameters saved at training time.
    info_path = os.path.join(model_dir, 'model_info.pth')
    with open(info_path, 'rb') as f:
        model_info = torch.load(f)
    print("model_info: {}".format(model_info))
    # Build the model on whichever device is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = BinaryClassifier(model_info['input_features'],
                             model_info['hidden_dim'],
                             model_info['output_dim'])
    # Restore the trained weights.
    weights_path = os.path.join(model_dir, 'model.pth')
    with open(weights_path, 'rb') as f:
        model.load_state_dict(torch.load(f))
    # Inference only: move to the device and switch to eval mode.
    model.to(device).eval()
    print("Done loading model.")
    return model
# Gets training data in batches from the train.csv file
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_x = torch.from_numpy(train_data.drop([0], axis=1).values).float()
train_ds = torch.utils.data.TensorDataset(train_x, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
# Provided training function
def train(model, train_loader, epochs, criterion, optimizer, device):
"""
This is the training method that is called by the PyTorch training script. The parameters
passed are as follows:
model - The PyTorch model that we wish to train.
train_loader - The PyTorch DataLoader that should be used during training.
epochs - The total number of epochs to train for.
criterion - The loss function used for training.
optimizer - The optimizer to use during training.
device - Where the model and data should be loaded (gpu or cpu).
"""
# training loop is provided
for epoch in range(1, epochs + 1):
model.train() # Make sure that the model is in training mode.
total_loss = 0
for batch in train_loader:
# get data
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
optimizer.zero_grad()
# get predictions from model
y_pred = model(batch_x)
# perform backprop
loss = criterion(y_pred, batch_y)
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print("Epoch: {}, Loss: {}".format(epoch, total_loss / len(train_loader)))
## TODO: Complete the main code
if __name__ == '__main__':
# All of the model parameters and training parameters are sent as arguments
# when this script is executed, during a training job
# Here we set up an argument parser to easily access the parameters
parser = argparse.ArgumentParser()
# SageMaker parameters, like the directories for training data and saving models; set automatically
# Do not need to change
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
# Training Parameters, given
parser.add_argument('--batch-size', type=int, default=10, metavar='N',
help='input batch size for training (default: 10)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
## TODO: Add args for the three model parameters: input_features, hidden_dim, output_dim
# Model Parameters
parser.add_argument('--input_features', type=int, default=2, metavar='IN',
help='number of input features to model (default: 2)')
parser.add_argument('--hidden_dim', type=int, default=10, metavar='H',
help='hidden dim of model (default: 10)')
parser.add_argument('--output_dim', type=int, default=1, metavar='OUT',
help='output dim of model (default: 1)')
# args holds all passed-in arguments
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {}.".format(device))
torch.manual_seed(args.seed)
# Load the training data.
train_loader = _get_train_data_loader(args.batch_size, args.data_dir)
## --- Your code here --- ##
## TODO: Build the model by passing in the input params
# To get params from the parser, call args.argument_name, ex. args.epochs or ards.hidden_dim
# Don't forget to move your model .to(device) to move to GPU , if appropriate
model = BinaryClassifier(args.input_features, args.hidden_dim, args.output_dim).to(device)
## TODO: Define an optimizer and loss function for training
optimizer = optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.BCELoss()
# Trains the model (given line of code, which calls the above training function)
train(model, train_loader, args.epochs, criterion, optimizer, device)
## TODO: complete in the model_info by adding three argument names, the first is given
# Keep the keys of this dictionary as they are
model_info_path = os.path.join(args.model_dir, 'model_info.pth')
with open(model_info_path, 'wb') as f:
model_info = {
'input_features': args.input_features,
'hidden_dim': args.hidden_dim,
'output_dim': args.output_dim,
}
torch.save(model_info, f)
## --- End of your code --- ##
# Save the model parameters
model_path = os.path.join(args.model_dir, 'model.pth')
with open(model_path, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
| 38.166667
| 110
| 0.657732
|
import argparse
import json
import os
import pandas as pd
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
from model import BinaryClassifier
def model_fn(model_dir):
print("Loading model.")
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BinaryClassifier(model_info['input_features'], model_info['hidden_dim'], model_info['output_dim'])
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
model.to(device).eval()
print("Done loading model.")
return model
def _get_train_data_loader(batch_size, training_dir):
print("Get train data loader.")
train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)
train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
train_x = torch.from_numpy(train_data.drop([0], axis=1).values).float()
train_ds = torch.utils.data.TensorDataset(train_x, train_y)
return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
def train(model, train_loader, epochs, criterion, optimizer, device):
for epoch in range(1, epochs + 1):
model.train()
total_loss = 0
for batch in train_loader:
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
optimizer.zero_grad()
y_pred = model(batch_x)
loss = criterion(y_pred, batch_y)
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print("Epoch: {}, Loss: {}".format(epoch, total_loss / len(train_loader)))
parser = argparse.ArgumentParser()
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--batch-size', type=int, default=10, metavar='N',
help='input batch size for training (default: 10)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
help='number of input features to model (default: 2)')
parser.add_argument('--hidden_dim', type=int, default=10, metavar='H',
help='hidden dim of model (default: 10)')
parser.add_argument('--output_dim', type=int, default=1, metavar='OUT',
help='output dim of model (default: 1)')
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {}.".format(device))
torch.manual_seed(args.seed)
train_loader = _get_train_data_loader(args.batch_size, args.data_dir)
m, args.output_dim).to(device)
## TODO: Define an optimizer and loss function for training
optimizer = optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.BCELoss()
# Trains the model (given line of code, which calls the above training function)
train(model, train_loader, args.epochs, criterion, optimizer, device)
## TODO: complete in the model_info by adding three argument names, the first is given
# Keep the keys of this dictionary as they are
model_info_path = os.path.join(args.model_dir, 'model_info.pth')
with open(model_info_path, 'wb') as f:
model_info = {
'input_features': args.input_features,
'hidden_dim': args.hidden_dim,
'output_dim': args.output_dim,
}
torch.save(model_info, f)
## --- End of your code --- ##
# Save the model parameters
model_path = os.path.join(args.model_dir, 'model.pth')
with open(model_path, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
| true
| true
|
790797cc5d1cdf58ae01700d892b4288c141b86b
| 6,728
|
py
|
Python
|
userbot/modules/profile.py
|
BintangAlGhifari/WeebProject
|
52e269a50852c26e42159817661cb9573c2f126d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-03-31T18:38:45.000Z
|
2021-03-31T18:38:45.000Z
|
userbot/modules/profile.py
|
BintangAlGhifari/WeebProject
|
52e269a50852c26e42159817661cb9573c2f126d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/profile.py
|
BintangAlGhifari/WeebProject
|
52e269a50852c26e42159817661cb9573c2f126d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-02-20T15:12:24.000Z
|
2022-02-20T15:12:24.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for changing your Telegram profile details. """
import os
from telethon.errors import ImageProcessFailedError, PhotoCropSizeSmallError
from telethon.errors.rpcerrorlist import PhotoExtInvalidError, UsernameOccupiedError
from telethon.tl.functions.account import UpdateProfileRequest, UpdateUsernameRequest
from telethon.tl.functions.channels import GetAdminedPublicChannelsRequest
from telethon.tl.functions.photos import (
DeletePhotosRequest,
GetUserPhotosRequest,
UploadProfilePhotoRequest,
)
from telethon.tl.types import Channel, Chat, InputPhoto, MessageMediaPhoto, User
from userbot import CMD_HELP, bot
from userbot.events import register
# ====================== CONSTANT ===============================
INVALID_MEDIA = "```The extension of the media entity is invalid.```"
PP_CHANGED = "```Profile picture changed successfully.```"
PP_TOO_SMOL = "```This image is too small, use a bigger image.```"
PP_ERROR = "```Failure occured while processing image.```"
BIO_SUCCESS = "```Successfully edited Bio.```"
NAME_OK = "```Your name was succesfully changed.```"
USERNAME_SUCCESS = "```Your username was succesfully changed.```"
USERNAME_TAKEN = "```This username is already taken.```"
# ===============================================================
@register(outgoing=True, pattern=r"^\.reserved$")
async def mine(event):
""" For .reserved command, get a list of your reserved usernames. """
result = await bot(GetAdminedPublicChannelsRequest())
output_str = ""
for channel_obj in result.chats:
output_str += f"{channel_obj.title}\n@{channel_obj.username}\n\n"
await event.edit(output_str)
@register(outgoing=True, pattern=r"^\.name")
async def update_name(name):
""" For .name command, change your name in Telegram. """
newname = name.text[6:]
if " " not in newname:
firstname = newname
lastname = ""
else:
namesplit = newname.split(" ", 1)
firstname = namesplit[0]
lastname = namesplit[1]
await name.client(UpdateProfileRequest(first_name=firstname, last_name=lastname))
await name.edit(NAME_OK)
@register(outgoing=True, pattern=r"^\.setpfp$")
async def set_profilepic(propic):
""" For .profilepic command, change your profile picture in Telegram. """
replymsg = await propic.get_reply_message()
photo = None
if replymsg.media:
if isinstance(replymsg.media, MessageMediaPhoto):
photo = await propic.client.download_media(message=replymsg.photo)
elif "image" in replymsg.media.document.mime_type.split("/"):
photo = await propic.client.download_file(replymsg.media.document)
else:
await propic.edit(INVALID_MEDIA)
if photo:
try:
await propic.client(
UploadProfilePhotoRequest(await propic.client.upload_file(photo))
)
os.remove(photo)
await propic.edit(PP_CHANGED)
except PhotoCropSizeSmallError:
await propic.edit(PP_TOO_SMOL)
except ImageProcessFailedError:
await propic.edit(PP_ERROR)
except PhotoExtInvalidError:
await propic.edit(INVALID_MEDIA)
@register(outgoing=True, pattern=r"^\.setbio (.*)")
async def set_biograph(setbio):
""" For .setbio command, set a new bio for your profile in Telegram. """
newbio = setbio.pattern_match.group(1)
await setbio.client(UpdateProfileRequest(about=newbio))
await setbio.edit(BIO_SUCCESS)
@register(outgoing=True, pattern=r"^\.username (.*)")
async def update_username(username):
""" For .username command, set a new username in Telegram. """
newusername = username.pattern_match.group(1)
try:
await username.client(UpdateUsernameRequest(newusername))
await username.edit(USERNAME_SUCCESS)
except UsernameOccupiedError:
await username.edit(USERNAME_TAKEN)
@register(outgoing=True, pattern=r"^\.count$")
async def count(event):
""" For .count command, get profile stats. """
u = 0
g = 0
c = 0
bc = 0
b = 0
result = ""
await event.edit("`Processing..`")
dialogs = await bot.get_dialogs(limit=None, ignore_migrated=True)
for d in dialogs:
currrent_entity = d.entity
if isinstance(currrent_entity, User):
if currrent_entity.bot:
b += 1
else:
u += 1
elif isinstance(currrent_entity, Chat):
g += 1
elif isinstance(currrent_entity, Channel):
if currrent_entity.broadcast:
bc += 1
else:
c += 1
else:
print(d)
result += f"`Users:`\t**{u}**\n"
result += f"`Groups:`\t**{g}**\n"
result += f"`Super Groups:`\t**{c}**\n"
result += f"`Channels:`\t**{bc}**\n"
result += f"`Bots:`\t**{b}**"
await event.edit(result)
@register(outgoing=True, pattern=r"^\.delpfp")
async def remove_profilepic(delpfp):
""" For .delpfp command, delete your current profile picture in Telegram. """
group = delpfp.text[8:]
if group == "all":
lim = 0
elif group.isdigit():
lim = int(group)
else:
lim = 1
pfplist = await delpfp.client(
GetUserPhotosRequest(user_id=delpfp.sender_id, offset=0, max_id=0, limit=lim)
)
input_photos = []
for sep in pfplist.photos:
input_photos.append(
InputPhoto(
id=sep.id,
access_hash=sep.access_hash,
file_reference=sep.file_reference,
)
)
await delpfp.client(DeletePhotosRequest(id=input_photos))
await delpfp.edit(f"`Successfully deleted {len(input_photos)} profile picture(s).`")
CMD_HELP.update(
{
"profile": ">`.username <new_username>`"
"\nUsage: Changes your Telegram username."
"\n\n>`.name <firstname>` or >`.name <firstname> <lastname>`"
"\nUsage: Changes your Telegram name.(First and last name will get split by the first space)"
"\n\n>`.setpfp`"
"\nUsage: Reply with .setpfp to an image to change your Telegram profie picture."
"\n\n>`.setbio <new_bio>`"
"\nUsage: Changes your Telegram bio."
"\n\n>`.delpfp` or >`.delpfp <number>/<all>`"
"\nUsage: Deletes your Telegram profile picture(s)."
"\n\n>`.reserved`"
"\nUsage: Shows usernames reserved by you."
"\n\n>`.count`"
"\nUsage: Counts your groups, chats, bots etc..."
}
)
| 34.860104
| 101
| 0.636147
|
import os
from telethon.errors import ImageProcessFailedError, PhotoCropSizeSmallError
from telethon.errors.rpcerrorlist import PhotoExtInvalidError, UsernameOccupiedError
from telethon.tl.functions.account import UpdateProfileRequest, UpdateUsernameRequest
from telethon.tl.functions.channels import GetAdminedPublicChannelsRequest
from telethon.tl.functions.photos import (
DeletePhotosRequest,
GetUserPhotosRequest,
UploadProfilePhotoRequest,
)
from telethon.tl.types import Channel, Chat, InputPhoto, MessageMediaPhoto, User
from userbot import CMD_HELP, bot
from userbot.events import register
INVALID_MEDIA = "```The extension of the media entity is invalid.```"
PP_CHANGED = "```Profile picture changed successfully.```"
PP_TOO_SMOL = "```This image is too small, use a bigger image.```"
PP_ERROR = "```Failure occured while processing image.```"
BIO_SUCCESS = "```Successfully edited Bio.```"
NAME_OK = "```Your name was succesfully changed.```"
USERNAME_SUCCESS = "```Your username was succesfully changed.```"
USERNAME_TAKEN = "```This username is already taken.```"
@register(outgoing=True, pattern=r"^\.reserved$")
async def mine(event):
result = await bot(GetAdminedPublicChannelsRequest())
output_str = ""
for channel_obj in result.chats:
output_str += f"{channel_obj.title}\n@{channel_obj.username}\n\n"
await event.edit(output_str)
@register(outgoing=True, pattern=r"^\.name")
async def update_name(name):
newname = name.text[6:]
if " " not in newname:
firstname = newname
lastname = ""
else:
namesplit = newname.split(" ", 1)
firstname = namesplit[0]
lastname = namesplit[1]
await name.client(UpdateProfileRequest(first_name=firstname, last_name=lastname))
await name.edit(NAME_OK)
@register(outgoing=True, pattern=r"^\.setpfp$")
async def set_profilepic(propic):
replymsg = await propic.get_reply_message()
photo = None
if replymsg.media:
if isinstance(replymsg.media, MessageMediaPhoto):
photo = await propic.client.download_media(message=replymsg.photo)
elif "image" in replymsg.media.document.mime_type.split("/"):
photo = await propic.client.download_file(replymsg.media.document)
else:
await propic.edit(INVALID_MEDIA)
if photo:
try:
await propic.client(
UploadProfilePhotoRequest(await propic.client.upload_file(photo))
)
os.remove(photo)
await propic.edit(PP_CHANGED)
except PhotoCropSizeSmallError:
await propic.edit(PP_TOO_SMOL)
except ImageProcessFailedError:
await propic.edit(PP_ERROR)
except PhotoExtInvalidError:
await propic.edit(INVALID_MEDIA)
@register(outgoing=True, pattern=r"^\.setbio (.*)")
async def set_biograph(setbio):
newbio = setbio.pattern_match.group(1)
await setbio.client(UpdateProfileRequest(about=newbio))
await setbio.edit(BIO_SUCCESS)
@register(outgoing=True, pattern=r"^\.username (.*)")
async def update_username(username):
newusername = username.pattern_match.group(1)
try:
await username.client(UpdateUsernameRequest(newusername))
await username.edit(USERNAME_SUCCESS)
except UsernameOccupiedError:
await username.edit(USERNAME_TAKEN)
@register(outgoing=True, pattern=r"^\.count$")
async def count(event):
u = 0
g = 0
c = 0
bc = 0
b = 0
result = ""
await event.edit("`Processing..`")
dialogs = await bot.get_dialogs(limit=None, ignore_migrated=True)
for d in dialogs:
currrent_entity = d.entity
if isinstance(currrent_entity, User):
if currrent_entity.bot:
b += 1
else:
u += 1
elif isinstance(currrent_entity, Chat):
g += 1
elif isinstance(currrent_entity, Channel):
if currrent_entity.broadcast:
bc += 1
else:
c += 1
else:
print(d)
result += f"`Users:`\t**{u}**\n"
result += f"`Groups:`\t**{g}**\n"
result += f"`Super Groups:`\t**{c}**\n"
result += f"`Channels:`\t**{bc}**\n"
result += f"`Bots:`\t**{b}**"
await event.edit(result)
@register(outgoing=True, pattern=r"^\.delpfp")
async def remove_profilepic(delpfp):
group = delpfp.text[8:]
if group == "all":
lim = 0
elif group.isdigit():
lim = int(group)
else:
lim = 1
pfplist = await delpfp.client(
GetUserPhotosRequest(user_id=delpfp.sender_id, offset=0, max_id=0, limit=lim)
)
input_photos = []
for sep in pfplist.photos:
input_photos.append(
InputPhoto(
id=sep.id,
access_hash=sep.access_hash,
file_reference=sep.file_reference,
)
)
await delpfp.client(DeletePhotosRequest(id=input_photos))
await delpfp.edit(f"`Successfully deleted {len(input_photos)} profile picture(s).`")
CMD_HELP.update(
{
"profile": ">`.username <new_username>`"
"\nUsage: Changes your Telegram username."
"\n\n>`.name <firstname>` or >`.name <firstname> <lastname>`"
"\nUsage: Changes your Telegram name.(First and last name will get split by the first space)"
"\n\n>`.setpfp`"
"\nUsage: Reply with .setpfp to an image to change your Telegram profie picture."
"\n\n>`.setbio <new_bio>`"
"\nUsage: Changes your Telegram bio."
"\n\n>`.delpfp` or >`.delpfp <number>/<all>`"
"\nUsage: Deletes your Telegram profile picture(s)."
"\n\n>`.reserved`"
"\nUsage: Shows usernames reserved by you."
"\n\n>`.count`"
"\nUsage: Counts your groups, chats, bots etc..."
}
)
| true
| true
|
790798a8add54f92b26413127250214a1881274b
| 697
|
py
|
Python
|
DDPG/test_ddpg_puckWorld.py
|
WoShiDongZhiWu/Reinforcement-learning-Algorithm
|
59fdf29e7feb73048b9ddf3b4755b55f0459efcb
|
[
"Apache-2.0"
] | 1
|
2019-12-23T02:59:13.000Z
|
2019-12-23T02:59:13.000Z
|
DDPG/test_ddpg_puckWorld.py
|
WoShiDongZhiWu/reinforcement-learning-algorithm
|
59fdf29e7feb73048b9ddf3b4755b55f0459efcb
|
[
"Apache-2.0"
] | null | null | null |
DDPG/test_ddpg_puckWorld.py
|
WoShiDongZhiWu/reinforcement-learning-algorithm
|
59fdf29e7feb73048b9ddf3b4755b55f0459efcb
|
[
"Apache-2.0"
] | null | null | null |
'''
####################################################################
# author wudong
# date 20190816
# 在连续的puckworld空间中测试DDPG
# 状态空间和行为空间连续
# 状态空间:x,y
# 行为空间:水平和竖直方向上的力的大小[-1,1]
# ps 不知道是计算机的原因还是算法的原因,训练不动
######################################################################
'''
import gym
from puckworld_continuous import PuckWorldEnv
from ddpg_agent import DDPGAgent
from utils import learning_curve
import numpy as np
# 建立env和DDPG agent
env = PuckWorldEnv()
agent = DDPGAgent(env)
# 训练并保存模型
data = agent.learning(max_episode_num=200,display=True,explore=True)
# # 加载训练好的模型,观察angent的表现
# agent.load_models(300)
# data = agent.learning(max_episode_num=100,display=True,explore = False)
| 24.892857
| 73
| 0.625538
|
import gym
from puckworld_continuous import PuckWorldEnv
from ddpg_agent import DDPGAgent
from utils import learning_curve
import numpy as np
env = PuckWorldEnv()
agent = DDPGAgent(env)
data = agent.learning(max_episode_num=200,display=True,explore=True)
| true
| true
|
790798b566f115e99c28b9b7abde16a2d2fc73e5
| 1,325
|
py
|
Python
|
quarantineworkout/workout/schema.py
|
adeoke/django-quarantine-workout-graphql
|
7d53bb17f8ee9e5276b496d00ff92c4b458af31f
|
[
"MIT"
] | 1
|
2020-06-01T11:41:52.000Z
|
2020-06-01T11:41:52.000Z
|
quarantineworkout/workout/schema.py
|
adeoke/django-quarantine-workout-graphql
|
7d53bb17f8ee9e5276b496d00ff92c4b458af31f
|
[
"MIT"
] | 5
|
2020-06-06T15:14:21.000Z
|
2021-06-10T19:25:55.000Z
|
quarantineworkout/workout/schema.py
|
adeoke/django-quarantine-workout-graphql
|
7d53bb17f8ee9e5276b496d00ff92c4b458af31f
|
[
"MIT"
] | 1
|
2022-01-19T22:17:44.000Z
|
2022-01-19T22:17:44.000Z
|
"""Workout schema module"""
import graphene
from exercises.schema import ExerciseType
from exercises.models import Exercise
class Query(graphene.ObjectType):
"""Workout query class"""
workout = graphene.List(ExerciseType,
body_part=graphene.String(),
exercise_name=graphene.String(),
equipment=graphene.String(),
level=graphene.String())
def resolve_workout(self, info, **kwargs):
"""query resolver for workout property"""
all_exercises = Exercise.objects.all()
if kwargs.get('body_part'):
all_exercises = all_exercises.select_related('body_part').filter(
body_part__name=kwargs.get('body_part').lower())
if kwargs.get('level'):
all_exercises = all_exercises.select_related('level').filter(
level__difficulty=kwargs.get('level').lower())
if kwargs.get('exercise_name'):
all_exercises = all_exercises.filter(
name__icontains=kwargs.get('exercise_name').lower())
if kwargs.get('equipment'):
all_exercises = all_exercises.select_related('equipment').filter(
equipment__name=kwargs.get('equipment').lower())
return all_exercises
| 36.805556
| 77
| 0.612075
|
import graphene
from exercises.schema import ExerciseType
from exercises.models import Exercise
class Query(graphene.ObjectType):
workout = graphene.List(ExerciseType,
body_part=graphene.String(),
exercise_name=graphene.String(),
equipment=graphene.String(),
level=graphene.String())
def resolve_workout(self, info, **kwargs):
all_exercises = Exercise.objects.all()
if kwargs.get('body_part'):
all_exercises = all_exercises.select_related('body_part').filter(
body_part__name=kwargs.get('body_part').lower())
if kwargs.get('level'):
all_exercises = all_exercises.select_related('level').filter(
level__difficulty=kwargs.get('level').lower())
if kwargs.get('exercise_name'):
all_exercises = all_exercises.filter(
name__icontains=kwargs.get('exercise_name').lower())
if kwargs.get('equipment'):
all_exercises = all_exercises.select_related('equipment').filter(
equipment__name=kwargs.get('equipment').lower())
return all_exercises
| true
| true
|
790798beeadcf685cc4291097796e2d302a38fec
| 239
|
py
|
Python
|
revise/libs/python/pyste/src/Pyste/__init__.py
|
DD-L/deel.boost.python
|
e32cd62022bbf7c5822d150150330d988e041f02
|
[
"MIT"
] | 198
|
2015-01-13T05:47:18.000Z
|
2022-03-09T04:46:46.000Z
|
thirdparty/boost-python/libs/python/pyste/src/Pyste/__init__.py
|
alexa-infra/negine
|
d9060a7c83a41c95c361c470b56c2ddab3ba04de
|
[
"MIT"
] | 9
|
2015-01-28T16:33:19.000Z
|
2020-04-12T23:03:28.000Z
|
thirdparty/boost-python/libs/python/pyste/src/Pyste/__init__.py
|
alexa-infra/negine
|
d9060a7c83a41c95c361c470b56c2ddab3ba04de
|
[
"MIT"
] | 139
|
2015-01-15T20:09:31.000Z
|
2022-01-31T15:21:16.000Z
|
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
| 34.142857
| 70
| 0.74477
| true
| true
|
|
790799489399fbdd8e34504a305e5faaddb7732c
| 339
|
py
|
Python
|
illumio/vulnerabilities/vulnerability.py
|
dsommerville-illumio/illumio-py
|
30e9ee4237b142a62579839ed8a21f2eb35c8b09
|
[
"Apache-2.0"
] | 1
|
2022-01-18T04:55:16.000Z
|
2022-01-18T04:55:16.000Z
|
illumio/vulnerabilities/vulnerability.py
|
dsommerville-illumio/illumio-py
|
30e9ee4237b142a62579839ed8a21f2eb35c8b09
|
[
"Apache-2.0"
] | null | null | null |
illumio/vulnerabilities/vulnerability.py
|
dsommerville-illumio/illumio-py
|
30e9ee4237b142a62579839ed8a21f2eb35c8b09
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This module is a stub for classes related to vulnerability exposure scores.
Copyright:
(c) 2022 Illumio
License:
Apache2, see LICENSE for more details.
"""
from dataclasses import dataclass
from illumio.util import MutableObject
@dataclass
class Vulnerability(MutableObject):
score: int = None
| 17.842105
| 78
| 0.731563
|
from dataclasses import dataclass
from illumio.util import MutableObject
@dataclass
class Vulnerability(MutableObject):
score: int = None
| true
| true
|
79079993122d1758e73e04158a76617e6210ab69
| 5,798
|
py
|
Python
|
diffxpy/unit_test/test_pairwise.py
|
grst/diffxpy
|
8b9ad605cb11d05b58b3e3f4b2c8255c6e98b80c
|
[
"BSD-3-Clause"
] | null | null | null |
diffxpy/unit_test/test_pairwise.py
|
grst/diffxpy
|
8b9ad605cb11d05b58b3e3f4b2c8255c6e98b80c
|
[
"BSD-3-Clause"
] | null | null | null |
diffxpy/unit_test/test_pairwise.py
|
grst/diffxpy
|
8b9ad605cb11d05b58b3e3f4b2c8255c6e98b80c
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
import diffxpy.api as de
class _TestPairwiseNull:
noise_model: str
def _prepate_data(
self,
n_cells: int,
n_genes: int,
n_groups: int
):
if self.noise_model == "nb":
from batchglm.api.models.glm_nb import Simulator
rand_fn_loc = lambda shape: np.random.uniform(0.1, 1, shape)
rand_fn_scale = lambda shape: np.random.uniform(0.5, 1, shape)
elif self.noise_model == "norm" or self.noise_model is None:
from batchglm.api.models.glm_norm import Simulator
rand_fn_loc = lambda shape: np.random.uniform(500, 1000, shape)
rand_fn_scale = lambda shape: np.random.uniform(1, 2, shape)
else:
raise ValueError("noise model %s not recognized" % self.noise_model)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate_params(
rand_fn_loc=rand_fn_loc,
rand_fn_scale=rand_fn_scale
)
sim.generate_data()
random_sample_description = pd.DataFrame({
"condition": np.random.randint(n_groups, size=sim.nobs)
})
return sim, random_sample_description
def _test_null_distribution_basic(
self,
test: str,
lazy: bool,
quick_scale: bool = False,
n_cells: int = 3000,
n_genes: int = 200,
n_groups: int = 3
):
"""
Test if de.wald() generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
of the two-side Kolmgorov-Smirnov test for equality of the observed
p-value distriubution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
sim, sample_description = self._prepate_data(
n_cells=n_cells,
n_genes=n_genes,
n_groups=n_groups
)
test = de.test.pairwise(
data=sim.input_data,
sample_description=sample_description,
grouping="condition",
test=test,
lazy=lazy,
quick_scale=quick_scale,
noise_model=self.noise_model
)
_ = test.summary()
# Compare p-value distribution under null model against uniform distribution.
if lazy:
pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue
else:
pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
class TestPairwiseNullStandard(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ttest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="t-test", lazy=False)
def test_null_distribution_rank(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="rank", lazy=False)
class TestPairwiseNullNb(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ztest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=True)
def test_null_distribution_ztest_lazy(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=True)
def test_null_distribution_wald(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=True)
def test_null_distribution_lrt(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="lrt", lazy=False, quick_scale=False)
if __name__ == '__main__':
unittest.main()
| 37.649351
| 104
| 0.657641
|
import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
import diffxpy.api as de
class _TestPairwiseNull:
    """Shared harness for pairwise differential-expression null-distribution tests.

    Subclasses set ``noise_model`` ("nb", "norm" or None) and call
    :meth:`_test_null_distribution_basic` with the test variant to exercise.
    """
    # Name of the batchglm noise model to simulate under; set by subclasses.
    noise_model: str
    def _prepate_data(
            self,
            n_cells: int,
            n_genes: int,
            n_groups: int
    ):
        """Simulate an expression matrix and a random group assignment.

        NOTE(review): method name looks like a typo for ``_prepare_data``;
        kept as-is because renaming would change the class interface.

        :param n_cells: number of observations to simulate
        :param n_genes: number of features to simulate
        :param n_groups: number of condition groups assigned uniformly at random
        :return: tuple of (simulator, sample-description DataFrame)
        """
        if self.noise_model == "nb":
            from batchglm.api.models.glm_nb import Simulator
            # Parameter generators for the negative-binomial simulation.
            rand_fn_loc = lambda shape: np.random.uniform(0.1, 1, shape)
            rand_fn_scale = lambda shape: np.random.uniform(0.5, 1, shape)
        elif self.noise_model == "norm" or self.noise_model is None:
            from batchglm.api.models.glm_norm import Simulator
            # Normal model: larger location values, moderate scale.
            rand_fn_loc = lambda shape: np.random.uniform(500, 1000, shape)
            rand_fn_scale = lambda shape: np.random.uniform(1, 2, shape)
        else:
            raise ValueError("noise model %s not recognized" % self.noise_model)
        sim = Simulator(num_observations=n_cells, num_features=n_genes)
        sim.generate_sample_description(num_batches=0, num_conditions=0)
        sim.generate_params(
            rand_fn_loc=rand_fn_loc,
            rand_fn_scale=rand_fn_scale
        )
        sim.generate_data()
        # Random (null) group labels: there is no true differential expression.
        random_sample_description = pd.DataFrame({
            "condition": np.random.randint(n_groups, size=sim.nobs)
        })
        return sim, random_sample_description
    def _test_null_distribution_basic(
            self,
            test: str,
            lazy: bool,
            quick_scale: bool = False,
            n_cells: int = 3000,
            n_genes: int = 200,
            n_groups: int = 3
    ):
        """Check that p-values are uniform under the null hypothesis.

        Runs a pairwise test on data with random group labels, then applies a
        Kolmogorov-Smirnov test of the resulting p-values against U(0, 1).

        :param test: name of the pairwise test ("t-test", "rank", "z-test", ...)
        :param lazy: whether to use the lazy pairwise-test implementation
        :param quick_scale: forwarded to de.test.pairwise
        :return: True on success (the assertion raises otherwise)
        """
        sim, sample_description = self._prepate_data(
            n_cells=n_cells,
            n_genes=n_genes,
            n_groups=n_groups
        )
        test = de.test.pairwise(
            data=sim.input_data,
            sample_description=sample_description,
            grouping="condition",
            test=test,
            lazy=lazy,
            quick_scale=quick_scale,
            noise_model=self.noise_model
        )
        # Summary is computed only to make sure it does not raise.
        _ = test.summary()
        if lazy:
            # Lazy tests expose per-pair p-values through pval_pairs().
            pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue
        else:
            pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue
        logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
        assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
        return True
class TestPairwiseNullStandard(unittest.TestCase, _TestPairwiseNull):
    """Pairwise null-distribution tests that need no noise model (t-test, rank)."""
    def _quiet_and_seed(self):
        # Silence noisy third-party loggers, fix the RNG, and select no noise model.
        for logger_name, level in (("tensorflow", logging.ERROR),
                                   ("batchglm", logging.WARNING),
                                   ("diffxpy", logging.WARNING)):
            logging.getLogger(logger_name).setLevel(level)
        np.random.seed(1)
        self.noise_model = None
    def test_null_distribution_ttest(self):
        self._quiet_and_seed()
        self._test_null_distribution_basic(test="t-test", lazy=False)
    def test_null_distribution_rank(self):
        self._quiet_and_seed()
        self._test_null_distribution_basic(test="rank", lazy=False)
class TestPairwiseNullNb(unittest.TestCase, _TestPairwiseNull):
    """Pairwise null-distribution tests under the negative-binomial noise model."""
    def _quiet_and_seed(self):
        # Silence noisy third-party loggers, fix the RNG, and select the NB model.
        for logger_name, level in (("tensorflow", logging.ERROR),
                                   ("batchglm", logging.WARNING),
                                   ("diffxpy", logging.WARNING)):
            logging.getLogger(logger_name).setLevel(level)
        np.random.seed(1)
        self.noise_model = "nb"
    def test_null_distribution_ztest(self):
        self._quiet_and_seed()
        for quick in (False, True):
            self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=quick)
    def test_null_distribution_ztest_lazy(self):
        self._quiet_and_seed()
        for quick in (False, True):
            self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=quick)
    def test_null_distribution_wald(self):
        self._quiet_and_seed()
        for quick in (False, True):
            self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=quick)
    def test_null_distribution_lrt(self):
        self._quiet_and_seed()
        self._test_null_distribution_basic(test="lrt", lazy=False, quick_scale=False)
if __name__ == '__main__':
unittest.main()
| true
| true
|
79079a504f225a6dad9dbbe420213c27590aff1c
| 5,654
|
py
|
Python
|
src/pm/mpd/test/test1.py
|
raffenet/mpich-CVS
|
2d33e2742e8c00db4f56a373fea051cc6c0ee0d0
|
[
"mpich2"
] | 1
|
2021-11-11T15:42:30.000Z
|
2021-11-11T15:42:30.000Z
|
src/pm/mpd/test/test1.py
|
grondo/mvapich2-cce
|
ec084d8e07db1cf2ac1352ee4c604ae7dbae55cb
|
[
"Intel",
"mpich2",
"Unlicense"
] | null | null | null |
src/pm/mpd/test/test1.py
|
grondo/mvapich2-cce
|
ec084d8e07db1cf2ac1352ee4c604ae7dbae55cb
|
[
"Intel",
"mpich2",
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
#
# (C) 2001 by Argonne National Laboratory.
# See COPYRIGHT in top-level directory.
#
# NOTE(review): this is a Python 2 script (print statements, the removed
# `commands` module); it cannot run under Python 3 without porting.
#
# Note that I repeat code for each test just in case I want to
# run one separately. I can simply copy it out of here and run it.
# A single test can typically be chgd simply by altering its value(s)
# for one or more of:
#     PYEXT, NMPDS, HFILE
import os, sys, commands, time
sys.path += [os.getcwd()]    # do this once
print "mpd tests---------------------------------------------------"
# Hosts used by the multi-node tests below (bp400 .. bp407).
clusterHosts = [ 'bp4%02d' % (i) for i in range(0,8) ]
print "clusterHosts=", clusterHosts
# test: simple with 1 mpd (mpdboot uses mpd's -e and -d options)
print "TEST -e -d"
PYEXT = '.py'    # suffix of the mpd executables under test
NMPDS = 1        # number of mpd daemons to boot
HFILE = 'temph'  # scratch host file consumed by mpdboot
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
# Clean up any daemons left over from a previous run, then boot fresh.
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 2 mpds on same machine (mpdboot uses mpd's -n option)
print "TEST -n"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
# Host file listing the local host once per daemon.
temph = open(HFILE,'w')
for i in range(NMPDS): print >>temph, '%s' % (socket.gethostname())
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 2 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 3 mpds on 3 machines
print "TEST simple hello msg on 3 nodes"
PYEXT = '.py'
NMPDS = 3
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple 2 mpds on local machine (-l, -h, and -p option)
print "TEST -l, -h, and -p"
PYEXT = '.py'
NMPDS = 3
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# First daemon listens on a fixed port; second joins it via -h/-p.
os.system("mpd%s -d -l 12345" % (PYEXT) )
os.system("mpd%s -d -n -h %s -p 12345" % (PYEXT,socket.gethostname()) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 2 mpds on 2 machines (--ncpus option)
print "TEST --ncpus"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
# Host file entries carry a cpu count (host:2) for the --ncpus test.
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, "%s:2" % (host)
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -f %s -n %d --ncpus=2" % (PYEXT,HFILE,NMPDS) )
myHost = socket.gethostname()
# With 2 cpus per host, ranks 0-1 land locally and 2-3 on the first cluster host.
expout = '0: %s\n1: %s\n2: %s\n3: %s\n' % (myHost,myHost,clusterHosts[0],clusterHosts[0])
mpdtest.run(cmd="mpiexec%s -l -n 4 /bin/hostname" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 2 mpds on 2 machines (--ifhn option)
# this is not a great test, but shows working with real ifhn, then failure with 127.0.0.1
print "TEST minimal use of --ifhn"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
# Each host file line pins the interface hostname (ifhn) to the host's real IP.
temph = open(HFILE,'w')
for host in clusterHosts:
    hostinfo = socket.gethostbyname_ex(host)
    IP = hostinfo[2][0]
    print >>temph, '%s ifhn=%s' % (host,IP)
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
hostinfo = socket.gethostbyname_ex(socket.gethostname())
IP = hostinfo[2][0]
os.system("mpdboot%s -f %s -n %d --ifhn=%s" % (PYEXT,HFILE,NMPDS,IP) )
expout = 'hello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 2 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
## redo the above test with a local ifhn that should cause failure
lines = commands.getoutput("mpdboot%s -f %s -n %d --ifhn=127.0.0.1" % (PYEXT,HFILE,NMPDS) )
if len(lines) > 0:
    # Any output other than a ping failure means the loopback ifhn unexpectedly worked.
    if lines.find('failed to ping') < 0:
        print "probable error in ifhn test using 127.0.0.1; printing lines of output next:"
        print lines
        sys.exit(-1)
# test:
print "TEST MPD_CON_INET_HOST_PORT"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
# Console connection through an INET socket instead of the Unix socket.
os.environ['MPD_CON_INET_HOST_PORT'] = 'localhost:4444'
os.system("mpd.py &")
time.sleep(1)    ## time to get going
expout = ['0: hello']
rv = mpdtest.run(cmd="mpiexec%s -l -n 1 echo hello" % (PYEXT), expOut=expout,grepOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
| 35.559748
| 91
| 0.666785
|
import os, sys, commands, time
sys.path += [os.getcwd()]
print "mpd tests---------------------------------------------------"
clusterHosts = [ 'bp4%02d' % (i) for i in range(0,8) ]
print "clusterHosts=", clusterHosts
print "TEST -e -d"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
# test: simple with 2 mpds on same machine (mpdboot uses mpd's -n option)
print "TEST -n"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for i in range(NMPDS): print >>temph, '%s' % (socket.gethostname())
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 2 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
print "TEST simple hello msg on 3 nodes"
PYEXT = '.py'
NMPDS = 3
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -1 -f %s -n %d" % (PYEXT,HFILE,NMPDS) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
print "TEST -l, -h, and -p"
PYEXT = '.py'
NMPDS = 3
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpd%s -d -l 12345" % (PYEXT) )
os.system("mpd%s -d -n -h %s -p 12345" % (PYEXT,socket.gethostname()) )
expout = 'hello\nhello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 3 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
print "TEST --ncpus"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, "%s:2" % (host)
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
os.system("mpdboot%s -f %s -n %d --ncpus=2" % (PYEXT,HFILE,NMPDS) )
myHost = socket.gethostname()
expout = '0: %s\n1: %s\n2: %s\n3: %s\n' % (myHost,myHost,clusterHosts[0],clusterHosts[0])
mpdtest.run(cmd="mpiexec%s -l -n 4 /bin/hostname" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
print "TEST minimal use of --ifhn"
PYEXT = '.py'
NMPDS = 2
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
temph = open(HFILE,'w')
for host in clusterHosts:
hostinfo = socket.gethostbyname_ex(host)
IP = hostinfo[2][0]
print >>temph, '%s ifhn=%s' % (host,IP)
temph.close()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
hostinfo = socket.gethostbyname_ex(socket.gethostname())
IP = hostinfo[2][0]
os.system("mpdboot%s -f %s -n %d --ifhn=%s" % (PYEXT,HFILE,NMPDS,IP) )
expout = 'hello\nhello\n'
mpdtest.run(cmd="mpiexec%s -n 2 /bin/echo hello" % (PYEXT), chkOut=1, expOut=expout )
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
.1" % (PYEXT,HFILE,NMPDS) )
if len(lines) > 0:
if lines.find('failed to ping') < 0:
print "probable error in ifhn test using 127.0.0.1; printing lines of output next:"
print lines
sys.exit(-1)
print "TEST MPD_CON_INET_HOST_PORT"
PYEXT = '.py'
NMPDS = 1
HFILE = 'temph'
import os,socket
from mpdlib import MPDTest
mpdtest = MPDTest()
os.environ['MPD_CON_EXT'] = 'testing'
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
temph = open(HFILE,'w')
for host in clusterHosts: print >>temph, host
temph.close()
os.environ['MPD_CON_INET_HOST_PORT'] = 'localhost:4444'
os.system("mpd.py &")
time.sleep(1) rv = mpdtest.run(cmd="mpiexec%s -l -n 1 echo hello" % (PYEXT), expOut=expout,grepOut=1)
os.system("mpdallexit%s 1> /dev/null 2> /dev/null" % (PYEXT) )
| false
| true
|
79079a52fc6caccb1e1a414f7a5e105c8b09afe0
| 5,248
|
py
|
Python
|
planner/regressor/models.py
|
aljubrmj/CS342-Final-Project
|
841bab59ca1311faa550c5fce9327a1e65ff5501
|
[
"MIT"
] | null | null | null |
planner/regressor/models.py
|
aljubrmj/CS342-Final-Project
|
841bab59ca1311faa550c5fce9327a1e65ff5501
|
[
"MIT"
] | null | null | null |
planner/regressor/models.py
|
aljubrmj/CS342-Final-Project
|
841bab59ca1311faa550c5fce9327a1e65ff5501
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
def spatial_argmax(logit):
    """Compute the soft-argmax of a batch of 2D score maps.

    :param logit: tensor of shape (B, H, W) holding unnormalized scores
    :return: tensor of shape (B, 2) with the probability-weighted (x, y)
             position of the maximum, each coordinate in [-1, 1]
    """
    batch, height, width = logit.size(0), logit.size(1), logit.size(2)
    # Normalize the scores into a 2D probability map per batch element.
    prob = F.softmax(logit.view(batch, -1), dim=-1).view_as(logit)
    # Coordinate grids spanning [-1, 1] along each image axis.
    xs = torch.linspace(-1, 1, width).to(logit.device)
    ys = torch.linspace(-1, 1, height).to(logit.device)
    # Marginalize over rows/columns, then take the expected coordinate.
    x_coord = (prob.sum(1) * xs[None]).sum(1)
    y_coord = (prob.sum(2) * ys[None]).sum(1)
    return torch.stack((x_coord, y_coord), 1)
class CNNClassifier(torch.nn.Module):
    """Residual CNN mapping an RGB image to ``n_output_channels`` logits."""
    class Block(torch.nn.Module):
        """Three conv+BN stages with a strided 1x1 skip connection."""
        def __init__(self, n_input, n_output, kernel_size=3, stride=2):
            super().__init__()
            pad = kernel_size // 2
            self.c1 = torch.nn.Conv2d(n_input, n_output, kernel_size=kernel_size, padding=pad,
                                      stride=stride, bias=False)
            self.c2 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=pad, bias=False)
            self.c3 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=pad, bias=False)
            self.b1 = torch.nn.BatchNorm2d(n_output)
            self.b2 = torch.nn.BatchNorm2d(n_output)
            self.b3 = torch.nn.BatchNorm2d(n_output)
            # Project the input to the output channel count / stride for the residual.
            self.skip = torch.nn.Conv2d(n_input, n_output, kernel_size=1, stride=stride)
        def forward(self, x):
            h = F.relu(self.b1(self.c1(x)))
            h = F.relu(self.b2(self.c2(h)))
            h = self.b3(self.c3(h))
            return F.relu(h + self.skip(x))
    def __init__(self, layers=[16, 32, 32, 32], n_output_channels=2, kernel_size=3):
        super().__init__()
        blocks = []
        in_channels = 3
        for out_channels in layers:
            blocks.append(self.Block(in_channels, out_channels, kernel_size, 2))
            in_channels = out_channels
        self.network = torch.nn.Sequential(*blocks)
        self.classifier = torch.nn.Linear(in_channels, n_output_channels)
    def forward(self, x):
        """Classify a batch of images (B, 3, H, W) into (B, n_output_channels) logits."""
        features = self.network(x)
        # Global average pool over the spatial dimensions, then classify.
        return self.classifier(features.mean(dim=[2, 3]))
class Planner_reg(torch.nn.Module):
    """Predict an aim point in normalized image coordinates.

    A stack of BatchNorm -> strided 5x5 conv -> ReLU stages followed by a 1x1
    conv produces a single-channel heatmap; its soft-argmax is the aim point.
    """
    def __init__(self, channels=[16, 32, 32, 32]):
        super().__init__()
        stages = []
        in_channels = 3
        for out_channels in channels:
            stages.append(torch.nn.BatchNorm2d(in_channels))
            stages.append(torch.nn.Conv2d(in_channels, out_channels, 5, 2, 2))
            stages.append(torch.nn.ReLU(True))
            in_channels = out_channels
        # Final 1x1 conv collapses the features into a one-channel heatmap.
        stages.append(torch.nn.Conv2d(in_channels, 1, 1))
        self._conv = torch.nn.Sequential(*stages)
    def forward(self, img):
        """Map images (B, 3, 96, 128) to aim points (B, 2), coordinates in [-1, 1]."""
        heatmap = self._conv(img)
        return spatial_argmax(heatmap[:, 0])
class FCN(torch.nn.Module):
    """U-Net-style fully convolutional network with optional skip connections.

    Downsamples with CNNClassifier.Block stages, upsamples with transposed
    convolutions, and produces a per-pixel map of ``n_output_channels`` scores.
    """
    class UpBlock(torch.nn.Module):
        """Single ConvTranspose2d + ReLU upsampling stage."""
        def __init__(self, n_input, n_output, kernel_size=3, stride=2):
            super().__init__()
            # output_padding=1 makes the transposed conv roughly double H and W.
            self.c1 = torch.nn.ConvTranspose2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,
                                             stride=stride, output_padding=1)
        def forward(self, x):
            return F.relu(self.c1(x))
    def __init__(self, layers=[16, 32, 64, 128], n_output_channels=5, kernel_size=3, use_skip=True):
        super().__init__()
        # Per-channel input normalization constants (RGB mean and std).
        # NOTE(review): presumably dataset statistics of the training images — confirm.
        self.input_mean = torch.Tensor([0.3521554, 0.30068502, 0.28527516])
        self.input_std = torch.Tensor([0.18182722, 0.18656468, 0.15938024])
        c = 3
        self.use_skip = use_skip
        self.n_conv = len(layers)
        # Channel count of each skip tensor: the input to conv stage i.
        skip_layer_size = [3] + layers[:-1]
        for i, l in enumerate(layers):
            self.add_module('conv%d' % i, CNNClassifier.Block(c, l, kernel_size, 2))
            c = l
        # Build the decoder in reverse; with skips, each upconv after the last
        # consumes its predecessor's output concatenated with a skip tensor,
        # so c grows by skip_layer_size[i] after each stage.
        for i, l in list(enumerate(layers))[::-1]:
            self.add_module('upconv%d' % i, self.UpBlock(c, l, kernel_size, 2))
            c = l
            if self.use_skip:
                c += skip_layer_size[i]
        self.classifier = torch.nn.Conv2d(c, n_output_channels, 1)
    def forward(self, x):
        """Map images (B, 3, H, W) to per-pixel scores (B, n_output_channels, H, W)."""
        # Normalize the input with the stored per-channel statistics.
        z = (x - self.input_mean[None, :, None, None].to(x.device)) / self.input_std[None, :, None, None].to(x.device)
        up_activation = []
        for i in range(self.n_conv):
            # Add all the information required for skip connections
            up_activation.append(z)
            z = self._modules['conv%d'%i](z)
        for i in reversed(range(self.n_conv)):
            z = self._modules['upconv%d'%i](z)
            # Fix the padding: crop to the matching encoder activation's size.
            z = z[:, :, :up_activation[i].size(2), :up_activation[i].size(3)]
            # Add the skip connection
            if self.use_skip:
                z = torch.cat([z, up_activation[i]], dim=1)
        return self.classifier(z)
model_factory = {
'cnn': CNNClassifier,
'fcn': FCN,
'planner_reg':Planner_reg
}
def save_model(model):
    """Persist *model*'s weights next to this file as '<registered-name>.th'.

    :param model: an instance of one of the classes in ``model_factory``
    :raises ValueError: if *model* is not an instance of any registered class
    """
    from torch import save
    from os import path
    here = path.dirname(path.abspath(__file__))
    for registered_name, registered_cls in model_factory.items():
        if isinstance(model, registered_cls):
            return save(model.state_dict(), path.join(here, '%s.th' % registered_name))
    raise ValueError("model type '%s' not supported!" % str(type(model)))
def load_model(model):
    """Instantiate the registered model named *model* and load its saved weights.

    :param model: key into ``model_factory`` ('cnn', 'fcn' or 'planner_reg')
    :return: the model instance with weights loaded from '<model>.th'
    """
    from torch import load
    from os import path
    instance = model_factory[model]()
    weights_path = path.join(path.dirname(path.abspath(__file__)), '%s.th' % model)
    instance.load_state_dict(load(weights_path, map_location='cpu'))
    return instance
| 38.874074
| 120
| 0.59013
|
import torch
import torch.nn.functional as F
def spatial_argmax(logit):
weights = F.softmax(logit.view(logit.size(0), -1), dim=-1).view_as(logit)
return torch.stack(((weights.sum(1) * torch.linspace(-1, 1, logit.size(2)).to(logit.device)[None]).sum(1),
(weights.sum(2) * torch.linspace(-1, 1, logit.size(1)).to(logit.device)[None]).sum(1)), 1)
class CNNClassifier(torch.nn.Module):
class Block(torch.nn.Module):
def __init__(self, n_input, n_output, kernel_size=3, stride=2):
super().__init__()
self.c1 = torch.nn.Conv2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,
stride=stride, bias=False)
self.c2 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
self.c3 = torch.nn.Conv2d(n_output, n_output, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
self.b1 = torch.nn.BatchNorm2d(n_output)
self.b2 = torch.nn.BatchNorm2d(n_output)
self.b3 = torch.nn.BatchNorm2d(n_output)
self.skip = torch.nn.Conv2d(n_input, n_output, kernel_size=1, stride=stride)
def forward(self, x):
return F.relu(self.b3(self.c3(F.relu(self.b2(self.c2(F.relu(self.b1(self.c1(x)))))))) + self.skip(x))
def __init__(self, layers=[16, 32, 32, 32], n_output_channels=2, kernel_size=3):
super().__init__()
L = []
c = 3
for l in layers:
L.append(self.Block(c, l, kernel_size, 2))
c = l
self.network = torch.nn.Sequential(*L)
self.classifier = torch.nn.Linear(c, n_output_channels)
def forward(self, x):
z = self.network(x)
return self.classifier(z.mean(dim=[2, 3]))
class Planner_reg(torch.nn.Module):
def __init__(self, channels=[16, 32, 32, 32]):
super().__init__()
conv_block = lambda c, h: [torch.nn.BatchNorm2d(h), torch.nn.Conv2d(h, c, 5, 2, 2), torch.nn.ReLU(True)]
h, _conv = 3, []
for c in channels:
_conv += conv_block(c, h)
h = c
self._conv = torch.nn.Sequential(*_conv, torch.nn.Conv2d(h, 1, 1))
def forward(self, img):
x = self._conv(img)
return spatial_argmax(x[:, 0])
class FCN(torch.nn.Module):
class UpBlock(torch.nn.Module):
def __init__(self, n_input, n_output, kernel_size=3, stride=2):
super().__init__()
self.c1 = torch.nn.ConvTranspose2d(n_input, n_output, kernel_size=kernel_size, padding=kernel_size // 2,
stride=stride, output_padding=1)
def forward(self, x):
return F.relu(self.c1(x))
def __init__(self, layers=[16, 32, 64, 128], n_output_channels=5, kernel_size=3, use_skip=True):
super().__init__()
self.input_mean = torch.Tensor([0.3521554, 0.30068502, 0.28527516])
self.input_std = torch.Tensor([0.18182722, 0.18656468, 0.15938024])
c = 3
self.use_skip = use_skip
self.n_conv = len(layers)
skip_layer_size = [3] + layers[:-1]
for i, l in enumerate(layers):
self.add_module('conv%d' % i, CNNClassifier.Block(c, l, kernel_size, 2))
c = l
for i, l in list(enumerate(layers))[::-1]:
self.add_module('upconv%d' % i, self.UpBlock(c, l, kernel_size, 2))
c = l
if self.use_skip:
c += skip_layer_size[i]
self.classifier = torch.nn.Conv2d(c, n_output_channels, 1)
def forward(self, x):
z = (x - self.input_mean[None, :, None, None].to(x.device)) / self.input_std[None, :, None, None].to(x.device)
up_activation = []
for i in range(self.n_conv):
up_activation.append(z)
z = self._modules['conv%d'%i](z)
for i in reversed(range(self.n_conv)):
z = self._modules['upconv%d'%i](z)
z = z[:, :, :up_activation[i].size(2), :up_activation[i].size(3)]
if self.use_skip:
z = torch.cat([z, up_activation[i]], dim=1)
return self.classifier(z)
model_factory = {
'cnn': CNNClassifier,
'fcn': FCN,
'planner_reg':Planner_reg
}
def save_model(model):
from torch import save
from os import path
for n, m in model_factory.items():
if isinstance(model, m):
return save(model.state_dict(), path.join(path.dirname(path.abspath(__file__)), '%s.th' % n))
raise ValueError("model type '%s' not supported!" % str(type(model)))
def load_model(model):
from torch import load
from os import path
r = model_factory[model]()
r.load_state_dict(load(path.join(path.dirname(path.abspath(__file__)), '%s.th' % model), map_location='cpu'))
return r
| true
| true
|
79079a67b0693a62f82930e2f2ea574ff8a1de19
| 2,769
|
py
|
Python
|
satchmo/apps/satchmo_store/shop/templatetags/satchmo_adminapplist.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | 16
|
2015-03-06T14:42:27.000Z
|
2019-12-23T21:37:01.000Z
|
satchmo/apps/satchmo_store/shop/templatetags/satchmo_adminapplist.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/satchmo_store/shop/templatetags/satchmo_adminapplist.py
|
funwhilelost/satchmo
|
589a5d797533ea15dfde9af7f36e304092d22a94
|
[
"BSD-3-Clause"
] | 8
|
2015-01-28T16:02:37.000Z
|
2022-03-03T21:29:40.000Z
|
from django import template
from django.db import models
register = template.Library()
# Compatibility shim: str.rsplit appeared in Python 2.4, so probe for the
# method and fall back to a pure-Python implementation on older interpreters.
try:
    ''.rsplit
    def rsplit(s, delim, maxsplit):
        # Native implementation available; delegate directly.
        return s.rsplit(delim, maxsplit)
except AttributeError:
    def rsplit(s, delim, maxsplit):
        """
        Return a list of the words of the string s, scanning s
        from the end. To all intents and purposes, the resulting
        list of words is the same as returned by split(), except
        when the optional third argument maxsplit is explicitly
        specified and nonzero. When maxsplit is nonzero, at most
        maxsplit number of splits - the rightmost ones - occur,
        and the remainder of the string is returned as the first
        element of the list (thus, the list will have at most
        maxsplit+1 elements). New in version 2.4.
        >>> rsplit('foo.bar.baz', '.', 0)
        ['foo.bar.baz']
        >>> rsplit('foo.bar.baz', '.', 1)
        ['foo.bar', 'baz']
        >>> rsplit('foo.bar.baz', '.', 2)
        ['foo', 'bar', 'baz']
        >>> rsplit('foo.bar.baz', '.', 99)
        ['foo', 'bar', 'baz']
        """
        assert maxsplit >= 0
        if maxsplit == 0: return [s]
        # the following lines perform the function, but inefficiently.
        # This may be adequate for compatibility purposes
        items = s.split(delim)
        if maxsplit < len(items):
            # Merge the leftmost pieces back so only `maxsplit` splits remain.
            items[:-maxsplit] = [delim.join(items[:-maxsplit])]
        return items
class FilterAdminApplistNode(template.Node):
    """Template node that copies an admin app list, dropping satchmo apps.

    Renders to the empty string; its real effect is storing the filtered
    list into the template context under ``varname``.
    """
    def __init__(self, listname, varname):
        # Context variable holding the incoming admin app list.
        self.listname = listname
        # Context variable to store the filtered list under.
        self.varname = varname
    def render(self, context):
        all_apps = {}
        for app in models.get_apps():
            # NOTE(review): rsplit(..., maxsplit=0) never splits (see the rsplit
            # docstring above), so len(...) > 1 is always false and `name` is
            # always the full module path — the [-2] branch is dead code.
            # Likely a nonzero maxsplit was intended; confirm before changing.
            name = len(rsplit(app.__name__, '.', 0))>1 and rsplit(app.__name__, '.', 0)[-2] or app.__name__
            all_apps[name] = app.__name__
        filtered_app_list = []
        for entry in context[self.listname]:
            app = all_apps.get(entry['name'].lower(),'')
            # Keep only entries whose module path is not part of satchmo itself.
            if not app.startswith('satchmo_'):
                filtered_app_list.append(entry)
        context[self.varname] = filtered_app_list
        return ''
def filter_admin_app_list(parser, token):
    """Filters the list of installed apps returned by
    django.contrib.admin.templatetags.adminapplist,
    excluding apps installed by satchmo.

    Usage: {% filter_admin_app_list <app_list_var> as <target_var> %}
    """
    # Expected token shape: tag-name, source-list, 'as', target-name.
    tokens = token.contents.split()
    if len(tokens) < 4:
        raise template.TemplateSyntaxError, "'%s' tag requires two arguments" % tokens[0]
    if tokens[2] != 'as':
        raise template.TemplateSyntaxError, "Second argument to '%s' tag must be 'as'" % tokens[0]
    return FilterAdminApplistNode(tokens[1], tokens[3])
register.tag('filter_admin_app_list', filter_admin_app_list)
| 37.931507
| 107
| 0.611051
|
from django import template
from django.db import models
register = template.Library()
try:
''.rsplit
def rsplit(s, delim, maxsplit):
return s.rsplit(delim, maxsplit)
except AttributeError:
def rsplit(s, delim, maxsplit):
"""
Return a list of the words of the string s, scanning s
from the end. To all intents and purposes, the resulting
list of words is the same as returned by split(), except
when the optional third argument maxsplit is explicitly
specified and nonzero. When maxsplit is nonzero, at most
maxsplit number of splits - the rightmost ones - occur,
and the remainder of the string is returned as the first
element of the list (thus, the list will have at most
maxsplit+1 elements). New in version 2.4.
>>> rsplit('foo.bar.baz', '.', 0)
['foo.bar.baz']
>>> rsplit('foo.bar.baz', '.', 1)
['foo.bar', 'baz']
>>> rsplit('foo.bar.baz', '.', 2)
['foo', 'bar', 'baz']
>>> rsplit('foo.bar.baz', '.', 99)
['foo', 'bar', 'baz']
"""
assert maxsplit >= 0
if maxsplit == 0: return [s]
items = s.split(delim)
if maxsplit < len(items):
items[:-maxsplit] = [delim.join(items[:-maxsplit])]
return items
class FilterAdminApplistNode(template.Node):
def __init__(self, listname, varname):
self.listname = listname
self.varname = varname
def render(self, context):
all_apps = {}
for app in models.get_apps():
name = len(rsplit(app.__name__, '.', 0))>1 and rsplit(app.__name__, '.', 0)[-2] or app.__name__
all_apps[name] = app.__name__
filtered_app_list = []
for entry in context[self.listname]:
app = all_apps.get(entry['name'].lower(),'')
if not app.startswith('satchmo_'):
filtered_app_list.append(entry)
context[self.varname] = filtered_app_list
return ''
def filter_admin_app_list(parser, token):
"""Filters the list of installed apps returned by
django.contrib.admin.templatetags.adminapplist,
excluding apps installed by satchmo.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError, "'%s' tag requires two arguments" % tokens[0]
if tokens[2] != 'as':
raise template.TemplateSyntaxError, "Second argument to '%s' tag must be 'as'" % tokens[0]
return FilterAdminApplistNode(tokens[1], tokens[3])
register.tag('filter_admin_app_list', filter_admin_app_list)
| false
| true
|
79079ab7abcc6b005780047d1580727377856806
| 25
|
py
|
Python
|
vyxal/__init__.py
|
kokonut27/Vyxal
|
2277d18f69dc5a4c04b2f0bd4d55c90cdf2faa48
|
[
"MIT"
] | null | null | null |
vyxal/__init__.py
|
kokonut27/Vyxal
|
2277d18f69dc5a4c04b2f0bd4d55c90cdf2faa48
|
[
"MIT"
] | null | null | null |
vyxal/__init__.py
|
kokonut27/Vyxal
|
2277d18f69dc5a4c04b2f0bd4d55c90cdf2faa48
|
[
"MIT"
] | null | null | null |
from .__main__ import *
| 12.5
| 24
| 0.72
|
from .__main__ import *
| true
| true
|
79079afb5049c4952a78491f534997124403c2b1
| 999
|
py
|
Python
|
sdk/communication/azure-communication-networktraversal/azure/communication/networktraversal/_generated/models/_communication_network_traversal_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-09T08:59:13.000Z
|
2022-03-09T08:59:13.000Z
|
sdk/communication/azure-communication-networktraversal/azure/communication/networktraversal/_generated/models/_communication_network_traversal_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/communication/azure-communication-networktraversal/azure/communication/networktraversal/_generated/models/_communication_network_traversal_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class RouteType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The routing methodology to where the ICE server will be located from the client. "any" will
    have higher reliability while "nearest" will have lower latency. It is recommended to default
    to use the "any" routing method unless there are specific scenarios which minimizing latency is
    critical.
    """
    # Route to any available ICE server (higher reliability).
    ANY = "any"
    # Route to the nearest ICE server (lower latency).
    NEAREST = "nearest"
| 43.434783
| 99
| 0.648649
|
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class RouteType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ANY = "any"
NEAREST = "nearest"
| true
| true
|
79079c5b218ef998585d306cd73632cfaf662f01
| 5,011
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/databoxedge/manual/custom.py
|
zackliu/azure-cli
|
680f8339ac010a89d4063566fabc5991abc8a4c2
|
[
"MIT"
] | 7
|
2020-04-26T09:54:05.000Z
|
2021-07-22T16:54:41.000Z
|
src/azure-cli/azure/cli/command_modules/databoxedge/manual/custom.py
|
zackliu/azure-cli
|
680f8339ac010a89d4063566fabc5991abc8a4c2
|
[
"MIT"
] | 2
|
2017-02-11T21:16:40.000Z
|
2017-02-11T21:30:54.000Z
|
src/azure-cli/azure/cli/command_modules/databoxedge/manual/custom.py
|
zackliu/azure-cli
|
680f8339ac010a89d4063566fabc5991abc8a4c2
|
[
"MIT"
] | 13
|
2020-06-30T16:23:36.000Z
|
2022-03-29T17:12:05.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
def databoxedge_device_create(client,
device_name,
resource_group_name,
location,
tags=None,
sku=None,
etag=None,
data_box_edge_device_status=None,
description=None,
model_description=None,
friendly_name=None,
no_wait=False):
data_box_edge_device = {}
data_box_edge_device['location'] = location
data_box_edge_device['tags'] = tags
data_box_edge_device['etag'] = etag
data_box_edge_device['data_box_edge_device_status'] = data_box_edge_device_status
data_box_edge_device['description'] = description
data_box_edge_device['model_description'] = model_description
data_box_edge_device['friendly_name'] = friendly_name
if sku:
data_box_edge_device['sku'] = {}
data_box_edge_device['sku']['name'] = sku
return sdk_no_wait(no_wait,
client.create_or_update,
device_name=device_name,
resource_group_name=resource_group_name,
data_box_edge_device=data_box_edge_device)
def databoxedge_device_update(client,
device_name,
resource_group_name,
tags=None):
if tags is None:
return client.get(device_name=device_name,
resource_group_name=resource_group_name)
parameters = {'tags': tags}
return client.update(device_name=device_name,
resource_group_name=resource_group_name,
parameters=parameters)
def databoxedge_bandwidth_schedule_update(instance,
device_name,
name,
resource_group_name,
start=None,
stop=None,
rate_in_mbps=None,
days=None,
no_wait=False):
if start is not None:
instance.start = start
if stop is not None:
instance.stop = stop
if rate_in_mbps is not None:
instance.rate_in_mbps = rate_in_mbps
if days is not None:
instance.days = days
return instance
def databoxedge_order_create(client,
device_name,
resource_group_name,
address_line1,
postal_code,
city,
state,
country,
contact_person,
company_name,
phone,
email_list,
status=None,
comments=None,
address_line2=None,
address_line3=None,
no_wait=False):
order = {}
if status:
order['current_status'] = {}
order['current_status']['status'] = status
order['current_status']['comments'] = comments
order['shipping_address'] = {}
order['shipping_address']['address_line1'] = address_line1
order['shipping_address']['address_line2'] = address_line2
order['shipping_address']['address_line3'] = address_line3
order['shipping_address']['postal_code'] = postal_code
order['shipping_address']['city'] = city
order['shipping_address']['state'] = state
order['shipping_address']['country'] = country
order['contact_information'] = {}
order['contact_information']['contact_person'] = contact_person
order['contact_information']['company_name'] = company_name
order['contact_information']['phone'] = phone
order['contact_information']['email_list'] = email_list
return sdk_no_wait(no_wait,
client.create_or_update,
device_name=device_name,
resource_group_name=resource_group_name,
order=order)
| 42.109244
| 85
| 0.50908
|
from azure.cli.core.util import sdk_no_wait
def databoxedge_device_create(client,
device_name,
resource_group_name,
location,
tags=None,
sku=None,
etag=None,
data_box_edge_device_status=None,
description=None,
model_description=None,
friendly_name=None,
no_wait=False):
data_box_edge_device = {}
data_box_edge_device['location'] = location
data_box_edge_device['tags'] = tags
data_box_edge_device['etag'] = etag
data_box_edge_device['data_box_edge_device_status'] = data_box_edge_device_status
data_box_edge_device['description'] = description
data_box_edge_device['model_description'] = model_description
data_box_edge_device['friendly_name'] = friendly_name
if sku:
data_box_edge_device['sku'] = {}
data_box_edge_device['sku']['name'] = sku
return sdk_no_wait(no_wait,
client.create_or_update,
device_name=device_name,
resource_group_name=resource_group_name,
data_box_edge_device=data_box_edge_device)
def databoxedge_device_update(client,
device_name,
resource_group_name,
tags=None):
if tags is None:
return client.get(device_name=device_name,
resource_group_name=resource_group_name)
parameters = {'tags': tags}
return client.update(device_name=device_name,
resource_group_name=resource_group_name,
parameters=parameters)
def databoxedge_bandwidth_schedule_update(instance,
device_name,
name,
resource_group_name,
start=None,
stop=None,
rate_in_mbps=None,
days=None,
no_wait=False):
if start is not None:
instance.start = start
if stop is not None:
instance.stop = stop
if rate_in_mbps is not None:
instance.rate_in_mbps = rate_in_mbps
if days is not None:
instance.days = days
return instance
def databoxedge_order_create(client,
device_name,
resource_group_name,
address_line1,
postal_code,
city,
state,
country,
contact_person,
company_name,
phone,
email_list,
status=None,
comments=None,
address_line2=None,
address_line3=None,
no_wait=False):
order = {}
if status:
order['current_status'] = {}
order['current_status']['status'] = status
order['current_status']['comments'] = comments
order['shipping_address'] = {}
order['shipping_address']['address_line1'] = address_line1
order['shipping_address']['address_line2'] = address_line2
order['shipping_address']['address_line3'] = address_line3
order['shipping_address']['postal_code'] = postal_code
order['shipping_address']['city'] = city
order['shipping_address']['state'] = state
order['shipping_address']['country'] = country
order['contact_information'] = {}
order['contact_information']['contact_person'] = contact_person
order['contact_information']['company_name'] = company_name
order['contact_information']['phone'] = phone
order['contact_information']['email_list'] = email_list
return sdk_no_wait(no_wait,
client.create_or_update,
device_name=device_name,
resource_group_name=resource_group_name,
order=order)
| true
| true
|
79079db03e54484a4981e88387ee2577eda2bd20
| 1,039
|
py
|
Python
|
tests/cmdexpr/ruler.py
|
RLToolsWorkshop/tunnel-arrow
|
f4e8575ed3a7a796cc6c3178165ebb2dd63f35aa
|
[
"Apache-2.0"
] | null | null | null |
tests/cmdexpr/ruler.py
|
RLToolsWorkshop/tunnel-arrow
|
f4e8575ed3a7a796cc6c3178165ebb2dd63f35aa
|
[
"Apache-2.0"
] | null | null | null |
tests/cmdexpr/ruler.py
|
RLToolsWorkshop/tunnel-arrow
|
f4e8575ed3a7a796cc6c3178165ebb2dd63f35aa
|
[
"Apache-2.0"
] | 2
|
2021-07-10T11:35:45.000Z
|
2021-07-14T21:34:10.000Z
|
from lark import Lark, Transformer, v_args
from lark.visitors import Interpreter, visit_children_decor
p = Lark.open("rules.lark", parser="lalr", rel_to=__file__)
code = """
// Firrst win in my book
b = 4;
a = b*2;
print a+1
x = 7;
p = [1, 2, 3, 4]
print p
"""
tree = p.parse(code)
@v_args(inline=True)
class MyEval(Transformer):
from operator import add, mul, neg, sub
from operator import truediv as div
number = float
def __init__(self, ns):
self.ns = ns
def var(self, name):
return self.ns[name]
# def num_list(self, value):
# print(value)
def eval_expr(tree, ns):
return MyEval(ns).transform(tree)
@v_args(inline=True)
class MyInterp(Interpreter):
def __init__(self):
self.namespace = {}
def assign(self, var, expr):
self.namespace[var] = eval_expr(expr, self.namespace)
def print_statement(self, expr):
# print(expr)
res = eval_expr(expr, self.namespace)
print(res)
print(tree.pretty())
# MyInterp().visit(tree)
| 18.553571
| 61
| 0.638114
|
from lark import Lark, Transformer, v_args
from lark.visitors import Interpreter, visit_children_decor
p = Lark.open("rules.lark", parser="lalr", rel_to=__file__)
code = """
// Firrst win in my book
b = 4;
a = b*2;
print a+1
x = 7;
p = [1, 2, 3, 4]
print p
"""
tree = p.parse(code)
@v_args(inline=True)
class MyEval(Transformer):
from operator import add, mul, neg, sub
from operator import truediv as div
number = float
def __init__(self, ns):
self.ns = ns
def var(self, name):
return self.ns[name]
def eval_expr(tree, ns):
return MyEval(ns).transform(tree)
@v_args(inline=True)
class MyInterp(Interpreter):
def __init__(self):
self.namespace = {}
def assign(self, var, expr):
self.namespace[var] = eval_expr(expr, self.namespace)
def print_statement(self, expr):
res = eval_expr(expr, self.namespace)
print(res)
print(tree.pretty())
| true
| true
|
79079e03168c0cf116ebbaac01d749cb566d0117
| 1,458
|
py
|
Python
|
xhtml2pdf/turbogears.py
|
trib3/xhtml2pdf
|
5211b7926ae3183176091f48fbd2e76e29c47095
|
[
"Apache-2.0"
] | null | null | null |
xhtml2pdf/turbogears.py
|
trib3/xhtml2pdf
|
5211b7926ae3183176091f48fbd2e76e29c47095
|
[
"Apache-2.0"
] | null | null | null |
xhtml2pdf/turbogears.py
|
trib3/xhtml2pdf
|
5211b7926ae3183176091f48fbd2e76e29c47095
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from turbogears.decorator import weak_signature_decorator
import xhtml2pdf.pisa as pisa
from six import StringIO
import cherrypy
def to_pdf(filename=None, content_type="application/pdf"):
def entangle(func):
def decorated(func, *args, **kw):
output = func(*args, **kw)
dst = StringIO.StringIO()
result = pisa.CreatePDF(
StringIO.StringIO(output),
dst
)
if not result.err:
cherrypy.response.headers["Content-Type"] = content_type
if filename:
cherrypy.response.headers["Content-Disposition"] = "attachment; filename=" + filename
output = dst.getvalue()
return output
return decorated
return weak_signature_decorator(entangle)
topdf = to_pdf
| 32.4
| 105
| 0.663237
|
from turbogears.decorator import weak_signature_decorator
import xhtml2pdf.pisa as pisa
from six import StringIO
import cherrypy
def to_pdf(filename=None, content_type="application/pdf"):
def entangle(func):
def decorated(func, *args, **kw):
output = func(*args, **kw)
dst = StringIO.StringIO()
result = pisa.CreatePDF(
StringIO.StringIO(output),
dst
)
if not result.err:
cherrypy.response.headers["Content-Type"] = content_type
if filename:
cherrypy.response.headers["Content-Disposition"] = "attachment; filename=" + filename
output = dst.getvalue()
return output
return decorated
return weak_signature_decorator(entangle)
topdf = to_pdf
| true
| true
|
79079e21ec728c3fde4a64fc02ca958ea7756300
| 4,188
|
py
|
Python
|
custom_scripts/hooks.py
|
VPS-Consultancy/custom_scripts
|
c812c8fa670c6e3c0e8d94d5ce22638b0daeb522
|
[
"MIT"
] | null | null | null |
custom_scripts/hooks.py
|
VPS-Consultancy/custom_scripts
|
c812c8fa670c6e3c0e8d94d5ce22638b0daeb522
|
[
"MIT"
] | null | null | null |
custom_scripts/hooks.py
|
VPS-Consultancy/custom_scripts
|
c812c8fa670c6e3c0e8d94d5ce22638b0daeb522
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "custom_scripts"
app_title = "Custom Scripts"
app_publisher = "C.R.I.O"
app_description = "For custom scripts"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "criogroups@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/custom_scripts/css/custom_scripts.css"
# app_include_js = "/assets/custom_scripts/js/custom_scripts.js"
# include js, css files in header of web template
# web_include_css = "/assets/custom_scripts/css/custom_scripts.css"
# web_include_js = "/assets/custom_scripts/js/custom_scripts.js"
# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "custom_scripts/public/scss/website"
# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
doctype_js = {"Sales Invoice" : "custom_scripts/custom/js/sales_invoice.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "custom_scripts.install.before_install"
# after_install = "custom_scripts.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "custom_scripts.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# DocType Class
# ---------------
# Override standard doctype classes
override_doctype_class = {
#"Employee Advance": "custom_scripts.custom_scripts.custom.auto_additional_salary.ERPNextEmployeeAdvance",
"POS Invoice Merge Log": "custom_scripts.custom_scripts.custom.sales_invoice.ERPNextPOSInvoiceMergeLog"
}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "custom_scripts.tasks.all"
# ],
# "daily": [
# "custom_scripts.tasks.daily"
# ],
# "hourly": [
# "custom_scripts.tasks.hourly"
# ],
# "weekly": [
# "custom_scripts.tasks.weekly"
# ]
# "monthly": [
# "custom_scripts.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "custom_scripts.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "custom_scripts.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "custom_scripts.task.get_dashboard_data"
# }
# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]
# User Data Protection
# --------------------
user_data_fields = [
{
"doctype": "{doctype_1}",
"filter_by": "{filter_by}",
"redact_fields": ["{field_1}", "{field_2}"],
"partial": 1,
},
{
"doctype": "{doctype_2}",
"filter_by": "{filter_by}",
"partial": 1,
},
{
"doctype": "{doctype_3}",
"strict": False,
},
{
"doctype": "{doctype_4}"
}
]
| 24.635294
| 107
| 0.685769
|
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "custom_scripts"
app_title = "Custom Scripts"
app_publisher = "C.R.I.O"
app_description = "For custom scripts"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "criogroups@gmail.com"
app_license = "MIT"
doctype_js = {"Sales Invoice" : "custom_scripts/custom/js/sales_invoice.js"}
override_doctype_class = {
"POS Invoice Merge Log": "custom_scripts.custom_scripts.custom.sales_invoice.ERPNextPOSInvoiceMergeLog"
}
user_data_fields = [
{
"doctype": "{doctype_1}",
"filter_by": "{filter_by}",
"redact_fields": ["{field_1}", "{field_2}"],
"partial": 1,
},
{
"doctype": "{doctype_2}",
"filter_by": "{filter_by}",
"partial": 1,
},
{
"doctype": "{doctype_3}",
"strict": False,
},
{
"doctype": "{doctype_4}"
}
]
| true
| true
|
79079e5e0ff5a602fbb8710f9e816f9785399d47
| 2,108
|
py
|
Python
|
txcl/utils/deploy_helpers.py
|
digitalepidemiologylab/text-classification
|
8a92a9f6d67857de7de5dcb72a41f75061572e9a
|
[
"MIT"
] | 3
|
2020-06-08T13:49:27.000Z
|
2020-12-01T12:07:29.000Z
|
txcl/utils/deploy_helpers.py
|
crowdbreaks/text-classification
|
8a92a9f6d67857de7de5dcb72a41f75061572e9a
|
[
"MIT"
] | 8
|
2020-06-17T14:21:20.000Z
|
2020-11-03T11:43:57.000Z
|
txcl/utils/deploy_helpers.py
|
crowdbreaks/text-classification
|
8a92a9f6d67857de7de5dcb72a41f75061572e9a
|
[
"MIT"
] | null | null | null |
"""
Deployment helpers
==================
"""
import os
import logging
from ..definitions import ROOT_DIR
from .docker import Docker
from .ecr import ECR
from .s3 import S3
from .sagemaker import Sagemaker
logger = logging.getLogger(__name__)
def build(run, project, model_type):
docker = Docker()
docker_path = os.path.join(ROOT_DIR, 'sagemaker', model_type)
image_name = get_image_name(run, project)
docker.build(docker_path, image_name)
def push(run, project, model_type):
docker = Docker()
s3 = S3()
image_name = get_image_name(run, project)
docker.push(image_name)
s3.upload_model(run, image_name, model_type=model_type)
def build_and_push(run, project, model_type):
build(run, project, model_type)
push(run, project, model_type)
def run_local(run, project, model_type):
# build image
build(run, project, model_type)
# run it
docker = Docker()
image_name = get_image_name(run, project)
docker.run(image_name, run, model_type)
def create_model_and_configuration(run, project, question_tag, model_type, instance_type):
# init helpers
ecr = ECR()
s3 = S3()
sm = Sagemaker()
# build deploy arguments
image_name = get_image_name(run, project)
ecr_image_name = ecr.get_ecr_image_name(image_name)
s3_model_path = s3.get_model_s3_path(image_name)
tags = [{'Key': 'project_name', 'Value': project},
{'Key': 'question_tag', 'Value': question_tag},
{'Key': 'run_name', 'Value': run},
{'Key': 'model_type', 'Value': model_type}]
# create model and endpoint configuration
sm.create_model_and_configuration(ecr_image_name, s3_model_path, tags=tags, instance_type=instance_type)
def deploy(run, project, question_tag, model_type, instance_type):
# initialize stuff
# build image and push to ECR
build_and_push(run, project, model_type)
# create model and endpoint configuration
create_model_and_configuration(run, project, question_tag, model_type, instance_type)
def get_image_name(run, project):
return f'crowdbreaks_{project}_{run}'
| 31.462687
| 108
| 0.70778
|
import os
import logging
from ..definitions import ROOT_DIR
from .docker import Docker
from .ecr import ECR
from .s3 import S3
from .sagemaker import Sagemaker
logger = logging.getLogger(__name__)
def build(run, project, model_type):
docker = Docker()
docker_path = os.path.join(ROOT_DIR, 'sagemaker', model_type)
image_name = get_image_name(run, project)
docker.build(docker_path, image_name)
def push(run, project, model_type):
docker = Docker()
s3 = S3()
image_name = get_image_name(run, project)
docker.push(image_name)
s3.upload_model(run, image_name, model_type=model_type)
def build_and_push(run, project, model_type):
build(run, project, model_type)
push(run, project, model_type)
def run_local(run, project, model_type):
build(run, project, model_type)
docker = Docker()
image_name = get_image_name(run, project)
docker.run(image_name, run, model_type)
def create_model_and_configuration(run, project, question_tag, model_type, instance_type):
ecr = ECR()
s3 = S3()
sm = Sagemaker()
image_name = get_image_name(run, project)
ecr_image_name = ecr.get_ecr_image_name(image_name)
s3_model_path = s3.get_model_s3_path(image_name)
tags = [{'Key': 'project_name', 'Value': project},
{'Key': 'question_tag', 'Value': question_tag},
{'Key': 'run_name', 'Value': run},
{'Key': 'model_type', 'Value': model_type}]
sm.create_model_and_configuration(ecr_image_name, s3_model_path, tags=tags, instance_type=instance_type)
def deploy(run, project, question_tag, model_type, instance_type):
build_and_push(run, project, model_type)
create_model_and_configuration(run, project, question_tag, model_type, instance_type)
def get_image_name(run, project):
return f'crowdbreaks_{project}_{run}'
| true
| true
|
7907a05b1b790d8810def314c40b88b5f6527f37
| 11,713
|
py
|
Python
|
tests/test_xmlparser.py
|
Fake4d/mosk
|
d15c6088a382a51706bd38e3299d00be5c208acc
|
[
"CC0-1.0"
] | 3
|
2021-05-22T11:14:10.000Z
|
2022-02-18T00:32:10.000Z
|
tests/test_xmlparser.py
|
Fake4d/mosk
|
d15c6088a382a51706bd38e3299d00be5c208acc
|
[
"CC0-1.0"
] | 1
|
2021-06-20T07:18:58.000Z
|
2021-09-19T12:24:03.000Z
|
tests/test_xmlparser.py
|
Fake4d/mosk
|
d15c6088a382a51706bd38e3299d00be5c208acc
|
[
"CC0-1.0"
] | 1
|
2021-06-09T07:43:03.000Z
|
2021-06-09T07:43:03.000Z
|
from unittest import TestCase
from unittest.mock import patch
from xmlschema import XMLSchemaException
from xml.dom.minidom import Element, Document, parse
class TestXmlParserInstructionspath(TestCase):
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath(self, placeholder_mock, xmlparser_mock, isfile_mock, schema_mock, initmetadata_mock):
"""
Will return the instructions file path set in __init__
"""
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
actual_file = xml_parser.instructionspath
self.assertEqual(expected_file, actual_file)
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath_instruction_file_not_there(self, placeholder_mock, xmlparser_mock, isfile_mock,
schema_mock, initmetadata_mock):
"""
Will raise FileNotFound exeption.
"""
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
isfile_mock.return_value = False
with self.assertRaises(FileNotFoundError):
xml_parser.instructionspath = expected_file
class TestXmlParserValidate_schema(TestCase):
def test__validate_schema_valid_instructions(self):
"""
Should do nothing.
"""
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/valid_instructions.xml')
except XMLSchemaException:
self.fail("_validate_schema should not raise exception with valid xml instructions.")
def test__validate_schema_invalid_instructions(self):
"""
Should raise exception.
"""
from instructionparsers.xmlparser import XmlParser
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
self.assertRaises(XMLSchemaException,
XmlParser._validate_schema, './instructions/invalid_instructions.xml')
def test__validate_schema_minimal_valid_instructions(self):
"""
Should do nothing.
"""
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/minimal_valid_instructions.xml')
except XMLSchemaException:
self.fail("_validate_schema should not raise exception with valid xml instructions.")
class TestXmlParserInitializemetadata(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__initializemetadata_valid_instructions(self, path_mock):
"""
Should initialize member 'metadata' with all elements which have the attribute "title".
"""
metadata = ('Examiner', 'Assignment', 'Client', 'Description of Artefact', 'Task Description')
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
xml_parser._initializemetadata()
for data in metadata:
with self.subTest(data):
self.assertIsNotNone(xml_parser.metadata[data])
class TestXmlParserInitInstructions(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
"""
Should initialize collectors for all XML elements which have the attribute "module".
"""
from instructionparsers.xmlparser import XmlParser
from instructionparsers.wrapper import InstructionWrapper
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertIsInstance(instructionstree, InstructionWrapper)
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
"""
Should return the instruction tree starting with "Root" node.
"""
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertEqual(instructionstree.instructionname,
'Root')
self.assertEqual(instructionstree.instructionchildren[0].instructionname,
'LocalHost')
self.assertEqual(instructionstree.instructionchildren[0].instructionchildren[0].instructionname,
'MachineName')
self.assertEqual(instructionstree.instructionchildren[1].instructionname, 'LocalHost')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[0].instructionname,
'OSName')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[1].instructionname,
'OSVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[2].instructionname,
'OSTimezone')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[3].instructionname,
'AllUsernames')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[4].instructionname,
'CurrentUser')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[5].instructionname,
'SudoVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[6].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[7].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[8].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[9].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[10].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[11].instructionname,
'ShellHistoryOfAllUsers')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[12].instructionname,
'NVRAMCollector')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[13].instructionname,
'TimeFromNTPServer')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[14].instructionname,
'LocalTime')
class TestXmlParserGetFirstInstructionElement(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_first_instruction_element(self, path_mock):
"""
Should return the xml element with the title "Root".
"""
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
element = xml_parser._get_first_instruction_element()
self.assertIsInstance(element, Element)
self.assertEqual(element.localName, 'Root')
class TestXmlParser(TestCase):
def test__get_placeholder_name(self):
"""
If XmlElement contains attribute "placeholder" method should return value of this attribute.
"""
from instructionparsers.xmlparser import XmlParser
document = Document()
element = document.createElement('Demo')
element.setAttribute(XmlParser.PLACEHOLDERNAME_ATTRIBUTE, "test")
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, 'test')
def test__get_placeholder_name_no_placeholder(self):
"""
If XmlElement does not contain attribute "placeholder" method should return an empty string.
"""
from instructionparsers.xmlparser import XmlParser
#from xml.dom.minidom import Element
element = Element('Demo')
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, '')
class TestXmlParserGetParameterAttributes(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict(self, path_mock):
"""
Should return UserDict
"""
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse("./instructions/instructions_stub.xml").documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsInstance(actual, UserDict)
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
"""
Should return dict with two entries
"""
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse("./instructions/instructions_stub.xml").documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertEqual(len(actual), 2)
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
"""
Should return dicitionry with "users_with_homedir" key and with "properties" key.
"""
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse("./instructions/instructions_stub.xml").documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsNotNone(actual.get("properties"))
self.assertIsNotNone(actual.get("users_with_homedir"))
| 45.753906
| 115
| 0.712029
|
from unittest import TestCase
from unittest.mock import patch
from xmlschema import XMLSchemaException
from xml.dom.minidom import Element, Document, parse
class TestXmlParserInstructionspath(TestCase):
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath(self, placeholder_mock, xmlparser_mock, isfile_mock, schema_mock, initmetadata_mock):
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
actual_file = xml_parser.instructionspath
self.assertEqual(expected_file, actual_file)
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
def test_instructionspath_instruction_file_not_there(self, placeholder_mock, xmlparser_mock, isfile_mock,
schema_mock, initmetadata_mock):
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
isfile_mock.return_value = False
with self.assertRaises(FileNotFoundError):
xml_parser.instructionspath = expected_file
class TestXmlParserValidate_schema(TestCase):
    """Tests for XmlParser._validate_schema."""

    def test__validate_schema_valid_instructions(self):
        """A schema-conformant instruction file must validate silently."""
        from instructionparsers.xmlparser import XmlParser

        XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
        try:
            XmlParser._validate_schema(xmlfilepath='./instructions/valid_instructions.xml')
        except XMLSchemaException:
            self.fail("_validate_schema should not raise exception with valid xml instructions.")

    def test__validate_schema_invalid_instructions(self):
        """A non-conformant instruction file must raise XMLSchemaException."""
        from instructionparsers.xmlparser import XmlParser

        XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
        with self.assertRaises(XMLSchemaException):
            XmlParser._validate_schema(xmlfilepath='./instructions/invalid_instructions.xml')

    def test__validate_schema_minimal_valid_instructions(self):
        """A minimal but valid instruction file must validate silently."""
        from instructionparsers.xmlparser import XmlParser

        XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
        try:
            XmlParser._validate_schema(xmlfilepath='./instructions/minimal_valid_instructions.xml')
        except XMLSchemaException:
            self.fail("_validate_schema should not raise exception with valid xml instructions.")
class TestXmlParserInitializemetadata(TestCase):
    """Tests for XmlParser._initializemetadata."""

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__initializemetadata_valid_instructions(self, path_mock):
        """Every expected metadata entry should be populated from the file."""
        from instructionparsers.xmlparser import XmlParser

        instructions = './instructions/valid_instructions.xml'
        xml_parser = XmlParser(instructionspath=instructions, protocol=None)
        xml_parser._instructionspath = instructions

        xml_parser._initializemetadata()

        expected_keys = ('Examiner', 'Assignment', 'Client',
                         'Description of Artefact', 'Task Description')
        for key in expected_keys:
            with self.subTest(key):
                self.assertIsNotNone(xml_parser.metadata[key])
class TestXmlParserInitInstructions(TestCase):
    """Tests for XmlParser._init_instructions."""

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__init_instructions_valid_instructions(self, path_mock):
        """_init_instructions should return an InstructionWrapper tree."""
        from instructionparsers.xmlparser import XmlParser
        from instructionparsers.wrapper import InstructionWrapper

        instructions = './instructions/valid_instructions.xml'
        xml_parser = XmlParser(instructionspath=instructions, protocol=None)
        xml_parser._instructionspath = instructions

        instructionstree = xml_parser._init_instructions()

        self.assertIsInstance(instructionstree, InstructionWrapper)

    # BUG FIX: this test was previously also named
    # 'test__init_instructions_valid_instructions', which silently shadowed
    # the method above, so only one of the two ever ran.
    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__init_instructions_builds_expected_tree(self, path_mock):
        """The parsed tree should mirror the instruction file's structure."""
        from instructionparsers.xmlparser import XmlParser

        instructions = './instructions/valid_instructions.xml'
        xml_parser = XmlParser(instructionspath=instructions, protocol=None)
        xml_parser._instructionspath = instructions

        instructionstree = xml_parser._init_instructions()

        self.assertEqual(instructionstree.instructionname, 'Root')
        self.assertEqual(instructionstree.instructionchildren[0].instructionname,
                         'LocalHost')
        self.assertEqual(
            instructionstree.instructionchildren[0].instructionchildren[0].instructionname,
            'MachineName')

        localhost = instructionstree.instructionchildren[1]
        self.assertEqual(localhost.instructionname, 'LocalHost')
        expected_children = ('OSName', 'OSVersion', 'OSTimezone', 'AllUsernames',
                             'CurrentUser', 'SudoVersion', 'FileExistence',
                             'FileExistence', 'FileExistence', 'FileExistence',
                             'FileExistence', 'ShellHistoryOfAllUsers',
                             'NVRAMCollector', 'TimeFromNTPServer', 'LocalTime')
        for index, name in enumerate(expected_children):
            with self.subTest(index=index, name=name):
                self.assertEqual(
                    localhost.instructionchildren[index].instructionname, name)
class TestXmlParserGetFirstInstructionElement(TestCase):
    """Tests for XmlParser._get_first_instruction_element."""

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_first_instruction_element(self, path_mock):
        """The first instruction element should be the 'Root' XML element."""
        from instructionparsers.xmlparser import XmlParser

        instructions = './instructions/valid_instructions.xml'
        xml_parser = XmlParser(instructionspath=instructions, protocol=None)
        xml_parser._instructionspath = instructions

        element = xml_parser._get_first_instruction_element()

        self.assertIsInstance(element, Element)
        self.assertEqual(element.localName, 'Root')
class TestXmlParser(TestCase):
    """Tests for XmlParser._get_placeholder_name."""

    def test__get_placeholder_name(self):
        """The value of the placeholder attribute should be returned when set."""
        from instructionparsers.xmlparser import XmlParser

        document = Document()
        element = document.createElement('Demo')
        element.setAttribute(XmlParser.PLACEHOLDERNAME_ATTRIBUTE, "test")

        self.assertEqual(XmlParser._get_placeholder_name(element), 'test')

    def test__get_placeholder_name_no_placeholder(self):
        """Without the placeholder attribute an empty string should be returned."""
        from instructionparsers.xmlparser import XmlParser

        element = Element('Demo')

        self.assertEqual(XmlParser._get_placeholder_name(element), '')
class TestXmlParserGetParameterAttributes(TestCase):
    """Tests for XmlParser._get_parameter_attributes."""

    @staticmethod
    def _stub_attributes():
        """Return the attributes of the stub file's first instruction element."""
        elem = parse("./instructions/instructions_stub.xml").documentElement.childNodes[1]
        return elem.attributes

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_parameter_attributes_return_userdict(self, path_mock):
        """The result should be a UserDict."""
        from instructionparsers.xmlparser import XmlParser
        from collections import UserDict

        actual = XmlParser._get_parameter_attributes(attributes=self._stub_attributes())

        self.assertIsInstance(actual, UserDict)

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
        """The stub element carries exactly two parameter attributes."""
        from instructionparsers.xmlparser import XmlParser

        actual = XmlParser._get_parameter_attributes(attributes=self._stub_attributes())

        self.assertEqual(len(actual), 2)

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
        """Both expected attribute keys should be present in the result."""
        from instructionparsers.xmlparser import XmlParser

        actual = XmlParser._get_parameter_attributes(attributes=self._stub_attributes())

        self.assertIsNotNone(actual.get("properties"))
        self.assertIsNotNone(actual.get("users_with_homedir"))
| true
| true
|
7907a0c6881573c03f84b97b6b5307726128a7fe
| 3,344
|
py
|
Python
|
plaso/parsers/plist_plugins/launchd.py
|
ddm1004/plaso
|
88d44561754c5f981d4ab96d53186d1fc5f97f98
|
[
"Apache-2.0"
] | 1
|
2020-10-29T18:23:25.000Z
|
2020-10-29T18:23:25.000Z
|
plaso/parsers/plist_plugins/launchd.py
|
joshlemon/plaso
|
9f8e05f21fa23793bfdade6af1d617e9dd092531
|
[
"Apache-2.0"
] | null | null | null |
plaso/parsers/plist_plugins/launchd.py
|
joshlemon/plaso
|
9f8e05f21fa23793bfdade6af1d617e9dd092531
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Launchd plist plugin."""
from __future__ import unicode_literals
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import plist_event
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class LaunchdPlugin(interface.PlistPlugin):
  """Basic plugin to extract launchd configuration information.

  Further details about fields within the key:
  Label:
      the required key for uniquely identifying the launchd service.
  Program:
      absolute path to the executable. required in the absence of the
      ProgramArguments key.
  ProgramArguments:
      command-line flags for the executable. required in the absence of the
      Program key.
  UserName:
      the job run as the specified user.
  GroupName:
      the job run as the specified group.
  """

  NAME = 'launchd_plist'
  DESCRIPTION = 'Parser for Launchd plist files.'

  # The PLIST_PATH is dynamic, the prefix filename is, by default, named using
  # reverse-domain notation. For example, Chrome is com.google.chrome.plist.
  # Typical locations:
  # /System/Library/LaunchDaemons/*.plist
  # /System/Library/LaunchAgents/*.plist
  # /Library/LaunchDaemons/*.plist
  # /Library/LaunchAgents/*.plist
  # ~/Library/LaunchAgents

  PLIST_KEYS = frozenset([
      'Label',
      'Program',
      'ProgramArguments',
      'UserName',
      'GroupName',
  ])

  # pylint: disable=arguments-differ
  def Process(self, parser_mediator, plist_name, top_level, **kwargs):
    """Check if it is a valid MacOS plist file name.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      plist_name (str): name of the plist.
      top_level (dict[str, object]): plist top-level key.
    """
    super(LaunchdPlugin, self).Process(
        parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)

  # pylint: disable=arguments-differ
  def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    """Extracts launchd information from the plist.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    label = top_level.get('Label')
    command = top_level.get('Program', '')
    # BUG FIX: ProgramArguments is optional (required only in the absence of
    # Program); iterating over the previous unguarded .get() result raised
    # TypeError when the key was missing.
    program_arguments = top_level.get('ProgramArguments') or []
    for argument in program_arguments:
      command += " %s" % argument
    user_name = top_level.get('UserName')
    group_name = top_level.get('GroupName')
    event_data = plist_event.PlistTimeEventData()
    event_data.desc = ('Launchd service config {0:s} points to {1:s} with '
                       'user:{2:s} group:{3:s}').format(label, command,
                                                        user_name, group_name)
    event_data.key = 'launchdServiceConfig'
    event_data.root = '/'
    # Launchd configuration carries no timestamp of its own, so emit a
    # semantic "Not set" time value.
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
    parser_mediator.ProduceEventWithEventData(event, event_data)


plist.PlistParser.RegisterPlugin(LaunchdPlugin)
| 33.777778
| 78
| 0.704246
|
from __future__ import unicode_literals
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import plist_event
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class LaunchdPlugin(interface.PlistPlugin):
NAME = 'launchd_plist'
DESCRIPTION = 'Parser for Launchd plist files.'
PLIST_KEYS = frozenset([
'Label',
'Program',
'ProgramArguments',
'UserName',
'GroupName',
])
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
super(LaunchdPlugin, self).Process(
parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
label = top_level.get('Label')
command = top_level.get('Program', '')
program_arguments = top_level.get('ProgramArguments')
for argument in program_arguments:
command += " %s" % argument
user_name = top_level.get('UserName')
group_name = top_level.get('GroupName')
event_data = plist_event.PlistTimeEventData()
event_data.desc = ('Launchd service config {0:s} points to {1:s} with '
'user:{2:s} group:{3:s}').format(label, command,
user_name, group_name)
event_data.key = 'launchdServiceConfig'
event_data.root = '/'
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
plist.PlistParser.RegisterPlugin(LaunchdPlugin)
| true
| true
|
7907a1ebe04301894670a171194a25925fbfc017
| 2,105
|
py
|
Python
|
src/cosmic_ray/tools/filters/operators_filter.py
|
XD-DENG/cosmic-ray
|
d265dd0c7bf65484ee2ff1503129b2b16d0c7f55
|
[
"MIT"
] | 1
|
2020-10-18T11:29:03.000Z
|
2020-10-18T11:29:03.000Z
|
src/cosmic_ray/tools/filters/operators_filter.py
|
XD-DENG/cosmic-ray
|
d265dd0c7bf65484ee2ff1503129b2b16d0c7f55
|
[
"MIT"
] | 4
|
2020-11-21T07:36:24.000Z
|
2020-11-22T03:09:39.000Z
|
src/cosmic_ray/tools/filters/operators_filter.py
|
XD-DENG/cosmic-ray
|
d265dd0c7bf65484ee2ff1503129b2b16d0c7f55
|
[
"MIT"
] | null | null | null |
"""An filter that removes operators based on regular expressions.
"""
from argparse import Namespace
import logging
import re
import sys
from cosmic_ray.config import load_config
from cosmic_ray.work_db import WorkDB
from cosmic_ray.work_item import WorkerOutcome, WorkResult
from cosmic_ray.tools.filters.filter_app import FilterApp
log = logging.getLogger()
class OperatorsFilter(FilterApp):
    """Implements the operators-filter."""

    def description(self):
        return __doc__

    def _skip_filtered(self, work_db, exclude_operators):
        """Mark every pending work item whose operator name matches one of
        the exclude patterns as SKIPPED."""
        if not exclude_operators:
            return

        # BUG FIX: '(:?%s)' was a typo for the non-capturing group '(?:%s)'.
        # The old form compiled to "optional literal ':' followed by the
        # pattern", which accepted the intended names only incidentally and
        # also matched names prefixed with ':'.
        re_exclude_operators = re.compile('|'.join('(?:%s)' % e for e in exclude_operators))

        for item in work_db.pending_work_items:
            if re_exclude_operators.match(item.operator_name):
                log.info(
                    "operator skipping %s %s %s %s %s %s",
                    item.job_id,
                    item.operator_name,
                    item.occurrence,
                    item.module_path,
                    item.start_pos,
                    item.end_pos,
                )
                work_db.set_result(
                    item.job_id,
                    WorkResult(
                        output="Filtered operator",
                        worker_outcome=WorkerOutcome.SKIPPED,
                    ),
                )

    def filter(self, work_db: WorkDB, args: Namespace):
        """Mark as skipped all work items with a filtered operator.

        Reads 'exclude-operators' from the [filters.operators-filter]
        section of --config if given, else of the config stored in work_db.
        """
        if args.config is None:
            config = work_db.get_config()
        else:
            config = load_config(args.config)

        exclude_operators = config.sub('filters', 'operators-filter').get('exclude-operators', ())
        self._skip_filtered(work_db, exclude_operators)

    def add_args(self, parser):
        """Register the filter's command-line options on *parser*."""
        parser.add_argument('--config', help='Config file to use')
def main(argv=None):
    """Run the operators-filter with the specified command line arguments."""
    app = OperatorsFilter()
    return app.main(argv)


if __name__ == '__main__':
    sys.exit(main())
| 29.647887
| 98
| 0.59905
|
from argparse import Namespace
import logging
import re
import sys
from cosmic_ray.config import load_config
from cosmic_ray.work_db import WorkDB
from cosmic_ray.work_item import WorkerOutcome, WorkResult
from cosmic_ray.tools.filters.filter_app import FilterApp
log = logging.getLogger()
class OperatorsFilter(FilterApp):
def description(self):
return __doc__
def _skip_filtered(self, work_db, exclude_operators):
if not exclude_operators:
return
re_exclude_operators = re.compile('|'.join('(:?%s)' % e for e in exclude_operators))
for item in work_db.pending_work_items:
if re_exclude_operators.match(item.operator_name):
log.info(
"operator skipping %s %s %s %s %s %s",
item.job_id,
item.operator_name,
item.occurrence,
item.module_path,
item.start_pos,
item.end_pos,
)
work_db.set_result(
item.job_id,
WorkResult(
output="Filtered operator",
worker_outcome=WorkerOutcome.SKIPPED,
),
)
def filter(self, work_db: WorkDB, args: Namespace):
if args.config is None:
config = work_db.get_config()
else:
config = load_config(args.config)
exclude_operators = config.sub('filters', 'operators-filter').get('exclude-operators', ())
self._skip_filtered(work_db, exclude_operators)
def add_args(self, parser):
parser.add_argument('--config', help='Config file to use')
def main(argv=None):
return OperatorsFilter().main(argv)
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
7907a2b8b434229113f51c903ecf29a8bffd7315
| 1,708
|
py
|
Python
|
app/Http/Controllers/Dashboard/Wan_edge_Health.py
|
victornguyen98/luanvan2020
|
b1f9d8fbed1cae5054678217ca069e5c22a05e95
|
[
"MIT"
] | null | null | null |
app/Http/Controllers/Dashboard/Wan_edge_Health.py
|
victornguyen98/luanvan2020
|
b1f9d8fbed1cae5054678217ca069e5c22a05e95
|
[
"MIT"
] | null | null | null |
app/Http/Controllers/Dashboard/Wan_edge_Health.py
|
victornguyen98/luanvan2020
|
b1f9d8fbed1cae5054678217ca069e5c22a05e95
|
[
"MIT"
] | null | null | null |
import requests
import sys
import json
requests.packages.urllib3.disable_warnings()
from requests.packages.urllib3.exceptions import InsecureRequestWarning
SDWAN_IP = "10.10.20.90"
SDWAN_USERNAME = "admin"
SDWAN_PASSWORD = "C1sco12345"
class rest_api_lib:
    """Minimal wrapper around the Cisco vManage (SD-WAN) REST API.

    Constructing an instance immediately logs in and stores an
    authenticated requests session keyed by the vManage IP.
    """

    def __init__(self, vmanage_ip, username, password):
        self.vmanage_ip = vmanage_ip
        self.session = {}
        self.login(self.vmanage_ip, username, password)

    def login(self, vmanage_ip, username, password):
        """Login to vmanage and cache the authenticated session.

        Exits the process with status 0 when authentication fails.
        """
        base_url_str = 'https://%s:8443/'%vmanage_ip
        login_action = 'j_security_check'
        login_data = {'j_username' : username, 'j_password' : password}
        login_url = base_url_str + login_action
        # BUG FIX: removed the dead local 'url = base_url_str + login_url',
        # which was never used (and concatenated the base URL twice).
        sess = requests.session()
        login_response = sess.post(url=login_url, data=login_data, verify=False)
        # vManage answers a failed login with its HTML login page instead of
        # an empty body.
        if b'<html>' in login_response.content:
            print ("Login Failed")
            sys.exit(0)
        self.session[vmanage_ip] = sess

    def get_request(self, api):
        """Issue a GET against the dataservice API path *api* and return the
        raw requests response."""
        url = "https://%s:8443/dataservice/%s"%(self.vmanage_ip, api)
        response = self.session[self.vmanage_ip].get(url, verify=False)
        return response
# Module-level client; constructing it performs the login immediately.
Sdwan = rest_api_lib(SDWAN_IP, SDWAN_USERNAME, SDWAN_PASSWORD)

def Wan_edge_Health():
    """Print the three WAN-edge hardware-health counters as 'a,b,c',
    or 'Wrong' (and exit) when the request or parsing fails."""
    try:
        resp = Sdwan.get_request(api = "device/hardwarehealth/summary?isCached=true")
        data = resp.json()
        string = str(data['data'][0]['statusList'][0]['count'])+','+str(data['data'][0]['statusList'][1]['count'])+','+str(data['data'][0]['statusList'][2]['count'])
        print(string)
    # BUG FIX: narrowed the bare 'except:', which also swallowed
    # SystemExit and KeyboardInterrupt.
    except Exception:
        print("Wrong")
        sys.exit()

Wan_edge_Health()
| 32.226415
| 165
| 0.652225
|
import requests
import sys
import json
requests.packages.urllib3.disable_warnings()
from requests.packages.urllib3.exceptions import InsecureRequestWarning
SDWAN_IP = "10.10.20.90"
SDWAN_USERNAME = "admin"
SDWAN_PASSWORD = "C1sco12345"
class rest_api_lib:
def __init__(self, vmanage_ip, username, password):
self.vmanage_ip = vmanage_ip
self.session = {}
self.login(self.vmanage_ip, username, password)
def login(self, vmanage_ip, username, password):
base_url_str = 'https://%s:8443/'%vmanage_ip
login_action = 'j_security_check'
login_data = {'j_username' : username, 'j_password' : password}
login_url = base_url_str + login_action
url = base_url_str + login_url
sess = requests.session()
login_response = sess.post(url=login_url, data=login_data, verify=False)
if b'<html>' in login_response.content:
print ("Login Failed")
sys.exit(0)
self.session[vmanage_ip] = sess
def get_request(self, api):
url = "https://%s:8443/dataservice/%s"%(self.vmanage_ip, api)
response = self.session[self.vmanage_ip].get(url, verify=False)
return response
Sdwan = rest_api_lib(SDWAN_IP, SDWAN_USERNAME, SDWAN_PASSWORD)
def Wan_edge_Health():
try:
resp = Sdwan.get_request(api = "device/hardwarehealth/summary?isCached=true")
data = resp.json()
string = str(data['data'][0]['statusList'][0]['count'])+','+str(data['data'][0]['statusList'][1]['count'])+','+str(data['data'][0]['statusList'][2]['count'])
print(string)
except:
print("Wrong")
sys.exit()
Wan_edge_Health()
| true
| true
|
7907a3fc0d32d5dd0880905e2a5f1691a3a1ca66
| 159
|
py
|
Python
|
active_learning/heuristics/__init__.py
|
bpanahij/maskal
|
5a565854d43c80cac8a4c5d9996a1042db70633e
|
[
"Apache-2.0"
] | 11
|
2021-12-17T09:12:57.000Z
|
2022-03-23T18:27:17.000Z
|
active_learning/heuristics/__init__.py
|
bpanahij/maskal
|
5a565854d43c80cac8a4c5d9996a1042db70633e
|
[
"Apache-2.0"
] | null | null | null |
active_learning/heuristics/__init__.py
|
bpanahij/maskal
|
5a565854d43c80cac8a4c5d9996a1042db70633e
|
[
"Apache-2.0"
] | 1
|
2022-01-26T23:25:08.000Z
|
2022-01-26T23:25:08.000Z
|
# @Author: Pieter Blok
# @Date: 2021-03-25 15:33:17
# @Last Modified by: Pieter Blok
# @Last Modified time: 2021-03-25 15:36:30
from .uncertainty import *
| 26.5
| 42
| 0.685535
|
from .uncertainty import *
| true
| true
|
7907a4ff5091c058e80a00f676d1dfa90abdc138
| 603
|
py
|
Python
|
equinox/models/terrain.py
|
ProfAndreaPollini/opengl-pyglet-python-game-programming
|
97b07f8f0e9f58da5bde5244a6a2f809fe4bfee4
|
[
"MIT"
] | null | null | null |
equinox/models/terrain.py
|
ProfAndreaPollini/opengl-pyglet-python-game-programming
|
97b07f8f0e9f58da5bde5244a6a2f809fe4bfee4
|
[
"MIT"
] | 2
|
2019-09-05T16:08:42.000Z
|
2019-09-05T16:09:50.000Z
|
equinox/models/terrain.py
|
ProfAndreaPollini/opengl-pyglet-python-game-programming
|
97b07f8f0e9f58da5bde5244a6a2f809fe4bfee4
|
[
"MIT"
] | null | null | null |
from equinox.models import Model,cleanup
import glm
from random import random
from .glutils import bindIndicesToBuffer, storeDataInVBO,createVAO,unbindVAO
class Terrain(Model):
    """A flat quad terrain patch on the XZ plane, made of two triangles."""

    # NOTE(review): n_vertex is accepted but unused, and Model.__init__ is
    # not called — confirm both are intentional.
    def __init__(self, n_vertex):
        # Corner positions at y == 0, as a flat (x, y, z) tuple.
        self.vertices = (-1.0, 0.0, 1.0,
                         -1.0, 0.0, -1.0,
                         1.0, 0.0, -1.0,
                         1.0, 0.0, 1.0)
        # Every corner shares the straight-up normal (tuple repetition
        # yields the same flat 12-element tuple as spelling it out).
        self.normals = (0.0, 1.0, 0.0) * 4
        # Two triangles covering the quad.
        self.indices = (0, 1, 2, 2, 3, 0)
| 17.735294
| 76
| 0.434494
|
from equinox.models import Model,cleanup
import glm
from random import random
from .glutils import bindIndicesToBuffer, storeDataInVBO,createVAO,unbindVAO
class Terrain(Model):
def __init__(self, n_vertex):
self.vertices = (
-1.0, 0.0, 1.0,
-1.0, 0.0, -1.0,
1.0, 0.0, -1.0,
1.0, 0.0, 1.0,
)
self.normals = (
0.0, 1.0, 0.0,
0.0, 1.0, 0.0,
0.0, 1.0, 0.0,
0.0, 1.0, 0.0
)
self.indices = (
0,1,2,
2,3,0
)
| true
| true
|
7907a5c807f771b07d497406a9527bb2680943a7
| 1,091
|
py
|
Python
|
challenges/2019/python/d02.py
|
basoares/advent-of-code
|
3b8216f0e73b12fd879aecea56783b8db7a4bc16
|
[
"MIT"
] | null | null | null |
challenges/2019/python/d02.py
|
basoares/advent-of-code
|
3b8216f0e73b12fd879aecea56783b8db7a4bc16
|
[
"MIT"
] | null | null | null |
challenges/2019/python/d02.py
|
basoares/advent-of-code
|
3b8216f0e73b12fd879aecea56783b8db7a4bc16
|
[
"MIT"
] | null | null | null |
'''
Advent of Code - 2019
--- Day 2: 1202 Program Alarm ---
'''
from utils import *
from intcode import IntcodeRunner, HaltExecution
def parse_input(day):
    """Return the day's puzzle input: the first row, parsed as integers."""
    rows = day_input(day, integers)
    return rows[0]
def part1(program, noun=12, verb=2):
    """Run the Intcode *program* with the given noun/verb and return mem[0]."""
    runner = IntcodeRunner(program)
    runner.set_mem(1, noun)
    runner.set_mem(2, verb)
    halted = False
    while not halted:
        try:
            next(runner.run())
        except HaltExecution:
            halted = True
    return runner.get_mem(0)
def part2(program, target=19690720):
    """Search noun/verb pairs until mem[0] equals *target*; return 100*noun+verb."""
    runner = IntcodeRunner(program)
    for noun in range(100, -1, -1):
        for verb in range(100):
            runner.set_mem(1, noun)
            runner.set_mem(2, verb)
            halted = False
            while not halted:
                try:
                    next(runner.run())
                except HaltExecution:
                    halted = True
            if runner.get_mem(0) == target:
                return 100 * noun + verb
            # Restore the program memory before trying the next pair.
            runner.reset()
if __name__ == '__main__':
    puzzle = parse_input('02')
    print(f'Part One: {part1(puzzle)}')
    print(f'Part Two: {part2(puzzle)}')
| 21.82
| 48
| 0.549954
|
from utils import *
from intcode import IntcodeRunner, HaltExecution
def parse_input(day):
return day_input(day, integers)[0]
def part1(program, noun=12, verb=2):
runner = IntcodeRunner(program)
runner.set_mem(1, noun)
runner.set_mem(2, verb)
while True:
try:
next(runner.run())
except HaltExecution:
break
return runner.get_mem(0)
def part2(program, target=19690720):
runner = IntcodeRunner(program)
for noun in range(100, -1, -1):
for verb in range(100):
runner.set_mem(1, noun)
runner.set_mem(2, verb)
while True:
try:
next(runner.run())
except HaltExecution:
break
if runner.get_mem(0) == target:
return 100*noun+verb
runner.reset()
if __name__ == '__main__':
data = parse_input('02')
print(f'Part One: {part1(data)}')
print(f'Part Two: {part2(data)}')
| true
| true
|
7907a62d9c5f4e3cc08661841f9c3579ca113575
| 1,094
|
py
|
Python
|
classes/utility.py
|
pianomanx/Scavenger
|
75907e802e4e2b019b1927fb5ab950a10f7d5798
|
[
"MIT"
] | 465
|
2018-06-24T16:21:44.000Z
|
2022-03-24T11:59:06.000Z
|
classes/utility.py
|
SCR-Hy3n4/Scavenger
|
75907e802e4e2b019b1927fb5ab950a10f7d5798
|
[
"Apache-2.0"
] | 6
|
2018-12-27T15:51:46.000Z
|
2021-04-15T07:32:36.000Z
|
classes/utility.py
|
watchmen-coder/Scavenger
|
75907e802e4e2b019b1927fb5ab950a10f7d5798
|
[
"MIT"
] | 101
|
2018-10-28T10:55:05.000Z
|
2022-03-31T15:29:15.000Z
|
#!/usr/bin/python
import time
import re
import os
class ScavUtility:
    """Helper routines: email validation, search-term loading, paste archiving."""

    # Compiled once instead of on every check() call. Requires a 1-64 char
    # local part, and a domain that does not start with '-' and ends in an
    # alphabetic TLD of 2+ characters.
    EMAIL_REGEX = re.compile(
        r'^(?=.{1,64}@)[A-Za-z0-9_-]+(\.[A-Za-z0-9_-]+)*'
        r'@[^-][A-Za-z0-9-]+(\.[A-Za-z0-9-]+)*(\.[A-Za-z]{2,})$')

    def __init__(self):
        pass

    def check(self, email):
        """Return 1 if *email* looks like a valid address, else 0."""
        if self.EMAIL_REGEX.search(email):
            return 1
        return 0

    def loadSearchTerms(self):
        """Load newline-separated search terms into a set (stripped)."""
        # BUG FIX: the file handle was previously never closed explicitly.
        with open("configs/searchterms.txt", "r") as f:
            return {line.strip() for line in f}

    def archivepastes(self, directory):
        """Zip and move pastes to archive/ once *directory* holds > 48000 files."""
        pastecount = len([name for name in os.listdir(directory)
                          if os.path.isfile(os.path.join(directory, name))])
        if pastecount > 48000:
            # SECURITY: these shell commands interpolate *directory* unquoted;
            # prefer subprocess.run([...], shell=False) with list arguments.
            archivefilename = str(time.time()) + ".zip"
            os.system("zip -r pastebin_" + archivefilename + " " + directory)
            os.system("mv pastebin_" + archivefilename + " archive/.")
            os.system("rm " + directory + "/*")
| 29.567568
| 120
| 0.54479
|
import time
import re
import os
class ScavUtility:
def __init__(self):
pass
def check(self, email):
regex = '^(?=.{1,64}@)[A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)*@[^-][A-Za-z0-9-]+(\\.[A-Za-z0-9-]+)*(\\.[A-Za-z]{2,})$'
if (re.search(regex, email)):
return 1
else:
return 0
def loadSearchTerms(self):
searchterms = set()
f = open("configs/searchterms.txt", "r")
tmpcontent = f.readlines()
f.close()
for tmpline in tmpcontent:
tmpline = tmpline.strip()
searchterms.add(tmpline)
return searchterms
def archivepastes(self, directory):
pastecount = len([name for name in os.listdir(directory) if os.path.isfile(os.path.join(directory, name))])
if pastecount > 48000:
archivefilename = str(time.time()) + ".zip"
os.system("zip -r pastebin_" + archivefilename + " " + directory)
os.system("mv pastebin_" + archivefilename + " archive/.")
os.system("rm " + directory + "/*")
| true
| true
|
7907a67411c9823c139a7c3923c2fa5934f7410d
| 2,763
|
py
|
Python
|
facade_project/utils/ml_utils.py
|
gregunz/MasterSemesterProject
|
085f36c58b1cac141b0318657876b796c4dc5101
|
[
"MIT"
] | 5
|
2019-06-10T08:42:00.000Z
|
2021-09-22T08:24:24.000Z
|
facade_project/utils/ml_utils.py
|
gregunz/MasterSemesterProject
|
085f36c58b1cac141b0318657876b796c4dc5101
|
[
"MIT"
] | 1
|
2019-10-31T12:56:27.000Z
|
2019-10-31T12:56:27.000Z
|
facade_project/utils/ml_utils.py
|
gregunz/MasterSemesterProject
|
085f36c58b1cac141b0318657876b796c4dc5101
|
[
"MIT"
] | 2
|
2019-09-13T10:23:34.000Z
|
2021-05-07T14:15:46.000Z
|
class MetricHandler:
    """Interface for collecting and reporting metrics inside a training loop.

    Subclasses accumulate per-batch values via ``add`` and aggregate them
    at the end of each epoch via ``compute``.
    """

    def __init__(self):
        pass

    def add(self, outputs, targets):
        """Accumulate metrics for one batch of model *outputs* vs. *targets*."""
        raise NotImplementedError()

    def compute(self, phase):
        """Aggregate the metrics accumulated over the epoch ('train' or 'val')."""
        raise NotImplementedError()

    def description(self, phase):
        """Return a human-readable summary of the current metrics for *phase*."""
        raise NotImplementedError()

    def scalar_infos(self, phase):
        """Return (tag, value) tuples for a tensorboard writer's add_scalar."""
        raise NotImplementedError()

    def description_best(self):
        """Return a human-readable summary of the best metrics seen so far."""
        raise NotImplementedError()
class Epocher:
    """Iterates over epoch numbers while rendering progress on one stdout line.

    Yields epoch_offset .. epoch_offset + n_epoch - 1 and redraws the status
    line (via '\\r') whenever stats, save info, or extra text changes.
    """

    def __init__(self, n_epoch, epoch_offset=1):
        self.n_epoch = n_epoch
        self.epoch_offset = epoch_offset
        self.s_more = ''
        self.stats_string = ''
        self.ls_string = ''

    def __iter__(self):
        # Reset all transient state; __next__ pre-increments n.
        self.n = self.epoch_offset - 1
        self.stats_string = ''
        self.ls_string = ''
        self.s_more = ''
        self.__update_stdout__()
        return self

    def __next__(self):
        self.n += 1
        if self.n >= self.n_epoch + self.epoch_offset:
            raise StopIteration
        self.__update_stdout__()
        self.s_more = ''
        return self.n

    def update_stats(self, s):
        """Set the stats text and redraw the status line."""
        self.stats_string = s
        self.__update_stdout__()

    def update_last_saved(self, s):
        """Set the last-saved text and redraw the status line."""
        self.ls_string = s
        self.__update_stdout__()

    def print(self, s, sep=' '):
        """Append *s* (newlines stripped) after the status line."""
        self.s_more = sep + s.replace('\n', '')
        self.__update_stdout__()

    def __update_stdout__(self):
        # Rebuild the whole line and overwrite the previous one with '\r'.
        line = '\r' + 'Epoch [{}/{}]'.format(self.n, self.n_epoch + self.epoch_offset - 1)
        if self.stats_string != '':
            line += ' Stats [{}]'.format(self.stats_string).replace('\n', '')
        if self.ls_string != '':
            line += ' Last Saved [{}]'.format(self.ls_string).replace('\n', '')
        print(line + self.s_more, end='')
| 27.356436
| 99
| 0.565328
|
class MetricHandler:
def __init__(self):
pass
def add(self, outputs, targets):
raise NotImplementedError()
def compute(self, phase):
raise NotImplementedError()
def description(self, phase):
raise NotImplementedError()
def scalar_infos(self, phase):
raise NotImplementedError()
def description_best(self):
raise NotImplementedError()
class Epocher:
def __init__(self, n_epoch, epoch_offset=1):
och
self.epoch_offset = epoch_offset
self.s_more = ''
self.stats_string = ''
self.ls_string = ''
def __iter__(self):
self.n = self.epoch_offset - 1
self.stats_string = ''
self.ls_string = ''
self.s_more = ''
self.__update_stdout__()
return self
def __next__(self):
self.n += 1
if self.n >= self.n_epoch + self.epoch_offset:
raise StopIteration
self.__update_stdout__()
self.s_more = ''
return self.n
def update_stats(self, s):
self.stats_string = s
self.__update_stdout__()
def update_last_saved(self, s):
self.ls_string = s
self.__update_stdout__()
def print(self, s, sep=' '):
self.s_more = sep + s.replace('\n', '')
self.__update_stdout__()
def __update_stdout__(self):
s0 = 'Epoch [{}/{}]'.format(self.n, self.n_epoch + self.epoch_offset - 1)
s1, s2 = '', ''
if self.stats_string != '':
s1 = ' Stats [{}]'.format(self.stats_string).replace('\n', '')
if self.ls_string != '':
s2 = ' Last Saved [{}]'.format(self.ls_string).replace('\n', '')
print('\r{}'.format(s0), s1, s2, self.s_more, end='', sep='')
| true
| true
|
7907a6dab3d5350f45255dc59d81a6c782e2052e
| 917
|
py
|
Python
|
tableauserverclient/server/endpoint/exceptions.py
|
reevery/server-client-python
|
c4ed22ebf62e74707961a77381848ad325d55850
|
[
"MIT"
] | null | null | null |
tableauserverclient/server/endpoint/exceptions.py
|
reevery/server-client-python
|
c4ed22ebf62e74707961a77381848ad325d55850
|
[
"MIT"
] | null | null | null |
tableauserverclient/server/endpoint/exceptions.py
|
reevery/server-client-python
|
c4ed22ebf62e74707961a77381848ad325d55850
|
[
"MIT"
] | 1
|
2020-04-17T15:41:39.000Z
|
2020-04-17T15:41:39.000Z
|
import xml.etree.ElementTree as ET
from .. import NAMESPACE
class ServerResponseError(Exception):
    """Error payload returned by the server's REST API."""

    def __init__(self, code, summary, detail):
        self.code = code
        self.summary = summary
        self.detail = detail
        super().__init__(str(self))

    def __str__(self):
        return f"\n\n\t{self.code}: {self.summary}\n\t\t{self.detail}"

    @classmethod
    def from_response(cls, resp):
        """Build an instance from a raw XML error response body."""
        # Check elements exist before .text
        parsed = ET.fromstring(resp)
        code = parsed.find('t:error', namespaces=NAMESPACE).get('code', '')
        summary = parsed.find('.//t:summary', namespaces=NAMESPACE).text
        detail = parsed.find('.//t:detail', namespaces=NAMESPACE).text
        return cls(code, summary, detail)
class MissingRequiredFieldError(Exception):
    """Raised when a required field is missing."""
| 33.962963
| 99
| 0.642312
|
import xml.etree.ElementTree as ET
from .. import NAMESPACE
class ServerResponseError(Exception):
def __init__(self, code, summary, detail):
self.code = code
self.summary = summary
self.detail = detail
super(ServerResponseError, self).__init__(str(self))
def __str__(self):
return "\n\n\t{0}: {1}\n\t\t{2}".format(self.code, self.summary, self.detail)
@classmethod
def from_response(cls, resp):
parsed_response = ET.fromstring(resp)
error_response = cls(parsed_response.find('t:error', namespaces=NAMESPACE).get('code', ''),
parsed_response.find('.//t:summary', namespaces=NAMESPACE).text,
parsed_response.find('.//t:detail', namespaces=NAMESPACE).text)
return error_response
class MissingRequiredFieldError(Exception):
pass
| true
| true
|
7907a715924fca616c9859e5e80e85ffdcbf6627
| 2,724
|
py
|
Python
|
tests/gis_tests/geoapp/test_sitemaps.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 16
|
2019-08-10T12:24:06.000Z
|
2020-05-21T09:11:14.000Z
|
tests/gis_tests/geoapp/test_sitemaps.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 12
|
2019-08-10T11:55:29.000Z
|
2020-05-21T04:46:30.000Z
|
tests/gis_tests/geoapp/test_sitemaps.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 3
|
2019-08-20T13:29:34.000Z
|
2020-01-30T22:05:10.000Z
|
import zipfile
from io import BytesIO
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .models import City, Country
@modify_settings(
INSTALLED_APPS={"append": ["django.contrib.sites", "django.contrib.sitemaps"]}
)
@override_settings(ROOT_URLCONF="gis_tests.geoapp.urls")
class GeoSitemapTest(TestCase):
@classmethod
def setUpTestData(cls):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = {n.nodeName for n in elem.childNodes}
expected = set(expected)
self.assertEqual(actual, expected)
def test_geositemap_kml(self):
"Tests KML/KMZ geographic sitemaps."
for kml_type in ("kml", "kmz"):
doc = minidom.parseString(
self.client.get("/sitemaps/%s.xml" % kml_type).content
)
# Ensuring the right sitemaps namespace is present.
urlset = doc.firstChild
self.assertEqual(
urlset.getAttribute("xmlns"),
"http://www.sitemaps.org/schemas/sitemap/0.9",
)
urls = urlset.getElementsByTagName("url")
self.assertEqual(2, len(urls)) # Should only be 2 sitemaps.
for url in urls:
self.assertChildNodes(url, ["loc"])
# Getting the relative URL since we don't have a real site.
kml_url = (
url.getElementsByTagName("loc")[0]
.childNodes[0]
.data.split("http://example.com")[1]
)
if kml_type == "kml":
kml_doc = minidom.parseString(self.client.get(kml_url).content)
elif kml_type == "kmz":
# Have to decompress KMZ before parsing.
buf = BytesIO(self.client.get(kml_url).content)
with zipfile.ZipFile(buf) as zf:
self.assertEqual(1, len(zf.filelist))
self.assertEqual("doc.kml", zf.filelist[0].filename)
kml_doc = minidom.parseString(zf.read("doc.kml"))
# Ensuring the correct number of placemarks are in the KML doc.
if "city" in kml_url:
model = City
elif "country" in kml_url:
model = Country
self.assertEqual(
model.objects.count(),
len(kml_doc.getElementsByTagName("Placemark")),
)
| 37.833333
| 83
| 0.565712
|
import zipfile
from io import BytesIO
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .models import City, Country
@modify_settings(
INSTALLED_APPS={"append": ["django.contrib.sites", "django.contrib.sitemaps"]}
)
@override_settings(ROOT_URLCONF="gis_tests.geoapp.urls")
class GeoSitemapTest(TestCase):
@classmethod
def setUpTestData(cls):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
actual = {n.nodeName for n in elem.childNodes}
expected = set(expected)
self.assertEqual(actual, expected)
def test_geositemap_kml(self):
for kml_type in ("kml", "kmz"):
doc = minidom.parseString(
self.client.get("/sitemaps/%s.xml" % kml_type).content
)
urlset = doc.firstChild
self.assertEqual(
urlset.getAttribute("xmlns"),
"http://www.sitemaps.org/schemas/sitemap/0.9",
)
urls = urlset.getElementsByTagName("url")
self.assertEqual(2, len(urls))
for url in urls:
self.assertChildNodes(url, ["loc"])
kml_url = (
url.getElementsByTagName("loc")[0]
.childNodes[0]
.data.split("http://example.com")[1]
)
if kml_type == "kml":
kml_doc = minidom.parseString(self.client.get(kml_url).content)
elif kml_type == "kmz":
# Have to decompress KMZ before parsing.
buf = BytesIO(self.client.get(kml_url).content)
with zipfile.ZipFile(buf) as zf:
self.assertEqual(1, len(zf.filelist))
self.assertEqual("doc.kml", zf.filelist[0].filename)
kml_doc = minidom.parseString(zf.read("doc.kml"))
# Ensuring the correct number of placemarks are in the KML doc.
if "city" in kml_url:
model = City
elif "country" in kml_url:
model = Country
self.assertEqual(
model.objects.count(),
len(kml_doc.getElementsByTagName("Placemark")),
)
| true
| true
|
7907a85cccd5727c5a24c5f3425a7ad6bf030260
| 141,061
|
py
|
Python
|
mapclientplugins/parametricfittingstep/resources_rc.py
|
mahyar-osn/mapclientplugins.parametricfittingstep
|
3b78be6a3cbd99f970f0b28c65350304e446c19e
|
[
"Apache-2.0"
] | null | null | null |
mapclientplugins/parametricfittingstep/resources_rc.py
|
mahyar-osn/mapclientplugins.parametricfittingstep
|
3b78be6a3cbd99f970f0b28c65350304e446c19e
|
[
"Apache-2.0"
] | 2
|
2018-09-28T21:16:39.000Z
|
2018-10-11T00:11:58.000Z
|
mapclientplugins/parametricfittingstep/resources_rc.py
|
mahyar-osn/mapclientplugins.parametricfittingstep
|
3b78be6a3cbd99f970f0b28c65350304e446c19e
|
[
"Apache-2.0"
] | 2
|
2018-09-21T04:05:54.000Z
|
2018-09-28T21:50:32.000Z
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Oct 15 12:53:43 2018
# by: The Resource Compiler for PySide (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = b"\x00\x006x\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x006\x05IDATx\xda\xec\xddOl-Y~\x17\xf0\xaaIG\xf9G\xe27\xd2\xf0O#\xe2\xfbX0j\x08\xb2G\x10\xd1 F\xbe\x1d!\x116\xb1Gb\x93\xd5\xbb\xbd`\x91\xc5\xf0\xdc+f\xf7\xca\x12\x8b\xd9\xb5\x1f-$$\x16\xefzE\xc4\x22m/C\x82\xfaZ\x83\xa0\xc3\x1f\x8d\x1dF\x0aC \xcfF\x84\x89\x84F\xf3\x9c\x88\x10 \x89\xa9\xd3>\x9e\xf6\xbc\xb6OU\xdd\xbfUu?\x1f\xa9t\xdf\xf3-\xdf?\xa7\xae\xef\xfd\xdes\xea\xfcN~}}\x9d\x01i\x1b_yk\xaf\xbc\x18\x5c\xbd\xff\xd1\xa1\xd6\x00\xa0k>\xa3\x09\xa0\x96\xfd\xb8\x01\x80\xc0\x07}\xb3\xf1\x95\xb7\x06\xe5\xc5N\xb9m\xc6\x9e>\x00\x10\xf8\xa0g\x8a;\xff\xd6\xcb\x07@\xe7\xe4\xce\xe1\x83\x87m|\xe5\xadG\xe5\xc5E\xf8\xe7\x9d\x1f?\xbez\xff\xa3\x0b\xad\x03@W\xe8\xe1\x83\xb4\xbd\xd7\xc2^Ph\x16\x00\x04>\xe8\x8f\xfb\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x0cu\xc3\xf2b\xf3\xbe\xab\xb2\x9b\x9e?\x00\x10\xf8\xa0\xe3R\x134\x0a\xcd\x03@W\x98\xb4\x01\xf7\x88\xa5X^V\xec\xf6\xf6\xd5\xfb\x1fM\xb4\x16\x00m\xa7\x87\x0f\xee7\xaa\xb1\x8f\x12-\x00\x08|\xd0au\xc2\xdcn\xec\x09\x04\x00\x81\x0f\xba\xa4\x0cq\xa3\xec\xd3\xa5X\x1e2\xd2b\x00\x08|\xd0=\xfb\x0b\xda\x17\x00\x04>X\xb5X\x8ae\xab\xc9\xaf\xc4\x1eA\x00\x10\xf8\xa0#\xa6\x09oz\xf9\x00h5eY 
\x8a\xabg|w\xca_W\xa2\x05\x80\xd6\xd2\xc3\x07\x9f\x98\xa5\xa7n\xa4\xf9\x00\x10\xf8\xa0\xfdf\x09mO\xac\xaf\x0b\x80\xc0\x07-\x16'^l\xcex3\xce\xe5\x03@\xe0\x83\x16\x1b\xb5\xe46\x00@\xe0\x83y\x8b\xabe\xec\xcc\xe1\xa66\x95h\x01@\xe0\x83v*\xe6x[\x02\x1f\x00\xad\xa3,\x0bk-N\xb4\xb8\xc8\xea/\xa5V\xc7\xe3\xab\xf7?\xba\xd0\xba\x00\xb4\x85\x1e>\xd6\xddh\xcea/(4+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x86\xb2\xbdl\xf6R,\xf7\xdet\xe6\x5c>\x00\x04>h\x85E\x8625\xf9\x00\x10\xf8`\x95b)\x96\xdd\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96\x87(\xd1\x02\xc0\xca\xe9\xe1c\x1d\xed-)\xec\x05\xce\xe5\x03@\xe0\x83\x15Xf\x08\x1b)\xd1\x02\x80\xc0\x07KT\x86\xafay\xb1\xb5\xcc\xbb\xccnz\x14\x01@\xe0\x83%\x19\xad\xe0>\x0d\xeb\x02\xb0R&m\xb06b)\x96\x97+\xba\xfb\xb7\xaf\xde\xffh\xe2(\x00\xb0\x0az\xf8X'\xa35\xbdo\x00\x04>\x10\xf8\x96\xe0I\xeca\x04\x00\x81\x0f\x16\xa1\x0c[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6vy\xb1\xd3\x82\x87\xb2\x19{\x1a\x01@\xe0\x839kSY\x14\x81\x0f\x80\xa5S\x96\x85^\x8b\xab\x5c|\xb7\xe6\xeeW\xb7\xbf\xd6\xf0n\xc2\xef\xbd\xca\xea\x9f#\xf8\xc5\xab\xf7?:st\x00X\x16=|\xf4\xdd\xa8\xc6>\x97\xe5\xf6N\xb9\x0d\xcam\x9a vV\x06\xb8A\xbc\x8d\xd3\x1a\xfb+\xc4\x0c\x80\xc0\x07s\x94\x0aW'\xd9MA\xe4A\xb9\x8d\xcb\xed\xd5,w\x14ocX\xfe\xf3\x8b\xe5v\x94\xd8\xf5\x89\xf5u\x01\x10\xf8`\x0e\xcaP\x15\xd6\xb0}}\x985\x0c\xbf>/\xb7\xc7e8\xdb[\xc4\xea\x17a\xb8\xb6\xdcF\xe5??[n\x07\xd9M\x0f\xe2\xebF\x8e\x10\x00\xcb\xf2\x86&\xa0\xc7\xee\xf6\xee\x9d\x97\xdba\xe8\x85[\xd6\x9d\xc7\x1e\xc3\x22lqvn\xd8v\xee<\xb6C\x87\x08\x80e\xd0\xc3G/\xc5U-B\xb8\x0aC\xaba\xd8v{\x99a\xef\x9e\xf0w;\xdc\xfb8>\xa6G\xb1\x07\x12\x00\x16N\x0f\x1f}\x16\x86m/\xda\xf4\x80\xe2\xe3\x19\xc5s\xf8\x9c\xc7\x07\x80\xc0\x073\x06\xab6?\xbe0\xdc\xfb\xca\x91\x02`\x19\x0c\xe9\x02\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x02\x1f\x00,I\x9e\xe7\x8f\xcam\xa8%@\xe0\x03\xa0\x9fao\xbb\xbc\xb8\xd0\x12 
\xf0\x01\xd0\xcf\xb0\xb7_^|\xa3\xdc6\xb4\x06,\x96\xb5t\x01Xv\xd0{T^\x8c\xcbm\xf7\xf6g\xd7\xd7\xd7\x13-\x03\x02\x1f\x00\xfd\x08{a\x08\xf7\xb8\xdc6\xb5\x06,\x8f!]\x00\x96\x15\xf6n\x87p_\x0f{\xa7Z\x07\x16K\x0f\x1f\x00\x8b\x0ez\x9f\x1a\xc2\x05\x04>\x00\xfa\x13\xf6\xea\x0c\xe1N\xb4\x14,\x96!]\x00\x16\x15\xf6\x1e\x1a\xc2\x05\x96L\x0f\x1f\x00\xf3\x0ezM\x87p'Z\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b\xc0\xbc\xc2\xde(\xbb\xe9\xadk\x14\xf6\xd4\xe0\x83\xc5\xd3\xc3\x07\xc0\xacA/\x0c\xe1\x1e\x96\xdb\x13\xad\x01\x02\x1f\x00\xfd\x0b{a\x08w\x5cn[S\xde\x84\x1a|\xb0\x04\x86t\x01\x986\xec\x8d\xb2\x9b!\xdc-\xad\x01\xed\xa6\x87\x0f\x80\xa6Ao\x9eC\xb8\x13-\x0a\x02\x1f\x00\xed\x0a{\xb3\x0e\xe1\x02+`H\x17\x80\xbaao\x94\xcd\x7f\x08w\xa2ea\xf1\xf4\xf0\x01P\x15\xf4\xcc\xc2\x05\x81\x0f\x80\x1e\x87\xbdAvSHy!C\xb8j\xf0\xc1r\x18\xd2\x05\xe0\xa1\xb0\xb7W^\x9ce\xce\xd7\x03\x81\x0f\x80^\x86\xbd0\x84\xfbA\xb9m,\xf0n\xd4\xe0\x83%1\xa4\x0b\xc0\xdd\xa07\xc8\x168\x84\x0b\xac\x86\x1e>\x00n\xc3\xde\xb2\x87p'Z\x1d\x04>\x00\x96\x17\xf6\x961\x84\x0b\xac\x88!]\x80\xf5\x0ez\x83luC\xb8\x13G\x00\x96C\x0f\x1f\xc0\xfa\x86=\xb3pA\xe0\x03\xa0\xc7ao\xe5C\xb8j\xf0\xc1\xf2\x18\xd2\x05X\xaf\xa0\x17V\xcd\x08AK\xaf\x1e\xac\x11=|\x00\xeb\x13\xf6\x86\xe5\xc5EK\xc2\x9e\x1a| \xf0\x010\xe7\xb0W\x94\x17\x1fff\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\x9d\x96=\xb4\x89\xa3\x03\x02\x1f\x00\xf31\x88\xe1*\xcc\xc6\xdd\x8e?\xdb\xd1, \xf0\x01\xd0\x13\xd7\xd7\xd7g1\xec}J<\xa7/x\xfdr\x19\x81p\xe2\xe8\x80\xc0\x07\xc0\xe2\xc3\xe0$\x15\xbe^\x0b\x84\xc3L\xcf \x08|\x00\xf47\x10\x96\xe1\xefbA\xb7\x0d,\x81Y\xba\x00$\xc5\x19\xbe\x9bZ\x02\x04>\x00\xfa\x19\xf6\xc2,\xdf\xfd9\xdf\xac\x1a| \xf0\x01\xd0\x22a\x09\xb6T\xed\xbe\x83r\xbb\xd2L 
\xf0\x01\xd0Aq\xd2\xc6\x93\xc4.\xe7\xd7\xd7\xd7Ey9jx\xd3\x13\xad\x0b\x02\x1f\x00\xedPT\x5c\xff\xf1Po\x19\xfaBa\xe7\xe7\x9a\x0b\x04>\x00:$\xcf\xf3Q\x96.\xc3rrw\xa6m\xf9\xef\x10\xfe\xcek\xde\xfcD\x0b\xc3r)\xcb\x02p\x13p\x06\xd9\xcd\xaa\x14\xc3r\x0b\x13\x15\x92\xabR\x94\x01'\xefq[\x84\xe7_T\xecv\xdfD\x8e\xbd\xec\xa6\xc8\xb3\xf5zA\xe0\x03hE\xa8\xd9\xce>)(<lcH\x89\xe7\xd0}\x18\xff{\x1a\xc3\xd4E\xb9M\xe2\x0a\x1a\x8b\x12\xc2\x5c\xaa\x0c\xcbAy\xff\x17\xf7\x84\xe0\x8b\xd83\xf8A\xea\xc6\xd5\xe0\x03\x81\x0f`\xd1!/\x04\x92\xbd\xac\x1bu\xe5\x06w\xfe\xbd\x93\xdd\xe9m,\x9f\xcbm\x08<\x9eg\x00\x8c=\x9d\xa92,\x97\xd9\xcd\xcc\xdd\x87\xc2\xdcqy\x1b\xe1|\xbe\xa7^q \xf0\x01,+\xe4=\x8a\x01/\x84\x98\xad\x8e=\xfcA\xc5\xf5\xdf\x0b\x81\xe5\xf3\x0cAl\x12\x02`\x9cD1\xad\xaa2,Ey\xfb\xafR7\x10\xce\xe7\x8b\xe1\xfa\xbe\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11\x17\xe5\xf6\xa2\x83a/\x186\xd87\xf4X\x86\x12*\x1f\x94\xcf\xfbU\xb9\x8d\xcbm\xafa\x9b\x85\xfb\xdbM\xecrZ\x86\xb9q\xcd\x9b\x0b\xf7}_}\xbeW^\x9d \xf0\x01\xcc3\xe8=\xcb\xba=\x81`0\xe5\xefm\xdc\x09\x7f\xe1\xbc\xba\x22\x0e\xd5V9\xac\xb8\xbe\xa8\xfb\x00b/\xe0}\x81\xf3\xcc\xab\x14\x96\xcf\x90.\xd0G\xfb3\x04\xbd\xab\xec\x93\xc9\x11a{\xb5\xc2\x90\xb29\xa7\xdb\x08\xc1\xf7Y\x19\xfaNB\xa8\xbbo\xd2D\x9cl\x91\xea\x05=j:\xd9\x22\xec_\xde\xeeA\xbc\xff[z\xf8@\xe0\x03\x98M\xe8Y*CF8\x87\xedI\xcd_\x09\xe7\x94M\xe2vVu~\xda\x92\xbd\x9d}R\x22f\x18/g\xe9\xb1\x0c\xc3\xb5\xbb\xf1|\xbf\xe2vx6\x9e\xe7xX\x11\x82\xf7\xa7<\x1eE\x1c*\xbe=\x9fO\x0f\x1f\x08|\x00sq\x98\x08|!\xec\xdc\xcel=n\xf3\x93\xb8\xd3\xa3\xf6\xbd\xc7\x19\x87f\x87w\xb6iz\x01\xc3\xef\xbc\x88C\xdfE\x8d y8c\x10\x0eC\xbb\x17\xf1>\xf4\xf0\x81\xc0\x070\x97\xa0tV\x86\x99\xb0\xea\xc3\xdd!\xca\xa3r\x1bw\xbd\x06\x5c\xac\x7f7\x8e\xdb\xddz\x82\xa3\xac\xf9\xc4\x94\x10\xfc\xfea\xb9}>\xb1\xcfe\x5c/w\x96\xc7\xfc*N 
\xf9p\xc1\xf5\x03\x81\x07\x98\xb4\x01\xf4U\xe8\xe5\x0bC\x91\xe1\x1c\xb2\xcf\x96Ac\xd4\xc7\x82\xbf!@\x95[\xe8\x81\x0b\xc1\xefq\xb9\xbd\x9b\xd5_\xe2,\xa8\xeaq\xdb\x9f\xd3\xe3\x9c\xc4\xc7\x06\x08|\x00s\x0bB\xa17\xefQ\xe8\x9dj\xd9yy\x8b|\xce\x17\xaf\x85\xbfP\x00\xf92\xf1+!\x18\xfe\xa5\xc4\xf5\xa7\xf3\x1c\xf6\x0e\x8f\xcd+\x13\x04>\x00\xe6\x1b\xfe\xf6\xcbmP\xfe\xf7\xcb\xe5vr\xcfn\x9f\xab\xb8\x99\x91\x96\x04\x81\x0f\x80n\x84\xbf\xb0\xfaF8\x87\xee\xb6\xd7/\x0cu\x87\xd9\xc9\xa9s\xf7\x9e\xdf\xb7^. \xf0\x01\xd0\xee\xe0\xf7q\xaf_vS\xd0y;\xb1k\x08\x84\x85\x16\x03\x81\x0f\x80\xee\x9ay\xbd\x5c@\xe0\x03\xa0\xa5b\x19\x97TQ\xeas\x93+@\xe0\x03\xa0\xdb\xaa\xc2\xdc\xbe&\x02\x81\x0f\x80\x8e\x8a\xeb\xe5\xee$v9\xe9c\xadB@\xe0\x03X\x97\xb0\x17\xd6\xcb-*v\xd3\xbb\x07\x02\x1f\x00\x1d\x16\xc2\x5cj\xdd\xdd\x03eX@\xe0\x03\xa0\xa3\xf2<\x1fd\xe9\xde\xbb\xb0\x1a\x87\x89\x1a \xf0\x01,$\x88\x1c\x96\xdbY\x1cndq\x94a\x01\x81\x0f`%aoX^<-\xb7\xadr\xbb\x88\xe5BXL;\xef&v\x09\xeb\xe5\x8e\x17p\xbfca\x1e\x04>\x80\xbbC\x88\xa1\xf7\xe9\x1bq\x16)\x8bk\xe7\xfb\x14\x8b\x08{\xd9M\xad\xbf\x10\xe6'\xc2<\x08|\xc0\x1a*\x03\xc0~\x0c\x03\xaf{\x11\xc3\x02\xf3i\xe7\xd1\x03\xed|\xebh\xdeeX\xee\x84\xbd[B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 C\xad4\xb7vN\xf5\xee\xcd}\xbd\xdc{\xc2\xde\xad\x0d\xa1\x0f\x04>`\xbd\xecg\xe9\x09\x04#M\xb4\x94v>\x5c@\x19\x96T\xa0\x13\xfa@\xe0\x03\xd6A\xecuJ\x95\x079\xb2\xd2\xc3\x5c\xdayP^<K\xecrY\xb6s\xb1\x80\xbb\x1e\x96\xdb\xb9\xd0\x07\x02\x1f\xb0\xdeR\xbdNW\x99\x95\x1e\xe6e\x5c\xe38\xcc],\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xe2\xbaC\xb5\xe0f\x17\xcb\xb0\xa4\xd6\xcb\x0deX\x8e\x17u\xff\x0dC\x9f\x92- 
\xf0\x01=\x0b\x22!\xec=\xb4\xb4W\xe8\xdd\xb3\xd2\xc3|\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6\x12\xd7\xe9\xdd\x9bO\xa8\xaeZ/\xf7y\xd9\xceg\xcbx,\xf1x\x8eb\x98\x7fH(\xd9r\xec\xc8\x81\xc0\x07\xf4#\x88\x0c\xb2\x87W{\xd0\xbb7\x9f6\xae*w3\xf72,5B_\x08\x97\xc3\x8a\xd0\xb7\xa3\xf6\x22\x08|@?\xa4z\xf7\x8e\xf5\xee\xcdE+\xd7\xcb\x8d\xa1\xafj\x18\xf9I\xec\x9d\x04\x04>\xa0\xa7\x81O\xef\xde\x8c\xe2\x8c\xd7'\x89]B\x19\x96\x95\xb5s\x5c\xab\xf7\xa0b\xb7\xf7\xca\xe7\xb1\xe7h\x82\xc0\x07t3\x8c\x84\xa1\xc6\x87f\x8d\x9e/\xeb\x9c\xb2\x9e\xab\x0as\xa3U?\xc0X\xf7\xef\xa4b\xb7q\x1c\xfe\x07\x04>\xa0c\x86\xa9\x0fx\xcd3s\xa0\xde\xcb\xd2eXNZT\xcc:\x04\xcf\xaa\x99\xbb\xc7f\xee\x82\xc0\x07tO\xf2\xfc=\xcd3S\xd8\xabZ/7h\xcd\xb9q\x0df\xee\x1a\xe6\x07\x81\x0f\xe8\x98\xe1\x03??_\xc0Z\xae\xeb\xa6\xaa\x0c\xcbA\xdb\xda8\x0e\xe1\x17\x15\xbb=\x89u\x1b\x01\x81\x0fh\xbb\xd8\x03\xf5P \x99h\xa1\x99\xdav\x90\xa5{\xefZ[\xee&N \xa9:\x9f\xef\xd0\xf2k \xf0\x01\xdd\x90\xfa\xc0\x16\xf8fSd\xe92,\xfb-/w3\xca\xd2C\xbb\xe1\xb9\x8d\x1df\x10\xf8\x80\xf6\x1b\x0a|\xf3\x17\xd7\xcbM\x95a9\x8d\xa5PZ\xeb\xce\xf9|)\xce\xf1\x04\x81\x0f\xe8\x80\x87z\xf8\xce\x15[\x9eI\xd5Pm\xd1\x85'Q\xbe\x06B\xa0\xbboh7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcb=T^\xe3B\xd3L'Nf\xd8J\xecr\xd4\xa22,u\x84\xf3\x10\xef\x0e\xed\x86\x89&\xdb\xea3\x82\xc0\x07t\xc7C\xf5\xe1|\x98O\x17\xf6\xaa\xca\xb0,}\xbd\xdcY\xc5Y\xc4\xe19\xe9\xd5\x03\x81\x0f\xe8h8\xc9\x04\xbe\xb9\x0a\xbda\xa9\x89\x1a\x87],u\x13B\x9e^=X\xac74\x01\xb0@o\x97\xdb n\xc3x\x19\xca\xb48\x7f\xafy\x80\x0em\xf7,\xb1\xcb\xa5\xde1@\xe0\x03\x96*N\xca\x98h\x89\xb9\x19W\x5c\xbf\xaf\x89\x80\x87\x18\xd2\x05h\xb9X\x86%\xb5^\xeei\x9c\xf1\x0a \xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0Uy\x9eW\xad\x97{d\xb2\x03 \xf0\x01t7\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01tX\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa3\xf2<\x0f\xcb\xd2=M\xec\x12\xca\xb0\x1cj)@\xe0\x03\xe8\xae\xaa07\xd2D\x80\xc0\x07\xd0Qy\x9e\xefe\xd5eX&Z\x0a\x10\xf8\x00\xbaK\xef\x1e 
\xf0\x01\xf4U\x9e\xe7E\x96.\xc3r\xd0\xc5\xf5r\x01\x81\x0f\x80\xec{\xeb\xe5\xa6\xca\xac\x842,&j\x00\x02\x1f@\x87\x15Y\xba\x0c\xcb\xbe2,\x80\xc0\x07\xd0Qq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x80\xc0\x07\xd0]E\xc5\xf5V\xd4\x00\x04>\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x1c)\xc3\x02\x08|\x00\xdd\x0d{a\xbd\xdc\xd4D\x8c0Q\xa3\xd0R\x80\xc0\x07\xd0]a\xa865Q\xe3P\x19\x16@\xe0\x03\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x96\xaa6\xdc\x8e\xbd\xa4@\xc2\x1b\x9a\x00`e\xaa\xc2\x9c2,\xdf\x1f\xee\x86\xe5\xc5v\xdcBX\xbe=\xef\xf1\xedr\x9bh!\x10\xf8\x00\xda\x18^v\x13\xbb\x84\xf5r\x8f\xb5\xd4\xf7\xf9\xf0\x81\x9fo\x0b|\x90fH\x17`5\xc6\x15\xd7+\xc3rO\x08~\xe0\xe7\x86tA\xe0\x03h\x97<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ci\xa9O\xb9x\xe0\xe7CM\x03\x02\x1f@\x9b\xc2^\xe8\x8d*\x12\xbb\x842,z\xf7\x9a\x05>\xa0\x82s\xf8\x80e\x84\x9cavs\x92}\xd8n\xff\x1dz\xb8\xbe\xbc\x86\xe7\xa9\x85\xb0\x97*\xc3R\x98\xa8\xd18\xf0\xedh\x1a\x10\xf8\x80\xd5;~ \xe4\x0c\xe3u\xeb\x12|\xc3\xe4\x82\xa7\x89].\xcb\xb0\xa7\x0cK\xf3\xc0\x07T0\xa4\x0b,+\xf0\xddgo\xcd\xda\xa1*\xcc\x8d\xbcT\x00\x81\x0f\xe8[\xe0\xdb\x8c\xbd^\xbdW>\xcf\x10nSC\x8f\xa7\xd6\xcb\xadt\x91h\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xd5\x03W\x8f\xd6\xa4\x19\xf4\xee\xcd\xfe:\xba\xd0\x0a 
\xf0\x01\xed6^\xd7\xa0\x93\xe7y\x91\xa5\xcb\xb0<\x17f\x00\x81\x0f\xe8\x83\x87z\xb86\xca@\xd4\xdb\xd0\x17\xcb\xb0\xa4\xca\xac\x84\x9e\xcf\xc2\xcb\x03\x10\xf8\x80\xce\x8b=X\x0f\xad\x94\xd0\xe7\xc0\x13\x82n\xaa\x0c\x8b\xf5r\x01\x81\x0f\xe8]\xf8\xb9\xcff\x1f{\xf9\xe2D\x82'\x89]\xce\xcb\xb07\xf6\xb2h\xf4\xc5!\x7f`\x9bh\x1d\x10\xf8\x80v|X\x87\xc9\x1b\x97\x0f\x5c]\xf4\xf0)W='+j\x00\x02\x1f\xd0K\x0f\x85\xa0^\xf5\xf2\xc5\xe7\x92*\xc3r\xa2W\x0a\x10\xf8\x80^\x8aC\x98\x0f\xf6\xf2\xc5I\x0e]\x0f{\xd6\xcb\x05\x04>`\xed=\x14\x866{\x12\x84\xf6\xb3t\x19\x96CeX\x00\x81\x0f\xe8\xb5\x8a^\xbegy\x9e\x0f\xba\xfa\xdc\xe2c\x7f\x96\xd8%<o\xeb\xe5\x02\x02\x1f\xb0\x16R=y\xe3\x0e?\xaf\xaa0W(\xc3\x02\x08|\xc0Z\x883v\x1f\xaa\xcb\xb7\x93\xe7y\xe7\x86vc\x19\x96\xdd\xc4.\xa7\xca\xb0\x00\x02\x1f\xb0nF\x89\xeb\x8a\x0e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0z\x89\x13\x17\x0e\x1e\xb8:\xacN1\xee\xcas\x89=\x92[\x89]\x8e\xca\xe7{\xe6\xa8\x03\x02\x1f\xb0\x8e\xa1\xaf(/\xce\x1f\xb8\xba\x13C\xbb\xca\xb0\x00\x02\x1f@\xb5\xbd\x18\x8a\xee\xf3^\x19\xa8\xb6[\xfe\xf8C\xd8K\xad\x97{h\xa2\x06 \xf0\x01k-\x0e\xed\x8e\x12\xbb\x8c\xdb\xfa\xd8\xe3y\x86O\x13\xbb\x5c\xc6^L\x00\x81\x0fX\xfb\xd0\x17f\xed>\x7f\xe0\xea6\x9f\xfbV\x15FG\x8e. 
\xf0\x01|\x12\xfa\xc2ynGw~\x14\x86y\xdf)\x7f\xde\xca\xd0\x94\xe7y\x18\x8aN\xad\x97{j\xbd\x5c\xa0\x0d\xde\xd0\x04@\xcbB\xdf(N\x82\x18\x94\xdb\xa8\xe53[\xab\xca\xb0\x8c\x1cQ@\xe0\x03\xb8?\xf4\xed\xb5\xfd1\x96\xa1\xb4\xc8\xd2\xeb\xe5>\xb7^.\xd0\x16\x86t\x01\x9a\x87\xbd\xd0\x03\x99*\xb3\x12\x86\xa2\x0b-\x05\x08|\x00\xdd\x15\x86rSeX\xf6\x95a\x01\x04>\x80\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c@\xe0\x03\xe8\xb6\xa2\xe2z+j\x00\x02\x1f@W\xe5y>\xca\xd2eXN\x94a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\xa5\xca\xb0\x1c(\xc3\x02\x08|\x00\x1d\x15\xd7\xcbM\xf5\xde]f\xd5E\x98\x01\x04>\x80\x16\xab*\xc3R(\xc3\x02\x08|\x00\x1d\x15\xcb\xb0\xec&v9U\x86\x05\x10\xf8\x00\xba\xadj\xa8\xb6\xd0D\x80\xc0\x07\xd0Q\xb1\x0c\xcbVb\x97#eX\x00\x81\x0f\xa0}!n\x5cng\xe5\xb6]\xb1_(\xc3\x92\xea\xdd\x0b\xeb\xe5*\xc3\x02\x08|\x00-\x0b{\xa3\xecfY\xb4\xd0k7)\xff\x9f\x0alE\x96\x9e\xa8qh\xa2\x06 \xf0\x01\xb4+\xec\x85\x1e\xbd\x17w~\x14\xc2\xdc{\xe5\xcf'\xb1\xec\xca\xdd}\xc3\xff\x9f&n\xee\xb2\x0c{\x85V\x05\xba\xe2\x0dM\x00\xacA\xd8\x0b\xc3\xb3\x93\x07\xae\x0eK\xa5\xbd,\xf79\xc8>\xe9\xb5\x1bW\xdc\xe4\xfe\x94\x8fc/\xde\xf6E\xb9\xbd\xde;xV\xe7g\xce\x19\x04\x04>\x80\xfb\x85\x90\xb4Q\xb1\xcf\xb3\x10\xe4\xcaP\xf6O\xb2\xf4z\xb9\xa1\x0c\xcb\xf14\x0f\x22\xfc^,\xf3r\xdf\xe3\xd9\xa9\x19\x1a\xef}L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb0:\x08\x08|\x00}\x11\x02\xda\xa0F\xe8\x0b\xd7\xff\x83\x8a}F\xb3<\x902`\xddN\x18\x09\x8fikN\xcf\xef\xbe\xb0\xb8;e\x80\x0c\xab\x86\xbc\x1e\x02k\xf5>\x86\x9f9\xaf\x11\x04>\x80\x95\x08\xe7\xdb\x85\xd9\xb9\xd9\xcd\xac\xdb\xdd\x19n\xea\xf9<z\xc4\xc2m\xc4\x9e\xbe\xe3\xacf\xcf\xde\x12mf\x9f^3\xb8\xf6c\xbc'@\x9e\xde\xb3\xdb$\x86\xc3c\xafN\x10\xf8\x00\xe6\x19\xfaBP\xdb\x8bAk|O\xa8\xa9\xf2\xff\xca\xed?\xcf\xf1\xf1\x84\x9e\xb0a\x0c\xa2Oz\xdc\xf4\xf7\x85\xc5\xaa\x927\xc0\x9c\x99\xa5\x0b\xac[\xf0\x9b\x94\xdb\xa0\xfcg\x98\xa4q\xd5\xe0W\x7f\xb0\xdc\xfeq\xac\xe1\xf7h\x8e\x8fgT^\xbc\xbbF\x87 
\xf4\xf8\x0d\x0d\xfd\x82\xc0\x07\xb0\x8c\xe0Wd7\xe7\xf5\x1d5\xfc\xd5\xdb\x1a~\xdbs|,\xa1\xb7\xeb\x9d\x86\x01\xb4\x8b\xc2\xca$\xc2\x1e\x08|\x00K\x0d}\xafb\x0f\xdb\x7f\x98\x22\xf4}\xa3\x0c}\xc5\xbcz\xfb\xca\xc71./\x86=\x0e}\xcfc[\x03\x02\x1f\xc0r\xc5\xd57\xfe\xca\x94\xbf\x1eJ\xb9\x84\x09\x18\xc5\x9cB_\x98\xf9\x1az\x0e\xcf{\xd6\xcc\xef\x94\xcf\xcd2t \xf0\x01\xac$\xec\x85\xde\xb9Y\xc3Z(\xe52\xb70\x13'\x97\x0c\xb3\xfbg\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A-5[\xf7$\xbb\xa9KWe\xae\xe5E\xe2Ps\x08}G\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdcT\xcf\x5c\x08z\xa38\xa3\xf7\x9d\x8a\xe0\xb7\x90\x12#\x1d\x9e\xc1{\x1e\xc3\xde\x99W\x1a\x08|\x00\xab\x14BZj\xe5\x8d\xe2v6i\x18\x92\x8c\xc1\xef\xed\xec\xd3\xbdn\xe7\x8b\x0c6\x1d\x9c\xc1+\xec\x81\xc0\x07\xb0z\xb1\xf8rj\xc5\x8d\xd3\xfb\xce;\x8b5\xfcF\xe5?\x1fg\x9f\xd4\xf1[x\x01\xe1\x0e\xcd\xe0=\xc9\xd4\xd8\x03\x81\x0f\xa0%\xaaBZQ\x11\xc0.B\x1d\xbfr{\xb4\xac\x09\x09\x1d\x98\xc1\x1bj\xec\xed\x09{ \xf0\x01\xac\x5c,\xc3\xb2U\x11\x5c&m|\xec-\x9e\xc1\xfb\xae\x1a{\xd0n\xd6\xd2\x05\xd6)\xecU\xad\xe1\x1a\x86L\xf7[\xf8\xb8\xc3c\xbe\xed9\x0b=}E\xb9}\xb5\xdc\xfev\x0b\x1e\xde;\xca\xae\x80\xc0\x07\xd0&!\xcc\xa5&j\x1c\xb6tH2\x0c\xe5\xee\xb4\xec1\x85p\xbc\xd7\xd6\xdeP@\xe0\x03\xd6P,\xc3\xf2,\xb1\xcbe\x5c_\xb7\x8d\xb6[\xf6xnk\xec\x99\x89\x0b\x1d\xe1\x1c>`]\x8c+\xaeo\xf3\xd2_\x1b-z,a\xd2\xc8\xb6\xb0\x07\xdd\xa2\x87\x0f\xe8\xbdX\x86%5$z\xda\xd6\x15!\xe2coS\xd8Sv\x05:H\x0f\x1f\xb0\x0e\xc6\x15\xd7\xb7\xb9w\xefQK\x1e\xc7\x91\xb0\x07\xdd\xa5\x87\x0f\xe8\xb5<\xcf\xab\xd6\xcb}\xde\xf2\xe1\xc9\x10\xb0Nb\xf0\x0b\xdb\xd6*\xc2\x9e\xb2+ \xf0\x01\xb45\xec\x85\x80T$v\xb9\xaa\xb8~\xe5\xe2,\xd8I\xc5s\xfc\xee\x02\x1f\x82\xb2+\xd0\x03\x86t\x81>\xab\xbd^n\x87-r\x06\xaf\xb0\x07=\xa1\x87\x0f\xe8\xa5<\xcfC\x10z\x92\xd8%\x94a9\xec\xc1S]D\xe0Sv\x05zF\x0f\x1f\xd0WUan\xd4\x93\xe79\xef\xc0'\xec\x81\xc0\x07\xd0~y\x9e\xefe\xe92,'=Z!b\x9e\x81/\x94]\x19\x08{ \xf0\x01\xb4=\xecU\xad\x97\x1b\xec\xf7\xe8)\xcfk\xd6\xae\x1a{ 
\xf0\x01tFU\x19\x96\x832\xd4\x5c\xf4$\xdc\x0e\xe7tS\xa1\xec\xca\xb6\xb0\x07\x02\x1f@\x17\x02\xd0 K\xf7\xde\x85\xf3\xd3\x0e{\xf4\x94\xe71\x9c\xfb\x5c\x8d=\xe8?\xb3t\x81>)\xb2t\x19\x96\xfd\x9e\xf5b\x0df\xfc}eW`M\xe8\xe1\x03z!\x0eo\xa6\xca\xb0\x9c\xf60\xdcL\xdb\xc3w%\xec\xc1z\xd1\xc3\x07\xf4E\xd5Pm\xd1\xc3\xe7\xbc3e\xd8Sv\x05\xd6\x8c\x1e>\xa0\xf3\xf2<\x1fe\xe9\xd9\xaaG=*\xc3r\xfb\x9c\xa7\xe9\xdd\xbb\x14\xf6`=\xe9\xe1\x03\xba\x1e|\xaa\xca\xb0\xb4~\xbd\xdc)5\x0d|\xca\xae\xc0\x1a\xd3\xc3\x07t]\x98\x95\x9b\x9a\xa8q\xd8\x972,\xaf\x194\xd8\xf7D\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Yb\x97\xb0^n\xd1\xd3\xa7?\xac\xb9\xdf\x91\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\xef\xf7\xf8\xb9\xd7\x19\xd2=\x10\xf6\x80@\x0f\x1f\xd0I\xb1\x0cKj\x96j(\xc3r\xdc\xd3\xe7>\xc8\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07t^U\x98Y\xd7\xde\xbd0Ie\xd4\xd7\xb0\x0b\x08|\xc0\x9a\xc8\xf3\xbcj\xbd\xdc\xe7=/=\xb2\x9d\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0ka/\x94a)\x12\xbb\xf4\xb5\x0c\xcb]\xc3{~v.\xec\x01\x0f\xd1\xc3\x07tM\x08s\xa9\xf3\xd7\x8a5(?2x \xec)\xbb\x02\xdcK\x0f\x1f\xd0\x19qu\x89\xa7\x89]B\x19\x96\xc3\x9e\xb7A\xe8\xe1\xbc;\x9c}$\xec\x01U\xf4\xf0\x01]R\x15\xe6Fk\xd0\x06w\xcf\xdfSc\x0f\xa8E\x0f\x1f\xd0\x09y\x9e\xefe\xd5eX&k\xd0\x14\xc3x\xf9\xae\xb0\x07\xd4\xa5\x87\x0f\xe8\x0a\xbd{7B\x0f\x9f\x1a{\x80\xc0\x07\xf4K\x9e\xe7E\x96.\xc3r\xd0\xd3\xf5r\xef\xb3\xbfF\xcf\x15\x98\x13C\xba@\xdb\xc3\xde K\x17Q\x0eeX\x0e\xd7\xa5=\x84=@\xe0\x03\xfa\xa8\xc8\xd2eX\xf6\xcdP\x05\x10\xf8\x80\x8e\x8a\xeb\xe5>I\xecr\xee\x5c6\x00\x81\x0f\xe8\xb6\xa2\xe2\xfa}M\x04 
\xf0\x01\x1d\x95\xe7\xf9(K\x97a9Z\x932,\x00\x02\x1f\xd0\xcb\xb0\x17V\x93HM\xc4X\x87\xf5r\x01\x04>\xa0\xd7\xc2Pmj\xa2\xc6\xa1\xd9\xaa\x00\x02\x1f\xd0Q\xb1\x0c\xcb\xb3\xc4.\x97\xd9\x1a\x95a\x01\x10\xf8\x80>\xaa\x0as\xca\xb0\x00\x08|@W\xc52,\xbb\x89]\xc2z\xb9\xc7Z\x0a@\xe0\x03\xbak\x5cq\xbd2,\x00\x02\x1f\xd0Uy\x9e\x870\x97Z/7\x94a9\xd3R\x00\x02\x1f\xd0\xcd\xb0\x17\xca\xb0\x14\x89]B\x19\x16\xbd{\x00\x02\x1f\xd0a!\xec\xa5\xca\xb0\x14&j\x00\x08|@G\xe5y\xbe]^<M\xecrY\x86=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\xe5y\xbe\x97\xa5\xd7\xcb=\xb5^.\x80\xc0\x07t\x9b\xde=\x00\x81\x0f\xe8\xab<\xcf\x8b,]\x86\xe5\xc0z\xb9\x00\x02\x1f\xd0\xdd\xb07\xc8\xd2eVB\x19\x16\x135\x00\x04>\xa0\xc3\x8a,]\x86\xc5z\xb9\x00\x02\x1f\xd0Uq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x00\x02\x1f\xd0]E\xc5\xf5V\xd4\x00\x10\xf8\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x9c(\xc3\x02 \xf0\x01\xdd\x0d{\xd6\xcb\x05\x10\xf8\x80\x9e\x0ba.U\x86\xe5P\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x16\x00\x81\x0f\xe8\xb4\xaa0W(\xc3\x02 \xf0\x01\x1d\x15\xcb\xb0\xec&v9U\x86\x05@\xe0\x03\xba\xad\xaawom&j\x94\xe1wRn\xd7\x1d\xdd&^\xca\xf4\xe0opTng\xaf\xbd\xb6\x8f\xe3\x17S\x81\x0f`\xca7\xd7\x10\xe6\xb6\x12\xbb\x1c]__\x9fi)`\x09\xefG\xe3\xf2\xe2\xc5=\xefIa\x04\xe2\xc3\xb8\xbe\xb7\xc0\x07\xd0\xf0\xcdU\x19\x16\xa0-\xefG\xe1\xbd\xe8I\xc5n\xcf\xfa\xdc\xd3'\xf0\x01\x8b\x12\xde`S\xeb\xe5\x1e\x9a\xa8\x01,I\xdd/\x97\x85\xc0\x07P\xff\xdb\xf4\xa0\xbcx\x9a\xd8\xe5\xb2\x0c{\x85\x96\x02\x96\xf0~4\xac\xf8\xf2y\xd7\x8e\xc0\x07P\xdf\xb8\xe2\xfa\x91&\x02\x10\xf8\x80\xee~\x9b\xde\xab\xf8\x96|j\xbd\xdcN*4\x01t\xd7\x1b\x9a\x00\x98\xb3\xaa2,\xa35n\x9b6\xcdH\x0e\x93j\xb6j\xee\xfb\x5cH\xa7\xc3.\x1a\xec{%\xf0\x01T\x883\xe16+\x82\xc3\xc5\xba\xb6O\xf9\xdc\xf7[t\xac\x8ek\x06\xbe\xf36=n\x98\xe2\xef\xee\xa2|\xbd\x9ff\xf5\xce\xcf\xeb\xed\x12\x8f\x86t\x81y\x05\x88\xd0c\xb4_\xf1\xcd\xb9\xd0R\xad8V\xe18\xed\xd6\xdc}\xa4\xc5\xe8\x81QV\xdd{w.\xf0\x01\xd4\xfbf\x9c\x9a\x09\xb7\xaf\x0cK+\xc2\xde\xa0A\xf0>P\x18\x9b>\x88#\x0b\xc3r;}`\x97\xa3p}\x9f\xdf\xa3\x0c\xe9\x02
\xf3\x08\x11\xe1\x8d4U\xd4\xf4\xdcz\xb9\xad1\xce\xea\x95\xa88W:\x87\x9e\x85\xbe\xf0\xe5eX\xbe_m\x97\x97a\x0b_~&\xe5v\xb6\x0e_F\x05>`\x1e\xaa\x82\x81s\xc0\xda\x11\xcc\xc3q\xa8[gl\xa4\xc5\xe8q\xf0[\xbb\x9ekC\xba\xc0\xac!bT\x11\x22N\xcc\xf0l\xc5q\xda\xce\xea\x0f\xe5\xbek(\x17\x04>\x80\xdb\x10a\xbd\xdc\xee\x18g\xf5\x86rC\x9d\xc4C\xcd\x05\x02\x1f\xc0\xad\x10\xe6ReX\x0e\xd7\xb9\x0cK\x8b\x82y\x08\xe5uJ\xb0\x84\x80>\xd2b \xf0\x01\xdc\x86\x88Ay\xf1,\xb1\xcbe\xd6\xe3\x12\x07\x1d:N\xdb\x15\xc7\xe9\xaeB@\x07\x81\x0f\xe0\xae\xaa0W(\xc3\xd2\x0a\xe3\x9a\xfb\x19\xca\x85\x1e3K\x17h,\x96a\xd9\xad\x08\x0fc-\xb5\xf2\xe3\x14\x02\x5c\xdd\xa1\xdc=-\x06+\xf9;\x1dd7%b\xb2ENp\x13\xf8\x80iT\xf6\xeei\xa2V\x84\xf2\xa75w\x1f\xe9\x8d]\xd9q\x0a\x13\x9f\xc2\xb1\xba\xad\x0d\x17\xfe\xbfS\x11\xce\xcf\xeel\x13\xc3\xf0\x9d:\xd6{\xf1x\x0f\xee;\xce\xe5>\x0f\x1d\xe7\xe3Y\xffF\x05>\xa0\xe9\x9b\xd6(K\xf7\x1a\x1d)\xc3\xd2\x8a\x0f\x96q\xcd\xddC\xd9\x9cc\xad\xb6\xd4\xe3\x13>\xecG\xf1\xc3\x7f\xab\xe1\xafo\xc4\xa0\xb0s\xe7\xf6\xc2\xea\x11\xe3y\xf4\xaa\xc7/\x0a\x1f\xd6\xdc\xfd\xcb\x8bz\xed\xc4\xd7\xf0EVofy\xe5\xe3\xa8\xfb\xbc\xca\xdb\xc9\x17\xf4\xe5+\x1c\xef'3\x1c\xe7\x17\xe5\xed\xdc.\xfd6U\xf8s\x0e\x1f\xd0\xf4M\xf8\xb0\xa2\xf7A\x19\x96\xd5+\xb2\xf4\xec\xe9\xbb\xc7k\xa4\xb9\x96\xf7e\xa9\xdcBo\xcd\xcb\xecf\x22\xcd\xd6\x9cnz'\x06\x82\x8b\x18.\xa6\x16\xbf\xac=\xaf\xb9\xfb8\xbe',\xc2\xb8f\xd8{\xde\xd6/,!\xd8\x97\xdb$\x06\xcd's\xb8\xc9\xf0zy\x11\x82p\x98y\xdf\xb4\xed\x05>\xa0i\x90H\xbd\x09\x1f\x1a\x1a\x5c\xf9\x87L\xe852\x94\xdb\xbe\xa0w\x11?\xac\xb7\x16xW!\xe4\x7f\x18\xcf\xdd\x9c%\xf4\x85/m\xe75v\x0d\xef\x05\xc7\x0bz\x0d\xef\xd6\xd8\xf5<k\xe9\xe9#q$\xe4,\xab\xbf\xb2M\x13\x1b\xf1\x0b\xc3\xb6\xc0\x07,\xe4\xdbjE\x90\xb8\xb4\xf6\xea\xca\x8fQ\x93\xa1\xdc\xe7\x86r\x97rL\x0ec\xd0\xdb\x5c\xe2\xdd>\x0d=K3\xf6\xbe\x8dj\xee\xb7\x13\x97\xec[\xc5k\xb8\x95_X\xee\x1c\xf3\x8d\x05\xde\xcdU\xd3Sg\x04>\xa0\xae\xaa7aC\xb9\xed8Fu>dB\x8dD\xe1|9V\xb5D\xddN\x83\xe0\xf4)qi\xbdwk\xee^\xc4/\x84\xcb|\x0d\xb7r\xf9\xbfX\xe4\xfc\xe9\x12\xee\xaa\xf1\
x975\x81\x0f\xa8\xf3&6\xcc\xd2C\x13\xa7z\x8bV~\x8c\xea\x0e\x83\x05\x86r\x97g\x96\xbf\x8b\xf3\x19\xef{7\x06\x90iC_\xe8\xa9:\xad\xb1\xeb\xc6,\xe1r\x8a\xd7p+kF\xc6\xc7\xff\xac\xad\xaf+\x81\x0f\xa8\xfb\xad;\x19 4\xd1J?h\x9a\x0e\xe5N\xb4\xdar\xc4`}\x94\xd8%L\x9c9\xc9nz\xd3\xde.\xb7\xcf\x86\x99\xa2q\xdb\xbe\xfdw\xf9\xf3\xc7\xe5\xf6\xe5x[W\x0d\x1e\xc2\xb3\xb8\xda\xca\xb4F5\xefo\xa6\xa1\xdd\x06\xaf\xe1VN4\xaa1\xa1\xedS\xa1\xf5\xf6\x98\xdf9\xde\xb7\xc7\xf9\xedx\xddC\xc7\xfaj\x9a/\xd8\xca\xb2\x00Uod\xa1\x87`\xb3\x22@\x5ch\xa9\x95\x0ao\xfe\x86r\xdb}|\x9e\xbc\x16Z\xc2\xcf\x0e\xeb\x0eK\xc6\xbf\xb1\xb0\x1d\xc7p\x11\xc2U\xdd\xde\xa4\x10D\x86S\x06\xd6\x8b8\x01\xe1\x83\x1a\xbb\x87\xa1\xdd\xe3)\xdf\x0f\xc65_\xc3\xa3\x96\xbe\xdfT\xad+~+\xf4\xda\xee?\xf4\xa5\xeb\xceq\x9e\xdcy\x0f\x0e=\x87{w^C\xe3i\x1e\xa0\x1e>\xa0\xea[\xeb~\xc5\xb7m\x01b\xb5\xc7(\x1c\x9f\xba3\x01\xf7\x0c\xe5._\xec\x8d\xb9\x8a\xdbA\xb9\x0d\xca\x9f\x8d\xa6=\x07-\x1c\xc38A\xea\xed\xac~\xef\xdb\xf6\x8c\x8f\xff\xa4\xc6\xaeS\x0d\xed6\x18\xca=j\xf1\xa9#\xa3\x9aao\xd8\xb4\x87=<\xe7\xf0z\xc9nz\xffN\x04>`\x11\x0e+\xbeu[/w\xb5ao\xd0 p\x1f\xb4\xf1$\xf752\x8aAon\x7f318\x8cj\xee\xbe?\x87\xc7?\xf7\xa1\xdd\x06C\xb9\x97YK'\x86\xc5s\x9c\xabz\xf7\xaeb\xd8{5\xc3\xf1\xbe(\xb7\xbdi\xff\x8e\x05>\xe0\xa17\xb1\xd0#\x90*\x16z\xde\xc6\x13\xa7\xd7\xcc8\xab7\x0cv\xaed\xcej\xc5^\x9aW\x8b\xb8\xdd,}\x8e\xe0\xad\xbd\x19\xef\xe7U\x83\xdbh2k\xb7\xeek\xb8\xcd\xbd\xd3\xc3:\xcfs\xd5\x8f_\xe0\x03\x1eR\x15\xe6\x94aYm 
o2\x94;\xd2b\xbdV'\xcco,q\x15\x8eZC\xbb\xf1\xf1\xd4\x19\xcam{\xeft\x9dv]\xf9P\xb4\xc0\x07\xdc\xf7F<\xaa\x08\x13'fz\xae\xf4\xf8\x84\xde\xd7\xf7j\xeen(\xb7\xe7\xe2\x89\xfeu\xce\xb1\x1b\xce\xe1\xeeB\xb8\xacS.&9\xb4\xdb`(\xf7\xb4'\xbd\xd3+\xff\x1b\x14\xf8\x80\xfb\xde\x88\xab\xde`\xf5\xee\xad\xd6\xb8\xe6~\xa7\x86r\xd7F\x9d/`\xdb\xb3\xdeI\x1c\x96\x1c\xd5\x0d\x87\x89\xd5>\xea\xccj\xed\xd3Z\xcf\xdb\xab~\x00\x02\x1f\xd0\xf4\x8d\xf8@\x19\x96\x95\x06\xf2\x10\xe0\xea\xac\xc7\xda\xa7\x0fK\xaa\xd5\xe9Az4\x8f;\x8a=\xc6\x075v\xbdwh7\xf6P\xd7))3\xea\xd1{\xcd@\xe0\x03\xda\x14&\x06Y\xba\xf7.\xcc\x943Qcu\xc7\xa7\xee\x07eP\x08\xe6k\xa5\xce\x84\x80\x9dy\xddY\xec9\xae\xb3\x0a\xc7\xee=\xe7\x0e\xd6y\x0f9\xea\xd9\xea=\x85\xc0\x07\xb4\x892,\xed\x0d{MV\xd385\x83z\xbd\xac\xe8<\xcdQV\xafT\xcb\xf8\xce\xebxT#x\xb6\xb6\x04\xcb\x03\xea\xb4\xfdf\xf9\xdc\xc7\xab|\x90\x02\x1fp\xfbF\x1c\xbe\x85\xefV\x84\x88\xb1\x96Zi\x0f\x81\xa1\x5c\xda\x142/j\x06\xb3\x10v\x8a\x06\xcb\x8fu\xad@\xf8\xa4\xe6~OB\xe8K\x9c\xd7\xb8P\x96V\x03nU\xbd\x11\x17\x9ah\xa5a\xfci\xcd\xdd;q\xdeS\xf9\x9c&K\xb8\x9b\xb3\xb2-\xf6;z\xccC(\xd8\x8e[\xf8\xf70^5\xc8\xea-\xe1\xb5\xac\xd07\xae\xb9R\xc6~|\xecU5\xf7:7\xab<\x0c=\x97mpY\xf3\xb8\x84\xda\xa6\xc3\x10\x80\x97\xfd\x05Z\xe0\x03n\x87YR\xbdGG\xca\xb0\xac\xf4\x83\xbf\xee\x07\xc3I\x87\xce{\xdaqt?u\x9c\xf7b\xb0\x1b\xb6)\xd4\xd5\xf9\x92\x91\xdd\xac\xff\x9a\x0as\x1bY\xba\x90{\xd0\xe5Y\xe5\xe1q\xbf\xa8\xb9o8\xb6/\xe2\x04\xac\xb0\x1d/\xa3G\xd3\x90.\xf8\xa0\xa9\x1af\xb9\xca\x94aY\xf5\x07I\x9d\x0f\x7fC\xb9\x1d\xfd\xb2Un!\xa4\x7f7\x06\x86'\x1d\x0b{MW\xe1\xe8\xe5\xeb7\xf6\xd6\x9d6\xfc\xb5\xcdx\xcc/\xe2P\xefBK\xb7\x08|@Q\xf1\xcd\xfc\xd0D\x8d\x95\x85\x81\xf0!\xdad(\xd7q\xea\xc8\x97\xacxN\xdb\xab\xf8\x81\xbf\xdb\xf5\xe7\xd4`\x15\x8e\x87\xec\xf7`Vy\xf8{=\x9f\xe2\xf7n{?\xbfQ\xbe&\xce\xe2\x97\x80\xb9\x9f\xe7'\xf0\xc1z\x7f\xf0\x0c*\x02\xc5\xa5\xc2\xbd\xab\x0b\x05Y\xfd\xa1\xdc\xbe\x95\xb0\xe8\xf3q\x0d\xbd\xe5!\xd8<\xcb\xea\xad!\xdb\xb5/\x8f\xd3\x04\x9e\x93>L\x08\x8b_\xb8\x86S\xb6\xc1\xad\xad\xec\x93^\xbfb\x9e\x
c1O\xe0\x83\xf5V\xf5&k(w\xb5\xc7\xa6N \xb8t\x9c\xba\x11\xe0\xe3D\x95\xf7z\x18\xf4\xee\x06\x9eQ\xc3_\xbb\xcczt*\xc2\x9d\xd0w:\xe3Mm\xc4/\x05\x17\xf1\x5c\xbf\x99\x99\xb4\x01\xeb\xfb\x01\x14\xde\x94R'\xce\x9f\xea5Z\xd9\xb1\xa93\xeb\xf1\x96\xa1\xdc\xf6\x1f\xcfpn\xd6d\xc6\xa0\x17z\x8d\xc2q>\xcb\xbe\xbf\xc8\xf2\xe4\xce\xbf\xc3\xb9\xb8[\xab|\xaea\x86m\xf9|\x8f\xb2\xea\x09\x1a\xb7\xce\xfa\xf6\xfa\xbd\x0d}1\xa8=\x9b\xf1\xe6>\x0e~\xf1=a4\xcb\x0cf\x81\x0f\xd6\xd7\xb8\xe2z\xbdF\xab\x09\x07M\x86r\x9fwu\xf6t\xf9\xb8sa/)\xf4\x10\x1d\xc7@4\xa9y_\xafZ\xf2|\x9f4\xf8\x95\xb0\x12\xc7^\x1f\xbf\x5c\x86\xd3ab\xb1\xe5\xc3l\xf6\xf34C\x90\x9f\x84S\x02\xa6\x1d\xfe\x16\xf8`=CE\xd5z\xb9\xcf\xbbV\x0b\xabG\x8e\xb3\xfaC\xb9\x85\xe6j}xo\x12\xf6\xaeb8\x18wq\x02C|\xbe\xd3\x04\xb7\x8fg\xa8\xf6q)\xc0\xf8\x9c\xf6\xe2\x88\xca\xa8a\x18~]x\x1d\x85r.\xd94\xa1\xcf9|\xb0\x9e\x1fBE\xc5\x87\x8e \xb1\xba ^\xb7>\x9d\xa1\xdc\xfe\x84\xf7\x8f\xbfd\x95\xdb \xf4\x0au8\xf8\x1cf\xd3\x95\x94\x09m4\xee\xf3\x0b!\xf4\xd2\x96[\x08|\x8f\xcb\xed ~a\x9b\xd6\x8b8\xc4+\xf0\x01\x95o\xca\xd6\xcbm_\xd8\x1b4\x08\xda\x07\x0aa\xb7\xfex\x8ej\x86\xf7\xf0\x05\xeb\xed\xb0\x22H\x97\xff\xeeb\x00\x99\xa5\xf7j'~\xe1\xe9\xb5\x10\xe6c\xa8\x0f\x7f\xef_,\xb7\xa3\xac\xdez\xc4\xaf\x1b\xc7\xf7\x0c\x81\x0f\xb8\xf7M\xb9\xea\xfc\x9aP\x86\xe5PK\xad\xc48\xab\xd7\x1bt\xaeTN'\xd4=F\xc3\xae\x87\xf7\x86\xe7\x9d&\xdb\xaci\x88\xe9x\xf8;\x8b\xbd~\xe19\x1f4\x0c~\x1bY\xc3\x91\x18\x81\x0f\xd6KU\x98\x1bi\xa2\x95|`\x867\xee\xdaC\xb9Z\xac\xf5\xc73\xf4v\xd5\x19\xda<\xe8\xc9\xb9\xb2u\x86\xaeOj\x86\x98\xb5\xab\x0c\x10zv\xe3\x97\xb8\x10\xfc\x8e\x1a\xfc\xea\x93&\x01Y\xe0\x83\xf5\xfa\x10J\x85\x8a\x13\xc3\x84+9.\xa1\xd7\xb5n\xe9\x86\x03\x93i:\xa1\xce\xf9UW}\xe8\xa9\xady\xdei\x189\xd8\xab\x19f\xb6\xe6Uw\xae\xa3\xc1/|\xa1{g\xce\xaf5\x81\x0f\xd6(TT\xad\x97\x1b(\xc3\xb2\x1a\xe3\x9a\xfb\x19\xca\xed\x8e\xe1\x1c\x8f{\xdb\xbf\xac\xd4yM\x8e\xee\xbc\xc7\xd4\x19\xb6|\xb6\xe8ue[\x1e\xfc\xc6\x0dB\x9f\xc0\x07|*\xcc\xa5\x86\x98\x0e\xfaX\x12\xa1\x03\x1f\x98\xe1\xc3\xb2N\xa1\xdc\xabl\xf6\xc5
\xe9Y\xde\x97\xab:\xc3\xb9\x93\x05\xdc\xfd\xce\x92\x9fn\x08&\x95C\xb9\xb7#\x07qRJ\xd1\xe0\xb6\xd7V\x0c}u\x86\xc1k\x07c\x81\x0f\xfa\xff\x014\xc8\xd2\xbdw\xb7\xb5\xbfX\xeeqi2\x94[\x08\xe4\x9dQ\xf7\x03\xf8b\x01\xaf\xa7e\xbe~\xeb\xac\xeaq\xf5\xfa{O\x9c\x14Vg\xad\xd9\xadx\x1f\xeb\xacN8\xae]\xd0[\xe0\x83\xf5x\xd3H\xbd)\xec+\xc3\xb2\xf4\xb0\xd7\xa4@\xed\xa9\x99\xd3\xfd\xb3\x80s1\x07K|\xfd\x0e\xcb\x8b\xa73|Q\x19\xd5\xbc\xab\xa7\xf1\xbe\xbcF\xe6@\xe0\x83~\x07\x8b\xf0f\xf9\xa4\x22L\x8c\xb5\xd4JBx\x9da\xbf\xab\xcc\xac\xdc\xbe\xfem\xce;\xa0\xed-\xe9q\xd7-\xc1r\xfe\xd0\x17\x95\x18d\x9e\xd7\xbc\xcbq\xbc\xcfuu9\xaf\x1b\x12\xf8\xa0\xdf\xaaz\x86\x0aM\xb4\x92\x10\xfe\xb4\xe6\xee\xfb\x86r{k0\xc7\xd7\xd4 \x9b\xad\xe8q\x13\xe3\x9a_V\xaa\xbe\xa8\x145\xc3\xcc\xe6\x9a\xbfO\xcd-\xec\x0a|\xd0\xdf`\x11\xdepS\xe7\xd8\x1c)\xc3\xb2\xf4c\xd2\xa4@\xed\x89\xde\xd7^\x9bg\x8f\xdcxI\xaf\xdf\xf0\x98wk\xecZY>(\x9eF2\xaay\xd7k9\xb4\x1b\xdf/6\xe6u{\x02\x1f\xf4\xf7\x8d\x22\xd5\xbbg\xbd\xdc\xd5\xa8\xbb\xd6\xa8\xa1\xdc\xee\xaa{\xde\xd5\xde\x9c\xfe\xd6\xc3\xdf\xf1N\xc3\xf7\x86i\xeegP3X^f5'\x81\xc5/\x9cu\x87v\x8f\xd7ph\xb7\xcek\xe4T\xe0\x83\xf5\xb6_\xf1\xcd\xf0\xd0P\xe1\xd2Cx\x93\xb5FG&\xd2tS<n\xb5\x86*g];6\xf6\xe2?k\xf8k\xd3\xce\xe6\x1dg\xf5z\x9b\x9a\xbev\x8b\x9a\xed\xb5\x91\xb5\xb4TK\x08\xa2\xf3\xee\x81\x8c\xe1\xb6\xce\x97\xf2\x89\xc0\x07\xeb\x1b,\x06\x15\x1f\x02\x97\x0a\xf8.\xff\x03!k6\x94{\xac\xd5:\xad\xee\xf1+\xa6-\xa7\x12{\xf6^,\xe9\xf5[g5\x8d\xe0y\xd3\xd3D\x1a\x0e\xed\xee\xc6/Nm\x13\x1e\xd3\x87\xe5c\x9b\xcc#\xf8\xc5\xf7\x8b\xd0\x8euF\x03j\x87\xe07\xfc]B\xefT\xbd\x01XQc5\xc7\xa4\xee\xb98\x83\xf0\xc1\xd1\xd2\xe7\xb1oi\xb7Z\xc2\x90f\x9d\x899\xe15\x11B\xc2\xa8n\xc8\x8f\x81\xa2N\x0d\xbcy\x85\xbd\x10H\xdf\xab\xb1\xeb\xd4\xa7\x89\x84\x90X\xde\xcf\xf3\x9am\x16f\xed\x0eZ\xd6\x03~\x1bBwb\xf0;\x8d\x7f\xf3\xc7M\x1fg\xc3\xe3{\xdad\xa4F\xe0\x83\x1e\x89o\x16;\x15o\x10z\x8f\x96{L\xea\x9e\xe8~k\xab\xc5O\xe7\x91#Z+\xc0\x5c\xc4\x0f\xfd:\xbdb!\xf4}\x90\x0a\x091t\x85\xbf\xedQ\xc5\xeb\xe3*\x9b\xe3I\xfe\x0d{\xa6g=\x0d\xa1\x88
\xc1i\xb3F{\x1dg\xf5\x96\xaf[\xc6\xdf\xf7\xa3{\xfe\xbew\xe2\xf6\x22\x1e\xd7I\xdc\xce\xeek\xa3\x06\xc7w\xa6/\xef\x02\x1f\xf4K\xd5\x9b\xb3\xde\xbd\xf6\x1d\x13\xfa)|x\xbfl\xb0\xff\xdd\x900Mx\xbb\x9d\xe8\xf3\xc1\x1c\x9fCQ3\x80\xcc|\x1aB\x08B\xf1\x9c\xc4\x0f\xeb\xb4U\x18fnIA\xf2\xbd\x9a\xc7\xf5Y\x0cw\xb7?\xbf\xcc\xea\x0d\xd9>\xe4\xa0io\xbbs\xf8\xa0'\xe2y6\xa97\x90\xe7\x86\xe3VbC\x13\xac\x9f8\xd4\xf6\xee\x92^7!\xec\x0dk\x86\xaeZ\xe7\x0c6\xa8\x179\xb7\x19\xe5\x0dg\xed\x16\x0b(^\xbd\x88\xc0\xf7\x90Y\xc2\xde\xd14\xe7a\x0b|\xd0\x8f\xb0W5\xa3K\x19\x16X~\xe8\x0b=PG\x0b\xbe\x9b\xdb\xb0W\xf7\xcb\x5c\xe5\xb0|\xc3\xa5\xff\xe6=\xa3\xbc\x88\xcf\xa9N \x1e\xb7\xe0}ww\xc9w\x1b\xc2\xdeT\x01[\xe0\x83~8\xac\xe8\x11(\x94\xf9\x80\x95\x84\xbe\xf0\xe1\xfc|A7\x1f\xce\x0f\x1b\xbc\x16\xf6\xaa\xea\xb2\xd59\x0fs\x9c\xd5\xeba\x9c\xfb9\xc1\x0dg\xed\xee\xc4\xd9\xca\xab2X\xf2\xfd\xbd;m\xd8\x13\xf8\xa0\x07\xe2\x09\xbf\xa9\xfan\x97-9\xd7\x05\xd65\xf4\x85\xd3-\xbe\x9c\xd5\xeb\xb9\xaa#\x9c\xff\xf5Ny\xbb\xc3)\xbe\xc8mW\xbc\x9f\x84@Q\xa7\xd7ja\xc5\xc1c\x88<\xa9\xb9\xfb\xb3iK\xdb\xcc\xe1q\x86\xa0\xfd8[|/n\xb8\xfd\xc7\xb3\xbe\x8f\x0b|\xd0}Uo\x02#M\x04+\x0f}!\xc4\x0c\xca\xed`\x86\xe0w\x1e\x83\xde \xb1\xec^\xd5\xd0\xee 
\x11\xf6\x06Y\xcdU2\xb2\x9bQ\x83\x8b\x056\xd9\xa8A;\x8dWx\x5c/b\xaf\xdbg\xb3\x9bs6O\xe7t\xd3\xe1\xb9?\x8fAo4\x8f\xb6\xce\xcb\x1b\xf1\x97\x08\xd1\xc6W\xde\x9ad\x0d\x96)\x8aN\xaf\xde\xffh\xb8\x8a\xc7\x1bK~\xa4f\xe5\x85!\x97\xa1#\x0b\xed\x12\xffv\xc3\xdf\xe6v\xe2=\xe72\x06\xb8\xf0\xbetlu\x9c\xce\x1c\xdbG\xf1\xb8\xde\x1e\xdfG5>WBP|\x15\x8f\xf5d\x11\x13\xec\x94e\x81n\xd3\xbb\x07\x1d\x14{\xfc\xd4\xc4\xec\xe7\xb1\xfd^pk\xd3\xe32\xa4\x0b\xdd\xfd\x16Yd\xe9\xa9\xfd\x07z\x04\x00\x10\xf8\xa0\xbbao\x90\xa5\x8b(\x87\xf3?L\xd4\x00@\xe0\x83\x0e+\xb2t\xd9\x84}eX\x00\x10\xf8\xa0\xa3b\x05\xfcT\x19\x96\xf3\xc4\x0c>\x00\x04>\xa0\x03\x8a\x8a\xeb\xad\x97\x0b\x80\xc0\x07]\x15\x8b\xa2\xa6\xa6\xf7\x1f\xc5\xf5(\x01@\xe0\x83\x0e\x86\xbdP\xcb)5\x11\xc3z\xb9\x00\x08|\xd0qa\xa865Q\xe3P\x19\x16\x00\x04>\xe8\xa8X\x86\xe5Yb\x97P\x91_\x19\x16\x00\x04>\xe8\xb0\xaa0\xa7\x0c\x0b\x00\x02\x1ftU,\xc3\xb2\x9b\xd8\xe54.\xd3\x04\x00\x02\x1ft\xd4\xb8\xe2zeX\x00\x10\xf8\xa0\xab\xf2<\x0fa.\xb5^n(\xc3r\xa6\xa5\x00\x10\xf8\xa0\x9ba/\x94a)\x12\xbb\x842,z\xf7\x00\x10\xf8\xa0\xc3B\xd8K\x95a)L\xd4\x00@\xe0\x83\x8e\xca\xf3|\xbb\xbcx\x9a\xd8\xe5\xb2\x0c{\xca\xb0\x00 \xf0A\x87U\x85\xb9\x91&\x02@\xe0\x83\x8e\xca\xf3|/K\xaf\x97{j\xbd\x5c\x00\x04>\xe86\xbd{\x00\x08|\xd0Wy\x9e\x17Y\xba\x0c\xcb\x81\xf5r\x01\x10\xf8\xa0\xbbao\x90\xa5\xcb\xac\x842,&j\x00 \xf0A\x87\x15Y\xba\x0c\x8b\xf5r\x01\x10\xf8\xa0\xab\xe2z\xb9O\x12\xbb\x9c\x97ao\xac\xa5\x00\x10\xf8\xa0\xbb\x8a\x8a\xeb\xad\xa8\x01\x80\xc0\x07]\x95\xe7\xf9(K\x97a9R\x86\x05\x00\x81\x0f\xba\x1b\xf6\xea\xac\x97[h)\x00\x04>\xe8\xae0T\x9b*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT,\xc3\xf2,\xb1\xcbe\xa6\x0c\x0b\x00\x02\x1ftZU\x98+\x94a\x01@\xe0\x83\x8e\x8aeXv\x13\xbb\x9c*\xc3\x02\x80\xc0\x07\xddV\xd5\xbb\xa7\x0c\x0b\x00\x02\x1ftU\x9e\xe7!\xccm%v\x09eX\xce\xb4\x14\x00\x02\x1ft3\xec\xd5)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xd4z\xb9\x87&j\x00 
\xf0AG\xc52,O\x13\xbb\x5c\x96a\xaf\xd0R\x00\x08|\xd0]\xe3\x8a\xebG\x9a\x08\x00\x81\x0f:*\xcf\xf3\xbd,\xbd^\xee\xa9\xf5r\x01\x10\xf8\xa0\xdb\xaa\xca\xb0\x8c4\x11\x00\x02\x1ftT\x9e\xe7E\x96^/\xf7\xb9\xf5r\x01\x10\xf8\xa0\xbba/\x94aI\x95Y\x09eX\x0a-\x05\x80\xc0\x07\xdd\x15\x86rSeX\xf6\x95a\x01@\xe0\x83\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c\x00\x04>\xe8\xb6\xa2\xe2z+j\x00 \xf0AW\xe5y>\xca\xd2eXN\x94a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850\x97*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT\x5c/\xf7Yb\x97\xcb\xac\xba\x083\x00\x08|\xd0bUa\xaeP\x86\x05\x00\x81\x0f:*\x96a\xd9M\xecr\xaa\x0c\x0b\x00\x02\x1ft[U\xef\x9e\x89\x1a\x00\xac\xc4\x1b\x9a\x00f\x17\xcb\xb0l%v9\xba\xbe\xbe>\xd3R\x00\xdd\xf3\xf5\xc7_\x18\xc7\x7f\x16_z\xf9\xad\x8b.>\x07=|0\xa3?\xf9\xc7?\x10\xbe8\xa5z\xf7\x94a\x01\xe8\xb6\xe3\xecf\xe5\xa4\x97e\xf8\x9b\x94\xdbH\xe0\x835\xf3\xf8\xdb?6\xc8\xd2\xeb\xe5\x1e\x9a\xa8\x01\xd0]_z\xf9\xad\x10\xf8.\xe3\x7fCQ\xfd\x17e\xe8\xbb(\xb7\xa2\xdc\x1e\x09|\xd0s\x7f\xee\x0f\xdf\xc8~\xf5\x83_\xf9|b\x97\xcb2\xec\x15Z\x0a\xa0\xf3^\x1f\xc9\x09\xf5VC\x19\xae\xef\x86!\xdfr\xdb\x16\xf8\xa0\xa7>\xf7\xf2\x07\xabv1\x94\x0b\xd0\x0f\xe3\xec\xe6\x14\x9d\xfb\x84\xe1\xdeo\x94\xa1\xef\xac\xad\xc3\xbd\x02\x1fL\xe9\xa7\xfe\xe0G\xb3\xd3_\x9e\xa4v\x09eX\x8e\xb5\x14@\xf7}\xe9\xe5\xb7\xc2\xa99U\xef\xe9a\xf2^\x18\xee}\x15\x87{\x07\x02\x1ft\xdc\xef\xfe\xfb\xefT\xed2\xd2J\x00\xbdRw\xa5\xa4p^w\x18\xee\x0d\x93<\x8e\xcbm(\xf0A\x07\xfd\xb5\xef<\xca\xbey\xf6\xeb\xa9]\x9e[/\x17\xa0_\xbe\xf4\xf2[\xa1\xbc\xd6i\xc3_\x0b\x05\xf9?\x8c\x93<F\xab\x9a\xe4!\xf0AC?\xfe\xc7\x9f\xc9\xbe9I\x86\xbdp\x8eG\xa1\xa5\x00zi<\xe5\xef\x85I\x1e/\xca\xed\x22N\xf2\x18\x08|\xd0bo\xfe\xce\x8fg\xdf\xfe\xf6\xb7S\xbbX/\x17\xa0\xa7\xbe\xf4\xf2[!\xf0]\xcep\x13a\xb8\xf7nM\xbf=\x81\x0fZ\xe6/\xfc\xdf\x1f\xca~\xf5\x97\xfeEj\x97\xf32\xec\x1dj)\x80^\x1b\xcf\xe9vBM\xbf\x0f\xe2p\xef\xfe\x22\x87{\x05>h\xe0\xc7.+\xffd\x94a\x01\xe8\xbfy\x7f\xb1\x0f\xc3\xbd\xefe\x0b\xac\xe9'\xf0AM\x7f\xf5\xf7~\xa2\xaa\x0c\xcb\xc9\xf5\xf5\xf5DK\x01\xf4[,\xd1r\xb4\xa0\x9b\xbf\xad\xe
97\xd7%\xdc\xdep\xd8\xa0Z\x98\xa8\xf1\xea\x9b\xff3\xb9\xcf?\xfa\xb9\xbf\xf9\x8bm\x98z\x0f\xc0R\x9c\xc5p\xb6(a\xb8w\xa7\xfc\x5c\x09\xbd\x89a\x1b\x97A\xf3b\xda\x1b\xcb\xaf\xaf\xaf\x1d2\x886\xbe\xf2\xd6$\xfe\x91}\x9fP\x86\xe5W\xfe\xd9/?\xf8{\xc5\xde\xcfd?s\xf6\xdf5 \x00\x8bt\x14\x83\xdf\xa4\xe9/\x1a\xd2\x85{\xfc\xe8\xef}\xee{\xff\x0e\xeb\xe5\xa6\xca\xb0\xec\xbc\xf5\xd3\xd9\xdf\xf8\xadok4\x00\x16-\xf4(~x\xbb\x84[\x93I\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f\xbe\xf1S\xd9\x0f}\xe7'?\xfe\xff\x9f\xf9\xed\x1fI\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xf1\x12n\xd9MM\xbf\xc3:5\xfd\x0c\xe9\xc2\x1d\x9f\xff\xd9_\xf8\x9d\x9f\xf8\xf6\xc6\x9f\xbe\xfd\xff\xe7\xfe\xe2\xcb\xec_\xfd\xe2?\x7fp\xff\x9f\xff\xd9\xb7\xb3_\xf8O\xbf\xad\xe1\x00X\xb5\x93\xecf\xb8\xf7\xde\xf5~M\xda\x80\xe8\xcd\xad\xaf\x8e\x7f\x22\xfb$\xec\x05\x9f\xf9\xed\xab\xe4\xef\xfcl\xf6\xbf4\x1c\x00m\x10\x96p\xdb\xfd\xfa\xe3/\x84\xa2\xd0\xb7\x93<\xbe\xb7\x08\x80\x1e>\x88a/\xbbg\xb6\xd5\xc6\x8f\xfcQ\xf6'~\xf47\xb3\x7f\xf9\xe1\xc9\xa7~\xe7\xef\xff\xdc\xdf\xca\xfe\xee\xaf\xff7\x8d\x07@\x1b\x85\x1e\x8b\xd0\xdbw\x18\xd6\x00\x16\xf8\x10\xf6\x1e\x08{w\xfd\xd9\xcf\xfeA\xf6\x7f~\xff\xdfd\xff\xfa\xdf\xfd\xdb\x8f\xff\xbf\xb99\xc8\xfe\xe9\xc6\x8f8w\x0f\x80.\x04\xbf}\x81\x0fa\xafA\x1d\xa5\xbf\xf3\x93\xdf\xca~\xe9?\x9ee\xef|\xf1\xcf+\xc3\x02@\x9b}\xdf\xd0\xae\xc0\x87\xb0\xd7\xd0;?\xf8\xcd\xec\xaf\x7f\xe775 
\x00mt\xef\xe4\x0d\x81\x0fao\x0a\x7f\xef\x0f\x7f=\xfb\xe9\xdf\xff\xaf\x1a\x12\x806\x08\xc3\xb6\xe1s\xed\xf0\xa1\xd58\x04>\x84=\xa1\x0f\x80n:\xcfn\x86m\x8f\xef\xce\xc8\xbd\x8f\xb2,\x08{S\xfa/?\xf0\xb9\xec\xa73\x81\x0f\x80\xa5k\xbc\xc4\x9a\xc0\x87\xb07\x85\xb7\xaf\xffG\xf6\xf3\xbf\xf7k\x1a\x15\x80e\x09\xc3\xb6\xb7\x930.\x9a\xfe\xb2\xc0\x87\xb0\xd7\xd0\xe7\xf3\xff\xfd\xcb?\xff\xbb\xbf\xf65\xad\x0a\xb0\xd6\x86\xe5\xf6l\x09\xf7s\x1aC\xdex\x96\x1b\x11\xf8\x10\xf6\x9a9\xfa\xd5\xb3\xe7#\xad\x0a\xb0\xde\xbe\xfe\xf8\x0b\xfb\x0b\xbe\x8b0l\xfbq\xd1\xe4y\xdc\x98I\x1b\x08{\x0d\xfe\xf8~\xe3\xfck\xc2\x1e\x80\xb07(/^.\xe0\xa6\xef]\x16m\x1e\xf4\xf0!\xec\x09{\x0043\xef\xde\xbd0l{\xf8z\xed\xbcy\xd2\xc3\x87\xb0'\xec\x01P\xd3\xd7\x1f\x7f\xe1QyqQn\x1b3\xde\xd4\xedZ\xb7\xc54\x930\x9a\xd2\xc3\x87\xb0'\xec\x01P\xdf\xde\x8ca/\x0c\xdb\x16Y\x8d\xday\x02\x1f\x08{\x00\xacF1\xe5\xef\x85%\xcf\x0e\x9b\xd4\xce\x13\xf8@\xd8\x03`\xc9\xbe\xfe\xf8\x0b\xc3\xf2b\xb3\xc1\xaf\xccT;O\xe0\x03a\x0f\x80\xe5\xab\xfb\xd9\xf0\xf1\x92g\xb3\xd6\xce\x9b'\x936\x10\xf6\x84=\x00*\xd4,\xc52\xd7\xday\xf3\xa4\x87\x0faO\xd8\x03\xa0\xdaC\x9f\x0fa\x12\xc68\x06\xbdWm}\xf0\x02\x1f\xc2\x9e\xb0\x07@\xb5\xd7k\xef\xcde\xc93\x81\x0f\x84=\x00Z\xe0\xeb\x8f\xbf\x10>#B)\x96\xa5\xd6\xce\x13\xf8\x10\xf6\x84=\x00\x96gXn\xeff\x0bX\xf2lYL\xda@\xd8\x03\x80\x9e\xfb\x8c&@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x0
0\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\xd3\xaa\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 
\xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\xb0\xb6\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x16\x05\xbe2\x9c=\x12\xf6\x00\x00z\x1c\xf8J\x87eH\x1b\x08{\x00\x00=\x0c|\xb1w/\x04\xb4\xd1\x1cnK\xd8\x03\x00h[\xe0+\xed\xc7\xcb\x99\xc2\x95\xb0\x07\x00\xd0\xde\xc0w\x1b\xac6\xcb\xd0\xb6'\xec\x01\x00\xf4(\xf0\xc5\x80\xb7yO\xf8\x13\xf6\x00\x00\xfa\x10\xf8\xee\x09x\xbbM&o\x08{\x00\x00-\x0e|1\xd8\xed\xd6\x08\x81\xc2\x1e\x00@\x17\x03_\x22\xd8U\x06/a\x0f\x00\xa0\xdb\x81/9yC\xd8\x03\x00\xe8@\xe0+C[\x08Y\x9b\x89]\xf6\x85=\x00\x80\x0e\x07\xbe\xacz\xd8v\xe7\xf5\xc9\x1b\xc2\x1e\x00@G\x02_\x0cr;5v\xdd\x17\xf6\x00\x00:\x18\xf8\xb2\x07\x86k\xef1\x12\xf6\x00\x00V'\xbf\xbe\xbe\x9e\xea\x17\xcb\x00\xf7\xaa\xbc\xd8\xa8\xb9\xfby\xb9m\x09{\x00\x00\xcb7U\x0f_\x9c\xac\xb1\xd1\xe0W\x84=\x00\x80.\x05\xbel\x8a\xa5\xd3\x84=\x00\x80\xd5h<\xa4\x1b'k\xbc\x14\xf6\x00\x00\xbaa\x9a\x1e\xbeB\xd8\x03\x00\xe8\x8eF=|on}\xf5Qyq\x915;\x7fO\xd8\x03\x00X\xa1\xa6=|{\xc2\x1e\x00@\xbf\x03\xdf\xfe\x92\x1e\x97\xb0\x07\x00\xb0\xec\xc0\xf7\xe6\xd6W\xb7\xb3\xf9\x95W\x11\xf6\x00\x00\xda\x16\xf8\xb2\xe5\xf4\xee\x09{\x00\x00\xab\x08|q\xb2\xc6\xde2\x1ePy_C\x87\x05\x00`~j\xcd\xd2\x8d+k\xbcX\xe2\xe3\xba,\xb7\xc3r\x1b\xff\xc6\xf9\xd7^9L\x00\x00\x8b\x0f|\x17\xe5\xc5\xe6\x0a\x1e\xdfU\xb9\x1d\x87\xf0W\x06\xbf3\x87\x0b\x00`\x01\x81/\x0e\xb1~\xd8\x82\xc7z\x9e\xdd\xf4\xfa\x1d\xeb\xf5\x03\x00\xa8\xaf\xce9|\xa3\x96<\xd60C8\x0c+_\x94!t\x1cg\x0d\x03\x00P!\xd9\xc3\x17'k|\xb7\xc5\x8f?\xf4\xfa\x15\xbfq\xfe\xb5c\x87\x12\x00\xe0~U=|\xa3\x96\x87\xbdCa\x0f\x00 
\xed\x8d\x8a\xeb\xf7[\xf6xM\xe2\x00\x00\x98W\xe0\x8b\x9356[\xf28M\xd8\x00\x00\x98w\xe0\xcb\xda1\x9c{\x94\xe9\xcd\x03\x00\x98\xc9\xbd\x936\xde\xdc\xfa\xea\xa0\xbcx\xb9\xa2\xc7\xa4\xe82\x00\xc0\x1c=\xd4\xc37Z\xc1c9\x8a!o\xe2\xb0\x00\x00\xf4'\xf0\xe9\xcd\x03\x00Xv\xe0{s\xeb\xab{\xd9\xe2'k\x9c\xc4\x90\xa7\xa4\x0a\x00\xc0\xb2\x03_\xb6\xb8\xde\xbd\xd0\x9b7\x8eA\xefB\xd3\x03\x00,\xc7\xf7M\xdaX\xd0d\x8d\xd3L\x81d\x00\x80\x95y\xbd\x87o4\xa7\xdb\x0d\x05\x92\xc71\xe8]hf\x00\x80\xfe\x04\xbe\xd0\x9b\x17\x86l\xc7\x9a\x16\x00\xa0e\x81\xef\xcd\xad\xaf\x86\xb07\xcdd\x0d\xbdy\x00\x00]\x08|Y\xf3\xde=\xcb\x9d\x01\x00t\xc0\xc7\x936\x1aL\xd6\x08\xbdya\xf2\x85\xe5\xce\x00\x00:\xe2\xb6\x87o\xbfb?\xbdy\x00\x00\x1d\x0f|\xa3\x07\xae\xb7\xdc\x19\x00@\xd7\x03_\x9c\xac\xb1q\xe7g\x96;\x03\x00\xe8S\xe0\xcb>\xe9\xdd\xd3\x9b\x07\x00\xd0\xd3\xc0\x17\x02\xde\x9e\xde<\x00\x80~\xfa\xff\x02\x0c\x00\x1eE:\x8bH\x8b\x05S\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x0f\xb6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x12t\x00\x00\x12t\x01\xdef\x1fx\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x0f3IDATx\x9c\xe5\x9by|TU\x96\xc7\xbf\xaf*KU\xaa\x0a\xb2\x90\xa4*\x8e\xa3\x01\x82c\x98\x86@l\xb7@\xc0fQ\x16\xa1\xb5\x05\x5c\x80\x06\x84\xb0((\xc8\x08\xd2\x0dQTF\xa4\x05Y\x14\x08K\x90\xc8\x9anA\x09\x81\x04ADtD\x0d\x04\xdbFIB@gHU\xc0\x90\xa4\xeaU\xaa*\xa9zw\xfe\xa8\xa4$\x92\x04*\x9b\xd3\xce\xef\xf3y\x9fT\xee;\xf7\x9c\xdf9\xef\xdc\xe5\xdd{\x9fD\x03HMM\x0du:\x9d\xcf\x02#%IJhH\xe6\x9f\x05B\x88|\xe0}`\xcb\xd2\xa5K/\xfc\xfc\xbe\xf4\xf3\x82y\xf3\xe6\xbd\x08<\x13\x1e\x1e\x1e\x1a\x1f\x1fO||<\x00\x9d;wn[\xa6\xad\x8c\xe2\xe2b\x00\xce\x9c9\xc3\x993g\xb8r\xe5J\x05\xb0r\xe9\xd2\xa5/^-\xe7\x0b@jjj\xa8\xcb\xe5Z\x01L\x188p 
\x03\x06\x0chO\xbem\x8e\xc3\x87\x0f\xf3\xe1\x87\x1f\x02l\x09\x0e\x0e\x9e\xfd\xd2K/U\x00\x04\xd4\x09\xb8\x5c\xae\x15\x1a\x8dfBJJ\x0a&\x93\xe9\x97\xe2\xd9f\x180`\x00\xf1\xf1\xf1\xa4\xa5\xa5Mp:\x9d\x00\x13\x01\xd4\x00\xf3\xe7\xcf\x7f\x16\x98?c\xc6\x8c_\xa5\xf3u0\x18\x0c\xdcv\xdbm\x9c8q\x22\xa1O\x9f>\x95\xc7\x8f\x1f\xff\x5c\x9d\x9a\x9a\x1a\xeav\xbb\xf7\x0c\x1a4H\xd3\xa3G\x8f_\x9ac\x9b\xc3`0 I\x12\xe7\xce\x9d\xbb{\xe0\xc0\x81\xebU.\x97k\x82V\xab\x0dMJJjW\x22\xa2\xa4\x18w\xe6rDIq\xbb\xda\x05HJJB\xab\xd5\x86\xba\x5c\xae\x09\x01\xc0\xc8\xee\xdd\xbb\xa3\xd1h\xda\xc5\xb8p\xc8\xb8?X\x8f\xe7\xabC\x00\xb8\xbf\xc8E}\xc7 \x02FLE\xd2\xea\xdb\x85\x83F\xa3\xa1{\xf7\xee\xe4\xe5\xe5\x8d\x0c\x00\xfa\xd7\x0dum\x8d\x9a\x83[q\x1f\xdb\x03N{\xbdr\xf7\x89\x1c\xdc\xa7\x8f\x13\x90\xfc\x10\x81\x0f\x8co\x17.\xf1\xf1\xf1\xe4\xe5\xe5\xf5W\x01m\xfe\xf4=\x85\xa7\xa9z\xf1\x09\xaa\xb3\xb7\xa2\xc86\x14\xb7B\xc0\xa0\xb1\xf5\xfe*\xb2\x8d\xea\xec\xadT\xbd\xf8\x04\x9e\xc2\xd3m\xca\x07~\xf29\x00 &&\xa6M\x8c(e\x16\x5c\x19K\xf1\x14\xe6\xfb\xca\x02z\xf6!\xe8\x0fO\xa1\x8a0\xe2\xcaJ'p\xc8x\xd4w\x0e\xa6\xfaoo\xe1>}\x1cq\xb9\x04\xc7\x9b\xcf\xa2\x8eK x\xdc<T\x11\xc66\xe1V\xe7s\x00\xb4~\x06\x88*\x19W\xd6\x16\xaa\x0fg\xfa\xca\xd47wE3z&\xean?\xcd\xac\x15\xb7\x02\x80*\xc2\x88&\xe5e<\x05\xf98w\xaf\xc6\xf3\xdfE(\xdf\x9e\xa4f\xc1\x18\x82\x06\x8c\x22x\xf8\x04\xa4\x90\xd6\xed\x1f\xeae@k\xc2uh7\xae\x0f\xd2\x11\x0e\x19\x00)D\x8f\xe6\xd1Y\x04\xdd;\xe4\x1aY\xe1Q\xea\xfd\xaf\xee\x96\x80\xee\xcf\x9b\xa8\xfe\xec\x00\xce\x9d\xab\xbc\x81\xcc\xddE\xf5'\xfb\x09\x1e1\x91\xe0A\xa3[\x9bn\xeb\x05\xc0\xfd\xddI\xec\x1b\x97 ~4\xfb\xca\x82\xef\x1f\x83f\xe4\xa4F\x9f\x9ep+\x0d\x96\x07\xdd;\x84\xc0\x84\xbe8\xdf\xdf\x8c+g\x17\xc2f\xc5\xb1m%\xce\x9c\xdd\xe8&/ 
\xe0\xdfz\xb7\x16\xed\x96\x07@\xb9l\xa6\xea\xdd7\xa9>y\xccW\x16x{ot)\x7fF\xd5\xa9\xe9Y\xa5\xe2i8\x00\xe0\xcd\x1c\xedc\xb3\x08\x1e4\x0a{\xda+\xd4|{\x12J/b}\xf5)\x82z'\x132\xf6YT\x91-\x9f\xb56;\x00\xc2n\xc3q`'\x8e\xbfm\xf4\x95\xa9\x22M\xe8\xa7-\x220\xfe\xc6\x9ePc\x19p5T\x9dL\x18\x16\xbcE\xcd\x99\x93\xc8\xeb\x16\xa3\x5c6\xe3\xfa\xe2(\xae/\x8e\xa2\xfd\xc3d\xb4C\x1eE\xd2\x19\x9a\xebF\xf3\x02\xe0<\xba\x0f\xf9\x9d\xe5\x08{m;\xd7\x19\xd0=2\x05\xed\xb0\xc7\xea\xc9\xd5\xfc#\xcf\xf7;\xb0{\xe25z\x9a\xca\x80\x9f#0\xbe7a\xab\xf6\xe2\xd8\xbf\x03\xfb_7 \xec6\xec\xbb\xd3\xa8\xda\xbf\x1d\xfd\x1f\xe7\xa0\xe9\xff`s\x5c\xf1/\x00\xd5\xff\xc8CN_\x8e\xfbB\x81\xaf,d\xf8c\xe8FM\xa9\xf7\x14\xaa\xb2v\xe0\xc8\xde\x81\xe7\x92\xb9^}\xcd}\xc3\xd1\x8dJA\x1d\xe5M]q\xe3\xfe\xfb\xa0\x1d\xf6\x18\x9a\xfe\xc3\xb1gn\xa0*k\x07\xc2&c]\xb3\x98\xaa\xac\x9d\xe8'\xce!\xa8\x81@7\x05\x95?\xc2\xe5\x8b\xa6Ss\xbe\x10!$\x02\xe3\x13\x09_\xb6\x0d\xfd\x849\xf5\x9c\xb7\xaeY\x8c-}\x05\xeeR\x0bBH\xf5.\xc7\x91\xfd\x94\xcd\x1d\x8b\xe3\xa3,\x00\x84\xb8f=\xe6\x86 
\xe9\x0c\xe8'\xcc!|\xd96\x02\xe3\x13\x11B\xa2\xe6|!\xe5\x8b\xa6\xfb\xad\xcb\xaf\x0c\xa8#\x1c:\xefu4w\xf5\xbb\xe6\xbe\xbcs\x03UG\xf6S\xb7\xce\xa2\x8e4\xa1\x1f3\x99\xaa\xac]\xd4\x5c(@\xa5\xd3\xa3\xc82\x95\xab_F\x1d\x19\xd3\xec\x00\xf8\xc8\xc7v#l\xf1:\x9c'>\xa6b\xe9\xf3\xcd\xd2\xe1W\x06\x08\xe1\xbd\x1ar^\xb1\xdb\xb0\xef\xdb\x85\x10\x10|g?:>\xbd\x10Ig\xa0b\xf5\xcb\xb8/\x99\x89x#\x83\x887\xdeE\x15iB\x08\xa8X\xb5\x18!\x9a\xc5\xf9\x1ah\xee\xea\xe7\xe3\xe6/\xfc\x0b\x80\x22!\x94\x86\x9f\x9a\xe3\xf0~<\xb2\x8cP$\x02n\xe9F\xf0]\xfdp[,\x04\xdcr\x1bB\x91\xa8:\x92\x8d:\xcaD\xc7\x99\x8b\x10\x8a\xe4m\x22\x8d\xe8j\x0e\x9a\xe2\xd6\x14\x9a\xd5\x04\x1a\x82\xe3\xc4'\xbe\xfb\xf6\xc3\xd98N\x1cC\x15e\x22\xe2\xd5\xb7q~\xfe1\xe5+_A\x0a\xd1\xe3<q\xac\xc5\xa9\xef/\xb7\xa6\xe0g\x13\x90\x1a5Tw/\xe4w\xc3q\x97Zp[,\x84\xcdZ\x88J\xa7G\x1deB\x08\x09\xeb\xf6\xcd\xa8#cjS\xb6q]\xcdAs\xf5\xb5Z\x06\xb8K\xcd\x08!\xe1*\xf6\x8e\x12\x1e\xd9N\xd5\xe7\xc7\x08(.\xe4\xca\x9b\xaf\xa2\xbd;\x19\xd7\xdfO\x12\x10\x1b\xe7\xd5\xf5_\x9f\x5c\xa3\xa3\xba\xb8\x10\xc7\xe7\xde\x19e\xf0oz\xa3\xf9M\xafV\xe1\xd6\x14Z-\x00\xea\xc8\x18j,\xa5T\x9f+B{O2!w\xf7\xa5l\xc5\x12\x00t\x03\x87\x121\xfbOTn\xdb\x84uo&\xa1OL\xf4\xe9\x92?\xcc& 
\xcaD\xe5\xf6\xcd\xb8\xfe~\xea*\x8d\xe9\xb5\xf5\x16\xb4\x98[S\xf03\x00\x8d\xdf\xd3\xdc\xdd\x17\xc7\xd7^\x07\xb4w\xf5E{O2\xaaw\xd3\x91$\x90\x0f\x1d@\x0a1\x10:v\x12\xd6\xbd\x99\x5cY\xbf\xdaW\xafl\xf9\x92\xfa\x84\xa2M\xdeE\x13\xbb\x8c|(\x9b\xc0\xd8\xaet\xf8\xfd\xf5\xdf\x02\x9b;\xa2\xb4Z\x1f\xa0\x1f4\xd4\xd7\xd6\xcb\xd6\xaf\xa6l\xdd*\xdc\xa5\x16Lk\xd2\x09K\x99\x85|\xe8\x00\xa5/-\xc0#\xdb\x11B\xa2\xc3\xe3\x93\x08\x8c\xed\xe6\xd3\xa9\x8e2\x111\xfbO\xdc\x94\x9eI`\xe7\x9f\xca\xcb\xdfMG\x91\xe5\x16qk\x0a\xad6\x0c\xaatz\xa2\x16.A\x0a1\xa0\xd8\xec\xc8\xb9\x07\x09\x88\xed\x06\x02:\xfc~4\xa1OL\xc2y:\x1f\xdd\xc0\xa1\xe8\x06\x0e\xa5\x22c\x0b\x8a\xf0\xea\x0b\x8c\xedF\xcc\x9at\xf4\x83\x86\xa0\xc82\x8e\xfc|\xc2Sf!\x14\x09\xc5f\xc7v\xe8@\x8b\xb85\x05\xbf\x02\xa0\x08\x09\xa5\x89(\x07u\x89\xc3\xf4\xfaJTQ&\x14!\xe1**\xe2\xfb\xf1c\xa8|/\x13k\xeeA\x14!\x11\xf9\xdc\x0bD>\xf7\x02\xc1=z\xe1**\x22\xb8G/\x9c\xb5r\xf6\xcf>\xa1bO&\xe8\x0ctxh\x14\xda{\x92Q\x84\x84\xfd\xb3\xe3-\xe6\xd6\x18Z\xad\x13\xacCP\x978n^\xbb\x89+\x19[\xa8x\xef\xafxlv.\xaf}\xab\xf6\xae\x845\xf7 \xb6\xdc\x83\xb8\xce\x15\xf1/k7\x11\xdc\xa5+\x8a,\xf3\xe3\xda5\x98S\x17\xa2\xd2\xeb\x09}\xe8\x11\x00B\xeeMB\xfe\xf48U\xf9\xd7_$m\xa7y\xc0\x8du6*\xbd\x9eN\xd3\x9f\xe6\x96\x8c\x9d\x04u\xee\x8a\x10\x10\x10mD\xd3#\x81\xd2\xd7_\xa3*?\x9f\xa8\xb9/\x10\xdc\xa5+\x005\xa5\x16$\xbd\x1e!\xc0c\x93\x09\x1f?\x11\x80\xe0.q>\x9b\xaesE\xad\xc2\xed\xe7h\xf5\x0c\xb8\x1a\x81F#\xd1\xff1\x9f\xef\xa7N\xa1\xda\x5c\x8a\xdbf' 
\xda\x84G\x96)Y\xb4\x10}R\x1f\xaaN\xe7\xa3\xc82\x81F#\x86\xc1C\xb0\xe6\x1c\xa4\xeat>\x9a.]){\xe7\x1d\x9fM\xcfu:\xc2_|&\xd8\x18<\xb2L@\xb4wt\xf0\xd8\xecDN\x7f\x0a\x95\xce\x80\x10\x12\xb6\xe3\x9f\xd2a\xf0\x03\xc4n\xdbA\xec\xb6\x1dt\x18|\xbfw2Ux\x8e\x0b))\xd8\xf3O\xffd\xf3:O\xb7}F\x01?\x8c\xd4X,\x5c\x5c\xb8\x88\x1ff?G@\xb4\xd1W\xd7\xbct\x19*\x9d\x81[\xd3\xd2\x10BB\x9f\xd4\x87@\xa3\x11\x8f,S\x99\x93\x8b\x10\x12\xa5o\xbd\x8d\xb6g\x02]\xb6o\xbf\xcaf\xd3v\xdbi*|}\x99\x1a\x8b\x85\xcb[\xb6Ry0\x87@\xa3\x91\x7f]\xb1\x9c\x90\x84\x9e\x94\xbc\xf6:\x95\x07sp\xdbdnzy\x06\xa2V\x9f\xb3\xb0\x88\x8a\x839T\x1e\xcc\x01\xc0\xd0'\x89\xe8\xa7g\xf8\x82Rg\xf3\xf2\x96w\x88\x99\xff<\x81\xc6\x867J\xdae\x22\x84\x90\xbcW\x03\xf0\xc82\x97\xd3\xb7R4f,U\xa7\xbe&f\xde\xf3t\xdd\xb9\x8d\x90\x84\x9e\x00\x18\x92\x92|\xf5\xed\xa7NS\xf2\x9f\xcb@H\x94\xaeY\x8b\x22\xdb1\xf4\xe9\x03B\xf29\x0fP\xbaf\xad\xaf\x8eb\xb3S4f,%\xaf-\xa3\xc6b\xf1\x8b[Shq'\xe8\x91e\xcav\xbf\xc7\x95\xcc=H\x12D?=\x83\xf0Q\x0f_#g\xe8\x9b\xe4\xab_q \x97 \x93\xb7Y\xdc\x96\xbd\x07\xb5^\xcf\x0f\x0b^D\xdf\xc7\xdb\x1cj,\xa5\x98W\xadE>\xfe) \x11>\xeaa\x8c3\xa7Sq \x97\xcb\xe9[\xb1~\xfc\x19\xe1\xa3\x1e\x22b\xf4\xc3\xa8\xf5\xfaF\xb9\xdd\x08Z\x14\x80\xf2\xec\x5c,\xab\xd7!!\xae!\xd4\xa0\xb1h\xafs\x1d\x1f\xb8\x1fM\x5c\x17\xe4\x93_\xfb\xe4\xab-\xa5\x04\x19\xa3\xb9\xb8\xe4/T\x1c\xc8\xad\xad!a\xe8{/\xc6\x99\xde\xb5\xbe\xd0!\x83\x09\x1d2\x98K\x9b\xb7r%s\x0fe\x99{\x89\x18\xf5\x10Q\x93\xc6\xb5\xef\xcbPyv.\x977o\xa5\xdar\x89\x88\xd1\x0f\x135il\x93\x8e\x03\x5c\xda\xb4\x95js)\x005f\x0b5f\x0b\x81\xd1Q\x98W\xad\xc5Yp\xcewi\xe2\xba\xa0\xd2\xe9\xf1\xc82A\xa6hnZ0\xf7\x1a]Q\x93\xc6\x131\xfaa\xcav\xef\xa1l\xf7\x1e*\xb2s\x9a\xdd\x07\xf8\xb7/\xa0x\xd7\xb1/\xbe\xf2:a\xc3\xee\xe7\xd6I\xe3\x082\xdd\xd8\xee\xad\xfd\xd4i_\xfd\xf2\xfd\xde\x0eO\xad\xd7\xe3,(B\xd7\xab'\xf6\x93\xf9\xc4\xaey\x03\xb5AG\xd1\x1f\xa7\x01`\x9c9\xad\xd1\xc0\xaa\xf5z\xa2&\x8d#t\xc8 
~\x98\x9f\xea\xd3\xed/\xfc\x1c\x06\x95\xab.\xffB.\x84@\x08\x85\xc8I\xe3\xf8\xf7\xcf>$r\xd28\x82\xe3:\x13\xbb\xe6\x0dB\x87\x0eF\x08\x05\xb5AG\xe5\xc7\x9f\x22\x84B\xa01\x8a\x0e\xc9M\x1f\xdb\xa96[\xb8\xb4i+\x8e\x82B\x1f/\x7f\xe1W\x00nym1A\xc6(\x10\x0a\xe5Y\xd9\x14\x8d\x9fL\xe9\xc6-xl\xd7\x7f]U\xebC@(T\xec?P+/|;#5f\x0b\x08\x05M\x5cW\x9c\x85\x85 \x14t\xbd\x1a?\xb0\xe5\xb1\xc9\x94n\xdcB\xd1\xf8\xc9\x94ge\x83P\x082Fq\xcbk\x8b\xfdq\x07\xf0\xb3\x09t\xe8\xd7\x07]\xef\x04~\xdc\x99I\xd9\xaeL<V+\x976l\xa6b_6\xa693\xe9\xd0\xafo\xa3u;\x8dy\x04\xeb\xd1cT\x97\x94\xf0\xfd\xf3\x7f\x02\xa0\xa6\xc4L\xe9\x86t_\x00\x8a\xa7?\x83\xfd\xa4wQ%\xd0\x18\xdd\xa0\x1e\xeb\xc7\x9f`^\xbe\x9a\xea\xda\xa1Pm\xd0\x131f\x14\x9d\x1e\x1d\x85\xda\xe0\xff\x19\x02\xff\xe6\x01\xb5\x06\xa3\xa7L\xa4k\xc6fB\x87>\x80P\x14\x5c%%\x5c\x98\xfb\x02\xc5\xd3f\xe2((l\xb0\x9e\xaew\x82O\xdeq\xf6,\x8e\xb3g\x11B`\xcf;\x89\xe3l\x01BQ\xd0\xf5\xee\x89P\x94\xda\xdf\xf5\x8f(;\x0a\x0a)\x9e6\x93\x0bs_\xc0UR\x82P\x14B\x87>@\xd7\x8c\xcdDO\x99\xd8,\xe7\xa1\x05\xbb\xc3A\xa7. 
l\xf8\x10.\xa5mB\xce;\x85\xfc\xd5I\x0a\x1f\x9f@\xd8\x83C\x89\x99\xf3\xcc5\xa4\x82L\xd1 \x04\xda8\xef[\xa0.\xb1\x17\xd1)O\x22\xe7\x9d\xa2x\xea\xd3\xe8z\xf7\xf2\x0d5\xdan\xde\xc5S\x8fM\xa6d\xf9J\xca\xf7e\xfb\xf4\xe8\x13{\x11\x95\xf2$\xfa\xc4\x1b_4m\x0c->\x1f\xa0O\xec\x85~\xfd\x1a\xca\xf7\xed\xa74m\x13\xd5%f\xca?\xc8\xc2z\xe4(\x9d\x1e\x1fC\xf4\xd4\xc9>Y\xc7\xd9\x02\x10\x0a\x81%\xe6\xab\xe6\xaf\xde\xfe\xc0y\xf6\xac\xaf_P\x1b\xf4\x94\xae\xdf\xc8\x8f\xdbw\xf9\xde\x04\x83bLD\xa7<I\xd8\x83\xc3ZJ\xdb\x87V;!\x12\xf6\xe00:\xf4O\xe6\xf2\xf6\x9d\x94\xae\xdb\x88\xdbj\xc5\xb2n\x03W>\xd8G\xcc\xdc9t\xbc\xaf\x1f\x1e\xab\x15\xa1(\xc8_~\x89\xc7&\xe3\xba\xf8?\xc8_\xe5\xe1\xb1\xd9\x10\x8a\xc2\xc5e+\x00\x08\x8a1\xf2\xed\xb0\x91T\x97\xfc4\xe5\x8d\x9e6\x99\xc8\xc7\x1fEmh\xfeY\x80\x86\xe0w\x1f\xd0\x14\xd4\x06\x03\xc6\xa9S\xb8}\xff^:\xf6O\x06\xa1P}\xb1\x84\x0b\xb3\xe7rn\xf24<V\xab\xb7\xc76\x99\xf0X\xad\xe8\x13{\xa3K\xec\xe5Mw\xa1\xd4\xabS}\xb1\xc4Wv\xfb\xfe\xbd\x18\xa7Niu\xe7\xa1\x95\x03P\x87\xa0\x18\x13\xb7\xaeXF\x97\x8d\xeb|\xce\xc9_~\x89\xe3\xbb\xef@(DO\x9b\x02B!l\xc4p\x8c\xd3R\x08\x1b1\x1c\x84B\xe5\x91#\xde& \x14\xb4\xdd\xe2\xe8\xb2q\x1d\xb7\xaeXFPL\xdb\x1d\xe0n\xf5SbWC\x7fG\x22\xddvo\xe7\xca\xfb\x1fpq\xe9_\xf0\xd8l\x00\x9c\x9f5\x1b\xa1( 
\x04\x973\xb6aY\xbb\xde\xfb?\xde,\xbai\xde\x5c\xc2G\x8ehKj>\xb4i\x00\xea\x10>r\x04\x1d\x7fw\x9f\xd7\xd9\xb7\xd7\xe2\xb1V\x02P4\xf1\xc9zr\xc6\x19\xd3\x89\x1c\xf7D\x9b\xa4zch\x93&\xd0\x10\xd4\x06\x03\xc6\x19\xd3\x88\xcf9\x80\xfe\x8eD_\xaa#\x14\xf4w$\x12\x9fs\x00\xe3\x8ci\xed\xea<\xb4S\x06\x5c\x8d\xa0\x9bb\xe8\xbae3\xf2\x17_b~\xebmLO\xcd@\x7f\xe7o\xdb\x9b\x86\x0f\x01\x00f\xb3\xb9\xdd\xbf\x14\xd1\xdf\xf9[\xe2\xeeLoW\x9bW\xa3\xee\xa3*\x15\x80\xc3\xe1\xf8\xc5\x88\xfc\xd2Pi\xb5\xda\x0a\xb3\xd9|}\xc9_\x19j}\xbe\xa0R\xab\xd5G\xf3\xf2\xf2\xae'\xff\xabC\xad\xcfGU\xd1\xd1\xd1+\xcdf\xb3\xafM\xfc\x7f@qq1f\xb3\x19\x95J\xb5R5e\xca\x94\xa3Z\xad\xf6hFF\x06\xb5\xdf\xd3\xfd\xaa\xe1t:\xc9\xc8\xc8@\x08\xb1w\xc9\x92%\xf9*\x80\xe4\xe4\xe4\x89\x1a\x8d\xa6\x22--\xedW\x1d\x04\xa7\xd3IZZ\x1a\x0e\x87#_\xa3\xd1L\x84\xab\xf6\x9b\xbe\xf9\xe6\x9b\x84\xac\xac\xac\x8f\x9cNg\xe8\xf0\xe1\xc3IL\xf4\xef\xcc\xed\xffu\xe4\xe5\xe5\x91\x95\x95U\xe7\xfc}u\x9f\xce\xd6[L\xff\xe8\xa3\x8fn-((H?\x7f\xfe|\xff\xb0\xb00\x12\x13\x13\x89\x8d\x8d\x05\xfe\xb9>\x9ev:\x9d\x94\x94\x94\x00p\xfe\xfcy\xf2\xf2\xf2(//G\x08\xf1\xa6F\xa3y\xa9\xceyhd\xc7\xf1\xd0\xa1C\xfd\xcf\x9c9\xf3LEEE\x7f\x87\xc3\x11\xdaN\xbc\xdb\x0a\x17\x84\x10{\xf1~9~\xe1\xe77\xff\x17\xd7q\x00\x14\xc6\xb0\x7f\x82\x00\x00\x00\x00IEND\xaeB`\x82\x00\x008k\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x007\xf8IDATx\xda\xec\xddOllY~\x17\xf0[\x93\x8e\xf2\x8f\xc4o\xa4\xe1\x9fF\xc4~,\x185\x04\xd9#\x88h\x10#WGH\x84M\x9eGb\x93\x95\xab\x17,\xb2\x18\x9e{\xc5\xec\xde\xb5\xc4bv\xe3\xc7\x08\x09\x89\xc5+\xaf\x88X\xa4\xedeHP\xdb\x1a\x04\x1d\xfe\xa8\xed0R\x18\x02y6\x22L$4\x9a\xe7D\x84\x00I\xcc=\xed\xe3i\xf7\xeb\xaa{\xcf\xad\xbaUu\xef\xad\xcfG\xba\xaa\xf7\x5c\xd7\xf5\xe7\xdcr\xd5\xb7\xce\xb9\xe7w\x06\xb7\xb7\xb7\x19Pn\xe3+o\xed\x15\x17[7\xdf\xf8\xe0Hk\x00\xd05\x9f\xd1\x04\x90\xe4 n\x00 
\xf0A\xdfl|\xe5\xad\xad\xe2b\xb7\xd86cO\x1f\x00\x08|\xd03\xf9\x83\x7f\xeb\xe5\x03\xa0s\x06\xce\xe1\x83\xe96\xbe\xf2\xd6\xa3\xe2\xe2*\xfc\xf3\xc1\x8f\x1f\xdf|\xe3\x83+\xad\x03@W\xe8\xe1\x83r{\xaf\x85\xbd \xd7,\x00\x08|\xd0\x1f\x93\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x08u\xc3\xe2bs\xd2U\xd9]\xcf\x1f\x00\x08|\xd0qe\x134r\xcd\x03@W\x98\xb4\x01\x13\xc4R,/+v{\xfb\xe6\x1b\x1f\x9ci-\x00\xdaN\x0f\x1fL6J\xd8G\x89\x16\x00\x04>\xe8\xb0\x940\xf7$\xf6\x04\x02\x80\xc0\x07]R\x84\xb8Q\xf6\xe9R,\xd3\x8c\xb4\x18\x00\x02\x1ft\xcf\xc1\x82\xf6\x05\x00\x81\x0fV-\x96b\xd9\xae\xf3+\xb1G\x10\x00\x04>\xe8\x88Y\xc2\x9b^>\x00ZMY\x16\x88\xe2\xea\x19\xdf\x9b\xf1\xd7\x95h\x01\xa0\xb5\xf4\xf0\xc1\xc7\xe6\xe9\xa9\x1bi>\x00\x04>h\xbfyB\xdb\xbe\xf5u\x01\x10\xf8\xa0\xc5\xe2\xc4\x8b\xcd9o\xc6\xb9|\x00\x08|\xd0b\xa3\x96\xdc\x06\x00\x08|\xd0\xb4\xb8Z\xc6n\x037\xb5\xa9D\x0b\x00\x02\x1f\xb4S\xde\xe0m\x09|\x00\xb4\x8e\xb2,\xac\xb58\xd1\xe2*K_J-\xc5\xe3\x9bo|p\xa5u\x01h\x0b=|\xac\xbbQ\xc3a/\xc85+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x84\xb2\xbdl\xfeR,\x13o:s.\x1f\x00\x02\x1f\xb4\xc2\x22C\x99\x9a|\x00\x08|\xb0J\xb1\x14\xcb\x93\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96i\x94h\x01`\xe5\xf4\xf0\xb1\x8e\xf6\x96\x14\xf6\x02\xe7\xf2\x01 \xf0\xc1\x0a,3\x84\x8d\x94h\x01@\xe0\x83%*\xc2\xd7\xb0\xb8\xd8^\xe6]fw=\x8a\x00 \xf0\xc1\x92\x8cVp\x9f\x86u\x01X)\x936X\x1b\xb1\x14\xcb\xcb\x15\xdd\xfd\xdb7\xdf\xf8\xe0\xccQ\x00`\x15\xf4\xf0\xb1NFkz\xdf\x00\x08| \xf0-\xc1~\xeca\x04\x00\x81\x0f\x16\xa1\x08[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6Nq\xb1\xdb\x82\x87\xb2\x19{\x1a\x01@\xe0\x83\x86\xb5\xa9,\x8a\xc0\x07\xc0\xd2)\xcbB\xaf\xc5U.\xbe\x97\xb8\xfb\xcd\xfd\xaf\xd5\xbc\x9b\xf0{\xaf\xb2\xf4s\x04\xbfx\xf3\x8d\x0f.\x1c\x1d\x00\x96E\x0f\x1f}7J\xd8\xe7\xba\xd8\xde)\xb6\xadb\x9b%\x88]\x14\x01n+\xde\xc6y\xc2\xfe\x0a1\x03 
\xf0A\x83\xca\xc2\xd5ivW\x10y\xab\xd8\xc6\xc5\xf6j\x9e;\x8a\xb71,\xfe\xf9\xc5b;.\xd9u\xdf\xfa\xba\x00\x08|\xd0\x80\x22T\x855l_\x1ff\x0d\xc3\xaf\xcf\x8b\xedq\x11\xce\xf6\x16\xb1\xfaE\x18\xae-\xb6Q\xf1\xcf\xcf\x16\xdbav\xd7\x83\xf8\xba\x91#\x04\xc0\xb2\xbc\xa1\x09\xe8\xb1\x87\xbd{\x97\xc5v\x14z\xe1\x96u\xe7\xb1\xc70\x0f[\x9c\x9d\x1b\xb6\xdd\x07\x8f\xed\xc8!\x02`\x19\xf4\xf0\xd1KqU\x8b\x10\xae\xc2\xd0j\x18\xb6\xddYf\xd8\x9b\x10\xfe\xee\x87{\x1f\xc7\xc7\xf4(\xf6@\x02\xc0\xc2\xe9\xe1\xa3\xcf\xc2\xb0\xedU\x9b\x1eP|<\xa3x\x0e\x9f\xf3\xf8\x00\x10\xf8`\xce`\xd5\xe6\xc7\x17\x86{_9R\x00,\x83!]\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00@\xe0\x03\x80%\x19\x0c\x06\x8f\x8am\xa8%@\xe0\x03\xa0\x9fao\xa7\xb8\xb8\xd2\x12 \xf0\x01\xd0\xcf\xb0wP\x5c|Xl\x1bZ\x03\x16\xcbZ\xba\x00,;\xe8=*.\xc6\xc5\xf6\xe4\xfeg\xb7\xb7\xb7gZ\x06\x04>\x00\xfa\x11\xf6\xc2\x10\xeeI\xb1mj\x0dX\x1eC\xba\x00,+\xec\xdd\x0f\xe1\xbe\x1e\xf6\xce\xb5\x0e,\x96\x1e>\x00\x16\x1d\xf4>5\x84\x0b\x08|\x00\xf4'\xec\xa5\x0c\xe1\x9ei)X,C\xba\x00,*\xecM\x1b\xc2\x05\x96L\x0f\x1f\x00M\x07\xbd\xbaC\xb8gZ\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b@Sao\x94\xdd\xf5\xd6\xd5\x0a{j\xf0\xc1\xe2\xe9\xe1\x03`\xde\xa0\x17\x86p\x8f\x8am_k\x80\xc0\x07@\xff\xc2^\x18\xc2\x1d\x17\xdb\xf6\x8c7\xa1\x06\x1f,\x81!]\x00f\x0d{\xa3\xecn\x08w[k@\xbb\xe9\xe1\x03\xa0n\xd0kr\x08\xf7L\x8b\x82\xc0\x07@\xbb\xc2\xde\xbcC\xb8\xc0\x0a\x18\xd2\x05 5\xec\x8d\xb2\xe6\x87p\xcf\xb4,,\x9e\x1e>\x00\xaa\x82\x9eY\xb8 \xf0\x01\xd0\xe3\xb0\xb7\x95\xdd\x15R^\xc8\x10\xae\x1a|\xb0\x1c\x86t\x01\x98\x16\xf6\xf6\x8a\x8b\x8b\xcc\xf9z 
\xf0\x01\xd0\xcb\xb0\x17\x86p\xdf+\xb6\x8d\x05\xde\x8d\x1a|\xb0$\x86t\x01x\x18\xf4\xb6\xb2\x05\x0e\xe1\x02\xab\xa1\x87\x0f\x80\xfb\xb0\xb7\xec!\xdc3\xad\x0e\x02\x1f\x00\xcb\x0b{\xcb\x18\xc2\x05V\xc4\x90.\xc0z\x07\xbd\xadluC\xb8g\x8e\x00,\x87\x1e>\x80\xf5\x0d{f\xe1\x82\xc0\x07@\x8f\xc3\xde\xca\x87p\xd5\xe0\x83\xe51\xa4\x0b\xb0^A/\xac\x9a\x11\x82\x96^=X#z\xf8\x00\xd6'\xec\x0d\x8b\x8b\xab\x96\x84=5\xf8@\xe0\x03\xa0\xe1\xb0\x97\x17\x17\xefgf\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\xdd\x96=\xb43G\x07\x04>\x00\x9a\xb1\x15\xc3U\x98\x8d\xbb\x13\x7f\xb6\xabY@\xe0\x03\xa0'noo/b\xd8\xfb\x94xN_\xf0\xfa\xe52\x02\xe1\x99\xa3\x03\x02\x1f\x00\x8b\x0f\x83ge\xe1\xeb\xb5@8\xcc\xf4\x0c\x82\xc0\x07@\x7f\x03a\x11\xfe\xae\x16t\xdb\xc0\x12\x98\xa5\x0b@\xa98\xc3wSK\x80\xc0\x07@?\xc3^\x98\xe5{\xd0\xf0\xcd\xaa\xc1\x07\x02\x1f\x00-\x12\x96`+\xab\xddwXl7\x9a\x09\x04>\x00:(N\xda\xd8/\xd9\xe5\xf2\xf6\xf66/.G5o\xfaL\xeb\x82\xc0\x07@;\xe4\x15\xd7\x7f4\xd4[\x84\xbeP\xd8\xf9\xb9\xe6\x02\x81\x0f\x80\x0e\x19\x0c\x06\xa3\xac\xbc\x0c\xcb\xe9\xc3\x99\xb6\xc5\xbfC\xf8\xbbL\xbc\xf93-\x0c\xcb\xa5,\x0b\xc0]\xc0\xd9\xca\xeeV\xa5\x18\x16[\x98\xa8P\xba*E\x11p\x06=n\x8b\xf0\xfc\xf3\x8a\xdd&M\xe4\xd8\xcb\xee\x8a<[\xaf\x17\x04>\x80V\x84\x9a\x9d\xec\xe3\x82\xc2\xc36\x86\x94x\x0e\xdd\xfb\xf1\xbf\xe71L]\x15\xdbY\x5cAcQB\x98++\xc3rX\xdc\xff\xd5\x84\x10|\x15{\x06\xdf+\xbbq5\xf8@\xe0\x03Xt\xc8\x0b\x81d/\xebF]\xb9\xad\x07\xff\xde\xcd\x1e\xf46\x16\xcf\xe5>\x04\x9e4\x19\x00cOgY\x19\x96\xeb\xecn\xe6\xee\xb40wR\xdcF8\x9f\xef\xa9W\x1c\x08|\x00\xcb\x0ay\x8fb\xc0\x0b!f\xbbc\x0f\x7f\xab\xe2\xfa\xef\x87\xc0\xe2y\x86 v\x16\x02`\x9cD1\xab\xaa2,yq\xfb\xaf\xcan \x9c\xcf\x17\xc3\xf5\xa4\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11W\xc5\xf6\xa2\x83a/\x18\xd6\xd87\xf4X\x86\x12*\xef\x15\xcf\xfbU\xb1\x8d\x8bm\xaff\x9b\x85\xfb{R\xb2\xcby\x11\xe6\xc6\x897\x17\xee{R}\xbeW^\x9d 
\xf0\x014\x19\xf4\x9ee\xdd\x9e@\xb05\xe3\xefm<\x08\x7f\xe1\xbc\xba<\x0e\xd5V9\xaa\xb8>O}\x00\xb1\x17pR\xe0\xbc\xf0*\x85\xe53\xa4\x0b\xf4\xd1\xc1\x1cA\xef&\xfbxrD\xd8^\xad0\xa4l6t\x1b!\xf8>+B\xdfi\x08u\x93&M\xc4\xc9\x16e\xbd\xa0\xc7u'[\x84\xfd\x8b\xdb=\x8c\xf7\x7fO\x0f\x1f\x08|\x00\xf3\x09=KE\xc8\x08\xe7\xb0\xed'\xfeJ8\xa7\xec,n\x17U\xe7\xa7-\xd9\xdb\xd9\xc7%b\x86\xf1r\x9e\x1e\xcb0\x5c\xfb$\x9e\xef\x97\xdf\x0f\xcf\xc6\xf3\x1c\x8f*B\xf0\xc1\x8c\xc7#\x8fC\xc5\xf7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x11G%\x81/\x84\x9d\xfb\x99\xad'm~\x12\x0fz\xd4\xbe\xff8\xe3\xd0\xec\xf0\xc16K/`\xf8\x9d\x17q\xe8;O\x08\x92Gs\x06\xe10\xb4{\x15\xefC\x0f\x1f\x08|\x00\x8d\x04\xa5\x8b\x22\xcc\x84U\x1f\x1e\x0eQ\x1e\x17\xdb\xb8\xeb5\xe0b\xfd\xbbq\xdc\x1e\xd6\x13\x1ce\xf5'\xa6\x84\xe0\xf7\x0f\x8b\xed\xf3%\xfb\x5c\xc7\xf5r\xe7y\xcc\xaf\xe2\x04\x92\xf7\x17\x5c?\x10\x98\xc2\xa4\x0d\xa0\xafB/_\x18\x8a\x0c\xe7\x90}\xb6\x08\x1a\xa3>\x16\xfc\x0d\x01\xaa\xd8B\x0f\x5c\x08~\x8f\x8b\xed\xdd,}\x89\xb3\xa0\xaa\xc7\xed\xa0\xa1\xc7y\x16\x1f\x1b \xf0\x014\x16\x84Bo\xde\xa3\xd0;\xd5\xb2\xf3\xf2\x16\xf9\x9c\xaf^\x0b\x7f\xa1\x00\xf2u\xc9\xaf\x84`\xf8\x97J\xae?or\xd8;<6\xafL\x10\xf8\x00h6\xfc\x1d\x14\xdbV\xf1\xdf/\x17\xdb\xe9\x84\xdd>Wq3#-\x09\x02\x1f\x00\xdd\x08\x7fa\xf5\x8dp\x0e\xdd}\xaf_\x18\xea\x0e\xb3\x93\xcb\xce\xdd{>i\xbd\x5c@\xe0\x03\xa0\xdd\xc1\xef\xa3^\xbf\xec\xae\xa0\xf3N\xc9\xae!\x10\xe6Z\x0c\x04>\x00\xbak\xee\xf5r\x01\x81\x0f\x80\x96\x8ae\x5c\xca\x8aR_\x9a\x5c\x01\x02\x1f\x00\xddV\x15\xe6\x0e4\x11\x08|\x00tT\x5c/w\xb7d\x97\xd3>\xd6*\x04\x04>\x80u\x09{a\xbd\xdc\xbcb7\xbd{ \xf0\x01\xd0a!\xcc\x95\xad\xbb{\xa8\x0c\x0b\x08|\x00t\xd4`0\xd8\xca\xca{\xef\xc2j\x1c&j\x80\xc0\x07\xb0\x90 rTl\x17q\xb8\x91\xc5Q\x86\x05\x04>\x80\x95\x84\xbdaq\xf1\xb4\xd8\xb6\x8b\xed*\x96\x0ba1\xed\xfc\xa4d\x97\xb0^\xeex\x01\xf7;\x16\xe6A\xe0\x03x8\x84\x18z\x9f>\x8c\xb3HY\x5c;O\x92/\x22\xecew\xb5\xfeB\x98?\x13\xe6A\xe0\x03\xd6P\x11\x00\x0eb\x18x\xdd\x8b\x18\x16h\xa6\x9dGS\xda\xf9\xdeq\xd3eX\x1e\x84\xbd{B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 
C\xad\xd4X;\x97\xf5\xee5\xbe^\xee\x84\xb0woC\xe8\x03\x81\x0fX/\x07Y\xf9\x04\x82\x91&ZJ;\x1f-\xa0\x0cKY\xa0\x13\xfa@\xe0\x03\xd6A\xecu*+\x0frl\xa5\x87F\xday\xab\xb8xV\xb2\xcbu\xd1\xce\xf9\x02\xeezXl\x97B\x1f\x08|\xc0z+\xebu\xba\xc9\xac\xf4\xd0\x94q\xc2qh\x5c,\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xc9uGj\xc1\xcd/\x96a)[/7\x94a9Y\xd4\xfd\xd7\x0c}J\xb6\x80\xc0\x07\xf4,\x88\x84\xb07mi\xaf\xd0\xbbg\xa5\x87f\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6J\xae\xd3\xbb\xd7L\xa8\xaeZ/\xf7y\xd1\xce\x17\xcbx,\xf1x\x8eb\x98\x9f&\x94l9q\xe4@\xe0\x03\xfa\x11D\xb6\xb2\xe9\xab=\xe8\xddk\xa6\x8d\xab\xca\xdd4^\x86%!\xf4\x85p9\xac\x08}\xbbj/\x82\xc0\x07\xf4CY\xef\xde\x89\xde\xbdF\xb4r\xbd\xdc\x18\xfa\xaa\x86\x91\xf7c\xef$ \xf0\x01=\x0d|z\xf7\xe6\x14g\xbc\xee\x97\xec\x12\xca\xb0\xac\xac\x9d\xe3Z\xbd\x87\x15\xbb}\xbdx\x1e{\x8e&\x08|@7\xc3H\x18j\x9c6k\xf4rY\xe7\x94\xf5\x5cU\x98\x1b\xad\xfa\x01\xc6\xba\x7f\xa7\x15\xbb\x8d\xe3\xf0? \xf0\x01\x1d3,\xfb\x80\xd7<s\x07\xea\xbd\xac\xbc\x0c\xcbi\x8b\x8aY\x87\xe0Y5s\xf7\xc4\xcc]\x10\xf8\x80\xee)=\x7fO\xf3\xcc\x15\xf6\xaa\xd6\xcb\x0dZsn\x5c\x8d\x99\xbb\x86\xf9A\xe0\x03:f8\xe5\xe7\x97\x0bX\xcbu\xddT\x95a9l[\x1b\xc7!\xfc\xbcb\xb7\xfdX\xb7\x11\x10\xf8\x80\xb6\x8b=P\xd3\x02\xc9\x99\x16\x9a\xabm\xb7\xb2\xf2\xde\xbb\xd6\x96\xbb\x89\x13H\xaa\xce\xe7;\xb2\xfc\x1a\x08|@7\x94}`\x0b|\xf3\xc9\xb3\xf22,\x07-/w3\xca\xca\x87v\xc3s\x1b;\xcc \xf0\x01\xed7\x14\xf8\x9a\x17\xd7\xcb-+\xc3r\x1eK\xa1\xb4\xd6\x83\xf3\xf9\xca8\xc7\x13\x04>\xa0\x03\xa6\xf5\xf0]*\xb6<\x97\xaa\xa1\xda\xbc\x0bO\xa2x\x0d\x84@7ih7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcbM+\xafq\xa5if\x13'3l\x97\xecr\xdc\xa22,)\xc2y\x88\x0f\x87v\xc3D\x93\x1d\xf5\x19A\xe0\x03\xbacZ}8\x1f\xe6\xb3\x85\xbd\xaa2,K_/w^q\x16qxNz\xf5@\xe0\x03:\x1aN2\x81\xafQ\xa17\xacl\xa2\xc6Q\x17K\xdd\x84\x90\xa7W\x0f\x16\xeb\x0dM\x00,\xd0\xdb\xc5\xb6\x15\xb7a\xbc\x0ceZ\x9c\xbfW?@\x87\xb6{V\xb2\xcb\xb5\xde1@\xe0\x03\x96*N\xca8\xd3\x12\x8d\x19W\x5c\x7f\xa0\x89\x80i\x0c\xe9\x02\xb4\x5c,\xc3R\xb6^\xeey\x9c\xf1\x0a 
\xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0U\x83\xc1\xa0j\xbd\xdcc\x93\x1d\x00\x81\x0f\xa0\xbba/\xcct\xceKv\x09eX\xf4\xee\x01\x02\x1f@\x87\x85\xb0WV\x86%\xb7b\x09 \xf0\x01t\xd4`0\x08\xcb\xd2=-\xd9%\x94a9\xd2R\x80\xc0\x07\xd0]Uan\xa4\x89\x00\x81\x0f\xa0\xa3\x06\x83\xc1^V]\x86\xe5LK\x01\x02\x1f@w\xe9\xdd\x03\x04>\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xb0\x8b\xeb\xe5\x02\x02\x1f\x00\xd9\xf7\xd7\xcb-+\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00\x1d\x96g\xe5eX\x0e\x94a\x01\x04>\x80\x8e\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x04>\x80\xee\xca+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eX\x8e\x95a\x01\x04>\x80\xee\x86\xbd\xb0^n\xd9D\x8c0Q#\xd7R\x80\xc0\x07\xd0]a\xa8\xb6l\xa2\xc6\x912,\x80\xc0\x07\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\xaa\xdap'\xf6\x92\x02%\xde\xd0\x04\x00+S\x15\xe6\x94a\xf9d\xb8\x1b\x16\x17;q\x0ba\xf9\xfe\xbc\xc7\xb7\x8b\xedL\x0b\x81\xc0\x07\xd0\xc6\xf0\xf2\xa4d\x97\xb0^\xee\x89\x96\xfa\x84\xf7\xa7\xfc|G\xe0\x83r\x86t\x01Vc\x5cq\xbd2,\x13B\xf0\x94\x9f\x1b\xd2\x05\x81\x0f\xa0]\x06\x83A\x08se\xeb\xe5\x862,\x17Z\xeaS\xae\xa6\xfc|\xa8i@\xe0\x03hS\xd8\x0b\xbdQy\xc9.\xa1\x0c\x8b\xde\xbdz\x81\x0f\xa8\xe0\x1c>`\x19!g\x98\xdd\x9dd\x1f\xb6\xfb\x7f\x87\x1e\xae/\xaf\xe1yj!\xec\x95\x95a\xc9M\xd4\xa8\x1d\xf8v5\x0d\x08|\xc0\xea\x9dL\x099\xc3x\xdd\xba\x04\xdf0\xb9\xe0i\xc9.\xd7E\xd8S\x86\xa5~\xe0\x03*\x18\xd2\x05\x96\x15\xf8&\xd9[\xb3v\xa8\x0as#/\x15@\xe0\x03\xfa\x16\xf86c\xafW\xef\x15\xcf3\x84\xdb\xb2\xa1\xc7s\xeb\xe5V\xba*i\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xcd\x94\xabGk\xd2\x0cz\xf7\xe6\x7f\x1d]i\x05\x10\xf8\x80v\x1b\xafk\xd0\x19\x0c\x06yV^\x86\xe5\xb90\x03\x08|@\x1fL\xeb\xe1\xda(\x02QoC_,\xc3RVf%\xf4|\xe6^\x1e\x80\xc0\x07t^\xec\xc1\x9a\xb6RB\x9f\x03O\x08\xbaeeX\xac\x97\x0b\x08|@\xef\xc2\xcf$\x9b}\xec\xe5\x8b\x13\x09\xf6Kv\xb9,\xc2\xde\xd8\xcb\xa2\xd6\x17\x87\xc1\x94\xedL\xeb\x80\xc0\x07\xb4\xe3\xc3:L\xde\xb8\x9eru\xde\xc3\xa7\x5c\xf5\x9c\xac\xa8\x01\x08|@/M\x0bA\xbd\xea\xe5\x8b\xcf\xa5\xac\x0c\xcb\xa9^)@\xe0\x03z)\x0eaN\xed\xe5\x8b\x93\x1c\xba\x1e\xf6\xac\x97\x0b\x08|\xc0\xda\x9
b\x16\x866{\x12\x84\x0e\xb2\xf22,G\xca\xb0\x00\x02\x1f\xd0k\x15\xbd|\xcf\x06\x83\xc1VW\x9f[|\xec\xcfJv\x09\xcf\xdbz\xb9\x80\xc0\x07\xac\x85\xb2\x9e\xbcq\x87\x9fWU\x98\xcb\x95a\x01\x04>`-\xc4\x19\xbb\xd3\xea\xf2\xed\x0e\x06\x83\xce\x0d\xed\xc62,OJv9W\x86\x05\x10\xf8\x80u3*\xb9.\xef\xe0\xd0nU\xef\x9e\x89\x1a\x80\xc0\x07\xac\x978q\xe1p\xca\xd5au\x8aqW\x9eK\xec\x91\xdc.\xd9\xe5\xb8x\xbe\x17\x8e: \xf0\x01\xeb\x18\xfa\xf2\xe2\xe2r\xca\xd5\x9d\x18\xdaU\x86\x05\x10\xf8\x00\xaa\xed\xc5P4\xc9\xd7\x8b@\xb5\xd3\xf2\xc7\x1f\xc2^\xd9z\xb9G&j\x00\x02\x1f\xb0\xd6\xe2\xd0\xee\xa8d\x97q[\x1f{<\xcf\xf0i\xc9.\xd7\xb1\x17\x13@\xe0\x03\xd6>\xf4\x85Y\xbb\xcf\xa7\x5c\xdd\xe6s\xdf\xaa\xc2\xe8\xc8\xd1\x05\x04>\x80\x8fC_8\xcf\xed\xf8\xc1\x8f\xc20\xef;\xc5\xcf[\x19\x9a\x06\x83A\x18\x8a.[/\xf7\xdcz\xb9@\x1b\xbc\xa1\x09\x80\x96\x85\xbeQ\x9c\x04\xb1Ul\xa3\x96\xcfl\xad*\xc32rD\x01\x81\x0f`r\xe8\xdbk\xfbc,Bi\x9e\x95\xaf\x97\xfb\xdcz\xb9@[\x18\xd2\x05\xa8\x1f\xf6B\x0fdY\x99\x950\x14\x9dk)@\xe0\x03\xe8\xae0\x94[V\x86\xe5@\x19\x16@\xe0\x03\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05\x04>\x80n\xcb+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eXN\x95a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\x95\x95a9T\x86\x05\x10\xf8\x00:*\xae\x97[\xd6{w\x9dU\x17a\x06\x10\xf8\x00Z\xac\xaa\x0cK\xae\x0c\x0b \xf0\x01tT,\xc3\xf2\xa4d\x97seX\x00\x81\x0f\xa0\xdb\xaa\x86jsM\x04\x08|\x00\x1d\x15\xcb\xb0l\x97\xecr\xac\x0c\x0b \xf0\x01\xb4/\xc4\x8d\x8b\xed\xa2\xd8v*\xf6\x0beX\xcaz\xf7\xc2z\xb9\xca\xb0\x00\x02\x1f@\xcb\xc2\xde(\xbb[\x16-\xf4\xda\x9d\x15\xff/\x0blyV>Q\xe3\xc8D\x0d@\xe0\x03hW\xd8\x0b=z/\x1e\xfc(\x84\xb9\xaf\x17??\x8beW\x1e\xee\x1b\xfe\xff\xb4\xe4\xe6\xae\x8b\xb0\x97kU\xa0+\xde\xd0\x04\xc0\x1a\x84\xbd0<{6\xe5\xea\xb0T\xda\xcbb\x9f\xc3\xec\xe3^\xbbq\xc5M\x1e\xcc\xf88\xf6\xe2m_\x15\xdb\xeb\xbd\x83\x17)?s\xce 
\xf0\x01L\x16B\xd2F\xc5>\xcfB\x90+B\xd9?\xc9\xca\xd7\xcb\x0deXNfy\x10\xe1\xf7b\x99\x97I\x8fg714N|L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb2:\x08\x08|\x00}\x11\x02\xdaVB\xe8\x0b\xd7\xff\x83\x8a}F\xf3<\x90\x22`\xddO\x18\x09\x8fi\xbb\xa1\xe77),>\x991@\x86UC^\x0f\x81I\xbd\x8f\xe1g\xcek\x04\x81\x0f`%\xc2\xf9vavnv7\xeb\xf6\xc9\x1c7\xf5\xbc\x89\x1e\xb1p\x1b\xb1\xa7\xef$K\xec\xd9[\xa2\xcd\xec\xd3k\x06'?\xc6\x09\x01\xf2|\xc2ng1\x1c\x9exu\x82\xc0\x07\xd0d\xe8\x0bAm/\x06\xad\xf1\x84PS\xe5\xff\x15\xdb\x7fn\xf0\xf1\x84\x9e\xb0a\x0c\xa2\xfb=n\xfaIa\xb1\xaa\xe4\x0d\xd00\xb3t\x81u\x0b~g\xc5\xb6U\xfc3L\xd2\xb8\xa9\xf1\xab?Xl\xff8\xd6\xf0{\xd4\xe0\xe3\x19\x15\x17\xef\xae\xd1!\x08=~CC\xbf \xf0\x01,#\xf8\xe5\xd9\xddy}\xc75\x7f\xf5\xbe\x86\xdfN\x83\x8f%\xf4v\xbdS3\x80vQX\x99D\xd8\x03\x81\x0f`\xa9\xa1\xefU\xeca\xfb\x0f3\x84\xbe\x0f\x8b\xd0\x977\xd5\xdbW<\x8eqq1\xecq\xe8{\x1e\xdb\x1a\x10\xf8\x00\x96+\xae\xbe\xf1Wf\xfc\xf5P\xca%L\xc0\xc8\x1b\x0a}a\xe6k\xe89\xbc\xecY3\xbfS<7\xcb\xd0\x81\xc0\x07\xb0\x92\xb0\x17z\xe7\xe6\x0dk\xa1\x94Kca&N.\x19f\x93g\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A\xadl\xb6\xeeivW\x97\xaeJ\xa3\xe5E\xe2Ps\x08}\xc7\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdc\xb2\x9e\xb9\x10\xf4FqF\xef;\x15\xc1o!%F:<\x83\xf72\x86\xbd\x0b\xaf4\x10\xf8\x00V)\x84\xb4\xb2\x957\xf2\xfb\xd9\xa4aH2\x06\xbf\xb7\xb3O\xf7\xba].2\xd8tp\x06\xaf\xb0\x07\x02\x1f\xc0\xea\xc5\xe2\xcbe+n\x9cO:\xef,\xd6\xf0\x1b\x15\xff|\x9c}\x5c\xc7o\xe1\x05\x84;4\x83\xf74Sc\x0f\x04>\x80\x96\xa8\x0aiyE\x00\xbb\x0au\xfc\x8a\xed\xd1\xb2&$t`\x06o\xa8\xb1\xb7'\xec\x81\xc0\x07\xb0r\xb1\x0c\xcbvEp9k\xe3co\xf1\x0c\xdew\xd5\xd8\x83v\xb3\x96.\xb0Na\xafj\x0d\xd70dz\xd0\xc2\xc7\x1d\x1e\xf3}\xcfY\xe8\xe9\xcb\x8b\xed\xab\xc5\xf6\xb7[\xf0\xf0\xdeQv\x05\x04>\x806\x09a\xael\xa2\xc6QK\x87$\xc3P\xeen\xcb\x1eS\x08\xc7{m\xed\x0d\x05\x04>`\x0d\xc52,\xcfJv\xb9\x8e\xeb\xeb\xb6\xd1N\xcb\x1e\xcf}\x8d=3q\xa1#\x9c\xc3\x07\xac\x8bq\xc5\xf5m^\xfak\xa3E\x8f%L\x1a\xd9\x11\xf6\xa0[\xf4\xf0\x01\xbd\x17\xcb\xb0\x94\x0d\x89\x9e\xb7uE\x88\xf8\xd8\xdb\
x14\xf6\x94]\x81\x0e\xd2\xc3\x07\xac\x83q\xc5\xf5m\xee\xdd{\xd4\x92\xc7q,\xecAw\xe9\xe1\x03zm0\x18T\xad\x97\xfb\xbc\xe5\xc3\x93!`\x9d\xc6\xe0\x17\xb6\xedU\x84=eW@\xe0\x03hk\xd8\x0b\x01)/\xd9\xe5\xa6\xe2\xfa\x95\x8b\xb3`\xcf*\x9e\xe3\xf7\x16\xf8\x10\x94]\x81\x1e0\xa4\x0b\xf4Y\xf2z\xb9\x1d\xb6\xc8\x19\xbc\xc2\x1e\xf4\x84\x1e>\xa0\x97\x06\x83A\x08B\xfb%\xbb\x842,G=x\xaa\x8b\x08|\xca\xae@\xcf\xe8\xe1\x03\xfa\xaa*\xcc\x8dz\xf2<\x9b\x0e|\xc2\x1e\x08|\x00\xed7\x18\x0c\xf6\xb2\xf22,\xa7=Z!\xa2\xc9\xc0\x17\xca\xael\x09{ \xf0\x01\xb4=\xecU\xad\x97\x1b\x1c\xf4\xe8)75kW\x8d=\x10\xf8\x00:\xa3\xaa\x0c\xcba\x11j\xaez\x12n\x87\x0d\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x85\x00\xb4\x95\x95\xf7\xde\x85\xf3\xd3\x8ez\xf4\x94\x9b\x18\xce}\xae\xc6\x1e\xf4\x9fY\xba@\x9f\xe4Yy\x19\x96\x83\x9e\xf5bm\xcd\xf9\xfb\xca\xae\xc0\x9a\xd0\xc3\x07\xf4B\x1c\xde,+\xc3r\xde\xc3p3k\x0f\xdf\x8d\xb0\x07\xebE\x0f\x1f\xd0\x17UC\xb5y\x0f\x9f\xf3\xee\x8caO\xd9\x15X3z\xf8\x80\xce\x1b\x0c\x06\xa3\xac|\xb6\xeaq\x8f\xca\xb0\xdc?\xe7Yz\xf7\xae\x85=XOz\xf8\x80\xae\x07\x9f\xaa2,\xad_/wFu\x03\x9f\xb2+\xb0\xc6\xf4\xf0\x01]\x17f\xe5\x96M\xd48\xeaK\x19\x96\xd7l\xd5\xd8\xf7T\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Y\xc9.a\xbd\xdc\xbc\xa7O\x7f\x98\xb8\xdf\xb1\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\x1f\xf4\xf8\xb9\xa7\x0c\xe9\x1e\x0a{@\xa0\x87\x0f\xe8\xa4X\x86\xa5l\x96j(\xc3r\xd2\xd3\xe7\xbe\x95\x95\x0fc\x07\xca\xae\x00\x02\x1f\xd0yUaf]{\xf7\xc2$\x95Q_\xc3. 
\xf0\x01kb0\x18T\xad\x97\xfb\xbc\xe7\xa5GvJ\xc2\x9e\xb2+\xc0\xa78\x87\x0f\xe8Z\xd8\x0beX\xf2\x92]\xfaZ\x86\xe5\xa1\xe1\x84\x9f]\x0a{\xc04z\xf8\x80\xae\x09a\xae\xec\xfc\xb5|\x0d\xca\x8flM\x09{\xca\xae\x00\x13\xe9\xe1\x03:#\xae.\xf1\xb4d\x97P\x86\xe5\xa8\xe7m\x10z8\x1f\x0eg\x1f\x0b{@\x15=|@\x97T\x85\xb9\xd1\x1a\xb4\xc1\xc3\xf3\xf7\xd4\xd8\x03\x92\xe8\xe1\x03:a0\x18\xece\xd5eX\xce\xd6\xa0)\x86\xf1\xf2]a\x0fH\xa5\x87\x0f\xe8\x0a\xbd{wB\x0f\x9f\x1a{\x80\xc0\x07\xf4\xcb`0\xc8\xb3\xf22,\x87=]/w\x92\x835z\xae@C\x0c\xe9\x02m\x0f{[Yy\x11\xe5P\x86\xe5h]\xdaC\xd8\x03\x04>\xa0\x8f\xf2\xac\xbc\x0c\xcb\x81\x19\xaa\x00\x02\x1f\xd0Qq\xbd\xdc\xfd\x92].\x9d\xcb\x06 \xf0\x01\xdd\x96W\x5c\x7f\xa0\x89\x00\x04>\xa0\xa3\x06\x83\xc1(+/\xc3r\xbc&eX\x00\x04>\xa0\x97a/\xac&Q6\x11c\x1d\xd6\xcb\x05\x10\xf8\x80^\x0bC\xb5e\x135\x8e\xccV\x05\x10\xf8\x80\x8e\x8aeX\x9e\x95\xecr\x9d\xadQ\x19\x16\x00\x81\x0f\xe8\xa3\xaa0\xa7\x0c\x0b\x80\xc0\x07tU,\xc3\xf2\xa4d\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xee\x1aW\x5c\xaf\x0c\x0b\x80\xc0\x07t\xd5`0\x08a\xael\xbd\xdcP\x86\xe5BK\x01\x08|@7\xc3^(\xc3\x92\x97\xec\x12\xca\xb0\xe8\xdd\x03\x10\xf8\x80\x0e\x0ba\xaf\xac\x0cKn\xa2\x06\x80\xc0\x07t\xd4`0\xd8).\x9e\x96\xecr]\x84=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\x0d\x06\x83\xbd\xac|\xbd\xdcs\xeb\xe5\x02\x08|@\xb7\xe9\xdd\x03\x10\xf8\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xd0z\xb9\x00\x02\x1f\xd0\xdd\xb0\xb7\x95\x95\x97Y\x09eXL\xd4\x00\x10\xf8\x80\x0e\xcb\xb3\xf22,\xd6\xcb\x05\x10\xf8\x80\xae\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x10\xf8\x80\xee\xca+\xae\xb7\xa2\x06\x80\xc0\x07t\xd5`0\x18e\xe5eXN\x95a\x01\x10\xf8\x80\xee\x86=\xeb\xe5\x02\x08|@\xcf\x850WV\x86\xe5H\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Y\xc9.\xd7\x992,\x00\x02\x1f\xd0iUa.W\x86\x05@\xe0\x03:*\x96ayR\xb2\xcb\xb92,\x00\x02\x1f\xd0mU\xbd{\xbd\x9e\xa8Q\x04\xde\xbdb\xbbM\xdc\x8eV\xf88\x0fj<\xce\x91\x975\x1d\xfd{\x1c\x15\xdb\xc5k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x985D\x14\x17\xdb%\xbb\x1c\xdf\xde\xde^\xf4\xb9\x0d\x8a\xe7wR\x5c\x9c'\xee\xfe4\x9e\xef\xb8\xec\x
e3T5\x83\xfa!=\xb2t\xf5\xfd(\xbcn_LxO\x0a#\x10\xef\xc7\xf5\xbd\x05>\x80\x86C\xc4:\x95a\x19\xd5\xd8w\x15a*\x1c\xa7\x8d\x05<\x17h\xcb\xfbQx\x8d\xefW\xec\xf6\xac\xcf=}\x02\x1f\xb0\xaa\x10q\xb4.\x135b\xb9\x99\xc3\xc4\xddw\x97\xf9\xa1S\xdc\xd7Nq\xf14q\xf7C\xa5s\xe8\xa8\xd4/\x97\xb9\xc0\x07\x90\x1e\x22\xb6*B\xc4u\x11\x1c\xf25k\x96p~\xdeu\x8d}\x97\xf9\xb8R\x5c\xae\xe11\xa3\x1f\xefG\xc3,\xbd\x07{W\xe0\x03H7\xae\xb8~\xb4n\x0d\x12{3S{\x19\xb6\x9711\x22L(\xa9\xf1\x01g\x15\x14\x10\xf8\x00\x92C\xc4\xf9\xba\xae\x97[s\x02\xc7Q<\x0frQ\xc7)\xdcvj\xef\xdesk\x1c\x83\xc0\x07\xf0\x89\xa0Rq\xfdh\xcd\xdb'\xf5\xf9\x87!\xa8E\xf6\xaaU-uw/L\xae\xc9\xbd\xac\xe9\xb0\xab\x1a\xfb\xde\x08|\x00\x15\xe2L\xb8\xb2\x10\xf1|\xddO\xfa\xaf9\x81\xe3\xd9\x22\xca\xb4\xc4\xdbL\x0d\x93#\xab\xa0\xd0\x83\xbf\xb9\xe4\x9eu\x81\x0f\xa0<D<\xaa\x08\x11z\x8a>\xf9\xa1\x92:\x81#_\xd0\xfd\xa7\x9c\xc4~\x1e\x87\xa1\xa1\xebFYu\xef\xdd\xa5\xc0\x070\x7f\x888\xd0St'\xb6Cj\x90\xdbo\xb2LK\xc2Rw\x0f\x03\xfa\xc8\xd1\xa2'\x7fsW\xc5Ex\xedO\xeb\xe9;\x0e\xd7\xf7\xf9=\xea\x0d/\x03\xa0\xa1\x10QV\xd4\xf4\xd2\xea\x0c\x9f\xfa\x00\x1a\xc7\x99\xb8)\xb3d\xf3\xf8a\xd5\x84\xd4\xe3p\xa4\xe6\x1e=\xfb\x9b\x0b\xab\xfa\x0cc\xed\xc9\xb0m\x15\xdbY\xb1]\xac\xc3\x97Q\x81\x0fhB^q\xbd\x92\x1e\xd3\xdb\xe5\xc3\x84\xfdB1\xe6\xd1\xbc\xa19.u\x972QC\xcd=\xfa\x1e\xfc.\xd6\xedy\x1b\xd2\x05\xe6\x92\xd0Ku\xaa\xa4G\xe9\x07\xcf\xf3\xd4P=O\x99\x96\x9a\xeb\xe5\x0a\xe8 \xf0\x01$\x87\x88uZ/wVy\x96V\x0abs\xce\xb6L\x9d\xa8\xa1\xe6\x1e\x08|\x00\x9fP5D\xe8<\xb0\x0a5W\xe08\x98\xa5\x97/\x9e\xb3\xb4\x9f\xb0\xab\x99\xd4 
\xf0\x01|\x22Dl\x15\x17\xcfJv\x09eG\x8e\xb4TR\xe8\x1bgiu\xc26fl\xd3\xd4\xdfQs\x0f\x04>\x80Z!\x22\x17\x1ejI\xed\xe5\xdb\x8f=v\xa9\xc1|\x94\xa5\xcd\x04Vs\x0f\x04>\x80O\x84\x88aV^\xcb\xed\x5c\x19\x96zjN\xe08J<N\xa9\x135\xd4\xdc\x83\xd5\xbd\x9fn\x85\xf7\xd4&\xebmN\xa2,\x0b0\x8b\xca\xde=M4\x93<\x06\xaf\xaa\xc9\x15\xa1L\xcb^B\x8f\x5cj\x19\x16\xe7Z\xae\xee\xc3>\x84\xf2\xf0A\x7f_\x1b.\xfc\x7f\xb7\x22\x9c_<\xd8\xce\x1c\xbbN\x1d\xeb\xbdx\xbc\xb7&\x1d\xe7b\x9fi\xc7\xf9d\xde\x11\x13\x81\x0f\xa8\xfb\xa6\x15\x02\xc9v\xc9.\xc7fy\xce&\xbc\xa1\xc7Zy/\x12C\xf7I\xc5\x87K\xca0\xb1\x9a{\xcb\xff\x1b\xda\x8a\xc1~\xaf\xe2oi\x92\x8d\x18\x14v\x1f\xdc^8\xffs\xdcD\xafz\xecez?q\xf7//\xea4\x80\xf8\xfa\xbd\xca\xd2f\x96W>\x8e\xd4\xe7U\xdc\xce`\x01\xcfe\x18\x8f\xf7\xfe\x1c\xc7\xf9Eq;\xf7K\xbf\xcd\x14\xfe\x0c\xe9\x02u\xdf\x84\x8f*z\x1f\x94a\x99/\xf4\x85\x0f\xed\x94\x09\x1c\x9b1\x1c\x96\x05\xc2\x94\x0fK\xc7k\x89_\x96\x8a-\xf4\xd6\xbc\xcc\xee&<m7t\xd3\xbb1\x10\x5c\xcd;,\x18\xbf\xac\xa5\x9eZ0\x9e\xa76d\xd5mg\xe9e\x84NZz\xbc\xc3P\xedY\x0c\x9a\xfb\x0d\xdc\xe4v\xfc2\x18\x8es\xed\xba\x9c\x02\x1fPG^\xf1&|d\xa2Fc\xed\x9c\xb4\xdf\xa47\xfd\xd8\x83\x94\xf2\x01\xa3\xe6\xde\xf2\x82\xdeU\xfc\xb0\xde^\xe0]\x85\xe1\xfb\xf7\x8b\xfb\x9akv|\xf1\x9a\x08_\x02.\x13v\x0d\xef\x05'\x0bh\xaf\xd0\xf3\x99\xb2\xde\xf3e\xd6\xd2\xd3G\xe2H\xc8E\x966a\xaa\xae\x8d\xf8\x85a\xa7\xce/\x09|@\xf2\xb7\xd5\xe2\xe2i\xc9.\xd7\x86\x06\x9b\x11C\xd8q\xe2\x1b\xff\xc1\x8c\x81Q\xcd\xbd\xe5\xfc\xdd\x1c\xc5\xa0\xb7\xb9\xc4\xbb}\x1az\x96\xe6\xec}\x1b%\xee\xb7[\xd1\xd3\x5c\xb7\xbd\xc2c\x1e\xa7>\xc66~\xc1|p\xcc7\x16x77u\xbf\xac\x09|@\xaa\xaa7aC\x83\xcd:\xc8\xd2V\xe08\x88a\xfc\xfe\xc3f\x98\xa5\xf5\xee\xa9\xb9\xb7\x1c\xabZ\xb3u\xb7Fp\x9a\xf4\xa5#<\xeew\x13w\xcf\x1f\xbe\x06\x1bx\x9fI\x09J\xef\xc6\xc7\xd8\xb6\xb0\x97W|1nJ\xed\x9eU\x81\x0fHy\x13\x1bf\xe5C\x13j\xb85,\x86\xb1<a\xd7\x8d\xd7\xf6K\xf9\x9dS\xc7ki\xe6i\xe7\xcb9\xef\xfbI\x0c 
\xb3\xbe\x06COUjA\xf0\xf1\xbc\x0dUc(\xf7<>\xb6\xb6\xbdO\x86\xc7\xff\xac\xad\xaf+\x81\x0fH\xfd\xd6]f\xa4\x89\x16\x12\xfa\x8e\x12?\xf4\xf7\xefkye\xd5\xe7\x0c\x99X\xb3\xfc\xe0~\x5cq<N\xb3\xbb\xde\xb4\xb7\x8b\xed\xb3a\xa6h\xdcv\xee\xff]\xfc\xfcq\xb1}9\xde\xd6M\x8d\x87\xf0\xacN\xa1\xee)\x7f\xdb)\xf77\xd7\xd0n\x8d\xa1\xdcV\xd6\x8cL\x98\xd0\xf6\xa9\xd0z\x7f\xcc\x1f\x1c\xef\xfb\xe3\xfcv\xbcn\xda\xb1\xbe\x99\xe5\x0b\x9b\xb2,@\xd5\x1bY\xe8!(;\xff\xe8\xb9:`\x0b\x15>DS\xcad\x84\xe3\x94r\xceV\xeex-]\xf8p\xde\x7f-\xb4\x84\x9f\x1d\xa5\x0eK\xc6c\x16\xb6\x93\x07%wR{\x93B\x10\x19\xce\x18X\xaf\xe2\x04\x84\xf7R^[\xc5\xbe'3\xbe\xbeB\xd8K\x19\xca\x1d\xb5\xf4\xf5\x9bZ\xf32|\x81;\x98v\xfe\xdd\x83\xe3|\xf6\xe0=8\xf4\x1c\xee=x\x0d\x8dgy\x80z\xf8\x80\xaao\xad\x07\x15\xdf\xb6s-\xb585&p\x84\x0f\x83\xaa\xe1\xb0\xcb6\x0e\x85\xad\xc11<\x89\x7f+a;,\xb6\xad\xe2g\xa3Y\xcfA\x0b\xbd\x86q\x82\xd4\xdbYz\xef\xdb\xce\x9c\x8f\xff4a\xd7\x99\x86vk\x0c\xe5\x1e\xb7\xf8T\x84Qb\xd8\x1b\xd6\x9dl\x11\x9esx\xbddw\xbd\x7f\xa7\x02\x1f\xb0\x08U\xb5\xdc\xac\x97\xbb\xbc\xde\x83\x9b\x06ng\xa4)W\x1a\x08B\xd0k\xeco&\x06\x87Q\x8d\xd7\xd0\xbc\x8f\xbf\xf1\xa1\xdd\x1aC\xb9\xd7YKOE\x88\xa7RT\xf5\xee\xdd\xc4\xb0\xf7j\x8e\xe3}Ul{\xb3~Q\x10\xf8\x80iob\xa1G\xa0l\xb6\xa7\xde\xa2%\xa91\x81\xa3\xcc\xf36\xcej\x5c\xa3cx\xb2\x88/G\xb1\xc7+\xa5\x07x\xaf\x81\xd7`\xeam\xd4\x99\xb5;\xce\xd2\x86r\xf7Z\xfc\xe5r\x98\xf2<W\xfd\xf8\x05>`\x9a\xaa0\xe7\xc4\xff\xe5\x06\x86\xd4\x09\x1c\x93\x5cg\x86\xde\xfb,\xe5\xd8n,q\x15\x8e\xa4\xa1\xdd\xf8xR\x86r\x0f[\xfee%\xa5]W>\x14-\xf0\x01\x93\xde\x88GY\xf9l\xcfS+4\xac\xc4\xac!\xfb\xc0\xd0{\xaf\xbf\x0c\x5cei\xe7\xd8\x0d\x1b\xb8\xbb<\xf1\x8bG\xe9\xd0n\x8d\xa1\xdc\xf3\x9e\x14t_y`\x15\xf8\x80Io\xc4Uo\xb0z\xf7V\xf3\xc1\x1eB\xf6q\xcd_Sso=\xa4|\x01\xdbi\xe05\x18\xbe8\x8cR\xc3a\xc9j\x1f)\xb3Z[Y\x82eF;\xab~\x00\x02\x1fP\xf7\x8d\xf8PY\x8f\x95\xcak\xee\xefX\xad\x87\x94\x1e\xa4GM\xdcQ\x1c^=L\xd8u\xe2\xd0n<?8\xa5\xa4\xcc\xa8G\xef5[\x02\x1f\xd0\x1a\xf1D\xeb\xb2\xde\xbbp.\x98\x89\x1a\xab\x0f\xe4u<\x9d\xf7\xdc-:!e\xc8~\xb7\xa9
;\x8b\xc3\xac)\xabp<\x99\xf0\xfaKy\x0f9\xeeY\xcft\xbe\xea\x07 \xf0\x01\xaf\xbf\x11+\xc3\xd2\xde@\x1ezF\x9e\xcex\x5c\xe9\xb1\x15Mj\x18ei\xa5Z\xc6\x0f^\xc3\xa3\x84\xe0\xd9\xda\x12,S\xa4\xb4\xfdf\xf1\xdc\xc7\x02\x1f\xd0\x860\x11\xbe\x85\x97\xcd\x98\x0b'O\x8f\xb5\xd4\xca\x03\xf9,\xb6\xe7Y\xf6\x0a\xa6\x84\xcc\xab\xc4`\x16\xc2N^c\xf9\xb1\xbd\x8e}\xb1<K\xdc/,\x818.9\xafq\xa1,\xad\x06\xa4\x86\x89\x5c\x13\xad4\x90\x8f\xb2\xf9\x86\xe4\xc2\x07\xee\xb8-\x1f\xa4\xc5c9[\xc2\xdd\x5c\x14\xcf\xf7\xa0\xa3\xc7;\x84\x82\x9d\xb8\x85\x7f\x0f\xe3U[Y\xda\x12^\xcb\x0a}\xe3\xc4\x952\x0e\xe2c\xaf\xaa\xb9w\xd8\xb5z\x91a\xe8\xb9h\x83\xeb\xc4\xe3\x12j\x9b\x0eC\x00^\xf6\x17h\x81\x0f\xb8\x0f\x13\xdb%\xbb\x1c+\xc3\xb2\xf2\x0f\xffy\x87e7\xe2m\x8cZ\xf2\xb4v\x1d\xd9O\x1d\xe3\xbd\x18\xec\x86m\x0au\x09\xc2k\xea\xaa\x22\xccmd\xe5\x85\xdc\x83.\x97`\x09\x8f\xfbE\xe2\xbe\xe1\xd8\xbe\x88\xeb\x94\x87\xedd\x19_\xc4\x0c\xe9\x82\x0f\x9a\xaa0q\x93)\xc3\xd2\x86\x0f\x93\x8d\x06ng\xdf\x04\x8e\xf6}\xd9*\xb609\xe1{10\xecw,\xec\xd5]\x85\xa3\xec}f\xd4\xd5\xe3\x18{\xeb\xcek\xfe\xdaf<\xe6Wq\xa8w\xa1\xa5[\x04>\xa0*L\x1c\x99\xa8\xb1\xd2@\x902Q#|X\xbe\x9bx\x93c\xad\xba\xfa/Y\xf1\x9c\xb6W\xf1\x03\xffI\xd7\x9fS\x8dU8\xa69\xe8A\x09\x96\x10zgY\x0d\xe7\xbe\xf7\xf3\xc3\xe25q\x11\xbf\x044~\x9e\x9f\xc0\x07\xeb\xfd\xc1\xb3U\x11&\xae{R\xe5\xbe\xcbR\x86r\x8fj,\xbd\xb6\x19\x87\x92X\xcd\xdf\x5c\xe8-\x0f\xc1\xe6Y\xd6L\xafm\xdb\xbe<\xce\x12xN\xfb0!,~1\x1ef\xb3/\x81\x18lg\x1f\xf7\xfa\xe5M\x06?\x81\x0f\xd6[\xd5\x9b\xac\xa1\xdc\xd5\x86\x83QV}\xae\xdb\xcd\x83P\x98z\xbc\x0ej,nO3\xc7\xf2Q\x9c\xa8\xf2\xf5\x1e\x06\xbd\x87\x81gT\xf3\xd7\xae\xb3\xfe\xac\xa6\xf10\xf4\x9d\xcfyS\x1b\xf1K\xc1US_\xd0\x04>X\xdf\x0f\xa0aE\x988\xb7$\xd7j\x03B\x96\xde\xbb\xf7*~\xd8\x84@\x91\xb2\xa6\xeaF\xa66\xdf2\x8fe\x18\x96\xbf\xca\xe6\x9b\xa8r\x19CD\x186=|\xb0\xbd\xfd`\xbb\x5c\xf5s\x8d3l\xeb,\xffw\xd1\xb7SF\xc2\xf3)\xb6a\x96\xb6\x1aIR\xf0\x8bC\xbds\x9d\xe3g\x96.\xac\xafq\xc5\xf5z\xf7V\xab\xaa\x08vp3!\xb8\x85\xe3\x96rN\xd8G+ \xacj\xf6uq\xbf\x835\x0a{gY\xfd^\xbd\x10\xeeNb 
:K\xbc\xafW-y\xbe\xfb5~%\xbc\x0e\xf7\xfa\xf8\xe52\x9c\x0e\x13\x8b-\x1fe\xf3\x9f\xa7\x19\x86z\xcf\xc2)\x01\xb3\x0e\x7f\xeb\xe1\x83\xf5\xecq\xa8Z/\xf7y\xd7ja\xf5\xec\xf8\x0c\x13?4?\xb5\xf2I<\xf1=\xb5ga\xac\xb5\x17z\x1c\x1f\xd5\x0c{7\xf1\xd8=\x0e=D\xe1\xbc\xcc.\x95C\x8a\xcfw\x96\xe06\xee\xeb)\x06\xe1\xef\xb1\xd8\xc2d\x8e\xd0\x03{<\xe7\xcd\x85\xd7\xd1\x8bx\xaa\x87\xc0\x07$\xbd)\xe7\x15\x1f:\xb9\x96Z\xa9\x94\xe1\xd6\xeb8Qc\xda\xef_'\xdc\x86\x09\x1c\x8buR#\xec\x85\xa1\xda\xad\xd0+\xd4\xe1\xd9\xaaG\xd9l%e6\xfa\xfe\xe5#\x04\xf7b\x0bA\xedq\x0c\xf5\xd7s\xdc\xdc\x8bX\xecZ\xe0\x03*\xdf\x94\xad\x97\xdb\xde@\x1ez_\xb7\x13v=(\xf9py\x95\xa5\x0f\xc9?3\x81c!\xc71|\xb8\xa7\x9c\xb3\x17\xbe`\xbd\x1dV\x04\xe9\xf2\xdf]\x0c \xfbs\xdc\xc4\xee:,\xff\x17{\xfc\xc2{l\xf8\x9b\xfbbv\xd7\xebw3\xc3M\xd5\xee\x15\x15\xf8`\xbd>\x84\xaa\xce\xaf)\xeb5b\xf1\xc7\xa7\xaa\xf7\xf5^\xe5\x84\x9ax}\xeaL\xc1\xb1\xd6o\x5c\x9e\xb8\xdf\xb0\xeb\xab\xd8\xc4\xd7m\x13\xaf\xa1|\x9d\xbe|\x84\xd3fb\xaf_x\xce\x875\x83\xdfFVs$F\xe0\x83\xf5R\x15\xe6F\x9ah\xe5\xc7'e\x080\xf5\x8d>\xf5x\xee\xce2D\xc4\xd4\x00\x14\xda2eh\xf3\xb0'\xe7\xca\xa6\x0c]\xa7\xce\x1e_\xbb\xca\x00qVo\x1e\x83_\x9d\xf3\xfc\xf6\xeb\x04d\x81\x0f\xd6\xebC\xa8l\x88\xe9\xd4z\xb9+=>\xc3,mH,y]\xe3\x9a\x138\x8e\x16Q\xdd\x7fM\xa5\x84\xe7\x9b>\x145\x8f\xc3\xb0UC\xd7\xd7q\xe2BJ\x98\xd9^\xd7\xf3Jc\xf0\x0b_\xd2\xdei\xf8\xb5&\xf0\xc1\x1a\x85\x89\x94\x9an\xca\xb0\xacV\xcaP\xfa,\xeb\x1a'O\xe0\xf0\x1ah\xcc0a\x9fq\x0f\xdeW\xc2)\x22)\xe1l\xf4\xe0=&e\xd8\xf2\xd9\xa2\xd7\x95my\xf0\x1b\xd7\x08}\x02\x1f\xf0\xa90W6\xc4t\xd8\x83u,\xbb\xfc\xc1\x19>4S&j\xd4^\xd7x\x86\x09\x1c;\x8e\xc8\xdc_\xaeR\x86s\xcf\x16p\xf7\xbbK~\xba!\x98T\x0e\xe5\xde\xf7H\xc7\xd7b^\xe3\xb6\xd7V\x0c})\xc3\xe0\xc9\x7f\xaf\x02\x1f\xf4\xff\x03h\xab\xe2\x03\x7fR\xf1^\xdas|\xee\xcd\xbc\xaeq\x9c\xc0q\x9a\xb8\xbb\xd7\xc2|R?\x80\xaf\x1a~\x1d\xed,\xf9u{\x94\xf0%\xe5S=\xd25\xd6|\xde\x8e\xf7\xb1\xceR\xfe\xde\x93\x0bz\x0b|\xb0\x1eo\x1aeo\x0a\x07\xca\xb0\xacT\xeaD\x8dy\x87[S\x7f\x7fw\xd6\xc2\xae\xd4\x0a\xe1MO\xd6\xd8Zb\xd8\x1b
\x16\x17OS\xde{\xa6\x8c\x1c\xa4\xbe\xbe\x9e\xc6\xfb\xf2\x1ai\x80\xc0\x07=\x960\x11\xe0|\xd6ezh\xec\xf8\xa4,\xb94\xf7\xba\xc6&p\xb4\xee\xd87\x1d\xd0\xf6\x96\xf4\xb8SK\xb0\x5cN+\xf1\x14\x83\xcc\xf3\xc4\xbb\x1c\xaf\xf9k\xf1\xba\xa9\x1b\x12\xf8\xa0\xdf\xaa\x86DrM\xb4R\xa9a;o\xf0\xf5\x90r\xd2\xfc\x86\xd7\xc6\xc25\x16\xf8bx\xdc_\xe2k6\xe5\x1c\xc5Q\xc2k:u2\xd1:\xbf\x16\x1b\x0b\xbb\x02\x1f\xf4\xb7\x07!\xbc\xe1\x96\x9dcs\xac\x0c\xcbJ\x8fO\x9e\xf8\xc1\xd9\xd8q\xaa9\x81\xe3\xa9\x09\x1c\x0b\xd5d\x8f\xdcxI\xaf\xd9\xf0\x98Sz\xa4+\xeb\x0b\xc6\xd7\xe2\xa8\xc6kq\xb8\x86\xef\x11\x8f\xb2\x1a\xe7\xe8\x09|\xb0\x9ea\xa2\xaa\x0c\x8b\xf5rW{|\xb6j\x04\xafF\x8fS\x1c\xc2\xb7\x02\xc7\xe2\xa4\x9ew\xb5\xd7\xd0k)\xbc>vk\xbe7\xcc\xfa\x9aMy=\x5cg\x89\x13\x7f\xe2\x17\x99\xd4\xa1\xdd\x935\x1c\xdaMy\x8d\xa4\xfe-\x0b|\xd0S\x07\x15\xdf\x0c\x8f\x94aY\xa9\xd4\x89\x1a\xcf\x17t\x9cRC\xe4\xf6:\xaco\xdap\xa0~\x95%\x0eU\xce\xdb\xb6\xb1\x17\xffY\xcd_\x9b\xb5\xd7v\x9c\xf8\x9a\x1d\xd5\x9c\x04\x96'\xb6\xd7F[\xbf\x80\x84 \xdat\x0fd\x8de\x16\xcf\x04>XS\xf1\x9bx\xd9\x87\xc0u\x1f*\xfcw\xf8\xf8\x84\x0f\x86\x94a\xb1\x85\xf5\xc2\xc6\x9e\x95\xd4%\x9cr\x138j;\xa9\xd1\xb6;3\xbe\x8e\xc2k\xe3\xc5\x92^\xb3)\xabi\xdc\x7fA9\xab\xf9Z\xac3\xb4\xfb\xa4\xa5K\x00\x86\xc7\xf4~\xf1\xd8\xce\x9a\x08~\xf1\xef-\xb4c\xca)\x1f\xc9!X\xe0\x83\xfe\xa9z\x03\xd0c\xb3\xc2\x9e\x80\x1ao\xd0G\x0b.\x97\x93g\xe9\x138\xd4\xe6\xab'\xb5\xbdB\xdb\x9e\xd5\x091!P\x14\xdbEV\xbfgo\xd6\xd7l\x08\xa4__\xe4\x17\x94\x9aC\xbbm\x9c\xb5{\x7f\xfcv\x1f\x04\xbf\xd1,\x8f3\x06\xc6\xd0\x1e)\x85\xd8\xcf\xeb\x8c\x00\x08|\xd0\xaf@1\xac\xf8&>wy\x0f\xe6R\xb5\xe2\xc9\xbd\xe4\xf3\xa0f\x15?(R\xefc\x7f\x9d\xeb\xa1\xcd\xd8\xb6\xa9\xe7V\x85\xd0\xf7^YH\x08\xa1+\xf4\xb2\xc5\xa0\xf7~I\x18\xb8i\xf8\xfd\xa4\xce\x17\x94\xd1\x9c_P\xf2,}h\xb75\xefa\xb1\x8d^\xef\xb1\x0f\xef\xc1\xa1\xf7\xf5{\xf1\xb8\xe61\xa8?\x9a\x16\xaa\x13\x8f\xef\x5c_\xde\x05>\xe8\x97\xaa7g\xbd{\xab\xfb`\xd8\xca\xd2{e\xf2e\x14\xc3\x8eC\xfb\xa9u\xbe\xf4\xf2\xd53\xaa\xb9\xff\xc3\x90p[l\xaf\xe2\xe5m\xf1\xb3\x0f\xb3\xbb^\xb6\xb2
 p3\xc3}\xa6\x84\xb0\x94\xf0q\xda@\x9d\xc8:C\xbb\xbb-:\xb7t/\xe1\xb8>\x8bA\xee\xfe\xd8\x86\xed\xaa\xe6\xf1}\xdda\xdd\xc2\xcc\x02\x1f\xf4'PT\xf5\x1e=_@u\x7f\x9a\x0b\xe3\xf7.\x97\x5c\x0c;\xf5\x83s;\x9e7FZ\x80\xb9*.\xde\x9d\xe3&\xea\x94\xe3\x08ao\x98\x18\xba\x92\xce\x19\xac\xb1\x9aFcA\xb3\xe6\xd0n\xbe\x80\xe2\xd5\x8b\x08|\xd3l\xceq\x9f\xc7\xb3\x9c\x87-\xf0A?\xc2^\xd5\x8c.eXV{|\xf6\xb2\xf4\xd2\x19K\xed\xb9\x88!!u\xf8\xf1\xa0%\x1f\xb2]\x09}\xa1W\xf4x\xc1ws\x1f\xf6R\xbf\xcc=Jx\xbd\x86}R{\xecF\x0d\xf7F\xe7Y\xfa\xb9\xa5\xe3\x15\xff]O\x1a\xce]\xb4\x10\xf6f\x0a\xd8\x02\x1f\xf4CU\x99\x8f\xdcz\xb9+\xfdPH\x1d\x0e=_Q1\xec\xd4\x90i\x02G\xfd\xd0\x17>\x9c\x9f/\xe8\xe6CP\xdfz-\xecU\x85\xf7\x94\x89\x04\xe3,\xad\x87\xb1\xf1s\x82g\x18\xda]\xe5\x17\xd9e\x7f\xf9yw\xd6\xb0'\xf0A?\x02E\x18\xa2)[V\xe9z\xda\x9a\x96,-Lm\xd6\xd8w\x15\xa1\xa4\xce\xda\xa6OL\xe0\xa8\xdd\xbe\xe1\xb8~9knRE8\xef\xf2\x9d\xe2v\x873|\x91\xdb\xa9x?\x09\x81\x22\xb5l\xd0hA\xed\x15B\xe4i\xe2\xee\xcfV\xb5\x22L\xfc\xbby\x9c-\xbe\x177\xdc\xfe\xe3y\xdf\xc7\x05>\xe8\xbe\xaa7\x81\x91&ZY\x18\x0f=\x00\xa9\x135\x8eW|\x8ee^#\x90\x8c\x1d\xdd\x99BLx=\x1c\xce\x11\xfc.c\xd0\xdb*9\xcf\xb3\xea5\xb4U\xf1zM\x0d\x15\xf9\x82\x8b\xb7\x8f\xba\xf0z\x0cm\x10{\xdd>\x9b\xdd\x9d\xb3y\xde\xd0M\xdf\xc4/a!\xe8\x8d\x9ah\xebAq#\xfe\x12!\xda\xf8\xca[gY\x8de\x8a\xa2\xf3\x9bo|0\x5cQ\xa0\x08\xe7\x86\xbdW\xf6\xd8B/\x80#\x0b\xad\xfb2\x10\xfev\xc3\xdf\xe6N\xc9{\xceu\x0cp\xe1}\xe9\xc4\xea8\x9d9\xb6\x8f\xe2q\xbd?\xbe\x8f\x12>WBP|\x15\x8f\xf5\xd9\x22\xbe\xfc\xbd\xe1\xd0@\xa7\xe9\xdd\x83\x0e\x8a=~jb\xf6\xf3\xd8~?\xb8\xb5\xe9q\x19\xd2\x85\xee~\x8b\xcc\xb3\xf2s\xc3\x0e\xf5\x08\x00 \xf0Aw\xc3\xdeVV~\x82\x7f8\xff\xc3D\x0d\x00\x04>\xe8\xb0<+/\x9bp\xa0\x0c\x0b\x00\x02\x1ftT,\x89QV\x86e\xd9+5\x00 \xf0\x01\x0d\xcb+\xae\xb7^.\x00\x02\x1ftU,\x8aZ6\xbd\xffxE+5\x00 
\xf0\x01\x0d\x84\xbd\xaa%\xba\xac\x97\x0b\x80\xc0\x07\x1d\x17\x86j\xcb&j\x1c)\xc3\x02\x80\xc0\x07\x1d\x95\xb0DW\xa8\xc8\xaf\x0c\x0b\x00\x02\x1ftXU\x98S\x86\x05\x00\x81\x0f\xba*\x96ayR\xb2\xcby\x5c\xa6\x09\x00\x04>\xe8\xa8q\xc5\xf5\xca\xb0\x00 \xf0AW\x0d\x06\x83\x10\xe6\xca\xd6\xcb\x0deX.\xb4\x14\x00\x02\x1ft3\xec\x852,y\xc9.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb0\x10\xf6\xca\xca\xb0\xe4&j\x00 \xf0AG\x0d\x06\x83\x9d\xe2\xe2i\xc9.\xd7E\xd8S\x86\x05\x00\x81\x0f:\xac*\xcc\x8d4\x11\x00\x02\x1ft\xd4`0\xd8\xcb\xca\xd7\xcb=\xb7^.\x00\x02\x1ft\x9b\xde=\x00\x04>\xe8\xab\xc1`\x90g\xe5eX\x0e\xad\x97\x0b\x80\xc0\x07\xdd\x0d{[Yy\x99\x95P\x86\xc5D\x0d\x00\x04>\xe8\xb0<+/\xc3b\xbd\x5c\x00\x04>\xe8\xaa\xb8^\xee~\xc9.\x97E\xd8\x1bk)\x00\x04>\xe8\xae\xbc\xe2z+j\x00 \xf0AW\x0d\x06\x83QV^\x86\xe5X\x19\x16\x00\x04>\xe8n\xd8KY/7\xd7R\x00\x08|\xd0]a\xa8\xb6\xac\x0c\xcb\x912,\x00\x08|\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\x00\x10\xf8\xa0\xd3\xaa\xc2\x5c\xae\x0c\x0b\x00\x02\x1ftT,\xc3\xf2\xa4d\x97seX\x00\x10\xf8\xa0\xdb\xaaz\xf7\x94a\x01@\xe0\x83\xae\x1a\x0c\x06!\xccm\x97\xec\x12\xca\xb0\x5ch)\x00\x04>\xe8f\xd8K)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xb2\xf5r\x8fL\xd4\x00@\xe0\x83\x8e\x8aeX\x9e\x96\xecr]\x84\xbd\x5cK\x01 \xf0Aw\x8d+\xae\x1fi\x22\x00\x04>\xe8\xa8\xc1`\xb0\x97\x95\xaf\x97{n\xbd\x5c\x00\x04>\xe8\xb6\xaa2,#M\x04\x80\xc0\x07\x1d5\x18\x0c\xf2\xac|\xbd\xdc\xe7\xd6\xcb\x05@\xe0\x83\xee\x86\xbdP\x86\xa5\xac\xccJ(\xc3\x92k)\x00\x04>\xe8\xae0\x94[V\x86\xe5@\x19\x16\x00\x04>\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05@\xe0\x83n\xcb+\xae\xb7\xa2\x06\x00\x02\x1ft\xd5`0\x18e\xe5eXN\x95a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850WV\x86\xe5H\x19\x16\x00\x04>\xe8\xa8\xb8^\xee\xb3\x92]\xae\xb3\xea\x22\xcc\x00 \xf0A\x8bU\x85\xb9\x5c\x19\x16\x00\x04>\xe8\xa8X\x86\xe5I\xc9.\xe7\xca\xb0\x00 
\xf0A\xb7U\xf5\xee\x99\xa8\x01\xc0J\xbc\xa1\x09`~\xb1\x0c\xcbv\xc9.\xc7\xb7\xb7\xb7\x17Z\x0a\xa0{\xbe\xf9\xf8\x0b\xe3\xf8\xcf\xfcK/\xbf}\xd5\xc5\xe7\xa0\x87\x0f\xe6\xf4'\xff\xf8\x07\xc2\x17\xa7\xb2\xde=eX\x00\xba\xed$\xbb[9\xe9e\x11\xfe\xce\x8am$\xf0\xc1\x9ay\xfc\x9d\x1f\xdb\xca\xca\xd7\xcb=2Q\x03\xa0\xbb\xbe\xf4\xf2\xdb!\xf0]\xc7\xff\x86\xa2\xfa/\x8a\xd0wUly\xb1=\x12\xf8\xa0\xe7\xfe\xdc\x1f\xbe\x91\xfd\xea{\xbf\xf2\xf9\x92]\xae\x8b\xb0\x97k)\x80\xce{}$'\xd4[\x0de\xb8\xbe\x17\x86|\x8bmG\xe0\x83\x9e\xfa\xdc\xcb\x1f\xac\xda\xc5P.@?\x8c\xb3\xbbSt&\x09\xc3\xbd\x1f\x16\xa1\xef\xa2\xad\xc3\xbd\x02\x1f\xcc\xe8\xa7\xfe\xe0G\xb3\xf3_>+\xdb%\x94a9\xd1R\x00\xdd\xf7\xa5\x97\xdf\x0e\xa7\xe6T\xbd\xa7\x87\xc9{a\xb8\xf7U\x1c\xee\xdd\x12\xf8\xa0\xe3~\xf7\xdf\x7f\xb7j\x97\x91V\x02\xe8\x95\xd4\x95\x92\xc2y\xdda\xb87L\xf28)\xb6\xa1\xc0\x07\x1d\xf4\xd7\xbe\xfb(\xfb\xd6\xc5\xaf\x97\xed\xf2\xdcz\xb9\x00\xfd\xf2\xa5\x97\xdf\x0e\xe5\xb5\xcek\xfeZ(\xc8\xff~\x9c\xe41Z\xd5$\x0f\x81\x0fj\xfa\xf1?\xfeL\xf6\xad\xb3\xd2\xb0\x17\xce\xf1\xc8\xb5\x14@/\x8dg\xfc\xbd0\xc9\xe3E\xb1]\xc5I\x1e[\x02\x1f\xb4\xd8\x9b\xbf\xf3\xe3\xd9w\xbe\xf3\x9d\xb2]\xac\x97\x0b\xd0S_z\xf9\xed\x10\xf8\xae\xe7\xb8\x890\xdc\xfb\xb0\xa6\xdf\x9e\xc0\x07-\xf3\x17\xfe\xef\x0fe\xbf\xfaK\xff\xa2l\x97\xcb\x22\xec\x1di)\x80^\x1b7t;\xa1\xa6\xdf{q\xb8\xf7`\x91\xc3\xbd\x02\x1f\xd4\xf0c\xd7\x95\x7f2\xca\xb0\x00\xf4_\xd3_\xec\xc3p\xef\xd7\xb3\x05\xd6\xf4\x13\xf8 
\xd1_\xfd\xbd\x9f\xa8*\xc3rz{{{\xa6\xa5\x00\xfa-\x96h9^\xd0\xcd\xdf\xd7\xf4kt\x09\xb77\x1c6\xa8\x16&j\xbc\xfa\xd6\xff,\xdd\xe7\x1f\xfd\xdc\xdf\xfc\xc56L\xbd\x07`).b8[\x940\xdc\xbb[|\xae\x84\xde\xc4\xb0\x8d\x8b\xa0y5\xeb\x8d\x0dnoo\x1d2\x886\xbe\xf2\xd6Y\xfc#\xfb\x84P\x86\xe5W\xfe\xd9/O\xfd\xbd|\xefg\xb2\x9f\xb9\xf8\xef\x1a\x10\x80E:\x8e\xc1\xef\xac\xee/\x1a\xd2\x85\x09~\xf4\xf7>\xf7\xfd\x7f\x87\xf5r\xcb\xca\xb0\xec\xbe\xf5\xd3\xd9\xdf\xf8\xad\xefh4\x00\x16-\xf4(\xbe\x7f\xbf\x84[\x9dI\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f>\xfc\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf\xfc\xf6\x8f\x94\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xd1\x12n\xd9]M\xbf\xa3\x94\x9a~\x86t\xe1\x81\xcf\xff\xec/\xfc\xceO|g\xe3O\xdf\xff\xffs\x7f\xf1e\xf6\xaf~\xf1\x9fO\xdd\xff\xe7\x7f\xf6\xed\xec\x17\xfe\xd3ok8\x00V\xed4\xbb\x1b\xee\x9d\xb8\xde\xafI\x1b\x10\xbd\xb9\xfd\xd5\xf1Od\x1f\x87\xbd\xe03\xbf}S\xfa;?\x9b\xfd/\x0d\x07@\x1b\x84%\xdc\x9e|\xf3\xf1\x17BQ\xe8\xfbI\x1e\xdf_\x04@\x0f\x1f\xc4\xb0\x97M\x98m\xb5\xf1#\x7f\x94\xfd\x89\x1f\xfd\xcd\xec_\xbe\x7f\xfa\xa9\xdf\xf9\xfb?\xf7\xb7\xb2\xbf\xfb\xeb\xffM\xe3\x01\xd0F\xa1\xc7\x22\xf4\xf6\x1d\x855\x80\x05>\x84\xbd)a\xef\xa1?\xfb\xd9?\xc8\xfe\xcf\xef\xff\x9b\xec_\xff\xbb\x7f\xfb\xd1\xff77\xb7\xb2\x7f\xba\xf1#\xce\xdd\x03\xa0\x0b\xc1\xef@\xe0C\xd8\xabQG\xe9\xef\xfc\xe4\xb7\xb3_\xfa\x8f\x17\xd9;_\xfc\xf3\xca\xb0\x00\xd0f\x9f\x18\xda\x15\xf8\x10\xf6jz\xe7\x07\xbf\x95\xfd\xf5\xef\xfe\xa6\x06\x04\xa0\x8d&N\xde\x10\xf8\x10\xf6f\xf0\xf7\xfe\xf0\xd7\xb3\x9f\xfe\xfd\xff\xaa!\x01h\x830l\x1b>\xd7\x8e\xa6\xad\xc6!\xf0!\xec\x09}\x00t\xd3ev7l{\xf2pF\xee$\xca\xb2 \xec\xcd\xe8\xbf\xfc\xc0\xe7\xb2\x9f\xce\x04>\x00\x96\xae\xf6\x12k\x02\x1f\xc2\xde\x0c\xde\xbe\xfd\x1f\xd9\xcf\xff\xde\xafiT\x00\x96%\x0c\xdb\xdeO\xc2\xb8\xaa\xfb\xcb\x02\x1f\xc2^M\x9f\x1f\xfc\xef_\xfe\xf9\xdf\xfd\xb5\xafiU\x80\xb56,\xb6gK\xb8\x9f\xf3\x18\xf2\xc6\xf3\xdc\x88\xc0\x87\xb0W\xcf\xf1\xaf^<\x1fiU\x80\xf5\xf6\xcd\xc7_8X\xf0]\x84a\xdb\x8f\x8a&7qc&m 
\xec\xd5\xf8\xe3\xfb\x8d\xcb\xaf\x09{\x00\xc2\xdeVq\xf1r\x017=qY\xb4&\xe8\xe1C\xd8\x13\xf6\x00\xa8\xa7\xe9\xde\xbd0l{\xf4z\xed\xbc&\xe9\xe1C\xd8\x13\xf6\x00H\xf4\xcd\xc7_xT\x5c\x5c\x15\xdb\xc6\x9c7u\xbf\xd6m>\xcb$\x8c\xba\xf4\xf0!\xec\x09{\x00\xa4\xdb\x9b3\xec\x85a\xdb<K\xa8\x9d'\xf0\x81\xb0\x07\xc0j\xe43\xfe^X\xf2\xec\xa8N\xed<\x81\x0f\x84=\x00\x96\xec\x9b\x8f\xbf0,.6k\xfc\xca\x5c\xb5\xf3\x04>\x10\xf6\x00X\xbe\xd4\xcf\x86\x8f\x96<\x9b\xb7v^\x93L\xda@\xd8\x13\xf6\x00\xa8\x90X\x8a\xa5\xd1\xdayM\xd2\xc3\x87\xb0'\xec\x01Pm\xda\xe7C\x98\x841\x8eA\xefU[\x1f\xbc\xc0\x87\xb0'\xec\x01P\xed\xf5\xda{\x8d,y&\xf0\x81\xb0\x07@\x0b|\xf3\xf1\x17\xc2gD(\xc5\xb2\xd4\xday\x02\x1f\xc2\x9e\xb0\x07\xc0\xf2\x0c\x8b\xed\xddl\x01K\x9e-\x8bI\x1b\x08{\x00\xd0s\x9f\xd1\x04\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{Z\x15\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\xd66\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xa2\xc0W\x84\xb3G\xc2\x1e\x00@\x8f\x03_\xe1\xa8\x08i[\xc2\x1e\x00@\x0f\x03_\xec\xdd\x0b\x01m\xd4\xc0m\x09{\x00\x00m\x0b|\x85\x83x9W\xb8\x12\xf6\x00\x00\xda\x1b\xf8\xee\x83\xd5f\x11\xda\xf6\x84=\x00\x80\x1e\x05\xbe\x18\xf06'\x84?a\x0f\x00\xa0\x0f\x81oB\xc0{Rg\xf2\x86\xb0\x07\x00\xd0\xe2\xc0\x17\x83\xdd\x93\x84\x10(\xec\x01\x00t1\xf0\x95\x04\xbb\xca\xe0%\xec\x01\x00t;\xf0\x95N\xde\x10\xf6\x00\x00:\x10\xf8\x8a\xd0\x16B\xd6f\xc9.\x07\xc2\x1e\x00@\x87\x03_V=l\xbb\xfb\xfa\xe4\x0da\x0f\x00\xa0#\x81/\x06\xb9\xdd\x84]\x0f\x84=\x00\x80\x0e\x06\xbel\xcap\xed\x04#a\x0f\x00`u\x06\xb7\xb7\xb73\xfdb\x11\xe0^\x15\x17\x1b\x89\xbb_\x16
\xdb\xb6\xb0\x07\x00\xb0|3\xf5\xf0\xc5\xc9\x1a\x1b5~E\xd8\x03\x00\xe8R\xe0\xcbfX:M\xd8\x03\x00X\x8d\xdaC\xbaq\xb2\xc6Ka\x0f\x00\xa0\x1bf\xe9\xe1\xcb\x85=\x00\x80\xee\xa8\xd5\xc3\xf7\xe6\xf6W\x1f\x15\x17WY\xbd\xf3\xf7\x84=\x00\x80\x15\xaa\xdb\xc3\xb7'\xec\x01\x00\xf4;\xf0\x1d,\xe9q\x09{\x00\x00\xcb\x0e|on\x7fu'k\xae\xbc\x8a\xb0\x07\x00\xd0\xb6\xc0\x97-\xa7wO\xd8\x03\x00XE\xe0\x8b\x935\xf6\x96\xf1\x80\x8a\xfb\x1a:,\x00\x00\xcdI\x9a\xa5\x1bW\xd6x\xb1\xc4\xc7u]lG\xc56\xfe\x8d\xcb\xaf\xbdr\x98\x00\x00\x16\x1f\xf8\xae\x8a\x8b\xcd\x15<\xbe\x9bb;\x09\xe1\xaf\x08~\x17\x0e\x17\x00\xc0\x02\x02_\x1cb}\xbf\x05\x8f\xf52\xbb\xeb\xf5;\xd1\xeb\x07\x00\x90.\xe5\x1c\xbeQK\x1ek\x98!\x1c\x86\x95\xaf\x8a\x10:\x8e\xb3\x86\x01\x00\xa8P\xda\xc3\x17'k|\xaf\xc5\x8f?\xf4\xfa\xe5\xbfq\xf9\xb5\x13\x87\x12\x00`\xb2\xaa\x1e\xbeQ\xcb\xc3\xde\x91\xb0\x07\x00P\xee\x8d\x8a\xeb\x0fZ\xf6xM\xe2\x00\x00h*\xf0\xc5\xc9\x1a\x9b-y\x9c&l\x00\x004\x1d\xf8\xb2v\x0c\xe7\x1egz\xf3\x00\x00\xe62q\xd2\xc6\x9b\xdb_\xdd*.^\xae\xe81)\xba\x0c\x00\xd0\xa0i=|\xa3\x15<\x96\xe3\x18\xf2\xce\x1c\x16\x00\x80\xfe\x04>\xbdy\x00\x00\xcb\x0e|on\x7fu/[\xfcd\x8d\xd3\x18\xf2\x94T\x01\x00Xv\xe0\xcb\x16\xd7\xbb\x17z\xf3\xc61\xe8]iz\x00\x80\xe5\xf8\xc4\xa4\x8d\x05M\xd68\xcf\x14H\x06\x00X\x99\xd7{\xf8F\x0d\xddn(\x90<\x8eA\xefJ3\x03\x00\xf4'\xf0\x85\xde\xbc0d;\xd6\xb4\x00\x00-\x0b|on\x7f5\x84\xbdY&k\xe8\xcd\x03\x00\xe8B\xe0\xcb\xea\xf7\xeeY\xee\x0c\x00\xa0\x03>\x9a\xb4Qc\xb2F\xe8\xcd\x0b\x93/,w\x06\x00\xd0\x11\xf7=|\x07\x15\xfb\xe9\xcd\x03\x00\xe8x\xe0\x1bM\xb9\xderg\x00\x00]\x0f|q\xb2\xc6\xc6\x83\x9fY\xee\x0c\x00\xa0O\x81/\xfb\xb8wOo\x1e\x00@O\x03_\x08x{z\xf3\x00\x00\xfa\xe9\xff\x0b0\x00\xb2\x10\xef\xec0\x8f}\x9d\x00\x00\x00\x00IEND\xaeB`\x82\x00\x006\xc9\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe 
ImageReadyq\xc9e<\x00\x006VIDATx\xda\xec\xddO\x8c$Y~\x17\xf0\x88\xf5X\xfe\x87]\xbd\xd2\xf2O+\xdc\xd9\x1cX\x0d\x18U\xad\xc0bA\xac\xaa\xc6B\xc2\x5c\xdc5\x12\x17\x9f*\xe7\xc0\xc1\x87\xa5kN\xec\xad\xa3%\x0e\xbem5#$$\x0e\x93u\xc2\xe2\xe0\xa9>\x1a\x1bu\x95\x16\xc1\x98?\xda*\xb3\x92Y\x0c\xee*\xc4\xb2\x96\xd0j\xbbla\x0c\xd8n\xe2M\xbd\xda\xa9\xe9\xc9|\x11\x91\x19\x99\x19\x11\xf9\xf9H\xa1\xec\xae\x8c\xca\x8c|\x99\x95\xf9\xcd\xf7\xe2\xfd^\xfe\xea\xd5\xab\x0cH\xdb\xfa\xca\x97\xf6\xcb\x8b\xd1\xf5{\x1f\x1ei\x0d\x00\xfa\xe63\x9a\x00j9\x8c\x1b\x00\xf4N\xae\x87\x0f\xd2\xb6\xbe\xf2\xa5Qy\xf1\x22\xfe\xf7\xed\xeb\xf7><\xd1*\x00\xf4\x89\x1e>\xa8V\xdc\xf9\xb7^>\x00zG\x0f\x1f$l}\xe5K\xf7\xca\x8b\xcb\xf0\xcf;?~p\xfd\xde\x87\x97Z\x07\x80\xbe\xd0\xc3\x07i\xfb\xaf\x85\xbd\xa0\xd0,\x00\x08|0\x1c\xd3\xc2\xdd~\xec\xf9\x03\x00\x81\x0f\xfa\xac\x0cu{\xe5\xc5\xfdiWe7=\x7f\x00 \xf0A\xcf\xa5&h\x14\x9a\x07\x80\xbe0i\x03\xa6x\xad\x14\xcb,o]\xbf\xf7\xe1\xa9\xd6\x02\xa0\xeb\xf4\xf0\xc1t\xe3\x1a\xfb(\xd1\x02\x80\xc0\x07=V'\xcc=\x8c=\x81\x00 
\xf0A\x9f\x94!n\x9c}\xba\x14\xcb,c-\x06\x80\xc0\x07\xfds\xb8\xa4}\x01@\xe0\x83u\x8b\xa5X\xb6\x9b\xfcJ\xec\x11\x04\x00\x81\x0fzb\x9e\xf0\xa6\x97\x0f\x80NS\x96\x05\xa2\xb8z\xc6\xf7\xe6\xfcu%Z\x00\xe8,=|\xf0\xb1Ez\xea\xc6\x9a\x0f\x00\x81\x0f\xbao\x91\xd0v`}]\x00\x04>\xe8\xb08\xf1\xe2\xfe\x827\xe3\x5c>\x00\x04>\xe8\xb0qGn\x03\x00\x04>h[\x5c-c\xb7\x85\x9b\xba\xafD\x0b\x00\x02\x1ftS\xd1\xe2m\x09|\x00t\x8e\xb2,l\xb48\xd1\xe22\xab\xbf\x94Z\x1d\x0f\xae\xdf\xfb\xf0R\xeb\x02\xd0\x15z\xf8\xd8t\xe3\x96\xc3^PhV\x00\x04>\xe8\x8ee\xcc\xac\xddW\xa2\x05\x00\x81\x0f:\xa0\x0ce\xfb\xd9\xe2\xa5X\xa6\xdet\xe6\x5c>\x00\x04>\xe8\x84e\x8625\xf9\x00\xe8\x0c\x936\xd8H\xb1\x14\xcb\x8b%\xdf\xcd\xdb\xd7\xef}x\xa2\xb5\x01X7=|l\xaaU\xf4\xc0\x8d53\x00]\xa0\x87\x8f\x8d\xb3\xa4R,\xb3(\xd1\x02\xc0\xda\xe9\xe1c\x13\xed\xaf(\xec\x05\xce\xe5\x03@\xe0\x835Xe\x08\x1b+\xd1\x02\x80\xc0\x07+T\x86\xaf\xbd\xf2b{\x95w\x99\xdd\xf4(\x02\x80\xc0\x07+2^\xc3}\x1a\xd6\x05`\xadL\xda`c\xac\xa8\x14\xcb,o]\xbf\xf7\xe1\xa9g\x01\x80u\xd0\xc3\xc7&\x19o\xe8}\x03 \xf0\x81\xc0\xb7\x02\x07\xb1\x87\x11\x00\x04>X\x862l\x85\xb0w\x7f\xcd\x871\xf6L\x00 \xf0\xc1\xb0\xc3\x96\xc0\x07\x80\xc0\x07\xcb\xb0\xf5\x95/\xed\x94\x17\xbb\x1d8\x94\xfb\xb1\xa7\x11\x00\x04>hY\x97\xca\xa2\x08|\x00\xac\x9c\xb2,\x0cZ\x5c\xe5\xe2{5w\xbf\xbe\xfd\xb5\x86w\x13~\xefeV\xff\x1c\xc1/^\xbf\xf7\xe1\xb9g\x07\x80U\xd1\xc3\xc7\xd0\x8dk\xecsUn\xef\x94\xdb\xa8\xdc\xe6\x09b\xe7e\x80\x1b\xc5\xdb8\xab\xb1\xbfB\xcc\x00\x08|\xd0\xa2T\xb8z\x96\xdd\x14D\x1e\x95\xdb\xa4\xdc^.rG\xf16\xf6\xca\x7f~\xb1\xdc\x8e\x13\xbb\x1eX_\x17\x00\x81\x0fZP\x86\xaa\xb0\x86\xed\xeb\xc3\xaca\xf8\xf5i\xb9=(\xc3\xd9\xfe2V\xbf\x08\xc3\xb5\xe56.\xff\xf9\xd9r{\x92\xdd\xf4 
\xben\xec\x19\x02`U\xde\xd0\x04\x0c\xd8\xdd\xde\xbd\x8br;\x0a\xbdp\xab\xba\xf3\xd8cX\x84-\xce\xce\x0d\xdb\xee\x9dc;\xf2\x14\x01\xb0\x0az\xf8\x18\xa4\xb8\xaaE\x08Wah5\x0c\xdb\xee\xac2\xecM\x09\x7f\xb7\xc3\xbd\x0f\xe21\xdd\x8b=\x90\x00\xb0tz\xf8\x18\xb20l{\xd9\xa5\x03\x8a\xc73\x8e\xe7\xf09\x8f\x0f\x00\x81\x0f\x16\x0cV]>\xbe0\xdc\xfb\xd23\x05\xc0*\x18\xd2\x05\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x04>\x00X\x91<\xcf\xef\x95\xdb\x9e\x96\x00\x81\x0f\x80a\x86\xbd\x9d\xf2\xe2RK\x80\xc0\x07\xc00\xc3\xdeay\xf1\x8dr\xdb\xd2\x1a\xb0\x5c\xd6\xd2\x05`\xd5A\xef^y1)\xb7\x87\xb7?{\xf5\xea\xd5\xa9\x96\x01\x81\x0f\x80a\x84\xbd0\x84{Rn\xf7\xb5\x06\xac\x8e!]\x00V\x15\xf6n\x87p_\x0f{gZ\x07\x96K\x0f\x1f\x00\xcb\x0ez\x9f\x1a\xc2\x05\x04>\x00\x86\x13\xf6\xea\x0c\xe1\x9ej)X.C\xba\x00,+\xec\xcd\x1a\xc2\x05VL\x0f\x1f\x00m\x07\xbd\xa6C\xb8\xa7Z\x0d\x04>\x00\xfa\x13\xf6\xcc\xc2\x85\x0e2\xa4\x0b@[ao\x9c\xdd\xf4\xd65\x0a{j\xf0\xc1\xf2\xe9\xe1\x03`\xd1\xa0\x17\x86p\x8f\xca\xed@k\x80\xc0\x07\xc0\xf0\xc2^\x18\xc2\x9d\x94\xdb\xf6\x9c7\xa1\x06\x1f\xac\x80!]\x00\xe6\x0d{\xe3\xecf\x08w[k@\xb7\xe9\xe1\x03\xa0i\xd0ks\x08\xf7T\x8b\x82\xc0\x07@\xb7\xc2\xde\xa2C\xb8\xc0\x1a\x18\xd2\x05\xa0n\xd8\x1bg\xed\x0f\xe1\x9ejYX>=|\x00T\x05=\xb3pA\xe0\x03`\xc0ao\x94\xdd\x14R^\xca\x10\xae\x1a|\xb0\x1a\x86t\x01\x98\x15\xf6\xf6\xcb\x8b\xf3\xcc\xf9z 
\xf0\x010\xc8\xb0\x17\x86p?(\xb7\xad%\xde\x8d\x1a|\xb0\x22\x86t\x01\xb8\x1b\xf4F\xd9\x12\x87p\x81\xf5\xd0\xc3\x07\xc0m\xd8[\xf5\x10\xee\xa9V\x07\x81\x0f\x80\xd5\x85\xbdU\x0c\xe1\x02kbH\x17`\xb3\x83\xde([\xdf\x10\xee\xa9g\x00VC\x0f\x1f\xc0\xe6\x86=\xb3pA\xe0\x03`\xc0ao\xedC\xb8j\xf0\xc1\xea\x18\xd2\x05\xd8\xac\xa0\x17V\xcd\x08AK\xaf\x1el\x10=|\x00\x9b\x13\xf6\xf6\xca\x8b\xcb\x8e\x84=5\xf8@\xe0\x03\xa0\xe5\xb0W\x94\x17\xcf3\xb3pa#\x19\xd2\x05\x18v\xd0\x0bC\xb8a\x16\xeen\xc7\x0e\xed\xd4\xb3\x03\x02\x1f\x00\xed\x18\xc5p\x15f\xe3\xee\xc4\x9f\xedj\x16\x10\xf8\x00\x18\x88W\xaf^\x9d\xc7\xb0\xf7)\xf1\x9c\xbe\xe0\xf5\xcbU\x04\xc2S\xcf\x0e\x08|\x00,?\x0c\x9e\xa6\xc2\xd7k\x81p/\xd33\x08\x02\x1f\x00\xc3\x0d\x84e\xf8\xbb\x5c\xd2m\x03+`\x96.\x00Iq\x86\xef}-\x01\x02\x1f\x00\xc3\x0c{a\x96\xefa\xcb7\xab\x06\x1f\x08|\x00tHX\x82-U\xbb\xefI\xb9]k&\x10\xf8\x00\xe8\xa18i\xe3 \xb1\xcb\xc5\xabW\xaf\x8a\xf2r\xdc\xf0\xa6O\xb5.\x08|\x00tCQq\xfdGC\xbde\xe8\x0b\x85\x9d\x9fj.\x10\xf8\x00\xe8\x91<\xcf\xc7Y\xba\x0c\xcb\xb3\xbb3m\xcb\x7f\x87\xf0wQ\xf3\xe6O\xb50\xac\x96\xb2,\x007\x01g\x94\xdd\xacJ\xb1Wna\xa2BrU\x8a2\xe0\xe4\x03n\x8b\xf0\xf8\x8b\x8a\xdd\xa6M\xe4\xd8\xcfn\x8a<[\xaf\x17\x04>\x80N\x84\x9a\x9d\xec\xe3\x82\xc2{]\x0c)\xf1\x1c\xba\xe7\xf1\xbfg1L]\x96\xdbi\x5cAcYB\x98K\x95ayR\xde\xff\xe5\x94\x10|\x19{\x06?H\xdd\xb8\x1a| 
\xf0\x01,;\xe4\x85@\xb2\x9f\xf5\xa3\xae\xdc\xe8\xce\xbfw\xb3;\xbd\x8d\xe5c\xb9\x0d\x81'm\x06\xc0\xd8\xd3\x99*\xc3r\x95\xdd\xcc\xdc\x9d\x15\xe6N\xca\xdb\x08\xe7\xf3=\xf2\x8a\x03\x81\x0f`U!\xef^\x0cx!\xc4l\xf7\xec\xf0G\x15\xd7\x7f?\x04\x96\x8f3\x04\xb1\xd3\x10\x00\xe3$\x8ayU\x95a)\xca\xdb\x7f\x99\xba\x81p>_\x0c\xd7\xd3\x86\xc3\xd5\xe0\x8350i\x03\x18l\xd0\x8b+D\x5c\x96\xdb\xfb=\x0c{\xc1^\x83}C\x8fe(\xa1\xf2A\xf9\xb8_\x96\xdb\xa4\xdc\xf6\x1b\xb6Y\xb8\xbf\x87\x89]\xce\xca07\xa9ys\xe1\xbe\xa7\xd5\xe7{\xe9\xd5\x09\x02\x1f@\x9bA\xefq\xd6\xef\x09\x04\xa39\x7fo\xebN\xf8\x0b\xe7\xd5\x15q\xa8\xb6\xcaQ\xc5\xf5E\xdd\x03\x88\xbd\x80\xd3\x02\xe7\xb9W)\xac\x9e!]`\x88\x0e\x17\x08z\xd7\xd9\xc7\x93#\xc2\xf6r\x8d!\xe5~K\xb7\x11\x82\xef\xe32\xf4=\x0b\xa1n\xda\xa4\x898\xd9\x22\xd5\x0bz\xdct\xb2E\xd8\xbf\xbc\xdd'\xf1\xfeo\xe9\xe1\x03\x81\x0f`1\xa1g\xa9\x0c\x19\xe1\x1c\xb6\x83\x9a\xbf\x12\xce);\x8d\xdby\xd5\xf9i+\xf6V\xf6q\x89\x98\xbdx\xb9H\x8fe\x18\xae}\x18\xcf\xf7+n\x87g\xe3y\x8eG\x15!\xf8p\xce\xe7\xa3\x88C\xc5\xb7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x15G\x89\xc0\x17\xc2\xce\xed\xcc\xd6\x93.?\x88;=j\xdf?\xce84\xbbwg\x9b\xa7\x170\xfc\xce\xfbq\xe8\xbb\xa8\x11$\x8f\x16\x0c\xc2ah\xf72\xde\x87\x1e>\x10\xf8\x00Z\x09J\xe7e\x98\x09\xab>\xdc\x1d\xa2<.\xb7I\xdfk\xc0\xc5\xfaw\x93\xb8\xdd\xad'8\xce\x9aOL\x09\xc1\xef\x1f\x96\xdb\xe7\x13\xfb\x5c\xc5\xf5r\x179\xe6\x97q\x02\xc9\xf3%\xd7\x0f\x04f0i\x03\x18\xaa\xd0\xcb\x17\x86\x22\xc39d\x9f-\x83\xc6x\x88\x05\x7fC\x80*\xb7\xd0\x03\x17\x82\xdf\x83r{7\xab\xbf\xc4YP\xd5\xe3v\xd8\xd2q\x9e\xc6c\x03\x04>\x80\xd6\x82P\xe8\xcd\xbb\x17z\xa7:v^\xde2\x1f\xf3\xe5k\xe1/\x14@\xbeJ\xfcJ\x08\x86\x7f)q\xfdY\x9b\xc3\xde\xe1\xd8\xbc2A\xe0\x03\xa0\xdd\xf0wXn\xa3\xf2\xbfo\x97\xdb\xb3)\xbb}\xae\xe2f\xc6Z\x12\x04>\x00\xfa\x11\xfe\xc2\xea\x1b\xe1\x1c\xba\xdb^\xbf0\xd4\x1df'\xa7\xce\xdd{:m\xbd\x5c@\xe0\x03\xa0\xdb\xc1\xef\xa3^\xbf\xec\xa6\xa0\xf3Nb\xd7\x10\x08\x0b-\x06\x02\x1f\x00\xfd\xb5\xf0z\xb9\x80\xc0\x07@G\xc52.\xa9\xa2\xd4\x17&W\x80\xc0\x07@\xbfU\x85\xb9CM\x04\x02\x1f\x00=\x15\xd
7\xcb\xddM\xec\xf2l\x88\xb5\x0a\x01\x81\x0f`S\xc2^X/\xb7\xa8\xd8M\xef\x1e\x08|\x00\xf4X\x08s\xa9uw\x9f(\xc3\x02\x02\x1f\x00=\x95\xe7\xf9(K\xf7\xde\x85\xd58L\xd4\x00\x81\x0f`)A\xe4\xa8\xdc\xce\xe3p#\xcb\xa3\x0c\x0b\x08|\x00k\x09{{\xe5\xc5\xa3r\xdb.\xb7\xcbX.\x84\xe5\xb4\xf3\xc3\xc4.a\xbd\xdc\xc9\x12\xeew\x22\xcc\x83\xc0\x07pw\x081\xf4>}#\xce\x22ey\xed<M\xb1\x8c\xb0\x97\xdd\xd4\xfa\x0ba\xfeT\x98\x07\x81\x0f\xd8@e\x008\x8ca\xe0u\xef\xc7\xb0@;\xed<\x9e\xd1\xce\xb7\x8e\xdb.\xc3r'\xec\xdd\x12\xfa@\xe0\x0360\x84T\x95\x07\xd9\xd3J\xad\xb5s\xaaw\xaf\xf5\xf5r\xa7\x84\xbd[[B\x1f\x08|\xc0f9\xcc\xd2\x13\x08\xc6\x9ah%\xed|\xb4\x842,\xa9@'\xf4\x81\xc0\x07l\x82\xd8\xeb\x94*\x0frl\xa5\x87V\xdayT^<N\xecrU\xb6s\xb1\x84\xbb\xde+\xb7\x0b\xa1\x0f\x04>`\xb3\xa5z\x9d\xae3+=\xb4eR\xe3yh],\xed\x22\xf4\x81\xc0\x07l\xb8q\xe2\xba#\xb5\xe0\x16\x17\xcb\xb0\xa4\xd6\xcb\x0deXN\x96u\xff\x0dC\x9f\x92- \xf0\x01\x03\x0b\x22!\xec\xcdZ\xda+\xf4\xeeY\xe9\xa1\x1d\x93\x8a\xeb\x97\xde\x8b*\xf4\x81\xc0\x07l\xae\xfd\xc4uz\xf7\xda\x09\xd5U\xeb\xe5>-\xdb\xf9|\x15\xc7\x12\x9f\xcfq\x0c\xf3\xb3\x84\x92-'\x9e9\x10\xf8\x80a\x04\x91Q6{\xb5\x07\xbd{\xed\xb4qU\xb9\x9b\xd6\xcb\xb0\xd4\x08}!\x5c\xeeU\x84\xbe]\xb5\x17A\xe0\x03\x86!\xd5\xbbw\xa2w\xaf\x15\x9d\x5c/7\x86\xbe\xaaa\xe4\x83\xd8;\x09\x08|\xc0@\x03\x9f\xde\xbd\x05\xc5\x19\xaf\x07\x89]B\x19\x96\xb5\xb5s\x5c\xab\xf7I\xc5n_+\x1f\xc7\xbeg\x13\x04>\xa0\x9fa$\x0c5\xce\x9a5z\xb1\xaas\xca\x06\xae*\xcc\x8d\xd7}\x80\xb1\xee\xdf\xb3\x8a\xdd&q\xf8\x1f\x10\xf8\x80\x9e\xd9K}\xc0k\x9e\x85\x03\xf5~\x96.\xc3\xf2\xacC\xc5\xacC\xf0\xac\x9a\xb9{b\xe6.\x08|@\xff$\xcf\xdf\xd3<\x0b\x85\xbd\xaa\xf5r\x83\xce\x9c\x1b\xd7`\xe6\xaea~\x10\xf8\x80\x9e\xd9\x9b\xf1\xf3\x8b%\xac\xe5\xbai\xaa\xca\xb0<\xe9Z\x1b\xc7!\xfc\xa2b\xb7\x83X\xb7\x11\x10\xf8\x80\xae\x8b=P\xb3\x02\xc9\xa9\x16Z\xa8mGY\xba\xf7\xae\xb3\xe5n\xe2\x04\x92\xaa\xf3\xf9\x8e,\xbf\x06\x02\x1f\xd0\x0f\xa9\x0fl\x81o1E\x96.\xc3r\xd8\xf1r7\xe3,=\xb4\x1b\x1e\xdb\xc4\xd3\x0c\x02\x1f\xd0}{\x02_\xfb\xe2z\xb9\xa92,g\xb1\x14Jg\xdd9\x9f/\xc59\x9e 
\xf0\x01=0\xab\x87\xefB\xb1\xe5\x85T\x0d\xd5\x16}x\x10\xe5k \x04\xbaiC\xbba&\xef\x17c)\x17@\xe0\x03:nVy\x8dKM3\x9f8\x99a;\xb1\xcbq\x87\xca\xb0\xd4\x11\xceC\xbc;\xb4\x1b&\x9a\xec\xa8\xcf\x08\x02\x1f\xd0\x1f\xb3\xea\xc3\xf90\x9f/\xecU\x95aY\xf9z\xb9\x8b\x8a\xb3\x88\xc3c\xd2\xab\x07\x02\x1f\xd0\xd3p\x92\x09|\xad\x0a\xbda\xa9\x89\x1aG},u\x13B\x9e^=X\xae74\x01\xb0Do\x95\xdb(n{\xf12\x94iq\xfe^\xf3\x00\x1d\xda\xeeqb\x97+\xbdc\x80\xc0\x07\xacT\x9c\x94q\xaa%Z3\xa9\xb8\xfeP\x13\x01\xb3\x18\xd2\x05\xe8\xb8X\x86%\xb5^\xeeY\x9c\xf1\x0a \xf0\x01\xf4\xd4\xa4\xe2z\xbd{\x80\xc0\x07\xd0Wy\x9eW\xad\x97{l\xb2\x03 \xf0\x01\xf47\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01\xf4X\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa7\xf2<\x0f\xcb\xd2=J\xec\x12\xca\xb0\x1ci)@\xe0\x03\xe8\xaf\xaa07\xd6D\x80\xc0\x07\xd0Sy\x9e\xefg\xd5eXN\xb5\x14 \xf0\x01\xf4\x97\xde=@\xe0\x03\x18\xaa<\xcf\x8b,]\x86\xe5I\x1f\xd7\xcb\x05\x04>\x00\xb2\xef\xaf\x97\x9b*\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00=Vd\xe92,\x87\xca\xb0\x00\x02\x1f@O\xc5\xf5r\x0f\x12\xbb\x5c\x94ao\xa2\xa5\x00\x81\x0f\xa0\xbf\x8a\x8a\xeb\xad\xa8\x01\x08|\x00}\x95\xe7\xf98K\x97a9V\x86\x05\x10\xf8\x00\xfa\x1b\xf6\xc2z\xb9\xa9\x89\x18a\xa2F\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pmj\xa2\xc6\x912,\x80\xc0\x07\xd0S\xb1\x0c\xcb\xe3\xc4.W\x992,Um\xb8\x13{I\x81\x8474\x01\xc0\xdaT\x859eX>\x19\xee\xf6\xca\x8b\x9d\xb8\x85\xb0|{\xde\xe3[\xe5v\xaa\x85@\xe0\x03\xe8bxy\x98\xd8%\xac\x97{\xa2\xa5>\xe1\xf9\x8c\x9f\xef\x08|\x90fH\x17`=&\x15\xd7+\xc32%\x04\xcf\xf8\xb9!]\x10\xf8\x00\xba%\xcf\xf3\x10\xe6R\xeb\xe5\x862,\xe7Z\xeaS.g\xfc|O\xd3\x80\xc0\x07\xd0\xa5\xb0\x17z\xa3\x8a\xc4.\xa1\x0c\x8b\xde\xbdf\x81\x0f\xa8\xe0\x1c>`\x15!g/\xbb9\xc9>l\xb7\xff\x0e=\x5coo\xe0yj!\xec\xa5\xca\xb0\x14&j4\x0e|\xbb\x9a\x06\x04>`\xfdNf\x84\x9c\xbdx\xdd\xa6\x04\xdf0\xb9\xe0Qb\x97\xab2\xec)\xc3\xd2<\xf0\x01\x15\x0c\xe9\x02\xab\x0a|\xd3\xecoX;T\x85\xb9\xb1\x97\x0a 
\xf0\x01C\x0b|\xf7c\xaf\xd7\xe0\x95\x8f3\x84\xdb\xd4\xd0\xe3\x99\xf5r+]&\xdawO\xf3\x80\xc0\x07\xacQ<O\xefz\xc6\xd5\xe3\x0di\x06\xbd{\x8b\xbf\x8e.\xb5\x02\x08|@\xb7M65\xe8\xe4y^d\xe92,O\x85\x19@\xe0\x03\x86`V\x0f\xd7V\x19\x88\x06\x1b\xfab\x19\x96T\x99\x95\xd0\xf3Yxy\x00\x02\x1f\xd0{\xb1\x07k\xd6J\x09C\x0e<!\xe8\xa6\xca\xb0X/\x17\x10\xf8\x80\xc1\x85\x9fi\xee\x0f\xb1\x97/N$8H\xecrQ\x86\xbd\x89\x97E\xa3/\x0e\xf9\x8c\xedT\xeb\x80\xc0\x07t\xe3\xc3:L\xde\xb8\x9aqu1\xc0\x87\x5c\xf5\x98\xac\xa8\x01\x08|\xc0 \xcd\x0aA\x83\xea\xe5\x8b\x8f%U\x86\xe5\x99^)@\xe0\x03\x06)\x0ea\xce\xec\xe5\x8b\x93\x1c\xfa\x1e\xf6\xac\x97\x0b\x08|\xc0\xc6\x9b\x15\x86\xee\x0f$\x08\x1df\xe92,G\xca\xb0\x00\x02\x1f0h\x15\xbd|\x8f\xf3<\x1f\xf5\xf5\xb1\xc5c\x7f\x9c\xd8%<n\xeb\xe5\x02\x02\x1f\xb0\x11R=y\x93\x1e?\xae\xaa0W(\xc3\x02\x08|\xc0F\x883vg\xd5\xe5\xdb\xcd\xf3\xbcwC\xbb\xb1\x0c\xcb\xc3\xc4.g\xca\xb0\x00\x02\x1f\xb0i\xc6\x89\xeb\x8a\x1e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0f\x89\x13\x17\x9e\xcc\xb8:\xacN1\xe9\xcbc\x89=\x92\xdb\x89]\x8e\xcb\xc7{\xeeY\x07\x04>`\x13C_Q^\x5c\xcc\xb8\xba\x17C\xbb\xca\xb0\x00\x02\x1f@\xb5\xfd\x18\x8a\xa6\xf9Z\x19\xa8v:~\xfc!\xec\xa5\xd6\xcb=2Q\x03\x10\xf8\x80\x8d\x16\x87v\xc7\x89]&]=\xf6x\x9e\xe1\xa3\xc4.W\xb1\x17\x13@\xe0\x036>\xf4\x85Y\xbbOg\x5c\xdd\xe5s\xdf\xaa\xc2\xe8\xd8\xb3\x0b\x08|\x00\x1f\x87\xbep\x9e\xdb\xf1\x9d\x1f\x85a\xdew\xca\x9fw24\xe5y\x1e\x86\xa2S\xeb\xe5\x9eY/\x17\xe8\x8274\x01\xd0\xb1\xd07\x8e\x93 F\xe56\xee\xf8\xcc\xd6\xaa2,c\xcf( \xf0\x01L\x0f}\xfb]?\xc62\x94\x16Yz\xbd\xdc\xa7\xd6\xcb\x05\xba\xc2\x90.@\xf3\xb0\x17z SeV\xc2Pt\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pn\xaa\x0c\xcb\xa12,\x80\xc0\x07\xd0Sq\xbd\xdc\x83\xc4.\x17\xd6\xcb\x05\x04>\x80~+*\xae\xb7\xa2\x06 
\xf0\x01\xf4U\x9e\xe7\xe3,]\x86\xe5\x992,\x80\xc0\x07\xd0\xdf\xb0W\xb5^n\xa0w\x0f\x10\xf8\x00z,\x84\xb9T\x19\x96'\xca\xb0\x00\x02\x1f@O\xc5\xf5rS\xbdwWYu\x11f\x00\x81\x0f\xa0\xc3\xaa\xca\xb0\x14\xca\xb0\x00\x02\x1f@O\xc52,\x0f\x13\xbb\x9c)\xc3\x02\x08|\x00\xfdV5T[h\x22@\xe0\x03\xe8\xa9X\x86e;\xb1\xcb\xb12,\x80\xc0\x07\xd0\xbd\x107)\xb7\xf3r\xdb\xa9\xd8/\x94aI\xf5\xee\x85\xf5r\x95a\x01\x04>\x80\x8e\x85\xbdqv\xb3,Z\xe8\xb5;-\xff\x9f\x0alE\x96\x9e\xa8qd\xa2\x06 \xf0\x01t+\xec\x85\x1e\xbd\xf7\xef\xfc(\x84\xb9\xaf\x95??\x8deW\xee\xee\x1b\xfe\xff(qsWe\xd8+\xb4*\xd0\x17oh\x02`\x03\xc2^\x18\x9e=\x9dquX*\xedE\xb9\xcf\x93\xec\xe3^\xbbI\xc5M\x1e\xcey\x1c\xfb\xf1\xb6/\xcb\xed\xf5\xde\xc1\xf3:?s\xce \xf0\x01L\x17B\xd2V\xc5>\x8fC\x90+C\xd9?\xc9\xd2\xeb\xe5\x862,'\xf3\x1cD\xf8\xbdX\xe6e\xda\xf1\xec\xd6\x0c\x8dS\x8f\xe9\xb5\xff\xbf\x8ca\xb1\xeag\x97V\x07\x01\x81\x0f`(B@\x1b\xd5\x08}\xe1\xfa\x7fP\xb1\xcfx\x91\x03)\x03\xd6\xed\x84\x91pL\xdb-=\xbeia\xf1\xe1\x9c\x012\xac\x1a\xf2z\x08\xac\xd5\xfb\x18~\xe6\xbcF\x10\xf8\x00\xd6\x22\x9co\x17f\xe7f7\xb3n\x1f.pSO\xdb\xe8\x11\x0b\xb7\x11{\xfaN\xb2\x9a={+t?\xfb\xf4\x9a\xc1\xb5\x8fqJ\x80<\x9b\xb2\xdbi\x0c\x87'^\x9d \xf0\x01\xb4\x19\xfaBP\xdb\x8fAk2%\xd4T\xf9\x7f\xe5\xf6\x9f[<\x9e\xd0\x13\xb6\x17\x83\xe8\xc1\x80\x9b~ZX\xac*y\x03\xb4\xcc,]`\xd3\x82\xdfi\xb9\x8d\xca\x7f\x86I\x1a\xd7\x0d~\xf5\x07\xcb\xed\x1f\xc7\x1a~\xf7Z<\x9eqy\xf1\xee\x06=\x05\xa1\xc7o\xcf\xd0/\x08|\x00\xab\x08~Evs^\xdfq\xc3_\xbd\xad\xe1\xb7\xd3\xe2\xb1\x84\xde\xaew\x1a\x06\xd0>\x0a+\x93\x08{ \xf0\x01\xac4\xf4\xbd\x8c=l\xffa\x8e\xd0\xf7\x8d2\xf4\x15m\xf5\xf6\x95\xc71)/\xf6\x06\x1c\xfa\x9e\xc6\xb6\x06\x04>\x80\xd5\x8a\xabo\xfc\x959\x7f=\x94r\x09\x130\x8a\x96B_\x98\xf9\x1az\x0e/\x06\xd6\xcc\xef\x94\x8f\xcd2t 
\xf0\x01\xac%\xec\x85\xde\xb9E\xc3Z(\xe5\xd2Z\x98\x89\x93K\xf6\xb2\xe9\xb3[\xfb&\xf4V\xbe\x1d{/\x01\x81\x0f`-BPK\xcd\xd6}\x96\xdd\xd4\xa5\xab\xd2jy\x918\xd4\x1cB\xdfq\x8f\xdb6\x84\xbd=\xa5W@\xe0\x03X\x9b\xb8^n\xaag.\x04\xbdq\x9c\xd1\xfbNE\xf0[J\x89\x91\x1e\xcf\xe0\xbd\x88a\xef\xdc+\x0d\x04>\x80u\x0a!-\xb5\xf2Fq;\x9b4\x0cI\xc6\xe0\xf7V\xf6\xe9^\xb7\x8be\x06\x9b\x1e\xce\xe0\x15\xf6@\xe0\x03X\xbfX|9\xb5\xe2\xc6\xd9\xb4\xf3\xceb\x0d\xbfq\xf9\xcf\x07\xd9\xc7u\xfc\x96^@\xb8G3x\x9fej\xec\x81\xc0\x07\xd0\x11U!\xad\xa8\x08`\x97\xa1\x8e_\xb9\xdd[\xd5\x84\x84\x1e\xcc\xe0\x0d5\xf6\xf6\x85=\x10\xf8\x00\xd6.\x96a\xd9\xae\x08.\xa7]<\xf6\x0e\xcf\xe0}W\x8d=\xe86k\xe9\x02\x9b\x14\xf6\xaa\xd6p\x0dC\xa6\x87\x1d<\xeep\xcc\xb7=g\xa1\xa7\xaf(\xb7\xaf\x96\xdb\xdf\xee\xc0\xe1\xbd\xa3\xec\x0a\x08|\x00]\x12\xc2\x5cj\xa2\xc6QG\x87$\xc3P\xeen\xc7\x8e)\x84\xe3\xfd\xae\xf6\x86\x02\x02\x1f\xb0\x81b\x19\x96\xc7\x89]\xae\xe2\xfa\xba]\xb4\xd3\xb1\xe3\xb9\xad\xb1g&.\xf4\x84s\xf8\x80M1\xa9\xb8\xbe\xcbK\x7fmu\xe8X\xc2\xa4\x91\x1da\x0f\xfaE\x0f\x1f0x\xb1\x0cKjH\xf4\xac\xab+B\xc4c\xefR\xd8Sv\x05zH\x0f\x1f\xb0\x09&\x15\xd7w\xb9w\xef^G\x8e\xe3X\xd8\x83\xfe\xd2\xc3\x07\x0cZ\x9e\xe7U\xeb\xe5>\xed\xf8\xf0d\x08X\xcfb\xf0\x0b\xdb\xf6:\xc2\x9e\xb2+ \xf0\x01t5\xec\x85\x80T$v\xb9\xae\xb8~\xed\xe2,\xd8\xd3\x8a\xc7\xf8\xbd%\x1e\x82\xb2+0\x00\x86t\x81!\xab\xbd^n\x8f-s\x06\xaf\xb0\x07\x03\xa1\x87\x0f\x18\xa4<\xcfC\x10:H\xec\x12\xca\xb0\x1c\x0d\xe0\xa1.#\xf0)\xbb\x02\x03\xa3\x87\x0f\x18\xaa\xaa07\x1e\xc8\xe3l;\xf0\x09{ \xf0\x01t_\x9e\xe7\xfbY\xba\x0c\xcb\xb3\x01\xad\x10\xd1f\xe0\x0beWF\xc2\x1e\x08|\x00]\x0f{U\xeb\xe5\x06\x87\x03z\xc8m\xcd\xdaUc\x0f\x04>\x80\xde\xa8*\xc3\xf2\xa4\x0c5\x97\x03\x09\xb7{-\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x87\x004\xca\xd2\xbdw\xe1\xfc\xb4\xa3\x01=\xe46\x86s\x9f\xaa\xb1\x07\xc3g\x96.0$E\x96.\xc3r8\xb0^\xac\xd1\x82\xbf\xaf\xec\x0al\x08=|\xc0 
\xc4\xe1\xcdT\x19\x96\xb3\x01\x86\x9by{\xf8\xae\x85=\xd8,z\xf8\x80\xa1\xa8\x1a\xaa-\x06\xf8\x98w\xe7\x0c{\xca\xae\xc0\x86\xd1\xc3\x07\xf4^\x9e\xe7\xe3,=[\xf5x@eXn\x1f\xf3<\xbd{W\xc2\x1el&=|@\xdf\x83OU\x19\x96\xce\xaf\x97;\xa7\xa6\x81O\xd9\x15\xd8`z\xf8\x80\xbe\x0b\xb3rS\x135\x8e\x86R\x86\xe55\xa3\x06\xfb>\x13\xf6`\xb3\xe9\xe1\x03z+\x96ay\x9c\xd8%\xac\x97[\x0c\xf4\xe1\xef\xd5\xdc\xefX\xd9\x15@\x0f\x1f\xd0g\x93\x8a\xeb\x0f\x07\xfc\xd8\xeb\x0c\xe9>\x11\xf6\x80@\x0f\x1f\xd0K\xb1\x0cKj\x96j(\xc3r2\xd0\xc7>\xca\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07\xf4^U\x98\xd9\xd4\xde\xbd0Ie<\xd4\xb0\x0b\x08|\xc0\x86\xc8\xf3\xbcj\xbd\xdc\xa7\x03/=\xb2\x93\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0oa/\x94a)\x12\xbb\x0c\xb5\x0c\xcb]{S~v!\xec\x01\xb3\xe8\xe1\x03\xfa&\x84\xb9\xd4\xf9k\xc5\x06\x94\x1f\x19\xcd\x08{\xca\xae\x00S\xe9\xe1\x03z#\xae.\xf1(\xb1K(\xc3r4\xf06\x08=\x9cw\x87\xb3\x8f\x85=\xa0\x8a\x1e>\xa0O\xaa\xc2\xdcx\x03\xda\xe0\xee\xf9{j\xec\x01\xb5\xe8\xe1\x03z!\xcf\xf3\xfd\xac\xba\x0c\xcb\xe9\x064\xc5^\xbc|W\xd8\x03\xea\xd2\xc3\x07\xf4\x85\xde\xbd\x1b\xa1\x87O\x8d=@\xe0\x03\x86%\xcf\xf3\x22K\x97ay2\xd0\xf5r\xa79\xdc\xa0\xc7\x0a\xb4\xc4\x90.\xd0\xf5\xb07\xca\xd2E\x94C\x19\x96\xa3Mi\x0fa\x0f\x10\xf8\x80!*\xb2t\x19\x96C3T\x01\x04>\xa0\xa7\xe2z\xb9\x07\x89].\x9c\xcb\x06 \xf0\x01\xfdVT\x5c\x7f\xa8\x89\x00\x04>\xa0\xa7\xf2<\x1fg\xe92,\xc7\x1bR\x86\x05@\xe0\x03\x06\x19\xf6\xc2j\x12\xa9\x89\x18\x9b\xb0^.\x80\xc0\x07\x0cZ\x18\xaaMM\xd482[\x15@\xe0\x03z*\x96ay\x9c\xd8\xe5*\xdb\xa02,\x00\x02\x1f0DUaN\x19\x16\x00\x81\x0f\xe8\xabX\x86\xe5ab\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xfe\x9aT\x5c\xaf\x0c\x0b\x80\xc0\x07\xf4U\x9e\xe7!\xcc\xa5\xd6\xcb\x0deX\xce\xb5\x14\x80\xc0\x07\xf43\xec\x852,Eb\x97P\x86E\xef\x1e\x80\xc0\x07\xf4X\x08{\xa92,\x85\x89\x1a\x00\x02\x1f\xd0Sy\x9e\xef\x94\x17\x8f\x12\xbb\x5c\x95aO\x19\x16\x00\x81\x0f\xe8\xb1\xaa07\xd6D\x00\x02\x1f\xd0Sy\x9e\xefg\xe9\xf5r\xcf\xac\x97\x0b 
\xf0\x01\xfd\xa6w\x0f@\xe0\x03\x86*\xcf\xf3\x22K\x97ayb\xbd\x5c\x00\x81\x0f\xe8o\xd8\x1be\xe92+\xa1\x0c\x8b\x89\x1a\x00\x02\x1f\xd0cE\x96.\xc3b\xbd\x5c\x00\x81\x0f\xe8\xab\xb8^\xeeAb\x97\x8b2\xecM\xb4\x14\x80\xc0\x07\xf4WQq\xbd\x155\x00\x04>\xa0\xaf\xf2<\x1fg\xe92,\xcf\x94a\x01\x10\xf8\x80\xfe\x86=\xeb\xe5\x02\x08|\xc0\xc0\x850\x97*\xc3r\xa4\x0c\x0b\x80\xc0\x07\xf4T,\xc3\xf28\xb1\xcbU\xa6\x0c\x0b\x80\xc0\x07\xf4ZU\x98+\x94a\x01\x10\xf8\x80\x9e\x8aeX\x1e&v9S\x86\x05@\xe0\x03\xfa\xad\xaawo\x90\x135\xca\xa0\xbb_n\xafjn\xe3.\x04\xf3\x06\xc7kr\x0d}\xfd\xbb\x1c\x97\xdb\xf9k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x98\xf3\xcd5\x04\x83\xed\xc4.\xc7\xaf^\xbd:\x1f\xe2c/\x1f\xd7IyqVs\xf7\xa2\x03\x87\x5c\xf7\x18Bal\xe7[\xd2\xc7\xf7\xa3Iy\xf1\xfe\x94\xf7\xa40\x02\xf1<\xae\xef-\xf0\x014|sU\x86\xa5~\x88\xba\xbf\xce^\xbe\xd8\xbb\xb1[sw\xbd{\xf4\xf1\xfd(\xfc-\x1eT\xec\xf6x\xc8=}\x02\x1f\xb0\xcc\xb0\x93Z/\xf7h\xe8\x135b\x11\xe9g-\x87\xc3e\x98\xd4\xdcOal\xfa\xea\xb0\x07\x7f\x87\x02\x1f\xd0\xbbo\xd3\xa3\xf2\xe2Qb\x97\xab28\x14\x1b\xd2\x1cu?h\xd6\xd2\xcb\x17\xef\xf3~\x8d]\x15\xc6\xa6\xaf\xefG{\x15_>\xef\xda\x1dj;\x08|\xc02L*\xae\x1foJC\xc4b\xd2Ok\xee\xbe\x8e\x10\x5c\xf7>\x15\xc6\x06\x81\x0f\xe0\xfb\xdf\xa6\xf7+\xbe%\x9fm\xe0\xb0`\x08U\xd75\xf6[i/_\x83\xde\xbdM\xea\x91\x05\x81\x0f\xa0\x86\xaa\x19\x9c\xe3Mk\x90x\xaeb\xdd\x99\xadE\x9c\xf0\xb2\xaa 
Z\x87\xa1\x5c\xfa\xec\xb2\xc1\xbe\xd7Cm\x04\x81\x0fhM\x9c\x09\x97\xea1z\xba\xc1\xc3\x82!\xf0]\xd5\xd8\xef\xfe*\x02V,\x99S\xa7w\xef,\x96\x98\x81\xbe~\xe1\x0a\xef9g\x0d\xfeN\x05>\x80D\x80\xb8W\x11T\xc27\xe7b\x83?t^6x\xfc\x87\xcb\xec\xe5\xabQ2\xe7\xae\xb1W7\x030\xce\xaa{\xef.\x04>\x80z\xdf\x8cS3\xe1\x0e7}\xbd\xdc\xb8\x84\xdcE\x8d]\xb7\xb2\xe5\xf6\xf2\x1df\xf5f->5Q\x83\x81\xfc\xed\x85\xd7\xf1^6\xbb\xa7\xef8\x5c?\xe4\xf7\xa87\xbc\x0c\x80E\xc5\xb2\x07\xa9\xa2\xa6\x17\xd6\xcb\xfdD\xd8z^g\xbf\xb2][\xafUX\xa3'\xf6\xd6F\xf7\xc82\xc8\xd0\x17V\xf5\x09K\x08\xee\x94\x97a\x1b\x95\xdbi\xb9\x9do\xc2\x97Q\x81\x0fhCU0p\xd2\xff\xc7\x1f:\xa7\xe5\x07N\xe8e\xa8\xaa\xf7u\xdb\xcb\xd7v\xe8\xaa\xdb\xbb\xb7\xf1=\xb2\x0c:\xf8\x9do\xda\xe36\xa4\x0b,$\x96\xf6H\x85\x17\xab3|\xda\xb8n8k\xf3\x5c\xbe\x06\xbd{gzdA\xe0\x03\xb8\x1b \x8a\xc4.Vg\x98\x22\x9eOt\x5cc\xd7\xb6\xcf\xe5\xab:\xcf\xf2V\xe1Y\x02\x81\x0f\xe0VUi\x0f\xab3\xa4CU\x9d\x9a_\xad\xf4\xf2\xc5\xe5\xee\x0ej\xecz\xacG\x16\x04>\x80\xbb\x01\xe2qb\x97Ps\xeeHKM\x17\x83p\x9d\xf6i\xab\x97\xaf\xa8\xb1\x8f\x1eY\x10\xf8\x00>\xa1*\xac\x14N\xfa\xaf\xd5\x86u{\xf9F\x0b\x86\xf3:\xbd{G\x9e3\x10\xf8\x00n\x03\xc4^y\xf10\xb1\x8b\x93\xfek\x88\xe1\xaaN\x8fZ\xe8\xe5+\x16\xb8\xab:\xbfk\xbd\x5cX\xcf\xfb\xe9(\xbc\xa7\xc6\xf7\xd5\xa5Q\x96\x05\x98Ge\xef\x9e&\xaa\x1d\xfa&5\x96\xa4\x0b\x0e\xc2~M\xcf\x89\xacQ#\xf1\xd6\xd8\xb3\xb1\x96\x0f\xfbp~fx\x8enk\xc3\x85\xff\xa7f\xbd\x87\x1e\xe1\xf3;\xdb\xa9\xf3d{\xf5\x5c\xef\xc7\xe7{4\xedy.\xf7\x99\xf5<\x9f,\xda\xfb.\xf0\x01M\xdf\xb4B0\xd8N\xec\xe2\xa4\xff\xe6B/\xdf\x075\xf6+\xe6\x08fu\xc2\xb7\xd29\xab\xfd\x1b\x1a\xc5\xe7q\xbf\xe2oi\x9a\xad\x18\x14v\xef\xdc^\xa8\xeb8i\xa3W=~Ax^s\xf7\xb7\x97\xb5\xcer\x0cG\x97Y\xbdY\xe5\x95\xc7Q\xf7q\x95\xb7\x93/\xe1\xb1\xec\xc5\xe7\xfb`\x81\xe7\xf9\xfd\xf2vn\x97~\x9b+\xfc\x19\xd2\x05\x9a\xbe\x09\x1fU\xf4>8\xe9\xbf\xa1\xf8aUgq\xf7\x83&\xe7\xf2\xc5\x0f\x9a\xdd\x1a\xbbz\xceV\xf4e\xa9\xdcBo\xcd\x8b\xecf\xc2\xd3vK7\xbd\x1b\x03\xc1\xe5\xa2\xc3\x821\xf8?\xad\xb9\xfbd\x89k>O\xb2\xfa\xcb\xff\x9dt\xf4\xf9\x0eC\
xb5\xa71h\x1e\xb4p\x93\xe1\xf5\xf2~\x08\xc2\xa1\xb7\xbfi\xdb\x0b|@\x13E\xc5\x9b\xb0\x93\xfe\x17k\xdb6\xf7\xab\xbb\xef\x13C\x82+\x09z\x97\xf1\xc3z{\x89w\x15N\x0bx\x1e\x96\xe4[0\xf4\x85/\x00u\xd7|>YB{\x85\x9e\xcf\x875v\xbd\xc8:z\xfaH\x1c\x099\xaf\xf9\x85\xab\xa9\xad\xf8\x85aG\xe0\x03\x96\xf2m\xb5\xbcx\x94\xd8\xc5I\xff\x8b}\xc8\x86\x9e\x80:\xc5\x98k\xf5\xf2\xd5\xec\xddS:g\xf9\x7f7G1\xe8\xdd_\xe1\xdd>\x0a=K\x0b\xf6\xbe\x8dk\xee\xb7[\xde\xcfa\x8b\xed\x15\x8eyR\xf7\x18\xbb\xf8\x05\xf3\xces\xbe\xb5\xc4\xbb\xb9nz\x1a\x86\xc0\x07\xd4U\xf5&lXpqE\x8b\xfb\xd5\xdaG\x8f\xec\xd2\xadk\xcd\xd6\xdd\x06\xc1i\xda\x17\x90p\xdc\xef\xd6}=.R6h\xca\xfbL\x9d\xa0\xf4n<\xc6\xae\x85\xbd\xa2\xe2\x8bq[\x1a\xf7\xac\x0a|@\x9d7\xb1\xbd,\xdd[t\xd6\xd5\xf3h\xfa$\x0e\xad>\xa9\xb1k\xb2\x97\xaff\xef\x9e\xd29\xab\xb1\xc8\xdf\xc5\xc5\x82\xf7\xfd0\x06\x90y_\x8f\xa1\xa7\xaa\xce\xb9\xa5[\x8b\x84\xcb;\xaf\xdb\xbaC\xb9g\xf1\xd8\xba\xf6>\x19\x8e\xffqW_W\x02\x1fP\xf7[w\xcaX\x13\xb5\xa6n1\xe6b\xce\xebn\xe9\x91]M\x88\x0f=\xa8\xa9\xa1\xfa\xf0\x5c?\xcbnz\xd3\xde*\xb7\xcf\x86\x99\xa2q\xdb\xb9\xfdw\xf9\xf3\x07\xe5\xf6v\xbc\xad\xeb\x06\x87\xf0\xb8\x0c\x22;\x0b<\x84q\xcd\xfb[hh\xb7\xc1P\xeeu\x17\xdfojLh\xfbTh\xbd}\xce\xef<\xdf\xb7\xcf\xf3[\xf1\xbaY\xcf\xf5\xf5<_\xb0\x05>\xa0\xea\x8d,\x84\x87\xd4\xf9GO\x9d\xf4\xdfz@\xa8\xf3\xc11\xb5\x97\xaff\xef\xdeq\x17\x87\xc3\x06\xecdJh\x09\x1f\xe6_,\x9f\x87{\xe5\xb6\x1fz\xac\xc29Y\xb3\x86\xd8\xc3\xdfX\xf8\x90/\xb7\x10v\xc2\xf3\xfe\xa4\xc1\xfd\x1f-\xf0z\xbcl\x10\xb0\x16\x19\xda\x0da\xaf\xceP\xee\xb8\xa3\xef7U\xeb\x8a\xdf\xba\x88!o\xef\xf69\x9f\xf2<\x9f\xc6\xeb\xc2c\xbdw'\xe8\xd7\xfd\x02.\xf0\x01s}k=\xac\xf8\xb6]h\xa9\xd6C_h\xd3\xab:\x1f~\xd3>t+~G\xe9\x9c\xd5?\x9f'\xb1\xdd\xafcP\x1b\xc5\x0f\xf3\xf39o\xefe|\x8d\xbc\x95\xd5\xef}\xdbY\xf0\xf8\x9f\xd5\xd8u\xae\xa1\xdd\x06C\xb9\xc7\x1d>u\xa4N(\x0eao\xaf\xe9d\x8b;A\xffA|\x1e\x04>\xa0uG\x15\xdf\xba\x9d\xf4\xbf<u\x82\xf4\xe1\xdd\x99\x985{\xf7<g\xeb\x0b\x04!\xe8\xb5\xd6\xfe18\x8ck\xee~\xd8\xc2\xf1\xb7>\xb4\xdb`(\xf7\xaa\xab_T\xe2\xdf]U\xef\xdeu\
x0c{/\x17x\xbe/co\xf0\x5c_\x14\x04>`\xd6\x9bX\xe8\x11H\x15\x0b\xbd\xe8\xe2\x89\xd3C\x11'TT\x9d0\xbf\xf5\xda\x87`UH\xf4\x9c\xad\xef\xf9<YF\xd0\x8e=^u\xca\xf9\xec/x?/\x1b\xdcF\x93\xa1\xddIVo(w\xbf\xc3_T\xf6\xea<\xceu\x1f\xbf\xc0\x07\xccR\x15\x0c\x0c\x0b._Qc\x9f\xc3\x18\xd0\xc3\x07\xec\xae\xe7\xcc\xebd\xd6\x97\x83\x15\xae\xc2Qkh7\x1eO\x9d\xa1\xdc'\x1d?\xe7\xb4N\xbb\xae}(Z\xe0\x03\xa6\xbd\x11\x8f+\xc2\x83\xb5WW \xb6qe/_|\xbe\xaa>\xf4=g\xc3}\x9d\x5cf\xf5\xce\xb1\xdbk\xe1\xee\xc2\xeb\xacN\xb9\x98\xe4\xd0n\x83\xa1\xdc\xb3\x81\x14t_{`\x15\xf8\x80io\xc4Uo\xb0z\x8aVg\x5c\xf3C85\xfcn\xa2\xc6\xf0\xd5\x09\xf3;\x8b\xdeI\x1c\x96\x1c\xd7\xdc=\xb5\xdek\x9dY\xad\x9d,\xc12\xa7\x9du\x1f\x80\xc0\x074}#\xb6\xf6\xea\x0a\xc5\xb6\xae:G\xab\xea\x83\xf3\xc8s6xuz\x90\xee\xb5\xf4\x9a\x0c\xf7U\xa7,\xcc\xd4\xa1\xddx~p\x9d\x02\xc5\xe3\x01\xbdnG\x02\x1f\xd0\x19\xf1<\xb0TO\x90\xb5W\xd7\x17\xc2\xaf\xe7\xfc]\xcf\xd9f\xa83!`\xb7\xad;\x8b\xc3\xacuV\xe1x8\xe5\xdc\xc1:\xaf\xc7\xe3\x81\xad\xdeS\x08|@\x97(\xc3\xd2A\x0d\x8a1O\x0d\x8b\x9e\xb3\x8dx\x8d\xac\xe3\x1c\xb1q\xcd/\x22\x93;_*\xc75\x82ggK\xb0\xccP\xa7\xed\xef\x97\x8f}\x22\xf0\x01kWc\xc6\x9c\xb5W\xd7\x1f\xc6\x9b\xf6\xf2Y\xe3\x98e\x86\xcc\xcb\x9a\xc1,\x84\x9d\xa2\xc1\xf2c\xfb=\xfb\x92rZs\xbf\xb0:\xce$q^\xe3R\xbd\xe1%\x0b\xdc\x09\x14)\x85&Z\xeb\x87\xeb\xcb8\xeb\xf1\xfd\x06\xbf6\xee\xf0\x17\x8c\xd3\x15\xdc\xcdy\xd9n\xbd\x9c\xac\x12C\xc1N\xdc\xc2\xbf\xf7\xe2U\xa3\xac\xde\x12^\xabz]Nj\xae\x94q\x18\x8f\xbd\xaa\xe6\xde\x93\xbe-\xfb\x17\xbeT\x95mpU\xf3y\x09\x93\xab\xf6B\x00^\xf5\x17h\x81\x0f\xb8\x1df\xd9N\xecr\xac\xa4Gg>\x5c\x8b\x9a\x1f,]_\xe3x\xd73\xfa\xa9\x80\xb7\x1f\x83\xdd^\x97B]\xcd/\x16\x97\x15an+K\xcf$\x0f\xfa\x5c\x82\xa5h\xf0e,<\xb7\xef\xc7\xbf\xe5\xb0\x9d\xac\xa2G\xd3\x90.\xf8\xa0\xa9\x1afQ\xd2\xa3[\xea>\x17\x85\xa6\xea\xc7\x97\xadr\x0b\xc3\xee\xdf\x8b\x81\xe1\xa0ga\xaf\xe9*\x1c\xa9\xf7\x99q\x9f\xbf\x8ce\xf5&\xb1|*\xf8\x85\xb0\x1c\x87z\x97Z\xbaE\xe0\x03\x8a\x8ao\xe6GN\xfa\xef\x94Z\xcf\x85\xe7\xac\xdb_\xb2\xe29m/\xe3\x07\xfe\xc3\xbe?\xa6\x06\xabp\xcc\xf
c\x223\x80\x12,!\xf4^\xcc\xf1{\xb7\xbd\x9f\xdf(_\x13\xe7\xf1K@\xeb\xe7\xf9\x09|\xb0\xd9\x1f<\xa3\xf2\xe2Qb\x97\xab\x81T\xb9\x87\xae\xfc\xcd\x85\x1e\xda\x10l\x1eg\xf5\xd6\x90\xed\xdb\x97\xc7y\x02\xcf\xb3!L\x08\x8b_\xb2\xf6\xe6l\x83[\xdb\xd9\xc7\xbd~E\x9b\xc1O\xe0\x83\xcdV\xf5&k(\x17\xda\x09z\xf7\xe2D\x95\xaf\x0d0\xe8\xdd\x0d<\xe3\x86\xbfv\x95\x0dg5\x8d\xbb\xa1\xefl\xc1\x9b\xda\x8a_\x0a.\xe3\xb9~\x0b3i\x036\xf7\x03(\xbc)\xa5N\x9cW\xd2\x03\xda\xf9[\x0b\xe7f\x9d.\x18\xf4B\xafQ\x08\x13\xe7\xd9'\x87\xf5O\xef\xfc;\x9c\x8b\xbb\xbd\xe6\xc0\x13\x86$\xc3\xca0\x075\x7f\xe5|h\xa7\x1f\xdc\x86\xbe\x18\xd4\x1e/xs\x1f\x05\xbf8\x13z\xbc\xc8\x0cf\x81\x0f6\xd7\xa4\xe2z\xbd{,\xf3C1\x17\xf6\x92B\x0f\xd1I\x0cD\xa75\xef\xebeG\x1e\xefA\x83_\x09+q\xec\x0f\xf1\xcbe8\x1d&\x16[\x0eA|\xd1\xf34C\x90?\x0d\xa7\x04\xcc;\xfc-\xf0\xc1f\xf68T\xad\x97\xfb\xb4o\xb5\xb0\xa0\x83\x7fg\xf7\x1a\x86\xbd\xeb\x18\x0e&}\x9c\xc0\x10\x1f\xef<\xc1\xed\xa3\x19\xaaC\x5c\xef9>\xa6\xfd8\xa22n\x18\x86_\x17^G\xa1\x9cK6O\xe8s\x0e\x1fl\xe6\x87PQ\xf1\xa1Sh)X\xd8I\x83\xb0\x17f\xb8\x8eB\xafP\x8f\x83\xcfQ6_I\x99\xd0F\x93!\xbf\x10B/m\xb9\x85\xc0\xf7\xa0\xdc\x9ed7\xe7.\xce\xeb\xfd8\xc4+\xf0\x01\x95o\xca\xd6\xcb\x85\xe5~\xb1\x0a\x1f\xeeu\x8aK\x87/Xo\x85\x15A\xfa\xfcw\x17\x03\xc8\x22\xbdW\xbbq\xe4a\xd0B\x98\x8f\xa1~T\xfe\xf7\x8b\xe5v\x9c5_21\x98\xc4*\x0b\x02\x1f0\xf5M\xb9\xea\xfc\x9aP\x86\xe5HK\xc1\xc2\x8a\x9a\xfb\xed\xf5}\x15\x9b8j0i\xa3\xcd\x9a\x86\x98\x9e\x87\xbf\xf3\xd8\xeb\x17\x1e\xf3\x93\x86\xc1o+k8\x12#\xf0\xc1f\xa9\x0ascM\x04\x0b\x07\xa0\xd0\xdbUgh\xf3\xc9@\xce\x95\xad3t\xfd\xacf\x88\xd9\xb8\xca\x00\xa1g7\xd6;\x0d\xc1\xef\xb8\xc1\xaf\x1e4\x09\xc8\x02\x1fl\xd6\x87Pj\x88\xe9\x99\xf5r\xa1\x15u\xce\xaf\xba\x1eBQ\xf38\x0c[5t\x1dF\x0e\xf6k\x86\x99\xed\xb6\xea\xce\xf54\xf8\x85/\xdd\xef\xb4\xfcZ\x13\xf8`\x83\xc2^\xd5z\xb9\x812,\xd0\x8e\xbd\x1a\xfbL\x06\xf0\xbe\x12N\x11\xa9\x13\xce\xc6w\xdec\xea\x0c[>^\xf6\xba\xb2\x1d\x0f~\x93\x06\xa1O\xe0\x03>\x15\xe6RCLO\x86X\x12\x01\xd6\xf4\xe5\xaa\xcep\xee\xe9\x12\xee~w\xc5\x0f7\x04\x93\xca\xa1\
xdc\xdb\x91\x838)\xa5hp\xdb\x1b+\x86\xbe:\xc3\xe0\xb5\x83\xb1\xc0\x07\xc3\xff\x00\x1ae\xe9\xde\xbb\xdb\xda_\xc0\xe2\xea~\x00_\xb6\xfcw\xbe\xd2\x1e\xb1\xf2\xfe\xea\xac\xeaq\xfd\xfa{O\x9c\x14Vg\xad\xd9\xedx\x1f\x9b\xacN8\xae]\xd0[\xe0\x83\xcdx\xd3H\xbd)\x1c*\xc3\x02\xab\xb5\x84\xc9\x1a\xa3\x15\x86\xbd\xbd\xf2\xe2Q\x9d\xf7\x9e\x19#\x07\xe3\x9aw\xf5(\xde\x97\xd7H\x0b\x04>\x18\xb0\xf8f\x99*\xc3r6\xef2=\xc0B\x7f\x9bm\x07\xb4\xfd\x15\x1dw\xdd\x12,\x17\xb3J<\xc5 \xf3\xb4\xe6]N\xe2}n\xaa\xab\xb6nH\xe0\x83a\xab\x1a\x12)4\x11\xacEk\x81/\x86\xc7\x83\x15\x1dw\x08{u\xceQ\x1c\xd7x\xef\xa9\x13f\xeeo\xf8\xfbTkaW\xe0\x83\xe1\xf6 \x847\xdc\xd496\xc7\xca\xb0\xc0\xda\xb4\xd9#7Y\xd1{J8\xe6\x875v\xad\xac/\x18O#\x19\xd7\xbc\xeb\x8d\x1c\xda\x8d=\x9b[m\xdd\x9e\xc0\x07\xc3}\xa3H\xf5\xeeY/\x17\x96\xa3\xeeyW\xfb-\xfd\xad\x87\xbf\xe3\xdd\x86\xef\x0d\xf3\xdc\xcf\xa8f\xb0\xbc\xcajN\x02\x8b_8\xeb\x0e\xed\x9el\xe0\xd0n\x9d\xd7\xc8\x99\xc0\x07\x9b\xed\xb0\xe2\x9b\xe1\x912,\xd0\xbe\xd8sUk\xa8r\xd1\xb5cc/\xfe\xe3\x86\xbf6\xefl\xdeIV\xaf\xb7i\xdcp\x12XQ\xb3\xbd\xb6\xb2\x8e\x96j\x09A\xb4\xed\x1e\xc8\x18n\xeb|)?\x15\xf8`C\xc5o\xe2\xa9\x0f\x81\xab!T\xf8\x87\x0e\xab\xbb<X1o9\x95\xd8\xb3\xf7\xfe\x8a\xdeS\xea\xac\xa6\x11<mz\x9aH\xc3\xa1\xdd\x87qX\xb9k\xc21=/\x8f\xed\xb4\x8d\xe0\x17\xc3^h\xc7:\xe7J\xd6\x0e\xc1\x02\x1f\x0cO\xd5\x1b\x80\x155`\xb9\xea\xd6\x8f\x0b\xbdV\xa7MBL\x08\x14\xe5v\x9e5\xef\xd9\x9b7|\x84@\xfa\xb5\x1a\xbb\xce}\x9aH\xc3\xa1\xdd.\xce\xda\xbd}\xfev\xef\x04\xbf\xf1<\xc7\x19\x03ch\x8f\xed\x1a\xbb\x9f5\x19\xa9\x11\xf8`@\xe2\x9b\xc5n\xc5\x1b\xc4\x89\x96\x82\xe5\x89\x1f\xc2u\xcf\xad\x0a\xa1\xef\x83TH\x08\xa1+\xf4\xb2\xc5\xa0\xf7<\x11\x06\xae\xdb|\x1c\x0dJ\xb0\x04\xe3\x05\xeby\x86\xb0Xwh\xb73\xefa\xb1\x8d^\x9f\xc8\x12\xde\x83C\xef\xeb\xf7\xe2\xf3Z\xc4\xa0~oV\xa8\xae\xf9\xfc.\xf4\xe5\xfd\x0d\x7f\x9a0(Uo\xcez\xf7`5\xc6\xe5\xf6\xa2\xc1\xfe\xbb\xb7A\xa1\xfc\xe0\xbf\x0doMfh^\xc7\xfb\xfc\xa0\xc5\xc7P\xd4\x0c\x1f\xcf\x16\xfd\x22\x19\xc2b<'\xf1y\x9d\xb6\x0a\x01iV\x9d\xbf\x15\xdb\xaf\xf9\xbc>\x8e\xe1\xee\xf6\
xe7!\xdc\xde_\xe0~\x9f4-\xcc\xac\x87\x0f\x06\x22\x9eg\x93z\x03y\xba\x84\xea\xfe\xc0\xf4\x00sY^\xbc\xbb\xc0M4\x0d{{5CW\xads\x06\x1b\xac\xa6q\x1b4\xdbh\xb3\xd3\xac\xfe\xd0n\xb1\x84\xe2\xd5\xcb\x08|\xb3,\x12\xf6\x8e\xe79\x0f[\xe0\x83a\x84\xbd\xaa\x19]\xca\xb0\xc0\xeaC_\xe8\x81:^\xf2\xdd\xdc\x86\xbd\xba_\xe6*\xcf+\x8b\xef'u{\xec\xc6-/\xcdXd\xf5\x86\xa6\xd7>kw\xc6p\xee\xb2\x85\xb07W\xc0\x16\xf8`\x18\x8e*z\x04\x0a\xeb\xe5\xc2ZB_\xf8p~\xba\xa4\x9b\x0f\xe7\x09\x8e^\x0b{U\xe7\x0e\xd6\x99H0\xc9\xea\xf50\xb6~Np\xc3Y\xbb\xbbq\xb6\xf2\xba\x8cV|\x7f\xef\xce\x1b\xf6\x04>\x18\x808\x8b.\xb5\xac\xd2UG\xceu\x81M\x0d}\xe1t\x8b\xb7\xb3\xf6&U\x84\xf3\xbf\xde)owo\x8e/r;\x15\xef'!P\xd4\xe9\xb5jm(wJ{\x85\x10\xf9\xac\xe6\xee\x8f\xe7-m\xd3\xc2q\x86\xa0\xfd [~/n\xb8\xfd\x07\x8b\xbe\x8f\x0b|\xd0\x7fUo\x02cM\x04k\x0f}!\xc4\x8c\xca\xed\xc9\x02\xc1\xef\x22\x06\xbd\xd0\xab7\x99\xb1O\xd5\xd0\xee(\x11\xf6FY\xfd\x922\xc5\x92\x8b\xb7\x8f\x1b\xb4\xd3d\x8d\xcf\xebe\xecu\xfblvs\xce\xe6YK7\x1d\x1e\xfb\xd3\x18\xf4\xc6m\xb4u^\xde\x88\xbfD\x88\xb6\xbe\xf2\xa5\xd3\xac\xc12E\xd1\xd9\xf5{\x1f\xee\xad\xe3xc\xfd\xae\xd4\xac\xbc0\xe4\xb2\xe7\x99\x85n\x89\x7f\xbb\xe1os'\xf1\x9es\x15\x03\x5cx_:\xb1:No\x9e\xdb{\xf1y\xbd}~\xef\xd5\xf8\x5c\x09A\xf1e|\xaeO\x971\xc1NY\x16\xe87\xbd{\xd0C\xb1\xc7OM\xcca>\xb7\xdf\x0fn]:.C\xba\xd0\xdfo\x91E\x96\x9e\xda\xffD\x8f\x00\x00\x02\x1f\xf47\xec\x8d\xb2t\x11\xe5p\xfe\x87\x89\x1a\x00\x08|\xd0cE\x96.\x9bp\xa8\x0c\x0b\x00\x02\x1f\xf4T\xac\x80\x9f*\xc3r\x91\x98\xc1\x07\x80\xc0\x07\xf4@Qq\xbd\xf5r\x01\x10\xf8\xa0\xafbQ\xd4\xd4\xf4\xfe\xe3\xb8\x1e%\x00\x08|\xd0\xc3\xb0\x17j9\xa5&bX/\x17\x00\x81\x0fz.\x0c\xd5\xa6&j\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xec\x12*\xf2+\xc3\x02\x80\xc0\x07=V\x15\xe6\x94a\x01@\xe0\x83\xbe\x8aeX\x1e&v9\x8b\xcb4\x01\x80\xc0\x07=5\xa9\xb8^\x19\x16\x00\x04>\xe8\xab<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ck)\x00\x04>\xe8g\xd8\x0beX\x8a\xc4.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb1\x10\xf6ReX\x0a\x135\x00\x10\xf8\xa0\xa7\xf2<\xdf)/\x1e%v\xb9*\xc3\x9e2,\x00\x08|\xd0cUan\xac\x89\x00
\x10\xf8\xa0\xa7\xf2<\xdf\xcf\xd2\xeb\xe5\x9eY/\x17\x00\x81\x0f\xfaM\xef\x1e\x00\x02\x1f\x0cU\x9e\xe7E\x96.\xc3\xf2\xc4z\xb9\x00\x08|\xd0\xdf\xb07\xca\xd2eVB\x19\x16\x135\x00\x10\xf8\xa0\xc7\x8a,]\x86\xc5z\xb9\x00\x08|\xd0Wq\xbd\xdc\x83\xc4.\x17e\xd8\x9bh)\x00\x04>\xe8\xaf\xa2\xe2z+j\x00 \xf0A_\xe5y>\xce\xd2eX\x8e\x95a\x01@\xe0\x83\xfe\x86\xbd:\xeb\xe5\x16Z\x0a\x00\x81\x0f\xfa+\x0c\xd5\xa6\xca\xb0\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xecr\x95)\xc3\x02\x80\xc0\x07\xbdV\x15\xe6\x0aeX\x00\x10\xf8\xa0\xa7b\x19\x96\x87\x89]\xce\x94a\x01@\xe0\x83~\xab\xea\xddS\x86\x05\x00\x81\x0f\xfa*\xcf\xf3\x10\xe6\xb6\x13\xbb\x842,\xe7Z\x0a\x00\x81\x0f\xfa\x19\xf6\xea\x94a\xd1\xbb\x07\x80\xc0\x07=\x16\xc2^j\xbd\xdc#\x135\x00\x10\xf8\xa0\xa7b\x19\x96G\x89]\xae\xca\xb0Wh)\x00\x04>\xe8\xafI\xc5\xf5cM\x04\x80\xc0\x07=\x95\xe7\xf9~\x96^/\xf7\xccz\xb9\x00\x08|\xd0oUeX\xc6\x9a\x08\x00\x81\x0fz*\xcf\xf3\x22K\xaf\x97\xfb\xd4z\xb9\x00\x08|\xd0\xdf\xb0\x17\xca\xb0\xa4\xca\xac\x842,\x85\x96\x02@\xe0\x83\xfe\x0aC\xb9\xa92,\x87\xca\xb0\x00 \xf0AO\xc5\xf5r\x0f\x12\xbb\x5cX/\x17\x00\x81\x0f\xfa\xad\xa8\xb8\xde\x8a\x1a\x00\x08|\xd0Wy\x9e\x8f\xb3t\x19\x96g\xca\xb0\x00 
\xf0A\x7f\xc3\x9e\xf5r\x01\x10\xf8`\xe0B\x98K\x95a9R\x86\x05\x00\x81\x0fz*\xae\x97\xfb8\xb1\xcbUV]\x84\x19\x00\x04>\xe8\xb0\xaa0W(\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<L\xecr\xa6\x0c\x0b\x00\x02\x1f\xf4[U\xef\x9e\x89\x1a\x00\xac\xc5\x1b\x9a\x00\x16\x17\xcb\xb0l'v9~\xf5\xea\xd5\xb9\x96\x02\xe8\x9f\xaf?\xf8\xc2$\xfe\xb3\xf8\xf2\x8bo]\xf6\xf11\xe8\xe1\x83\x05\xfd\xc9?\xfe\x81\xf0\xc5)\xd5\xbb\xa7\x0c\x0b@\xbf\x9dd7+'\xbd(\xc3\xdfi\xb9\x8d\x05>\xd80\x0f\xbe\xf3c\xa3,\xbd^\xee\x91\x89\x1a\x00\xfd\xf5\xe5\x17\xdf\x0a\x81\xef*\xfe7\x14\xd5\x7f\xbf\x0c}\x97\xe5V\x94\xdb=\x81\x0f\x06\xee\xcf\xfd\xe1\x1b\xd9\xaf}\xf0\xab\x9fO\xecrU\x86\xbdBK\x01\xf4\xde\xeb#9\xa1\xdej(\xc3\xf5\xbd0\xe4[n;\x02\x1f\x0c\xd4\xe7^\xfc`\xd5.\x86r\x01\x86a\x92\xdd\x9c\xa23M\x18\xee\xfdF\x19\xfa\xce\xbb:\xdc+\xf0\xc1\x9c~\xea\x0f~4;\xfb\x95\xd3\xd4.\xa1\x0c\xcb\x89\x96\x02\xe8\xbf/\xbf\xf8V85\xa7\xea==L\xde\x0b\xc3\xbd/\xe3p\xefH\xe0\x83\x9e\xfb\xdd\x7f\xff\xdd\xaa]\xc6Z\x09`P\xea\xae\x94\x14\xce\xeb\x0e\xc3\xbda\x92\xc7I\xb9\xed\x09|\xd0C\x7f\xed\xbb\xf7\xb2o\x9e\xffFj\x97\xa7\xd6\xcb\x05\x18\x96/\xbf\xf8V(\xafu\xd6\xf0\xd7BA\xfe\xe7q\x92\xc7x]\x93<\x04>h\xe8\xc7\xff\xf83\xd97O\x93a/\x9c\xe3Qh)\x80A\x9a\xcc\xf9{a\x92\xc7\xfb\xe5v\x19'y\x8c\x04>\xe8\xb07\x7f\xe7\xc7\xb3\xef|\xe7;\xa9]\xac\x97\x0b0P_~\xf1\xad\x10\xf8\xae\x16\xb8\x890\xdc{\xb7\xa6\xdf\xbe\xc0\x07\x1d\xf3\x17\xfe\xef\x0fe\xbf\xf6\xcb\xff\x22\xb5\xcbE\x19\xf6\x8e\xb4\x14\xc0\xa0MZ\xba\x9dP\xd3\xef\x838\xdc{\xb8\xcc\xe1^\x81\x0f\x1a\xf8\xb1\xab\xca?\x19eX\x00\x86\xaf\xed/\xf6a\xb8\xf7k\xd9\x12k\xfa\x09|P\xd3_\xfd\xbd\x9f\xa8*\xc3\xf2\xec\xd5\xabW\xa7Z\x0a`\xd8b\x89\x96\xe3%\xdd\xfcmM\xbfV\x97p{\xc3\xd3\x06\xd5\xc2D\x8d\x97\xdf\xfc\x9f\xc9}\xfe\xd1\xcf\xfd\xcd_\xea\xc2\xd4{\x00V\xe2<\x86\xb3e\x09\xc3\xbd\xbb\xe5\xe7J\xe8M\x0c\xdb\xa4\x0c\x9a\x97\xf3\xdeX\xfe\xea\xd5+O\x19D[_\xf9\xd2i\xfc#\xfb\x84P\x86\xe5W\xff\xd9\xaf\xcc\xfc\xbdb\xffg\xb2\x9f9\xff\xef\x1a\x10\x80e:\x8e\xc1\xef\xb4\xe9/\x1a\xd2\x85)~\xf4\xf7>\xf7\xfd\x7f\x87\xf5rSeXv\xbf\xf4\x
d3\xd9\xdf\xf8\xed\xefh4\x00\x96-\xf4(>\xbf]\xc2\xad\xc9$\x0f\x81\x0f^\xf3c\xbf\xfd\x97\xb3{\xdf\xf8\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf|\xfbG\x92eX\xde\xfeS?\x9c\xfd\xf0\xef\xfe\x91\x86\x03`U>Z\xc2-\xbb\xa9\xe9wT\xa7\xa6\x9f!]\xb8\xe3\xf3?\xfb\x0b\xbf\xf3\x13\xdf\xd9\xfa\xd3\xb7\xff\xff\xdc_|\x91\xfd\xab_\xfa\xe73\xf7\xff\xf9\x9f}+\xfb\x85\xff\xf4m\x0d\x07\xc0\xba=\xcbn\x86{\xa7\xae\xf7k\xd2\x06Don\x7fu\xf2\x13\xd9\xc7a/\xf8\xcc\xb7\xaf\x93\xbf\xf3\xb3\xd9\xff\xd2p\x00tAX\xc2\xed\xe1\xd7\x1f|!\x14\x85\xbe\x9d\xe4\xf1\xfdE\x00\xf4\xf0A\x0c{\xd9\x94\xd9V[?\xf2G\xd9\x9f\xf8\xd1\xdf\xca\xfe\xe5\xf3g\x9f\xfa\x9d\xbf\xffs\x7f+\xfb\xbb\xbf\xf1\xdf4\x1e\x00]\x14z,Bo\xdfQX\x03X\xe0C\xd8\x9b\x11\xf6\xee\xfa\xb3\x9f\xfd\x83\xec\xff\xfc\xfe\xbf\xc9\xfe\xf5\xbf\xfb\xb7\x1f\xfd\xff\xfe\xfdQ\xf6O\xb7~\xc4\xb9{\x00\xf4!\xf8\x1d\x0a|\x08{\x0d\xea(\xfd\x9d\x9f\xfcV\xf6\xcb\xff\xf1<{\xe7\x8b\x7f^\x19\x16\x00\xba\xec\x13C\xbb\x02\x1f\xc2^C\xef\xfc\xe07\xb3\xbf\xfe\xdd\xdf\xd2\x80\x00t\xd1\xd4\xc9\x1b\x02\x1f\xc2\xde\x1c\xfe\xde\x1f\xfeF\xf6\xd3\xbf\xff_5$\x00]\x10\x86m\xc3\xe7\xda\xd1\xac\xd58\x04>\x84=\xa1\x0f\x80~\xba\xc8n\x86mO\xee\xce\xc8\x9dFY\x16\x84\xbd9\xfd\x97\x1f\xf8\x5c\xf6\xd3\x99\xc0\x07\xc0\xca5^bM\x0f\x1f\xc2\xde\x1c\xdez\xf5?\xb2\x9f\xff\xbd_\xd7\xa8\x00\xacJ\x18\xb6\xbd\x9d\x84q\xd9\xf4\x97\xf5\xf0!\xec5\xf4\xf9\xfc\x7f\xff\xca\xcf\xff\xee\xaf\xff\xa2V\x05\xd8h{\xe5\xf6x\x05\xf7s\x16C\xded\x91\x1b\xd1\xc3\x87\xb0\xd7\xcc\xf1o^\xfc\xe2X\xab\x02l\xb6\xaf?\xf8B\x98\x05\xfbp\x89w\x11\x86m?*\x9a\xdc\xc6\x8d\x09|\x08{\xc2\x1e\x00\xcd\xc2\xde\xa8\xbcx\xb1\x84\x9b\x9e\xba,Z\x1b\x0c\xe9\x22\xec\x09{\x004s\xd8\xf2\xed\x85a\xdb\xa3\xd7k\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@M_\x7f\xf0\x85{\xe5\xc5e\xb9m-xS\xb7k\xdd\x16\xf3L\xc2hJ\x0f\x1f\xc2\x9e\xb0\x07@}\xfb\x0b\x86\xbd0l[d5j\xe7\x09| 
\xec\x01\xb0\x1e\xc5\x9c\xbf\x17\x96<;jR;O\xe0\x03a\x0f\x80\x15\xfb\xfa\x83/\xec\x95\x17\xf7\x1b\xfc\xcaB\xb5\xf3\x04>\x10\xf6\x00X\xbd\xba\x9f\x0d\x1f-y\xb6h\xed\xbc6\x99\xb4\x81\xb0'\xec\x01P\xa1f)\x96Vk\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@\xb5Y\x9f\x0fa\x12\xc6$\x06\xbd\x97]=x\x81\x0faO\xd8\x03\xa0\xda\xeb\xb5\xf7ZY\xf2L\xe0\x03a\x0f\x80\x0e\xf8\xfa\x83/\x84\xcf\x88P\x8ae\xa5\xb5\xf3\x04>\x84=a\x0f\x80\xd5\xd9+\xb7w\xb3%,y\xb6*&m \xec\x01\xc0\xc0\x09|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0\x09|\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\xc0\xe6\x06>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x16\xf3\x99\x96\xc3\xd9=a\x0f\x00`\xc0\x81\xaftT\x86\xb4\x91\xb0\x07\x000\xc0\xc0\x17{\xf7B@\x1b\xb7p[\xc2\x1e\x00@\xd7\x02_\xe90^.\x14\xae\x84=\x00\x80\xee\x06\xbe\xdb`u\xbf\x0cm\xfb\xc2\x1e\x00\xc0\x80\x02_\x0cx\xf7\xa7\x84?a\x0f\x00`\x08\x81oJ\xc0{\xd8d\xf2\x86\xb0\x07\x00\xd0\xe1\xc0\x17\x83\xdd\xc3\x1a!P\xd8\x03\x00\xe8c\xe0K\x04\xbb\xca\xe0%\xec\x01\x00\xf4;\xf0%'o\x08{\x00\x00=\x08|eh\x0b!\xeb~b\x97Ca\x0f\x00\xa0\xc7\x81/\xab\x1e\xb6\xdd}}\xf2\x86\xb0\x07\x00\xd0\x93\xc0\x17\x83\xdcn\x8d]\x0f\x85=\x00\x80\x1e\x06\xbel\xc6p\xed\x14ca\x0f\x00`}\xf2W\xaf^\xcd\xf5\x8be\x80{Y^l\xd5\xdc\xfd\xa2\xdc\xb6\x85=\x00\x80\xd5\x9b\xab\x87/N\xd6\xd8j\xf0+\xc2\x1e\x00@\x9f\x02_6\xc7\xd2i\xc2\x1e\x00\xc0z4\x1e\xd2\x8d\x935^\x08{\x00\x00\xfd0O\x0f_!\xec\x01\x00\xf4G\xa3\x1e\xbe7\xb7\xbfz\xaf\xbc\xb8\xcc\x9a\x9d\xbf'\xec\x01\x00\xacQ\xd3\x1e\xbe}a\x0f\x00`\xd8\x
81\xefpE\xc7%\xec\x01\x00\xb4\xa4\xf6\x90\xee\x9b\xdb_\xdd)/\xbe!\xec\x01\x00\xf4K\x93\x1e\xbeU\xf4\xee\x09{\x00\x00\xeb\x08|q\xb2\xc6\xfe*\x0e\xa8\xbc\xaf=O\x0b\x00@{j\x0d\xe9\xc6\x955\xde_\xe1q]\x95\xdbQ\xb9M~\xf3\xe2\x17_z\x9a\x00\x00\x96\x1f\xf8.\xcb\x8b\xfbk8\xbe\xebr;\x09\xe1\xaf\x0c~\xe7\x9e.\x00\x80%\x04\xbe8\xc4\xfa\xbc\x03\xc7z\x91\xdd\xf4\xfa\x9d\xe8\xf5\x03\x00h7\xf0M\xca\x8b\x83\x0e\x1d\xb3^?\x00\x80\xb6\x02_\x9c\xac\xf1\xbd\x0e\x1f\x7f\xe8\xf5+\xca\xe0w\xe2\xa9\x04\x00\x98\xaej\x96\xee\xb8\xe3a\xefH\xd8\x03\x00H{\xa3\xe2\xfa\xc3\x8e\x1d\xaf\xe1\x5c\x00\x80\xb6\x02_\x9c\xacq\xbf#\xc7i\xc2\x06\x00@\xdb\x81/\xeb\xc6p\xeeq\xa67\x0f\x00`!S'm\xbc\xb9\xfd\xd5Qy\xf1bM\xc7\xa4\xe82\x00@\x8bf\xf5\xf0\x8d\xd7p,\xc71\xe4\x9dzZ\x00\x00\x86\x13\xf8\xf4\xe6\x01\x00\xac:\xf0\xbd\xb9\xfd\xd5\xfdl\xf9\x935\x9e\xc5\x90\xa7\xa4\x0a\x00\xc0\xaa\x03_\xb6\xbc\xde\xbd\xd0\x9b7\x89A\xefR\xd3\x03\x00\xac\xc6'&m,i\xb2\xc6Y\xa6@2\x00\xc0\xda\xbc\xde\xc37n\xe9vC\x81\xe4I\x0cz\x97\x9a\x19\x00`8\x81/\xf4\xe6\x85!\xdb\x89\xa6\x05\x00\xe8X\xe0{s\xfb\xab!\xec\xcd3YCo\x1e\x00@\x1f\x02_\xd6\xbcw\xcfrg\x00\x00=\xf0\xd1\xa4\x8d\x06\x935Bo^\x98|a\xb93\x00\x80\x9e\xb8\xed\xe1;\xac\xd8Oo\x1e\x00@\xcf\x03\xdfx\xc6\xf5\x96;\x03\x00\xe8{\xe0\x8b\x935\xb6\xee\xfc\xccrg\x00\x00C\x0a|\xd9\xc7\xbd{z\xf3\x00\x00\x06\x1a\xf8B\xc0\xdb\xd7\x9b\x07\x000L\xff_\x80\x01\x00e|\xfb\xc4\xd4o\x058\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = b"\x00\x11\x0bF\x95g\x00p\x00a\x00r\x00a\x00m\x00e\x00t\x00r\x00i\x00c\x00f\x00i\x00t\x00t\x00i\x00n\x00g\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x1c\x053\xe8'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00z\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x10\x0a1\xdeg\x00m\x00o\x00d\x00e\x00l\x00-\x00v\x00i\x00e\x00w\x00e\x00r\x00.\x00p\x00n\x00g\x00\x1c\x053\xf0'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00x\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x1c\x053\xf4'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00y\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00(\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00:\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x9e\x00\x00\x00\x00\x00\x01\x00\x00F6\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00~\xa5\x00\x00\x00x\x00\x00\x00\x00\x00\x01\x00\x006|"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 6,411.863636
| 139,433
| 0.736958
|
from PySide import QtCore
qt_resource_data = b"\x00\x006x\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x006\x05IDATx\xda\xec\xddOl-Y~\x17\xf0\xaaIG\xf9G\xe27\xd2\xf0O#\xe2\xfbX0j\x08\xb2G\x10\xd1 F\xbe\x1d!\x116\xb1Gb\x93\xd5\xbb\xbd`\x91\xc5\xf0\xdc+f\xf7\xca\x12\x8b\xd9\xb5\x1f-$$\x16\xefzE\xc4\x22m/C\x82\xfaZ\x83\xa0\xc3\x1f\x8d\x1dF\x0aC \xcfF\x84\x89\x84F\xf3\x9c\x88\x10 \x89\xa9\xd3>\x9e\xf6\xbc\xb6OU\xdd\xbfUu?\x1f\xa9t\xdf\xf3-\xdf?\xa7\xae\xef\xfd\xdes\xea\xfcN~}}\x9d\x01i\x1b_yk\xaf\xbc\x18\x5c\xbd\xff\xd1\xa1\xd6\x00\xa0k>\xa3\x09\xa0\x96\xfd\xb8\x01\x80\xc0\x07}\xb3\xf1\x95\xb7\x06\xe5\xc5N\xb9m\xc6\x9e>\x00\x10\xf8\xa0g\x8a;\xff\xd6\xcb\x07@\xe7\xe4\xce\xe1\x83\x87m|\xe5\xadG\xe5\xc5E\xf8\xe7\x9d\x1f?\xbez\xff\xa3\x0b\xad\x03@W\xe8\xe1\x83\xb4\xbd\xd7\xc2^Ph\x16\x00\x04>\xe8\x8f\xfb\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x0cu\xc3\xf2b\xf3\xbe\xab\xb2\x9b\x9e?\x00\x10\xf8\xa0\xe3R\x134\x0a\xcd\x03@W\x98\xb4\x01\xf7\x88\xa5X^V\xec\xf6\xf6\xd5\xfb\x1fM\xb4\x16\x00m\xa7\x87\x0f\xee7\xaa\xb1\x8f\x12-\x00\x08|\xd0au\xc2\xdcn\xec\x09\x04\x00\x81\x0f\xba\xa4\x0cq\xa3\xec\xd3\xa5X\x1e2\xd2b\x00\x08|\xd0=\xfb\x0b\xda\x17\x00\x04>X\xb5X\x8ae\xab\xc9\xaf\xc4\x1eA\x00\x10\xf8\xa0#\xa6\x09oz\xf9\x00h5eY 
\x8a\xabg|w\xca_W\xa2\x05\x80\xd6\xd2\xc3\x07\x9f\x98\xa5\xa7n\xa4\xf9\x00\x10\xf8\xa0\xfdf\x09mO\xac\xaf\x0b\x80\xc0\x07-\x16'^l\xcex3\xce\xe5\x03@\xe0\x83\x16\x1b\xb5\xe46\x00@\xe0\x83y\x8b\xabe\xec\xcc\xe1\xa66\x95h\x01@\xe0\x83v*\xe6x[\x02\x1f\x00\xad\xa3,\x0bk-N\xb4\xb8\xc8\xea/\xa5V\xc7\xe3\xab\xf7?\xba\xd0\xba\x00\xb4\x85\x1e>\xd6\xddh\xcea/(4+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x86\xb2\xbdl\xf6R,\xf7\xdet\xe6\x5c>\x00\x04>h\x85E\x8625\xf9\x00\x10\xf8`\x95b)\x96\xdd\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96\x87(\xd1\x02\xc0\xca\xe9\xe1c\x1d\xed-)\xec\x05\xce\xe5\x03@\xe0\x83\x15Xf\x08\x1b)\xd1\x02\x80\xc0\x07KT\x86\xafay\xb1\xb5\xcc\xbb\xccnz\x14\x01@\xe0\x83%\x19\xad\xe0>\x0d\xeb\x02\xb0R&m\xb06b)\x96\x97+\xba\xfb\xb7\xaf\xde\xffh\xe2(\x00\xb0\x0az\xf8X'\xa35\xbdo\x00\x04>\x10\xf8\x96\xe0I\xeca\x04\x00\x81\x0f\x16\xa1\x0c[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6vy\xb1\xd3\x82\x87\xb2\x19{\x1a\x01@\xe0\x839kSY\x14\x81\x0f\x80\xa5S\x96\x85^\x8b\xab\x5c|\xb7\xe6\xeeW\xb7\xbf\xd6\xf0n\xc2\xef\xbd\xca\xea\x9f#\xf8\xc5\xab\xf7?:st\x00X\x16=|\xf4\xdd\xa8\xc6>\x97\xe5\xf6N\xb9\x0d\xcam\x9a vV\x06\xb8A\xbc\x8d\xd3\x1a\xfb+\xc4\x0c\x80\xc0\x07s\x94\x0aW'\xd9MA\xe4A\xb9\x8d\xcb\xed\xd5,w\x14ocX\xfe\xf3\x8b\xe5v\x94\xd8\xf5\x89\xf5u\x01\x10\xf8`\x0e\xcaP\x15\xd6\xb0}}\x985\x0c\xbf>/\xb7\xc7e8\xdb[\xc4\xea\x17a\xb8\xb6\xdcF\xe5??[n\x07\xd9M\x0f\xe2\xebF\x8e\x10\x00\xcb\xf2\x86&\xa0\xc7\xee\xf6\xee\x9d\x97\xdba\xe8\x85[\xd6\x9d\xc7\x1e\xc3\x22lqvn\xd8v\xee<\xb6C\x87\x08\x80e\xd0\xc3G/\xc5U-B\xb8\x0aC\xaba\xd8v{\x99a\xef\x9e\xf0w;\xdc\xfb8>\xa6G\xb1\x07\x12\x00\x16N\x0f\x1f}\x16\x86m/\xda\xf4\x80\xe2\xe3\x19\xc5s\xf8\x9c\xc7\x07\x80\xc0\x073\x06\xab6?\xbe0\xdc\xfb\xca\x91\x02`\x19\x0c\xe9\x02\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x08|\x00\x00\x02\x1f\x00,I\x9e\xe7\x8f\xcam\xa8%@\xe0\x03\xa0\x9fao\xbb\xbc\xb8\xd0\x12 
\xf0\x01\xd0\xcf\xb0\xb7_^|\xa3\xdc6\xb4\x06,\x96\xb5t\x01Xv\xd0{T^\x8c\xcbm\xf7\xf6g\xd7\xd7\xd7\x13-\x03\x02\x1f\x00\xfd\x08{a\x08\xf7\xb8\xdc6\xb5\x06,\x8f!]\x00\x96\x15\xf6n\x87p_\x0f{\xa7Z\x07\x16K\x0f\x1f\x00\x8b\x0ez\x9f\x1a\xc2\x05\x04>\x00\xfa\x13\xf6\xea\x0c\xe1N\xb4\x14,\x96!]\x00\x16\x15\xf6\x1e\x1a\xc2\x05\x96L\x0f\x1f\x00\xf3\x0ezM\x87p'Z\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b\xc0\xbc\xc2\xde(\xbb\xe9\xadk\x14\xf6\xd4\xe0\x83\xc5\xd3\xc3\x07\xc0\xacA/\x0c\xe1\x1e\x96\xdb\x13\xad\x01\x02\x1f\x00\xfd\x0b{a\x08w\x5cn[S\xde\x84\x1a|\xb0\x04\x86t\x01\x986\xec\x8d\xb2\x9b!\xdc-\xad\x01\xed\xa6\x87\x0f\x80\xa6Ao\x9eC\xb8\x13-\x0a\x02\x1f\x00\xed\x0a{\xb3\x0e\xe1\x02+`H\x17\x80\xbaao\x94\xcd\x7f\x08w\xa2ea\xf1\xf4\xf0\x01P\x15\xf4\xcc\xc2\x05\x81\x0f\x80\x1e\x87\xbdAvSHy!C\xb8j\xf0\xc1r\x18\xd2\x05\xe0\xa1\xb0\xb7W^\x9ce\xce\xd7\x03\x81\x0f\x80^\x86\xbd0\x84\xfbA\xb9m,\xf0n\xd4\xe0\x83%1\xa4\x0b\xc0\xdd\xa07\xc8\x168\x84\x0b\xac\x86\x1e>\x00n\xc3\xde\xb2\x87p'Z\x1d\x04>\x00\x96\x17\xf6\x961\x84\x0b\xac\x88!]\x80\xf5\x0ez\x83luC\xb8\x13G\x00\x96C\x0f\x1f\xc0\xfa\x86=\xb3pA\xe0\x03\xa0\xc7ao\xe5C\xb8j\xf0\xc1\xf2\x18\xd2\x05X\xaf\xa0\x17V\xcd\x08AK\xaf\x1e\xac\x11=|\x00\xeb\x13\xf6\x86\xe5\xc5EK\xc2\x9e\x1a| \xf0\x010\xe7\xb0W\x94\x17\x1fff\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\x9d\x96=\xb4\x89\xa3\x03\x02\x1f\x00\xf31\x88\xe1*\xcc\xc6\xdd\x8e?\xdb\xd1, \xf0\x01\xd0\x13\xd7\xd7\xd7g1\xec}J<\xa7/x\xfdr\x19\x81p\xe2\xe8\x80\xc0\x07\xc0\xe2\xc3\xe0$\x15\xbe^\x0b\x84\xc3L\xcf \x08|\x00\xf47\x10\x96\xe1\xefbA\xb7\x0d,\x81Y\xba\x00$\xc5\x19\xbe\x9bZ\x02\x04>\x00\xfa\x19\xf6\xc2,\xdf\xfd9\xdf\xac\x1a| \xf0\x01\xd0\x22a\x09\xb6T\xed\xbe\x83r\xbb\xd2L 
\xf0\x01\xd0Aq\xd2\xc6\x93\xc4.\xe7\xd7\xd7\xd7Ey9jx\xd3\x13\xad\x0b\x02\x1f\x00\xedPT\x5c\xff\xf1Po\x19\xfaBa\xe7\xe7\x9a\x0b\x04>\x00:$\xcf\xf3Q\x96.\xc3rrw\xa6m\xf9\xef\x10\xfe\xcek\xde\xfcD\x0b\xc3r)\xcb\x02p\x13p\x06\xd9\xcd\xaa\x14\xc3r\x0b\x13\x15\x92\xabR\x94\x01'\xefq[\x84\xe7_T\xecv\xdfD\x8e\xbd\xec\xa6\xc8\xb3\xf5zA\xe0\x03hE\xa8\xd9\xce>)(<lcH\x89\xe7\xd0}\x18\xff{\x1a\xc3\xd4E\xb9M\xe2\x0a\x1a\x8b\x12\xc2\x5c\xaa\x0c\xcbAy\xff\x17\xf7\x84\xe0\x8b\xd83\xf8A\xea\xc6\xd5\xe0\x03\x81\x0f`\xd1!/\x04\x92\xbd\xac\x1bu\xe5\x06w\xfe\xbd\x93\xdd\xe9m,\x9f\xcbm\x08<\x9eg\x00\x8c=\x9d\xa92,\x97\xd9\xcd\xcc\xdd\x87\xc2\xdcqy\x1b\xe1|\xbe\xa7^q \xf0\x01,+\xe4=\x8a\x01/\x84\x98\xad\x8e=\xfcA\xc5\xf5\xdf\x0b\x81\xe5\xf3\x0cAl\x12\x02`\x9cD1\xad\xaa2,Ey\xfb\xafR7\x10\xce\xe7\x8b\xe1\xfa\xbe\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11\x17\xe5\xf6\xa2\x83a/\x186\xd87\xf4X\x86\x12*\x1f\x94\xcf\xfbU\xb9\x8d\xcbm\xafa\x9b\x85\xfb\xdbM\xecrZ\x86\xb9q\xcd\x9b\x0b\xf7}_}\xbeW^\x9d \xf0\x01\xcc3\xe8=\xcb\xba=\x81`0\xe5\xefm\xdc\x09\x7f\xe1\xbc\xba\x22\x0e\xd5V9\xac\xb8\xbe\xa8\xfb\x00b/\xe0}\x81\xf3\xcc\xab\x14\x96\xcf\x90.\xd0G\xfb3\x04\xbd\xab\xec\x93\xc9\x11a{\xb5\xc2\x90\xb29\xa7\xdb\x08\xc1\xf7Y\x19\xfaNB\xa8\xbbo\xd2D\x9cl\x91\xea\x05=j:\xd9\x22\xec_\xde\xeeA\xbc\xff[z\xf8@\xe0\x03\x98M\xe8Y*CF8\x87\xedI\xcd_\x09\xe7\x94M\xe2vVu~\xda\x92\xbd\x9d}R\x22f\x18/g\xe9\xb1\x0c\xc3\xb5\xbb\xf1|\xbf\xe2vx6\x9e\xe7xX\x11\x82\xf7\xa7<\x1eE\x1c*\xbe=\x9fO\x0f\x1f\x08|\x00sq\x98\x08|!\xec\xdc\xcel=n\xf3\x93\xb8\xd3\xa3\xf6\xbd\xc7\x19\x87f\x87w\xb6iz\x01\xc3\xef\xbc\x88C\xdfE\x8d y8c\x10\x0eC\xbb\x17\xf1>\xf4\xf0\x81\xc0\x070\x97\xa0tV\x86\x99\xb0\xea\xc3\xdd!\xca\xa3r\x1bw\xbd\x06\x5c\xac\x7f7\x8e\xdb\xddz\x82\xa3\xac\xf9\xc4\x94\x10\xfc\xfea\xb9}>\xb1\xcfe\x5c/w\x96\xc7\xfc*N 
\xf9p\xc1\xf5\x03\x81\x07\x98\xb4\x01\xf4U\xe8\xe5\x0bC\x91\xe1\x1c\xb2\xcf\x96Ac\xd4\xc7\x82\xbf!@\x95[\xe8\x81\x0b\xc1\xefq\xb9\xbd\x9b\xd5_\xe2,\xa8\xeaq\xdb\x9f\xd3\xe3\x9c\xc4\xc7\x06\x08|\x00s\x0bB\xa17\xefQ\xe8\x9dj\xd9yy\x8b|\xce\x17\xaf\x85\xbfP\x00\xf92\xf1+!\x18\xfe\xa5\xc4\xf5\xa7\xf3\x1c\xf6\x0e\x8f\xcd+\x13\x04>\x00\xe6\x1b\xfe\xf6\xcbmP\xfe\xf7\xcb\xe5vr\xcfn\x9f\xab\xb8\x99\x91\x96\x04\x81\x0f\x80n\x84\xbf\xb0\xfaF8\x87\xee\xb6\xd7/\x0cu\x87\xd9\xc9\xa9s\xf7\x9e\xdf\xb7^. \xf0\x01\xd0\xee\xe0\xf7q\xaf_vS\xd0y;\xb1k\x08\x84\x85\x16\x03\x81\x0f\x80\xee\x9ay\xbd\x5c@\xe0\x03\xa0\xa5b\x19\x97TQ\xeas\x93+@\xe0\x03\xa0\xdb\xaa\xc2\xdc\xbe&\x02\x81\x0f\x80\x8e\x8a\xeb\xe5\xee$v9\xe9c\xadB@\xe0\x03X\x97\xb0\x17\xd6\xcb-*v\xd3\xbb\x07\x02\x1f\x00\x1d\x16\xc2\x5cj\xdd\xdd\x03eX@\xe0\x03\xa0\xa3\xf2<\x1fd\xe9\xde\xbb\xb0\x1a\x87\x89\x1a \xf0\x01,$\x88\x1c\x96\xdbY\x1cndq\x94a\x01\x81\x0f`%aoX^<-\xb7\xadr\xbb\x88\xe5BXL;\xef&v\x09\xeb\xe5\x8e\x17p\xbfca\x1e\x04>\x80\xbbC\x88\xa1\xf7\xe9\x1bq\x16)\x8bk\xe7\xfb\x14\x8b\x08{\xd9M\xad\xbf\x10\xe6'\xc2<\x08|\xc0\x1a*\x03\xc0~\x0c\x03\xaf{\x11\xc3\x02\xf3i\xe7\xd1\x03\xed|\xebh\xdeeX\xee\x84\xbd[B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 C\xad4\xb7vN\xf5\xee\xcd}\xbd\xdc{\xc2\xde\xad\x0d\xa1\x0f\x04>`\xbd\xecg\xe9\x09\x04#M\xb4\x94v>\x5c@\x19\x96T\xa0\x13\xfa@\xe0\x03\xd6A\xecuJ\x95\x079\xb2\xd2\xc3\x5c\xdayP^<K\xecrY\xb6s\xb1\x80\xbb\x1e\x96\xdb\xb9\xd0\x07\x02\x1f\xb0\xdeR\xbdNW\x99\x95\x1e\xe6e\x5c\xe38\xcc],\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xe2\xbaC\xb5\xe0f\x17\xcb\xb0\xa4\xd6\xcb\x0deX\x8e\x17u\xff\x0dC\x9f\x92- 
\xf0\x01=\x0b\x22!\xec=\xb4\xb4W\xe8\xdd\xb3\xd2\xc3|\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6\x12\xd7\xe9\xdd\x9bO\xa8\xaeZ/\xf7y\xd9\xceg\xcbx,\xf1x\x8eb\x98\x7fH(\xd9r\xec\xc8\x81\xc0\x07\xf4#\x88\x0c\xb2\x87W{\xd0\xbb7\x9f6\xae*w3\xf72,5B_\x08\x97\xc3\x8a\xd0\xb7\xa3\xf6\x22\x08|@?\xa4z\xf7\x8e\xf5\xee\xcdE+\xd7\xcb\x8d\xa1\xafj\x18\xf9I\xec\x9d\x04\x04>\xa0\xa7\x81O\xef\xde\x8c\xe2\x8c\xd7'\x89]B\x19\x96\x95\xb5s\x5c\xab\xf7\xa0b\xb7\xf7\xca\xe7\xb1\xe7h\x82\xc0\x07t3\x8c\x84\xa1\xc6\x87f\x8d\x9e/\xeb\x9c\xb2\x9e\xab\x0as\xa3U?\xc0X\xf7\xef\xa4b\xb7q\x1c\xfe\x07\x04>\xa0c\x86\xa9\x0fx\xcd3s\xa0\xde\xcb\xd2eXNZT\xcc:\x04\xcf\xaa\x99\xbb\xc7f\xee\x82\xc0\x07tO\xf2\xfc=\xcd3S\xd8\xabZ/7h\xcd\xb9q\x0df\xee\x1a\xe6\x07\x81\x0f\xe8\x98\xe1\x03??_\xc0Z\xae\xeb\xa6\xaa\x0c\xcbA\xdb\xda8\x0e\xe1\x17\x15\xbb=\x89u\x1b\x01\x81\x0fh\xbb\xd8\x03\xf5P \x99h\xa1\x99\xdav\x90\xa5{\xefZ[\xee&N \xa9:\x9f\xef\xd0\xf2k \xf0\x01\xdd\x90\xfa\xc0\x16\xf8fSd\xe92,\xfb-/w3\xca\xd2C\xbb\xe1\xb9\x8d\x1df\x10\xf8\x80\xf6\x1b\x0a|\xf3\x17\xd7\xcbM\x95a9\x8d\xa5PZ\xeb\xce\xf9|)\xce\xf1\x04\x81\x0f\xe8\x80\x87z\xf8\xce\x15[\x9eI\xd5Pm\xd1\x85'Q\xbe\x06B\xa0\xbboh7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcb=T^\xe3B\xd3L'Nf\xd8J\xecr\xd4\xa22,u\x84\xf3\x10\xef\x0e\xed\x86\x89&\xdb\xea3\x82\xc0\x07t\xc7C\xf5\xe1|\x98O\x17\xf6\xaa\xca\xb0,}\xbd\xdcY\xc5Y\xc4\xe19\xe9\xd5\x03\x81\x0f\xe8h8\xc9\x04\xbe\xb9\x0a\xbda\xa9\x89\x1a\x87],u\x13B\x9e^=X\xac74\x01\xb0@o\x97\xdb n\xc3x\x19\xca\xb48\x7f\xafy\x80\x0em\xf7,\xb1\xcb\xa5\xde1@\xe0\x03\x96*N\xca\x98h\x89\xb9\x19W\x5c\xbf\xaf\x89\x80\x87\x18\xd2\x05h\xb9X\x86%\xb5^\xeei\x9c\xf1\x0a \xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0Uy\x9eW\xad\x97{d\xb2\x03 \xf0\x01t7\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01tX\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa3\xf2<\x0f\xcb\xd2=M\xec\x12\xca\xb0\x1cj)@\xe0\x03\xe8\xae\xaa07\xd2D\x80\xc0\x07\xd0Qy\x9e\xefe\xd5eX&Z\x0a\x10\xf8\x00\xbaK\xef\x1e 
\xf0\x01\xf4U\x9e\xe7E\x96.\xc3r\xd0\xc5\xf5r\x01\x81\x0f\x80\xec{\xeb\xe5\xa6\xca\xac\x842,&j\x00\x02\x1f@\x87\x15Y\xba\x0c\xcb\xbe2,\x80\xc0\x07\xd0Qq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x80\xc0\x07\xd0]E\xc5\xf5V\xd4\x00\x04>\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x1c)\xc3\x02\x08|\x00\xdd\x0d{a\xbd\xdc\xd4D\x8c0Q\xa3\xd0R\x80\xc0\x07\xd0]a\xa865Q\xe3P\x19\x16@\xe0\x03\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x96\xaa6\xdc\x8e\xbd\xa4@\xc2\x1b\x9a\x00`e\xaa\xc2\x9c2,\xdf\x1f\xee\x86\xe5\xc5v\xdcBX\xbe=\xef\xf1\xedr\x9bh!\x10\xf8\x00\xda\x18^v\x13\xbb\x84\xf5r\x8f\xb5\xd4\xf7\xf9\xf0\x81\x9fo\x0b|\x90fH\x17`5\xc6\x15\xd7+\xc3rO\x08~\xe0\xe7\x86tA\xe0\x03h\x97<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ci\xa9O\xb9x\xe0\xe7CM\x03\x02\x1f@\x9b\xc2^\xe8\x8d*\x12\xbb\x842,z\xf7\x9a\x05>\xa0\x82s\xf8\x80e\x84\x9cavs\x92}\xd8n\xff\x1dz\xb8\xbe\xbc\x86\xe7\xa9\x85\xb0\x97*\xc3R\x98\xa8\xd18\xf0\xedh\x1a\x10\xf8\x80\xd5;~ \xe4\x0c\xe3u\xeb\x12|\xc3\xe4\x82\xa7\x89].\xcb\xb0\xa7\x0cK\xf3\xc0\x07T0\xa4\x0b,+\xf0\xddgo\xcd\xda\xa1*\xcc\x8d\xbcT\x00\x81\x0f\xe8[\xe0\xdb\x8c\xbd^\xbdW>\xcf\x10nSC\x8f\xa7\xd6\xcb\xadt\x91h\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xd5\x03W\x8f\xd6\xa4\x19\xf4\xee\xcd\xfe:\xba\xd0\x0a 
\xf0\x01\xed6^\xd7\xa0\x93\xe7y\x91\xa5\xcb\xb0<\x17f\x00\x81\x0f\xe8\x83\x87z\xb86\xca@\xd4\xdb\xd0\x17\xcb\xb0\xa4\xca\xac\x84\x9e\xcf\xc2\xcb\x03\x10\xf8\x80\xce\x8b=X\x0f\xad\x94\xd0\xe7\xc0\x13\x82n\xaa\x0c\x8b\xf5r\x01\x81\x0f\xe8]\xf8\xb9\xcff\x1f{\xf9\xe2D\x82'\x89]\xce\xcb\xb07\xf6\xb2h\xf4\xc5!\x7f`\x9bh\x1d\x10\xf8\x80v|X\x87\xc9\x1b\x97\x0f\x5c]\xf4\xf0)W='+j\x00\x02\x1f\xd0K\x0f\x85\xa0^\xf5\xf2\xc5\xe7\x92*\xc3r\xa2W\x0a\x10\xf8\x80^\x8aC\x98\x0f\xf6\xf2\xc5I\x0e]\x0f{\xd6\xcb\x05\x04>`\xed=\x14\x866{\x12\x84\xf6\xb3t\x19\x96CeX\x00\x81\x0f\xe8\xb5\x8a^\xbegy\x9e\x0f\xba\xfa\xdc\xe2c\x7f\x96\xd8%<o\xeb\xe5\x02\x02\x1f\xb0\x16R=y\xe3\x0e?\xaf\xaa0W(\xc3\x02\x08|\xc0Z\x883v\x1f\xaa\xcb\xb7\x93\xe7y\xe7\x86vc\x19\x96\xdd\xc4.\xa7\xca\xb0\x00\x02\x1f\xb0nF\x89\xeb\x8a\x0e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0z\x89\x13\x17\x0e\x1e\xb8:\xacN1\xee\xcas\x89=\x92[\x89]\x8e\xca\xe7{\xe6\xa8\x03\x02\x1f\xb0\x8e\xa1\xaf(/\xce\x1f\xb8\xba\x13C\xbb\xca\xb0\x00\x02\x1f@\xb5\xbd\x18\x8a\xee\xf3^\x19\xa8\xb6[\xfe\xf8C\xd8K\xad\x97{h\xa2\x06 \xf0\x01k-\x0e\xed\x8e\x12\xbb\x8c\xdb\xfa\xd8\xe3y\x86O\x13\xbb\x5c\xc6^L\x00\x81\x0fX\xfb\xd0\x17f\xed>\x7f\xe0\xea6\x9f\xfbV\x15FG\x8e. 
\xf0\x01|\x12\xfa\xc2ynGw~\x14\x86y\xdf)\x7f\xde\xca\xd0\x94\xe7y\x18\x8aN\xad\x97{j\xbd\x5c\xa0\x0d\xde\xd0\x04@\xcbB\xdf(N\x82\x18\x94\xdb\xa8\xe53[\xab\xca\xb0\x8c\x1cQ@\xe0\x03\xb8?\xf4\xed\xb5\xfd1\x96\xa1\xb4\xc8\xd2\xeb\xe5>\xb7^.\xd0\x16\x86t\x01\x9a\x87\xbd\xd0\x03\x99*\xb3\x12\x86\xa2\x0b-\x05\x08|\x00\xdd\x15\x86rSeX\xf6\x95a\x01\x04>\x80\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c@\xe0\x03\xe8\xb6\xa2\xe2z+j\x00\x02\x1f@W\xe5y>\xca\xd2eXN\x94a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\xa5\xca\xb0\x1c(\xc3\x02\x08|\x00\x1d\x15\xd7\xcbM\xf5\xde]f\xd5E\x98\x01\x04>\x80\x16\xab*\xc3R(\xc3\x02\x08|\x00\x1d\x15\xcb\xb0\xec&v9U\x86\x05\x10\xf8\x00\xba\xadj\xa8\xb6\xd0D\x80\xc0\x07\xd0Q\xb1\x0c\xcbVb\x97#eX\x00\x81\x0f\xa0}!n\x5cng\xe5\xb6]\xb1_(\xc3\x92\xea\xdd\x0b\xeb\xe5*\xc3\x02\x08|\x00-\x0b{\xa3\xecfY\xb4\xd0k7)\xff\x9f\x0alE\x96\x9e\xa8qh\xa2\x06 \xf0\x01\xb4+\xec\x85\x1e\xbd\x17w~\x14\xc2\xdc{\xe5\xcf'\xb1\xec\xca\xdd}\xc3\xff\x9f&n\xee\xb2\x0c{\x85V\x05\xba\xe2\x0dM\x00\xacA\xd8\x0b\xc3\xb3\x93\x07\xae\x0eK\xa5\xbd,\xf79\xc8>\xe9\xb5\x1bW\xdc\xe4\xfe\x94\x8fc/\xde\xf6E\xb9\xbd\xde;xV\xe7g\xce\x19\x04\x04>\x80\xfb\x85\x90\xb4Q\xb1\xcf\xb3\x10\xe4\xcaP\xf6O\xb2\xf4z\xb9\xa1\x0c\xcb\xf14\x0f\x22\xfc^,\xf3r\xdf\xe3\xd9\xa9\x19\x1a\xef}L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb0:\x08\x08|\x00}\x11\x02\xda\xa0F\xe8\x0b\xd7\xff\x83\x8a}F\xb3<\x902`\xddN\x18\x09\x8fikN\xcf\xef\xbe\xb0\xb8;e\x80\x0c\xab\x86\xbc\x1e\x02k\xf5>\x86\x9f9\xaf\x11\x04>\x80\x95\x08\xe7\xdb\x85\xd9\xb9\xd9\xcd\xac\xdb\xdd\x19n\xea\xf9<z\xc4\xc2m\xc4\x9e\xbe\xe3\xacf\xcf\xde\x12mf\x9f^3\xb8\xf6c\xbc'@\x9e\xde\xb3\xdb$\x86\xc3c\xafN\x10\xf8\x00\xe6\x19\xfaBP\xdb\x8bAk|O\xa8\xa9\xf2\xff\xca\xed?\xcf\xf1\xf1\x84\x9e\xb0a\x0c\xa2Oz\xdc\xf4\xf7\x85\xc5\xaa\x927\xc0\x9c\x99\xa5\x0b\xac[\xf0\x9b\x94\xdb\xa0\xfcg\x98\xa4q\xd5\xe0W\x7f\xb0\xdc\xfeq\xac\xe1\xf7h\x8e\x8fgT^\xbc\xbbF\x87 
\xf4\xf8\x0d\x0d\xfd\x82\xc0\x07\xb0\x8c\xe0Wd7\xe7\xf5\x1d5\xfc\xd5\xdb\x1a~\xdbs|,\xa1\xb7\xeb\x9d\x86\x01\xb4\x8b\xc2\xca$\xc2\x1e\x08|\x00K\x0d}\xafb\x0f\xdb\x7f\x98\x22\xf4}\xa3\x0c}\xc5\xbcz\xfb\xca\xc71./\x86=\x0e}\xcfc[\x03\x02\x1f\xc0r\xc5\xd57\xfe\xca\x94\xbf\x1eJ\xb9\x84\x09\x18\xc5\x9cB_\x98\xf9\x1az\x0e\xcf{\xd6\xcc\xef\x94\xcf\xcd2t \xf0\x01\xac$\xec\x85\xde\xb9Y\xc3Z(\xe52\xb70\x13'\x97\x0c\xb3\xfbg\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A-5[\xf7$\xbb\xa9KWe\xae\xe5E\xe2Ps\x08}G\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdcT\xcf\x5c\x08z\xa38\xa3\xf7\x9d\x8a\xe0\xb7\x90\x12#\x1d\x9e\xc1{\x1e\xc3\xde\x99W\x1a\x08|\x00\xab\x14BZj\xe5\x8d\xe2v6i\x18\x92\x8c\xc1\xef\xed\xec\xd3\xbdn\xe7\x8b\x0c6\x1d\x9c\xc1+\xec\x81\xc0\x07\xb0z\xb1\xf8rj\xc5\x8d\xd3\xfb\xce;\x8b5\xfcF\xe5?\x1fg\x9f\xd4\xf1[x\x01\xe1\x0e\xcd\xe0=\xc9\xd4\xd8\x03\x81\x0f\xa0%\xaaBZQ\x11\xc0.B\x1d\xbfr{\xb4\xac\x09\x09\x1d\x98\xc1\x1bj\xec\xed\x09{ \xf0\x01\xac\x5c,\xc3\xb2U\x11\x5c&m|\xec-\x9e\xc1\xfb\xae\x1a{\xd0n\xd6\xd2\x05\xd6)\xecU\xad\xe1\x1a\x86L\xf7[\xf8\xb8\xc3c\xbe\xed9\x0b=}E\xb9}\xb5\xdc\xfev\x0b\x1e\xde;\xca\xae\x80\xc0\x07\xd0&!\xcc\xa5&j\x1c\xb6tH2\x0c\xe5\xee\xb4\xec1\x85p\xbc\xd7\xd6\xdeP@\xe0\x03\xd6P,\xc3\xf2,\xb1\xcbe\x5c_\xb7\x8d\xb6[\xf6xnk\xec\x99\x89\x0b\x1d\xe1\x1c>`]\x8c+\xaeo\xf3\xd2_\x1b-z,a\xd2\xc8\xb6\xb0\x07\xdd\xa2\x87\x0f\xe8\xbdX\x86%5$z\xda\xd6\x15!\xe2coS\xd8Sv\x05:H\x0f\x1f\xb0\x0e\xc6\x15\xd7\xb7\xb9w\xefQK\x1e\xc7\x91\xb0\x07\xdd\xa5\x87\x0f\xe8\xb5<\xcf\xab\xd6\xcb}\xde\xf2\xe1\xc9\x10\xb0Nb\xf0\x0b\xdb\xd6*\xc2\x9e\xb2+ \xf0\x01\xb45\xec\x85\x80T$v\xb9\xaa\xb8~\xe5\xe2,\xd8I\xc5s\xfc\xee\x02\x1f\x82\xb2+\xd0\x03\x86t\x81>\xab\xbd^n\x87-r\x06\xaf\xb0\x07=\xa1\x87\x0f\xe8\xa5<\xcfC\x10z\x92\xd8%\x94a9\xec\xc1S]D\xe0Sv\x05zF\x0f\x1f\xd0WUan\xd4\x93\xe79\xef\xc0'\xec\x81\xc0\x07\xd0~y\x9e\xefe\xe92,'=Z!b\x9e\x81/\x94]\x19\x08{ \xf0\x01\xb4=\xecU\xad\x97\x1b\xec\xf7\xe8)\xcfk\xd6\xae\x1a{ 
\xf0\x01tFU\x19\x96\x832\xd4\x5c\xf4$\xdc\x0e\xe7tS\xa1\xec\xca\xb6\xb0\x07\x02\x1f@\x17\x02\xd0 K\xf7\xde\x85\xf3\xd3\x0e{\xf4\x94\xe71\x9c\xfb\x5c\x8d=\xe8?\xb3t\x81>)\xb2t\x19\x96\xfd\x9e\xf5b\x0df\xfc}eW`M\xe8\xe1\x03z!\x0eo\xa6\xca\xb0\x9c\xf60\xdcL\xdb\xc3w%\xec\xc1z\xd1\xc3\x07\xf4E\xd5Pm\xd1\xc3\xe7\xbc3e\xd8Sv\x05\xd6\x8c\x1e>\xa0\xf3\xf2<\x1fe\xe9\xd9\xaaG=*\xc3r\xfb\x9c\xa7\xe9\xdd\xbb\x14\xf6`=\xe9\xe1\x03\xba\x1e|\xaa\xca\xb0\xb4~\xbd\xdc)5\x0d|\xca\xae\xc0\x1a\xd3\xc3\x07t]\x98\x95\x9b\x9a\xa8q\xd8\x972,\xaf\x194\xd8\xf7D\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Yb\x97\xb0^n\xd1\xd3\xa7?\xac\xb9\xdf\x91\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\xef\xf7\xf8\xb9\xd7\x19\xd2=\x10\xf6\x80@\x0f\x1f\xd0I\xb1\x0cKj\x96j(\xc3r\xdc\xd3\xe7>\xc8\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07t^U\x98Y\xd7\xde\xbd0Ie\xd4\xd7\xb0\x0b\x08|\xc0\x9a\xc8\xf3\xbcj\xbd\xdc\xe7=/=\xb2\x9d\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0ka/\x94a)\x12\xbb\xf4\xb5\x0c\xcb]\xc3{~v.\xec\x01\x0f\xd1\xc3\x07tM\x08s\xa9\xf3\xd7\x8a5(?2x \xec)\xbb\x02\xdcK\x0f\x1f\xd0\x19qu\x89\xa7\x89]B\x19\x96\xc3\x9e\xb7A\xe8\xe1\xbc;\x9c}$\xec\x01U\xf4\xf0\x01]R\x15\xe6Fk\xd0\x06w\xcf\xdfSc\x0f\xa8E\x0f\x1f\xd0\x09y\x9e\xefe\xd5eX&k\xd0\x14\xc3x\xf9\xae\xb0\x07\xd4\xa5\x87\x0f\xe8\x0a\xbd{7B\x0f\x9f\x1a{\x80\xc0\x07\xf4K\x9e\xe7E\x96.\xc3r\xd0\xd3\xf5r\xef\xb3\xbfF\xcf\x15\x98\x13C\xba@\xdb\xc3\xde K\x17Q\x0eeX\x0e\xd7\xa5=\x84=@\xe0\x03\xfa\xa8\xc8\xd2eX\xf6\xcdP\x05\x10\xf8\x80\x8e\x8a\xeb\xe5>I\xecr\xee\x5c6\x00\x81\x0f\xe8\xb6\xa2\xe2\xfa}M\x04 
\xf0\x01\x1d\x95\xe7\xf9(K\x97a9Z\x932,\x00\x02\x1f\xd0\xcb\xb0\x17V\x93HM\xc4X\x87\xf5r\x01\x04>\xa0\xd7\xc2Pmj\xa2\xc6\xa1\xd9\xaa\x00\x02\x1f\xd0Q\xb1\x0c\xcb\xb3\xc4.\x97\xd9\x1a\x95a\x01\x10\xf8\x80>\xaa\x0as\xca\xb0\x00\x08|@W\xc52,\xbb\x89]\xc2z\xb9\xc7Z\x0a@\xe0\x03\xbak\x5cq\xbd2,\x00\x02\x1f\xd0Uy\x9e\x870\x97Z/7\x94a9\xd3R\x00\x02\x1f\xd0\xcd\xb0\x17\xca\xb0\x14\x89]B\x19\x16\xbd{\x00\x02\x1f\xd0a!\xec\xa5\xca\xb0\x14&j\x00\x08|@G\xe5y\xbe]^<M\xecrY\x86=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\xe5y\xbe\x97\xa5\xd7\xcb=\xb5^.\x80\xc0\x07t\x9b\xde=\x00\x81\x0f\xe8\xab<\xcf\x8b,]\x86\xe5\xc0z\xb9\x00\x02\x1f\xd0\xdd\xb07\xc8\xd2eVB\x19\x16\x135\x00\x04>\xa0\xc3\x8a,]\x86\xc5z\xb9\x00\x02\x1f\xd0Uq\xbd\xdc'\x89]\xce\xcb\xb07\xd6R\x00\x02\x1f\xd0]E\xc5\xf5V\xd4\x00\x10\xf8\x80\xae\xca\xf3|\x94\xa5\xcb\xb0\x9c(\xc3\x02 \xf0\x01\xdd\x0d{\xd6\xcb\x05\x10\xf8\x80\x9e\x0ba.U\x86\xe5P\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Yb\x97\xcbL\x19\x16\x00\x81\x0f\xe8\xb4\xaa0W(\xc3\x02 \xf0\x01\x1d\x15\xcb\xb0\xec&v9U\x86\x05@\xe0\x03\xba\xad\xaawom&j\x94\xe1wRn\xd7\x1d\xdd&^\xca\xf4\xe0opTng\xaf\xbd\xb6\x8f\xe3\x17S\x81\x0f`\xca7\xd7\x10\xe6\xb6\x12\xbb\x1c]__\x9fi)`\x09\xefG\xe3\xf2\xe2\xc5=\xefIa\x04\xe2\xc3\xb8\xbe\xb7\xc0\x07\xd0\xf0\xcdU\x19\x16\xa0-\xefG\xe1\xbd\xe8I\xc5n\xcf\xfa\xdc\xd3'\xf0\x01\x8b\x12\xde`S\xeb\xe5\x1e\x9a\xa8\x01,I\xdd/\x97\x85\xc0\x07P\xff\xdb\xf4\xa0\xbcx\x9a\xd8\xe5\xb2\x0c{\x85\x96\x02\x96\xf0~4\xac\xf8\xf2y\xd7\x8e\xc0\x07P\xdf\xb8\xe2\xfa\x91&\x02\x10\xf8\x80\xee~\x9b\xde\xab\xf8\x96|j\xbd\xdcN*4\x01t\xd7\x1b\x9a\x00\x98\xb3\xaa2,\xa35n\x9b6\xcdH\x0e\x93j\xb6j\xee\xfb\x5cH\xa7\xc3.\x1a\xec{%\xf0\x01T\x883\xe16+\x82\xc3\xc5\xba\xb6O\xf9\xdc\xf7[t\xac\x8ek\x06\xbe\xf36=n\x98\xe2\xef\xee\xa2|\xbd\x9ff\xf5\xce\xcf\xeb\xed\x12\x8f\x86t\x81y\x05\x88\xd0c\xb4_\xf1\xcd\xb9\xd0R\xad8V\xe18\xed\xd6\xdc}\xa4\xc5\xe8\x81QV\xdd{w.\xf0\x01\xd4\xfbf\x9c\x9a\x09\xb7\xaf\x0cK+\xc2\xde\xa0A\xf0>P\x18\x9b>\x88#\x0b\xc3r;}`\x97\xa3p}\x9f\xdf\xa3\x0c\xe9\x02
\xf3\x08\x11\xe1\x8d4U\xd4\xf4\xdcz\xb9\xad1\xce\xea\x95\xa88W:\x87\x9e\x85\xbe\xf0\xe5eX\xbe_m\x97\x97a\x0b_~&\xe5v\xb6\x0e_F\x05>`\x1e\xaa\x82\x81s\xc0\xda\x11\xcc\xc3q\xa8[gl\xa4\xc5\xe8q\xf0[\xbb\x9ekC\xba\xc0\xac!bT\x11\x22N\xcc\xf0l\xc5q\xda\xce\xea\x0f\xe5\xbek(\x17\x04>\x80\xdb\x10a\xbd\xdc\xee\x18g\xf5\x86rC\x9d\xc4C\xcd\x05\x02\x1f\xc0\xad\x10\xe6ReX\x0e\xd7\xb9\x0cK\x8b\x82y\x08\xe5uJ\xb0\x84\x80>\xd2b \xf0\x01\xdc\x86\x88Ay\xf1,\xb1\xcbe\xd6\xe3\x12\x07\x1d:N\xdb\x15\xc7\xe9\xaeB@\x07\x81\x0f\xe0\xae\xaa0W(\xc3\xd2\x0a\xe3\x9a\xfb\x19\xca\x85\x1e3K\x17h,\x96a\xd9\xad\x08\x0fc-\xb5\xf2\xe3\x14\x02\x5c\xdd\xa1\xdc=-\x06+\xf9;\x1dd7%b\xb2ENp\x13\xf8\x80iT\xf6\xeei\xa2V\x84\xf2\xa75w\x1f\xe9\x8d]\xd9q\x0a\x13\x9f\xc2\xb1\xba\xad\x0d\x17\xfe\xbfS\x11\xce\xcf\xeel\x13\xc3\xf0\x9d:\xd6{\xf1x\x0f\xee;\xce\xe5>\x0f\x1d\xe7\xe3Y\xffF\x05>\xa0\xe9\x9b\xd6(K\xf7\x1a\x1d)\xc3\xd2\x8a\x0f\x96q\xcd\xddC\xd9\x9cc\xad\xb6\xd4\xe3\x13>\xecG\xf1\xc3\x7f\xab\xe1\xafo\xc4\xa0\xb0s\xe7\xf6\xc2\xea\x11\xe3y\xf4\xaa\xc7/\x0a\x1f\xd6\xdc\xfd\xcb\x8bz\xed\xc4\xd7\xf0EVofy\xe5\xe3\xa8\xfb\xbc\xca\xdb\xc9\x17\xf4\xe5+\x1c\xef'3\x1c\xe7\x17\xe5\xed\xdc.\xfd6U\xf8s\x0e\x1f\xd0\xf4M\xf8\xb0\xa2\xf7A\x19\x96\xd5+\xb2\xf4\xec\xe9\xbb\xc7k\xa4\xb9\x96\xf7e\xa9\xdcBo\xcd\xcb\xecf\x22\xcd\xd6\x9cnz'\x06\x82\x8b\x18.\xa6\x16\xbf\xac=\xaf\xb9\xfb8\xbe',\xc2\xb8f\xd8{\xde\xd6/,!\xd8\x97\xdb$\x06\xcd's\xb8\xc9\xf0zy\x11\x82p\x98y\xdf\xb4\xed\x05>\xa0i\x90H\xbd\x09\x1f\x1a\x1a\x5c\xf9\x87L\xe852\x94\xdb\xbe\xa0w\x11?\xac\xb7\x16xW!\xe4\x7f\x18\xcf\xdd\x9c%\xf4\x85/m\xe75v\x0d\xef\x05\xc7\x0bz\x0d\xef\xd6\xd8\xf5<k\xe9\xe9#q$\xe4,\xab\xbf\xb2M\x13\x1b\xf1\x0b\xc3\xb6\xc0\x07,\xe4\xdbjE\x90\xb8\xb4\xf6\xea\xca\x8fQ\x93\xa1\xdc\xe7\x86r\x97rL\x0ec\xd0\xdb\x5c\xe2\xdd>\x0d=K3\xf6\xbe\x8dj\xee\xb7\x13\x97\xec[\xc5k\xb8\x95_X\xee\x1c\xf3\x8d\x05\xde\xcdU\xd3Sg\x04>\xa0\xae\xaa7aC\xb9\xed8Fu>dB\x8dD\xe1|9V\xb5D\xddN\x83\xe0\xf4)qi\xbdwk\xee^\xc4/\x84\xcb|\x0d\xb7r\xf9\xbfX\xe4\xfc\xe9\x12\xee\xaa\xf1\
x975\x81\x0f\xa8\xf3&6\xcc\xd2C\x13\xa7z\x8bV~\x8c\xea\x0e\x83\x05\x86r\x97g\x96\xbf\x8b\xf3\x19\xef{7\x06\x90iC_\xe8\xa9:\xad\xb1\xeb\xc6,\xe1r\x8a\xd7p+kF\xc6\xc7\xff\xac\xad\xaf+\x81\x0f\xa8\xfb\xad;\x19 4\xd1J?h\x9a\x0e\xe5N\xb4\xdar\xc4`}\x94\xd8%L\x9c9\xc9nz\xd3\xde.\xb7\xcf\x86\x99\xa2q\xdb\xbe\xfdw\xf9\xf3\xc7\xe5\xf6\xe5x[W\x0d\x1e\xc2\xb3\xb8\xda\xca\xb4F5\xefo\xa6\xa1\xdd\x06\xaf\xe1VN4\xaa1\xa1\xedS\xa1\xf5\xf6\x98\xdf9\xde\xb7\xc7\xf9\xedx\xddC\xc7\xfaj\x9a/\xd8\xca\xb2\x00Uod\xa1\x87`\xb3\x22@\x5ch\xa9\x95\x0ao\xfe\x86r\xdb}|\x9e\xbc\x16Z\xc2\xcf\x0e\xeb\x0eK\xc6\xbf\xb1\xb0\x1d\xc7p\x11\xc2U\xdd\xde\xa4\x10D\x86S\x06\xd6\x8b8\x01\xe1\x83\x1a\xbb\x87\xa1\xdd\xe3)\xdf\x0f\xc65_\xc3\xa3\x96\xbe\xdfT\xad+~+\xf4\xda\xee?\xf4\xa5\xeb\xceq\x9e\xdcy\x0f\x0e=\x87{w^C\xe3i\x1e\xa0\x1e>\xa0\xea[\xeb~\xc5\xb7m\x01b\xb5\xc7(\x1c\x9f\xba3\x01\xf7\x0c\xe5._\xec\x8d\xb9\x8a\xdbA\xb9\x0d\xca\x9f\x8d\xa6=\x07-\x1c\xc38A\xea\xed\xac~\xef\xdb\xf6\x8c\x8f\xff\xa4\xc6\xaeS\x0d\xed6\x18\xca=j\xf1\xa9#\xa3\x9aao\xd8\xb4\x87=<\xe7\xf0z\xc9nz\xffN\x04>`\x11\x0e+\xbeu[/w\xb5ao\xd0 p\x1f\xb4\xf1$\xf752\x8aAon\x7f318\x8cj\xee\xbe?\x87\xc7?\xf7\xa1\xdd\x06C\xb9\x97YK'\x86\xc5s\x9c\xabz\xf7\xaeb\xd8{5\xc3\xf1\xbe(\xb7\xbdi\xff\x8e\x05>\xe0\xa17\xb1\xd0#\x90*\x16z\xde\xc6\x13\xa7\xd7\xcc8\xab7\x0cv\xaed\xcej\xc5^\x9aW\x8b\xb8\xdd,}\x8e\xe0\xad\xbd\x19\xef\xe7U\x83\xdbh2k\xb7\xeek\xb8\xcd\xbd\xd3\xc3:\xcfs\xd5\x8f_\xe0\x03\x1eR\x15\xe6\x94aYm 
o2\x94;\xd2b\xbdV'\xcco,q\x15\x8eZC\xbb\xf1\xf1\xd4\x19\xcam{\xeft\x9dv]\xf9P\xb4\xc0\x07\xdc\xf7F<\xaa\x08\x13'fz\xae\xf4\xf8\x84\xde\xd7\xf7j\xeen(\xb7\xe7\xe2\x89\xfeu\xce\xb1\x1b\xce\xe1\xeeB\xb8\xacS.&9\xb4\xdb`(\xf7\xb4'\xbd\xd3+\xff\x1b\x14\xf8\x80\xfb\xde\x88\xab\xde`\xf5\xee\xad\xd6\xb8\xe6~\xa7\x86r\xd7F\x9d/`\xdb\xb3\xdeI\x1c\x96\x1c\xd5\x0d\x87\x89\xd5>\xea\xccj\xed\xd3Z\xcf\xdb\xab~\x00\x02\x1f\xd0\xf4\x8d\xf8@\x19\x96\x95\x06\xf2\x10\xe0\xea\xac\xc7\xda\xa7\x0fK\xaa\xd5\xe9Az4\x8f;\x8a=\xc6\x075v\xbdwh7\xf6P\xd7))3\xea\xd1{\xcd@\xe0\x03\xda\x14&\x06Y\xba\xf7.\xcc\x943Qcu\xc7\xa7\xee\x07eP\x08\xe6k\xa5\xce\x84\x80\x9dy\xddY\xec9\xae\xb3\x0a\xc7\xee=\xe7\x0e\xd6y\x0f9\xea\xd9\xea=\x85\xc0\x07\xb4\x892,\xed\x0d{MV\xd385\x83z\xbd\xac\xe8<\xcdQV\xafT\xcb\xf8\xce\xebxT#x\xb6\xb6\x04\xcb\x03\xea\xb4\xfdf\xf9\xdc\xc7\xab|\x90\x02\x1fp\xfbF\x1c\xbe\x85\xefV\x84\x88\xb1\x96Zi\x0f\x81\xa1\x5c\xda\x142/j\x06\xb3\x10v\x8a\x06\xcb\x8fu\xad@\xf8\xa4\xe6~OB\xe8K\x9c\xd7\xb8P\x96V\x03nU\xbd\x11\x17\x9ah\xa5a\xfci\xcd\xdd;q\xdeS\xf9\x9c&K\xb8\x9b\xb3\xb2-\xf6;z\xccC(\xd8\x8e[\xf8\xf70^5\xc8\xea-\xe1\xb5\xac\xd07\xae\xb9R\xc6~|\xecU5\xf7:7\xab<\x0c=\x97mpY\xf3\xb8\x84\xda\xa6\xc3\x10\x80\x97\xfd\x05Z\xe0\x03n\x87YR\xbdGG\xca\xb0\xac\xf4\x83\xbf\xee\x07\xc3I\x87\xce{\xdaqt?u\x9c\xf7b\xb0\x1b\xb6)\xd4\xd5\xf9\x92\x91\xdd\xac\xff\x9a\x0as\x1bY\xba\x90{\xd0\xe5Y\xe5\xe1q\xbf\xa8\xb9o8\xb6/\xe2\x04\xac\xb0\x1d/\xa3G\xd3\x90.\xf8\xa0\xa9\x1af\xb9\xca\x94aY\xf5\x07I\x9d\x0f\x7fC\xb9\x1d\xfd\xb2Un!\xa4\x7f7\x06\x86'\x1d\x0b{MW\xe1\xe8\xe5\xeb7\xf6\xd6\x9d6\xfc\xb5\xcdx\xcc/\xe2P\xefBK\xb7\x08|@Q\xf1\xcd\xfc\xd0D\x8d\x95\x85\x81\xf0!\xdad(\xd7q\xea\xc8\x97\xacxN\xdb\xab\xf8\x81\xbf\xdb\xf5\xe7\xd4`\x15\x8e\x87\xec\xf7`Vy\xf8{=\x9f\xe2\xf7n{?\xbfQ\xbe&\xce\xe2\x97\x80\xb9\x9f\xe7'\xf0\xc1z\x7f\xf0\x0c*\x02\xc5\xa5\xc2\xbd\xab\x0b\x05Y\xfd\xa1\xdc\xbe\x95\xb0\xe8\xf3q\x0d\xbd\xe5!\xd8<\xcb\xea\xad!\xdb\xb5/\x8f\xd3\x04\x9e\x93>L\x08\x8b_\xb8\x86S\xb6\xc1\xad\xad\xec\x93^\xbfb\x9e\x
c1O\xe0\x83\xf5V\xf5&k(w\xb5\xc7\xa6N \xb8t\x9c\xba\x11\xe0\xe3D\x95\xf7z\x18\xf4\xee\x06\x9eQ\xc3_\xbb\xcczt*\xc2\x9d\xd0w:\xe3Mm\xc4/\x05\x17\xf1\x5c\xbf\x99\x99\xb4\x01\xeb\xfb\x01\x14\xde\x94R'\xce\x9f\xea5Z\xd9\xb1\xa93\xeb\xf1\x96\xa1\xdc\xf6\x1f\xcfpn\xd6d\xc6\xa0\x17z\x8d\xc2q>\xcb\xbe\xbf\xc8\xf2\xe4\xce\xbf\xc3\xb9\xb8[\xab|\xaea\x86m\xf9|\x8f\xb2\xea\x09\x1a\xb7\xce\xfa\xf6\xfa\xbd\x0d}1\xa8=\x9b\xf1\xe6>\x0e~\xf1=a4\xcb\x0cf\x81\x0f\xd6\xd7\xb8\xe2z\xbdF\xab\x09\x07M\x86r\x9fwu\xf6t\xf9\xb8sa/)\xf4\x10\x1d\xc7@4\xa9y_\xafZ\xf2|\x9f4\xf8\x95\xb0\x12\xc7^\x1f\xbf\x5c\x86\xd3ab\xb1\xe5\xc3l\xf6\xf34C\x90\x9f\x84S\x02\xa6\x1d\xfe\x16\xf8`=CE\xd5z\xb9\xcf\xbbV\x0b\xabG\x8e\xb3\xfaC\xb9\x85\xe6j}xo\x12\xf6\xaeb8\x18wq\x02C|\xbe\xd3\x04\xb7\x8fg\xa8\xf6q)\xc0\xf8\x9c\xf6\xe2\x88\xca\xa8a\x18~]x\x1d\x85r.\xd94\xa1\xcf9|\xb0\x9e\x1fBE\xc5\x87\x8e \xb1\xba ^\xb7>\x9d\xa1\xdc\xfe\x84\xf7\x8f\xbfd\x95\xdb \xf4\x0au8\xf8\x1cf\xd3\x95\x94\x09m4\xee\xf3\x0b!\xf4\xd2\x96[\x08|\x8f\xcb\xed ~a\x9b\xd6\x8b8\xc4+\xf0\x01\x95o\xca\xd6\xcbm_\xd8\x1b4\x08\xda\x07\x0aa\xb7\xfex\x8ej\x86\xf7\xf0\x05\xeb\xed\xb0\x22H\x97\xff\xeeb\x00\x99\xa5\xf7j'~\xe1\xe9\xb5\x10\xe6c\xa8\x0f\x7f\xef_,\xb7\xa3\xac\xdez\xc4\xaf\x1b\xc7\xf7\x0c\x81\x0f\xb8\xf7M\xb9\xea\xfc\x9aP\x86\xe5PK\xad\xc48\xab\xd7\x1bt\xaeTN'\xd4=F\xc3\xae\x87\xf7\x86\xe7\x9d&\xdb\xaci\x88\xe9x\xf8;\x8b\xbd~\xe19\x1f4\x0c~\x1bY\xc3\x91\x18\x81\x0f\xd6KU\x98\x1bi\xa2\x95|`\x867\xee\xdaC\xb9Z\xac\xf5\xc73\xf4v\xd5\x19\xda<\xe8\xc9\xb9\xb2u\x86\xaeOj\x86\x98\xb5\xab\x0c\x10zv\xe3\x97\xb8\x10\xfc\x8e\x1a\xfc\xea\x93&\x01Y\xe0\x83\xf5\xfa\x10J\x85\x8a\x13\xc3\x84+9.\xa1\xd7\xb5n\xe9\x86\x03\x93i:\xa1\xce\xf9UW}\xe8\xa9\xady\xdei\x189\xd8\xab\x19f\xb6\xe6Uw\xae\xa3\xc1/|\xa1{g\xce\xaf5\x81\x0f\xd6(TT\xad\x97\x1b(\xc3\xb2\x1a\xe3\x9a\xfb\x19\xca\xed\x8e\xe1\x1c\x8f{\xdb\xbf\xac\xd4yM\x8e\xee\xbc\xc7\xd4\x19\xb6|\xb6\xe8ue[\x1e\xfc\xc6\x0dB\x9f\xc0\x07|*\xcc\xa5\x86\x98\x0e\xfaX\x12\xa1\x03\x1f\x98\xe1\xc3\xb2N\xa1\xdc\xabl\xf6\xc5
\xe9Y\xde\x97\xab:\xc3\xb9\x93\x05\xdc\xfd\xce\x92\x9fn\x08&\x95C\xb9\xb7#\x07qRJ\xd1\xe0\xb6\xd7V\x0c}u\x86\xc1k\x07c\x81\x0f\xfa\xff\x014\xc8\xd2\xbdw\xb7\xb5\xbfX\xeeqi2\x94[\x08\xe4\x9dQ\xf7\x03\xf8b\x01\xaf\xa7e\xbe~\xeb\xac\xeaq\xf5\xfa{O\x9c\x14Vg\xad\xd9\xadx\x1f\xeb\xacN8\xae]\xd0[\xe0\x83\xf5x\xd3H\xbd)\xec+\xc3\xb2\xf4\xb0\xd7\xa4@\xed\xa9\x99\xd3\xfd\xb3\x80s1\x07K|\xfd\x0e\xcb\x8b\xa73|Q\x19\xd5\xbc\xab\xa7\xf1\xbe\xbcF\xe6@\xe0\x83~\x07\x8b\xf0f\xf9\xa4\x22L\x8c\xb5\xd4JBx\x9da\xbf\xab\xcc\xac\xdc\xbe\xfem\xce;\xa0\xed-\xe9q\xd7-\xc1r\xfe\xd0\x17\x95\x18d\x9e\xd7\xbc\xcbq\xbc\xcfuu9\xaf\x1b\x12\xf8\xa0\xdf\xaaz\x86\x0aM\xb4\x92\x10\xfe\xb4\xe6\xee\xfb\x86r{k0\xc7\xd7\xd4 \x9b\xad\xe8q\x13\xe3\x9a_V\xaa\xbe\xa8\x145\xc3\xcc\xe6\x9a\xbfO\xcd-\xec\x0a|\xd0\xdf`\x11\xdepS\xe7\xd8\x1c)\xc3\xb2\xf4c\xd2\xa4@\xed\x89\xde\xd7^\x9bg\x8f\xdcxI\xaf\xdf\xf0\x98wk\xecZY>(\x9eF2\xaay\xd7k9\xb4\x1b\xdf/6\xe6u{\x02\x1f\xf4\xf7\x8d\x22\xd5\xbbg\xbd\xdc\xd5\xa8\xbb\xd6\xa8\xa1\xdc\xee\xaa{\xde\xd5\xde\x9c\xfe\xd6\xc3\xdf\xf1N\xc3\xf7\x86i\xeegP3X^f5'\x81\xc5/\x9cu\x87v\x8f\xd7ph\xb7\xcek\xe4T\xe0\x83\xf5\xb6_\xf1\xcd\xf0\xd0P\xe1\xd2Cx\x93\xb5FG&\xd2tS<n\xb5\x86*g];6\xf6\xe2?k\xf8k\xd3\xce\xe6\x1dg\xf5z\x9b\x9a\xbev\x8b\x9a\xed\xb5\x91\xb5\xb4TK\x08\xa2\xf3\xee\x81\x8c\xe1\xb6\xce\x97\xf2\x89\xc0\x07\xeb\x1b,\x06\x15\x1f\x02\x97\x0a\xf8.\xff\x03!k6\x94{\xac\xd5:\xad\xee\xf1+\xa6-\xa7\x12{\xf6^,\xe9\xf5[g5\x8d\xe0y\xd3\xd3D\x1a\x0e\xed\xee\xc6/Nm\x13\x1e\xd3\x87\xe5c\x9b\xcc#\xf8\xc5\xf7\x8b\xd0\x8euF\x03j\x87\xe07\xfc]B\xefT\xbd\x01XQc5\xc7\xa4\xee\xb98\x83\xf0\xc1\xd1\xd2\xe7\xb1oi\xb7Z\xc2\x90f\x9d\x899\xe15\x11B\xc2\xa8n\xc8\x8f\x81\xa2N\x0d\xbcy\x85\xbd\x10H\xdf\xab\xb1\xeb\xd4\xa7\x89\x84\x90X\xde\xcf\xf3\x9am\x16f\xed\x0eZ\xd6\x03~\x1bBwb\xf0;\x8d\x7f\xf3\xc7M\x1fg\xc3\xe3{\xdad\xa4F\xe0\x83\x1e\x89o\x16;\x15o\x10z\x8f\x96{L\xea\x9e\xe8~k\xab\xc5O\xe7\x91#Z+\xc0\x5c\xc4\x0f\xfd:\xbdb!\xf4}\x90\x0a\x091t\x85\xbf\xedQ\xc5\xeb\xe3*\x9b\xe3I\xfe\x0d{\xa6g=\x0d\xa1\x88
\xc1i\xb3F{\x1dg\xf5\x96\xaf[\xc6\xdf\xf7\xa3{\xfe\xbew\xe2\xf6\x22\x1e\xd7I\xdc\xce\xeek\xa3\x06\xc7w\xa6/\xef\x02\x1f\xf4K\xd5\x9b\xb3\xde\xbd\xf6\x1d\x13\xfa)|x\xbfl\xb0\xff\xdd\x900Mx\xbb\x9d\xe8\xf3\xc1\x1c\x9fCQ3\x80\xcc|\x1aB\x08B\xf1\x9c\xc4\x0f\xeb\xb4U\x18fnIA\xf2\xbd\x9a\xc7\xf5Y\x0cw\xb7?\xbf\xcc\xea\x0d\xd9>\xe4\xa0io\xbbs\xf8\xa0'\xe2y6\xa97\x90\xe7\x86\xe3VbC\x13\xac\x9f8\xd4\xf6\xee\x92^7!\xec\x0dk\x86\xaeZ\xe7\x0c6\xa8\x179\xb7\x19\xe5\x0dg\xed\x16\x0b(^\xbd\x88\xc0\xf7\x90Y\xc2\xde\xd14\xe7a\x0b|\xd0\x8f\xb0W5\xa3K\x19\x16X~\xe8\x0b=PG\x0b\xbe\x9b\xdb\xb0W\xf7\xcb\x5c\xe5\xb0|\xc3\xa5\xff\xe6=\xa3\xbc\x88\xcf\xa9N \x1e\xb7\xe0}ww\xc9w\x1b\xc2\xdeT\x01[\xe0\x83~8\xac\xe8\x11(\x94\xf9\x80\x95\x84\xbe\xf0\xe1\xfc|A7\x1f\xce\x0f\x1b\xbc\x16\xf6\xaa\xea\xb2\xd59\x0fs\x9c\xd5\xeba\x9c\xfb9\xc1\x0dg\xed\xee\xc4\xd9\xca\xab2X\xf2\xfd\xbd;m\xd8\x13\xf8\xa0\x07\xe2\x09\xbf\xa9\xfan\x97-9\xd7\x05\xd65\xf4\x85\xd3-\xbe\x9c\xd5\xeb\xb9\xaa#\x9c\xff\xf5Ny\xbb\xc3)\xbe\xc8mW\xbc\x9f\x84@Q\xa7\xd7ja\xc5\xc1c\x88<\xa9\xb9\xfb\xb3iK\xdb\xcc\xe1q\x86\xa0\xfd8[|/n\xb8\xfd\xc7\xb3\xbe\x8f\x0b|\xd0}Uo\x02#M\x04+\x0f}!\xc4\x0c\xca\xed`\x86\xe0w\x1e\x83\xde \xb1\xec^\xd5\xd0\xee 
\x11\xf6\x06Y\xcdU2\xb2\x9bQ\x83\x8b\x056\xd9\xa8A;\x8dWx\x5c/b\xaf\xdbg\xb3\x9bs6O\xe7t\xd3\xe1\xb9?\x8fAo4\x8f\xb6\xce\xcb\x1b\xf1\x97\x08\xd1\xc6W\xde\x9ad\x0d\x96)\x8aN\xaf\xde\xffh\xb8\x8a\xc7\x1bK~\xa4f\xe5\x85!\x97\xa1#\x0b\xed\x12\xffv\xc3\xdf\xe6v\xe2=\xe72\x06\xb8\xf0\xbetlu\x9c\xce\x1c\xdbG\xf1\xb8\xde\x1e\xdfG5>WBP|\x15\x8f\xf5d\x11\x13\xec\x94e\x81n\xd3\xbb\x07\x1d\x14{\xfc\xd4\xc4\xec\xe7\xb1\xfd^pk\xd3\xe32\xa4\x0b\xdd\xfd\x16Yd\xe9\xa9\xfd\x07z\x04\x00\x10\xf8\xa0\xbbao\x90\xa5\x8b(\x87\xf3?L\xd4\x00@\xe0\x83\x0e+\xb2t\xd9\x84}eX\x00\x10\xf8\xa0\xa3b\x05\xfcT\x19\x96\xf3\xc4\x0c>\x00\x04>\xa0\x03\x8a\x8a\xeb\xad\x97\x0b\x80\xc0\x07]\x15\x8b\xa2\xa6\xa6\xf7\x1f\xc5\xf5(\x01@\xe0\x83\x0e\x86\xbdP\xcb)5\x11\xc3z\xb9\x00\x08|\xd0qa\xa865Q\xe3P\x19\x16\x00\x04>\xe8\xa8X\x86\xe5Yb\x97P\x91_\x19\x16\x00\x04>\xe8\xb0\xaa0\xa7\x0c\x0b\x00\x02\x1ftU,\xc3\xb2\x9b\xd8\xe54.\xd3\x04\x00\x02\x1ft\xd4\xb8\xe2zeX\x00\x10\xf8\xa0\xab\xf2<\x0fa.\xb5^n(\xc3r\xa6\xa5\x00\x10\xf8\xa0\x9ba/\x94a)\x12\xbb\x842,z\xf7\x00\x10\xf8\xa0\xc3B\xd8K\x95a)L\xd4\x00@\xe0\x83\x8e\xca\xf3|\xbb\xbcx\x9a\xd8\xe5\xb2\x0c{\xca\xb0\x00 \xf0A\x87U\x85\xb9\x91&\x02@\xe0\x83\x8e\xca\xf3|/K\xaf\x97{j\xbd\x5c\x00\x04>\xe86\xbd{\x00\x08|\xd0Wy\x9e\x17Y\xba\x0c\xcb\x81\xf5r\x01\x10\xf8\xa0\xbbao\x90\xa5\xcb\xac\x842,&j\x00 \xf0A\x87\x15Y\xba\x0c\x8b\xf5r\x01\x10\xf8\xa0\xab\xe2z\xb9O\x12\xbb\x9c\x97ao\xac\xa5\x00\x10\xf8\xa0\xbb\x8a\x8a\xeb\xad\xa8\x01\x80\xc0\x07]\x95\xe7\xf9(K\x97a9R\x86\x05\x00\x81\x0f\xba\x1b\xf6\xea\xac\x97[h)\x00\x04>\xe8\xae0T\x9b*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT,\xc3\xf2,\xb1\xcbe\xa6\x0c\x0b\x00\x02\x1ftZU\x98+\x94a\x01@\xe0\x83\x8e\x8aeXv\x13\xbb\x9c*\xc3\x02\x80\xc0\x07\xddV\xd5\xbb\xa7\x0c\x0b\x00\x02\x1ftU\x9e\xe7!\xccm%v\x09eX\xce\xb4\x14\x00\x02\x1ft3\xec\xd5)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xd4z\xb9\x87&j\x00 
\xf0AG\xc52,O\x13\xbb\x5c\x96a\xaf\xd0R\x00\x08|\xd0]\xe3\x8a\xebG\x9a\x08\x00\x81\x0f:*\xcf\xf3\xbd,\xbd^\xee\xa9\xf5r\x01\x10\xf8\xa0\xdb\xaa\xca\xb0\x8c4\x11\x00\x02\x1ftT\x9e\xe7E\x96^/\xf7\xb9\xf5r\x01\x10\xf8\xa0\xbba/\x94aI\x95Y\x09eX\x0a-\x05\x80\xc0\x07\xdd\x15\x86rSeX\xf6\x95a\x01@\xe0\x83\x8e\x8a\xeb\xe5>I\xecrn\xbd\x5c\x00\x04>\xe8\xb6\xa2\xe2z+j\x00 \xf0AW\xe5y>\xca\xd2eXN\x94a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850\x97*\xc3r\xa8\x0c\x0b\x00\x02\x1ftT\x5c/\xf7Yb\x97\xcb\xac\xba\x083\x00\x08|\xd0bUa\xaeP\x86\x05\x00\x81\x0f:*\x96a\xd9M\xecr\xaa\x0c\x0b\x00\x02\x1ft[U\xef\x9e\x89\x1a\x00\xac\xc4\x1b\x9a\x00f\x17\xcb\xb0l%v9\xba\xbe\xbe>\xd3R\x00\xdd\xf3\xf5\xc7_\x18\xc7\x7f\x16_z\xf9\xad\x8b.>\x07=|0\xa3?\xf9\xc7?\x10\xbe8\xa5z\xf7\x94a\x01\xe8\xb6\xe3\xecf\xe5\xa4\x97e\xf8\x9b\x94\xdbH\xe0\x835\xf3\xf8\xdb?6\xc8\xd2\xeb\xe5\x1e\x9a\xa8\x01\xd0]_z\xf9\xad\x10\xf8.\xe3\x7fCQ\xfd\x17e\xe8\xbb(\xb7\xa2\xdc\x1e\x09|\xd0s\x7f\xee\x0f\xdf\xc8~\xf5\x83_\xf9|b\x97\xcb2\xec\x15Z\x0a\xa0\xf3^\x1f\xc9\x09\xf5VC\x19\xae\xef\x86!\xdfr\xdb\x16\xf8\xa0\xa7>\xf7\xf2\x07\xabv1\x94\x0b\xd0\x0f\xe3\xec\xe6\x14\x9d\xfb\x84\xe1\xdeo\x94\xa1\xef\xac\xad\xc3\xbd\x02\x1fL\xe9\xa7\xfe\xe0G\xb3\xd3_\x9e\xa4v\x09eX\x8e\xb5\x14@\xf7}\xe9\xe5\xb7\xc2\xa99U\xef\xe9a\xf2^\x18\xee}\x15\x87{\x07\x02\x1ft\xdc\xef\xfe\xfb\xefT\xed2\xd2J\x00\xbdRw\xa5\xa4p^w\x18\xee\x0d\x93<\x8e\xcbm(\xf0A\x07\xfd\xb5\xef<\xca\xbey\xf6\xeb\xa9]\x9e[/\x17\xa0_\xbe\xf4\xf2[\xa1\xbc\xd6i\xc3_\x0b\x05\xf9?\x8c\x93<F\xab\x9a\xe4!\xf0AC?\xfe\xc7\x9f\xc9\xbe9I\x86\xbdp\x8eG\xa1\xa5\x00zi<\xe5\xef\x85I\x1e/\xca\xed\x22N\xf2\x18\x08|\xd0bo\xfe\xce\x8fg\xdf\xfe\xf6\xb7S\xbbX/\x17\xa0\xa7\xbe\xf4\xf2[!\xf0]\xcep\x13a\xb8\xf7nM\xbf=\x81\x0fZ\xe6/\xfc\xdf\x1f\xca~\xf5\x97\xfeEj\x97\xf32\xec\x1dj)\x80^\x1b\xcf\xe9vBM\xbf\x0f\xe2p\xef\xfe\x22\x87{\x05>h\xe0\xc7.+\xffd\x94a\x01\xe8\xbfy\x7f\xb1\x0f\xc3\xbd\xefe\x0b\xac\xe9'\xf0AM\x7f\xf5\xf7~\xa2\xaa\x0c\xcb\xc9\xf5\xf5\xf5DK\x01\xf4[,\xd1r\xb4\xa0\x9b\xbf\xad\xe
97\xd7%\xdc\xdep\xd8\xa0Z\x98\xa8\xf1\xea\x9b\xff3\xb9\xcf?\xfa\xb9\xbf\xf9\x8bm\x98z\x0f\xc0R\x9c\xc5p\xb6(a\xb8w\xa7\xfc\x5c\x09\xbd\x89a\x1b\x97A\xf3b\xda\x1b\xcb\xaf\xaf\xaf\x1d2\x886\xbe\xf2\xd6$\xfe\x91}\x9fP\x86\xe5W\xfe\xd9/?\xf8{\xc5\xde\xcfd?s\xf6\xdf5 \x00\x8bt\x14\x83\xdf\xa4\xe9/\x1a\xd2\x85{\xfc\xe8\xef}\xee{\xff\x0e\xeb\xe5\xa6\xca\xb0\xec\xbc\xf5\xd3\xd9\xdf\xf8\xadok4\x00\x16-\xf4(~x\xbb\x84[\x93I\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f\xbe\xf1S\xd9\x0f}\xe7'?\xfe\xff\x9f\xf9\xed\x1fI\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xf1\x12n\xd9MM\xbf\xc3:5\xfd\x0c\xe9\xc2\x1d\x9f\xff\xd9_\xf8\x9d\x9f\xf8\xf6\xc6\x9f\xbe\xfd\xff\xe7\xfe\xe2\xcb\xec_\xfd\xe2?\x7fp\xff\x9f\xff\xd9\xb7\xb3_\xf8O\xbf\xad\xe1\x00X\xb5\x93\xecf\xb8\xf7\xde\xf5~M\xda\x80\xe8\xcd\xad\xaf\x8e\x7f\x22\xfb$\xec\x05\x9f\xf9\xed\xab\xe4\xef\xfcl\xf6\xbf4\x1c\x00m\x10\x96p\xdb\xfd\xfa\xe3/\x84\xa2\xd0\xb7\x93<\xbe\xb7\x08\x80\x1e>\x88a/\xbbg\xb6\xd5\xc6\x8f\xfcQ\xf6'~\xf47\xb3\x7f\xf9\xe1\xc9\xa7~\xe7\xef\xff\xdc\xdf\xca\xfe\xee\xaf\xff7\x8d\x07@\x1b\x85\x1e\x8b\xd0\xdbw\x18\xd6\x00\x16\xf8\x10\xf6\x1e\x08{w\xfd\xd9\xcf\xfeA\xf6\x7f~\xff\xdfd\xff\xfa\xdf\xfd\xdb\x8f\xff\xbf\xb99\xc8\xfe\xe9\xc6\x8f8w\x0f\x80.\x04\xbf}\x81\x0fa\xafA\x1d\xa5\xbf\xf3\x93\xdf\xca~\xe9?\x9ee\xef|\xf1\xcf+\xc3\x02@\x9b}\xdf\xd0\xae\xc0\x87\xb0\xd7\xd0;?\xf8\xcd\xec\xaf\x7f\xe775 
\x00mt\xef\xe4\x0d\x81\x0fao\x0a\x7f\xef\x0f\x7f=\xfb\xe9\xdf\xff\xaf\x1a\x12\x806\x08\xc3\xb6\xe1s\xed\xf0\xa1\xd58\x04>\x84=\xa1\x0f\x80n:\xcfn\x86m\x8f\xef\xce\xc8\xbd\x8f\xb2,\x08{S\xfa/?\xf0\xb9\xec\xa73\x81\x0f\x80\xa5k\xbc\xc4\x9a\xc0\x87\xb07\x85\xb7\xaf\xffG\xf6\xf3\xbf\xf7k\x1a\x15\x80e\x09\xc3\xb6\xb7\x930.\x9a\xfe\xb2\xc0\x87\xb0\xd7\xd0\xe7\xf3\xff\xfd\xcb?\xff\xbb\xbf\xf65\xad\x0a\xb0\xd6\x86\xe5\xf6l\x09\xf7s\x1aC\xdex\x96\x1b\x11\xf8\x10\xf6\x9a9\xfa\xd5\xb3\xe7#\xad\x0a\xb0\xde\xbe\xfe\xf8\x0b\xfb\x0b\xbe\x8b0l\xfbq\xd1\xe4y\xdc\x98I\x1b\x08{\x0d\xfe\xf8~\xe3\xfck\xc2\x1e\x80\xb07(/^.\xe0\xa6\xef]\x16m\x1e\xf4\xf0!\xec\x09{\x0043\xef\xde\xbd0l{\xf8z\xed\xbcy\xd2\xc3\x87\xb0'\xec\x01P\xd3\xd7\x1f\x7f\xe1QyqQn\x1b3\xde\xd4\xedZ\xb7\xc54\x930\x9a\xd2\xc3\x87\xb0'\xec\x01P\xdf\xde\x8ca/\x0c\xdb\x16Y\x8d\xday\x02\x1f\x08{\x00\xacF1\xe5\xef\x85%\xcf\x0e\x9b\xd4\xce\x13\xf8@\xd8\x03`\xc9\xbe\xfe\xf8\x0b\xc3\xf2b\xb3\xc1\xaf\xccT;O\xe0\x03a\x0f\x80\xe5\xab\xfb\xd9\xf0\xf1\x92g\xb3\xd6\xce\x9b'\x936\x10\xf6\x84=\x00*\xd4,\xc52\xd7\xday\xf3\xa4\x87\x0faO\xd8\x03\xa0\xdaC\x9f\x0fa\x12\xc68\x06\xbdWm}\xf0\x02\x1f\xc2\x9e\xb0\x07@\xb5\xd7k\xef\xcde\xc93\x81\x0f\x84=\x00Z\xe0\xeb\x8f\xbf\x10>#B)\x96\xa5\xd6\xce\x13\xf8\x10\xf6\x84=\x00\x96gXn\xeff\x0bX\xf2lYL\xda@\xd8\x03\x80\x9e\xfb\x8c&@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8@\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00@\xe0\x03a\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0f\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x84=\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x0
0\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x04>\x10\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00@\xe0C\xd8\x13\xf6\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x81\x0faO\xd8\x03\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\xd3\xaa\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x9e\xb0\x07\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 
\xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\xb0\xb6\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x1e\x07>a\x0f\x00\xa0\xc7\x81O\xd8\x03\x00\xe8q\xe0\x13\xf6\x00\x00z\x1c\xf8\x84=\x00\x80\x16\x05\xbe2\x9c=\x12\xf6\x00\x00z\x1c\xf8J\x87eH\x1b\x08{\x00\x00=\x0c|\xb1w/\x04\xb4\xd1\x1cnK\xd8\x03\x00h[\xe0+\xed\xc7\xcb\x99\xc2\x95\xb0\x07\x00\xd0\xde\xc0w\x1b\xac6\xcb\xd0\xb6'\xec\x01\x00\xf4(\xf0\xc5\x80\xb7yO\xf8\x13\xf6\x00\x00\xfa\x10\xf8\xee\x09x\xbbM&o\x08{\x00\x00-\x0e|1\xd8\xed\xd6\x08\x81\xc2\x1e\x00@\x17\x03_\x22\xd8U\x06/a\x0f\x00\xa0\xdb\x81/9yC\xd8\x03\x00\xe8@\xe0+C[\x08Y\x9b\x89]\xf6\x85=\x00\x80\x0e\x07\xbe\xacz\xd8v\xe7\xf5\xc9\x1b\xc2\x1e\x00@G\x02_\x0cr;5v\xdd\x17\xf6\x00\x00:\x18\xf8\xb2\x07\x86k\xef1\x12\xf6\x00\x00V'\xbf\xbe\xbe\x9e\xea\x17\xcb\x00\xf7\xaa\xbc\xd8\xa8\xb9\xfby\xb9m\x09{\x00\x00\xcb7U\x0f_\x9c\xac\xb1\xd1\xe0W\x84=\x00\x80.\x05\xbel\x8a\xa5\xd3\x84=\x00\x80\xd5h<\xa4\x1b'k\xbc\x14\xf6\x00\x00\xbaa\x9a\x1e\xbeB\xd8\x03\x00\xe8\x8eF=|on}\xf5Qyq\x915;\x7fO\xd8\x03\x00X\xa1\xa6=|{\xc2\x1e\x00@\xbf\x03\xdf\xfe\x92\x1e\x97\xb0\x07\x00\xb0\xec\xc0\xf7\xe6\xd6W\xb7\xb3\xf9\x95W\x11\xf6\x00\x00\xda\x16\xf8\xb2\xe5\xf4\xee\x09{\x00\x00\xab\x08|q\xb2\xc6\xde2\x1ePy_C\x87\x05\x00`~j\xcd\xd2\x8d+k\xbcX\xe2\xe3\xba,\xb7\xc3r\x1b\xff\xc6\xf9\xd7^9L\x00\x00\x8b\x0f|\x17\xe5\xc5\xe6\x0a\x1e\xdfU\xb9\x1d\x87\xf0W\x06\xbf3\x87\x0b\x00`\x01\x81/\x0e\xb1~\xd8\x82\xc7z\x9e\xdd\xf4\xfa\x1d\xeb\xf5\x03\x00\xa8\xaf\xce9|\xa3\x96<\xd60C8\x0c+_\x94!t\x1cg\x0d\x03\x00P!\xd9\xc3\x17'k|\xb7\xc5\x8f?\xf4\xfa\x15\xbfq\xfe\xb5c\x87\x12\x00\xe0~U=|\xa3\x96\x87\xbdCa\x0f\x00 
\xed\x8d\x8a\xeb\xf7[\xf6xM\xe2\x00\x00\x98W\xe0\x8b\x9356[\xf28M\xd8\x00\x00\x98w\xe0\xcb\xda1\x9c{\x94\xe9\xcd\x03\x00\x98\xc9\xbd\x936\xde\xdc\xfa\xea\xa0\xbcx\xb9\xa2\xc7\xa4\xe82\x00\xc0\x1c=\xd4\xc37Z\xc1c9\x8a!o\xe2\xb0\x00\x00\xf4'\xf0\xe9\xcd\x03\x00Xv\xe0{s\xeb\xab{\xd9\xe2'k\x9c\xc4\x90\xa7\xa4\x0a\x00\xc0\xb2\x03_\xb6\xb8\xde\xbd\xd0\x9b7\x8eA\xefB\xd3\x03\x00,\xc7\xf7M\xdaX\xd0d\x8d\xd3L\x81d\x00\x80\x95y\xbd\x87o4\xa7\xdb\x0d\x05\x92\xc71\xe8]hf\x00\x80\xfe\x04\xbe\xd0\x9b\x17\x86l\xc7\x9a\x16\x00\xa0e\x81\xef\xcd\xad\xaf\x86\xb07\xcdd\x0d\xbdy\x00\x00]\x08|Y\xf3\xde=\xcb\x9d\x01\x00t\xc0\xc7\x936\x1aL\xd6\x08\xbdya\xf2\x85\xe5\xce\x00\x00:\xe2\xb6\x87o\xbfb?\xbdy\x00\x00\x1d\x0f|\xa3\x07\xae\xb7\xdc\x19\x00@\xd7\x03_\x9c\xac\xb1q\xe7g\x96;\x03\x00\xe8S\xe0\xcb>\xe9\xdd\xd3\x9b\x07\x00\xd0\xd3\xc0\x17\x02\xde\x9e\xde<\x00\x80~\xfa\xff\x02\x0c\x00\x1eE:\x8bH\x8b\x05S\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x0f\xb6\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00@\x00\x00\x00@\x08\x06\x00\x00\x00\xaaiq\xde\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x12t\x00\x00\x12t\x01\xdef\x1fx\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x00\x0f3IDATx\x9c\xe5\x9by|TU\x96\xc7\xbf\xaf*KU\xaa\x0a\xb2\x90\xa4*\x8e\xa3\x01\x82c\x98\x86@l\xb7@\xc0fQ\x16\xa1\xb5\x05\x5c\x80\x06\x84\xb0((\xc8\x08\xd2\x0dQTF\xa4\x05Y\x14\x08K\x90\xc8\x9anA\x09\x81\x04ADtD\x0d\x04\xdbFIB@gHU\xc0\x90\xa4\xeaU\xaa*\xa9zw\xfe\xa8\xa4$\x92\x04*\x9b\xd3\xce\xef\xf3y\x9fT\xee;\xf7\x9c\xdf9\xef\xdc\xe5\xdd{\x9fD\x03HMM\x0du:\x9d\xcf\x02#%IJhH\xe6\x9f\x05B\x88|\xe0}`\xcb\xd2\xa5K/\xfc\xfc\xbe\xf4\xf3\x82y\xf3\xe6\xbd\x08<\x13\x1e\x1e\x1e\x1a\x1f\x1fO||<\x00\x9d;wn[\xa6\xad\x8c\xe2\xe2b\x00\xce\x9c9\xc3\x993g\xb8r\xe5J\x05\xb0r\xe9\xd2\xa5/^-\xe7\x0b@jjj\xa8\xcb\xe5Z\x01L\x188p 
\x03\x06\x0chO\xbem\x8e\xc3\x87\x0f\xf3\xe1\x87\x1f\x02l\x09\x0e\x0e\x9e\xfd\xd2K/U\x00\x04\xd4\x09\xb8\x5c\xae\x15\x1a\x8dfBJJ\x0a&\x93\xe9\x97\xe2\xd9f\x180`\x00\xf1\xf1\xf1\xa4\xa5\xa5Mp:\x9d\x00\x13\x01\xd4\x00\xf3\xe7\xcf\x7f\x16\x98?c\xc6\x8c_\xa5\xf3u0\x18\x0c\xdcv\xdbm\x9c8q\x22\xa1O\x9f>\x95\xc7\x8f\x1f\xff\x5c\x9d\x9a\x9a\x1a\xeav\xbb\xf7\x0c\x1a4H\xd3\xa3G\x8f_\x9ac\x9b\xc3`0 I\x12\xe7\xce\x9d\xbb{\xe0\xc0\x81\xebU.\x97k\x82V\xab\x0dMJJjW\x22\xa2\xa4\x18w\xe6rDIq\xbb\xda\x05HJJB\xab\xd5\x86\xba\x5c\xae\x09\x01\xc0\xc8\xee\xdd\xbb\xa3\xd1h\xda\xc5\xb8p\xc8\xb8?X\x8f\xe7\xabC\x00\xb8\xbf\xc8E}\xc7 \x02FLE\xd2\xea\xdb\x85\x83F\xa3\xa1{\xf7\xee\xe4\xe5\xe5\x8d\x0c\x00\xfa\xd7\x0dum\x8d\x9a\x83[q\x1f\xdb\x03N{\xbdr\xf7\x89\x1c\xdc\xa7\x8f\x13\x90\xfc\x10\x81\x0f\x8co\x17.\xf1\xf1\xf1\xe4\xe5\xe5\xf5W\x01m\xfe\xf4=\x85\xa7\xa9z\xf1\x09\xaa\xb3\xb7\xa2\xc86\x14\xb7B\xc0\xa0\xb1\xf5\xfe*\xb2\x8d\xea\xec\xadT\xbd\xf8\x04\x9e\xc2\xd3m\xca\x07~\xf29\x00 &&\xa6M\x8c(e\x16\x5c\x19K\xf1\x14\xe6\xfb\xca\x02z\xf6!\xe8\x0fO\xa1\x8a0\xe2\xcaJ'p\xc8x\xd4w\x0e\xa6\xfaoo\xe1>}\x1cq\xb9\x04\xc7\x9b\xcf\xa2\x8eK x\xdc<T\x11\xc66\xe1V\xe7s\x00\xb4~\x06\x88*\x19W\xd6\x16\xaa\x0fg\xfa\xca\xd47wE3z&\xean?\xcd\xac\x15\xb7\x02\x80*\xc2\x88&\xe5e<\x05\xf98w\xaf\xc6\xf3\xdfE(\xdf\x9e\xa4f\xc1\x18\x82\x06\x8c\x22x\xf8\x04\xa4\x90\xd6\xed\x1f\xeae@k\xc2uh7\xae\x0f\xd2\x11\x0e\x19\x00)D\x8f\xe6\xd1Y\x04\xdd;\xe4\x1aY\xe1Q\xea\xfd\xaf\xee\x96\x80\xee\xcf\x9b\xa8\xfe\xec\x00\xce\x9d\xab\xbc\x81\xcc\xddE\xf5'\xfb\x09\x1e1\x91\xe0A\xa3[\x9bn\xeb\x05\xc0\xfd\xddI\xec\x1b\x97 ~4\xfb\xca\x82\xef\x1f\x83f\xe4\xa4F\x9f\x9ep+\x0d\x96\x07\xdd;\x84\xc0\x84\xbe8\xdf\xdf\x8c+g\x17\xc2f\xc5\xb1m%\xce\x9c\xdd\xe8&/ 
\xe0\xdfz\xb7\x16\xed\x96\x07@\xb9l\xa6\xea\xdd7\xa9>y\xccW\x16x{ot)\x7fF\xd5\xa9\xe9Y\xa5\xe2i8\x00\xe0\xcd\x1c\xedc\xb3\x08\x1e4\x0a{\xda+\xd4|{\x12J/b}\xf5)\x82z'\x132\xf6YT\x91-\x9f\xb56;\x00\xc2n\xc3q`'\x8e\xbfm\xf4\x95\xa9\x22M\xe8\xa7-\x220\xfe\xc6\x9ePc\x19p5T\x9dL\x18\x16\xbcE\xcd\x99\x93\xc8\xeb\x16\xa3\x5c6\xe3\xfa\xe2(\xae/\x8e\xa2\xfd\xc3d\xb4C\x1eE\xd2\x19\x9a\xebF\xf3\x02\xe0<\xba\x0f\xf9\x9d\xe5\x08{m;\xd7\x19\xd0=2\x05\xed\xb0\xc7\xea\xc9\xd5\xfc#\xcf\xf7;\xb0{\xe25z\x9a\xca\x80\x9f#0\xbe7a\xab\xf6\xe2\xd8\xbf\x03\xfb_7 \xec6\xec\xbb\xd3\xa8\xda\xbf\x1d\xfd\x1f\xe7\xa0\xe9\xff`s\x5c\xf1/\x00\xd5\xff\xc8CN_\x8e\xfbB\x81\xaf,d\xf8c\xe8FM\xa9\xf7\x14\xaa\xb2v\xe0\xc8\xde\x81\xe7\x92\xb9^}\xcd}\xc3\xd1\x8dJA\x1d\xe5M]q\xe3\xfe\xfb\xa0\x1d\xf6\x18\x9a\xfe\xc3\xb1gn\xa0*k\x07\xc2&c]\xb3\x98\xaa\xac\x9d\xe8'\xce!\xa8\x81@7\x05\x95?\xc2\xe5\x8b\xa6Ss\xbe\x10!$\x02\xe3\x13\x09_\xb6\x0d\xfd\x849\xf5\x9c\xb7\xaeY\x8c-}\x05\xeeR\x0bBH\xf5.\xc7\x91\xfd\x94\xcd\x1d\x8b\xe3\xa3,\x00\x84\xb8f=\xe6\x86 
\xe9\x0c\xe8'\xcc!|\xd96\x02\xe3\x13\x11B\xa2\xe6|!\xe5\x8b\xa6\xfb\xad\xcb\xaf\x0c\xa8#\x1c:\xefu4w\xf5\xbb\xe6\xbe\xbcs\x03UG\xf6S\xb7\xce\xa2\x8e4\xa1\x1f3\x99\xaa\xac]\xd4\x5c(@\xa5\xd3\xa3\xc82\x95\xab_F\x1d\x19\xd3\xec\x00\xf8\xc8\xc7v#l\xf1:\x9c'>\xa6b\xe9\xf3\xcd\xd2\xe1W\x06\x08\xe1\xbd\x1ar^\xb1\xdb\xb0\xef\xdb\x85\x10\x10|g?:>\xbd\x10Ig\xa0b\xf5\xcb\xb8/\x99\x89x#\x83\x887\xdeE\x15iB\x08\xa8X\xb5\x18!\x9a\xc5\xf9\x1ah\xee\xea\xe7\xe3\xe6/\xfc\x0b\x80\x22!\x94\x86\x9f\x9a\xe3\xf0~<\xb2\x8cP$\x02n\xe9F\xf0]\xfdp[,\x04\xdcr\x1bB\x91\xa8:\x92\x8d:\xcaD\xc7\x99\x8b\x10\x8a\xe4m\x22\x8d\xe8j\x0e\x9a\xe2\xd6\x14\x9a\xd5\x04\x1a\x82\xe3\xc4'\xbe\xfb\xf6\xc3\xd98N\x1cC\x15e\x22\xe2\xd5\xb7q~\xfe1\xe5+_A\x0a\xd1\xe3<q\xac\xc5\xa9\xef/\xb7\xa6\xe0g\x13\x90\x1a5Tw/\xe4w\xc3q\x97Zp[,\x84\xcdZ\x88J\xa7G\x1deB\x08\x09\xeb\xf6\xcd\xa8#cjS\xb6q]\xcdAs\xf5\xb5Z\x06\xb8K\xcd\x08!\xe1*\xf6\x8e\x12\x1e\xd9N\xd5\xe7\xc7\x08(.\xe4\xca\x9b\xaf\xa2\xbd;\x19\xd7\xdfO\x12\x10\x1b\xe7\xd5\xf5_\x9f\x5c\xa3\xa3\xba\xb8\x10\xc7\xe7\xde\x19e\xf0oz\xa3\xf9M\xafV\xe1\xd6\x14Z-\x00\xea\xc8\x18j,\xa5T\x9f+B{O2!w\xf7\xa5l\xc5\x12\x00t\x03\x87\x121\xfbOTn\xdb\x84uo&\xa1OL\xf4\xe9\x92?\xcc& 
\xcaD\xe5\xf6\xcd\xb8\xfe~\xea*\x8d\xe9\xb5\xf5\x16\xb4\x98[S\xf03\x00\x8d\xdf\xd3\xdc\xdd\x17\xc7\xd7^\x07\xb4w\xf5E{O2\xaaw\xd3\x91$\x90\x0f\x1d@\x0a1\x10:v\x12\xd6\xbd\x99\x5cY\xbf\xdaW\xafl\xf9\x92\xfa\x84\xa2M\xdeE\x13\xbb\x8c|(\x9b\xc0\xd8\xaet\xf8\xfd\xf5\xdf\x02\x9b;\xa2\xb4Z\x1f\xa0\x1f4\xd4\xd7\xd6\xcb\xd6\xaf\xa6l\xdd*\xdc\xa5\x16Lk\xd2\x09K\x99\x85|\xe8\x00\xa5/-\xc0#\xdb\x11B\xa2\xc3\xe3\x93\x08\x8c\xed\xe6\xd3\xa9\x8e2\x111\xfbO\xdc\x94\x9eI`\xe7\x9f\xca\xcb\xdfMG\x91\xe5\x16qk\x0a\xad6\x0c\xaatz\xa2\x16.A\x0a1\xa0\xd8\xec\xc8\xb9\x07\x09\x88\xed\x06\x02:\xfc~4\xa1OL\xc2y:\x1f\xdd\xc0\xa1\xe8\x06\x0e\xa5\x22c\x0b\x8a\xf0\xea\x0b\x8c\xedF\xcc\x9at\xf4\x83\x86\xa0\xc82\x8e\xfc|\xc2Sf!\x14\x09\xc5f\xc7v\xe8@\x8b\xb85\x05\xbf\x02\xa0\x08\x09\xa5\x89(\x07u\x89\xc3\xf4\xfaJTQ&\x14!\xe1**\xe2\xfb\xf1c\xa8|/\x13k\xeeA\x14!\x11\xf9\xdc\x0bD>\xf7\x02\xc1=z\xe1**\x22\xb8G/\x9c\xb5r\xf6\xcf>\xa1bO&\xe8\x0ctxh\x14\xda{\x92Q\x84\x84\xfd\xb3\xe3-\xe6\xd6\x18Z\xad\x13\xacCP\x978n^\xbb\x89+\x19[\xa8x\xef\xafxlv.\xaf}\xab\xf6\xae\x845\xf7 \xb6\xdc\x83\xb8\xce\x15\xf1/k7\x11\xdc\xa5+\x8a,\xf3\xe3\xda5\x98S\x17\xa2\xd2\xeb\x09}\xe8\x11\x00B\xeeMB\xfe\xf48U\xf9\xd7_$m\xa7y\xc0\x8du6*\xbd\x9eN\xd3\x9f\xe6\x96\x8c\x9d\x04u\xee\x8a\x10\x10\x10mD\xd3#\x81\xd2\xd7_\xa3*?\x9f\xa8\xb9/\x10\xdc\xa5+\x005\xa5\x16$\xbd\x1e!\xc0c\x93\x09\x1f?\x11\x80\xe0.q>\x9b\xaesE\xad\xc2\xed\xe7h\xf5\x0c\xb8\x1a\x81F#\xd1\xff1\x9f\xef\xa7N\xa1\xda\x5c\x8a\xdbf' 
\xda\x84G\x96)Y\xb4\x10}R\x1f\xaaN\xe7\xa3\xc82\x81F#\x86\xc1C\xb0\xe6\x1c\xa4\xeat>\x9a.]){\xe7\x1d\x9fM\xcfu:\xc2_|&\xd8\x18<\xb2L@\xb4wt\xf0\xd8\xecDN\x7f\x0a\x95\xce\x80\x10\x12\xb6\xe3\x9f\xd2a\xf0\x03\xc4n\xdbA\xec\xb6\x1dt\x18|\xbfw2Ux\x8e\x0b))\xd8\xf3O\xffd\xf3:O\xb7}F\x01?\x8c\xd4X,\x5c\x5c\xb8\x88\x1ff?G@\xb4\xd1W\xd7\xbct\x19*\x9d\x81[\xd3\xd2\x10BB\x9f\xd4\x87@\xa3\x11\x8f,S\x99\x93\x8b\x10\x12\xa5o\xbd\x8d\xb6g\x02]\xb6o\xbf\xcaf\xd3v\xdbi*|}\x99\x1a\x8b\x85\xcb[\xb6Ry0\x87@\xa3\x91\x7f]\xb1\x9c\x90\x84\x9e\x94\xbc\xf6:\x95\x07sp\xdbdnzy\x06\xa2V\x9f\xb3\xb0\x88\x8a\x839T\x1e\xcc\x01\xc0\xd0'\x89\xe8\xa7g\xf8\x82Rg\xf3\xf2\x96w\x88\x99\xff<\x81\xc6\x867J\xdae\x22\x84\x90\xbcW\x03\xf0\xc82\x97\xd3\xb7R4f,U\xa7\xbe&f\xde\xf3t\xdd\xb9\x8d\x90\x84\x9e\x00\x18\x92\x92|\xf5\xed\xa7NS\xf2\x9f\xcb@H\x94\xaeY\x8b\x22\xdb1\xf4\xe9\x03B\xf29\x0fP\xbaf\xad\xaf\x8eb\xb3S4f,%\xaf-\xa3\xc6b\xf1\x8b[Shq'\xe8\x91e\xcav\xbf\xc7\x95\xcc=H\x12D?=\x83\xf0Q\x0f_#g\xe8\x9b\xe4\xab_q \x97 \x93\xb7Y\xdc\x96\xbd\x07\xb5^\xcf\x0f\x0b^D\xdf\xc7\xdb\x1cj,\xa5\x98W\xadE>\xfe) \x11>\xeaa\x8c3\xa7Sq \x97\xcb\xe9[\xb1~\xfc\x19\xe1\xa3\x1e\x22b\xf4\xc3\xa8\xf5\xfaF\xb9\xdd\x08Z\x14\x80\xf2\xec\x5c,\xab\xd7!!\xae!\xd4\xa0\xb1h\xafs\x1d\x1f\xb8\x1fM\x5c\x17\xe4\x93_\xfb\xe4\xab-\xa5\x04\x19\xa3\xb9\xb8\xe4/T\x1c\xc8\xad\xad!a\xe8{/\xc6\x99\xde\xb5\xbe\xd0!\x83\x09\x1d2\x98K\x9b\xb7r%s\x0fe\x99{\x89\x18\xf5\x10Q\x93\xc6\xb5\xef\xcbPyv.\x977o\xa5\xdar\x89\x88\xd1\x0f\x135il\x93\x8e\x03\x5c\xda\xb4\x95js)\x005f\x0b5f\x0b\x81\xd1Q\x98W\xad\xc5Yp\xcewi\xe2\xba\xa0\xd2\xe9\xf1\xc82A\xa6hnZ0\xf7\x1a]Q\x93\xc6\x131\xfaa\xcav\xef\xa1l\xf7\x1e*\xb2s\x9a\xdd\x07\xf8\xb7/\xa0x\xd7\xb1/\xbe\xf2:a\xc3\xee\xe7\xd6I\xe3\x082\xdd\xd8\xee\xad\xfd\xd4i_\xfd\xf2\xfd\xde\x0eO\xad\xd7\xe3,(B\xd7\xab'\xf6\x93\xf9\xc4\xaey\x03\xb5AG\xd1\x1f\xa7\x01`\x9c9\xad\xd1\xc0\xaa\xf5z\xa2&\x8d#t\xc8 
~\x98\x9f\xea\xd3\xed/\xfc\x1c\x06\x95\xab.\xffB.\x84@\x08\x85\xc8I\xe3\xf8\xf7\xcf>$r\xd28\x82\xe3:\x13\xbb\xe6\x0dB\x87\x0eF\x08\x05\xb5AG\xe5\xc7\x9f\x22\x84B\xa01\x8a\x0e\xc9M\x1f\xdb\xa96[\xb8\xb4i+\x8e\x82B\x1f/\x7f\xe1W\x00nym1A\xc6(\x10\x0a\xe5Y\xd9\x14\x8d\x9fL\xe9\xc6-xl\xd7\x7f]U\xebC@(T\xec?P+/|;#5f\x0b\x08\x05M\x5cW\x9c\x85\x85 \x14t\xbd\x1a?\xb0\xe5\xb1\xc9\x94n\xdcB\xd1\xf8\xc9\x94ge\x83P\x082Fq\xcbk\x8b\xfdq\x07\xf0\xb3\x09t\xe8\xd7\x07]\xef\x04~\xdc\x99I\xd9\xaeL<V+\x976l\xa6b_6\xa693\xe9\xd0\xafo\xa3u;\x8dy\x04\xeb\xd1cT\x97\x94\xf0\xfd\xf3\x7f\x02\xa0\xa6\xc4L\xe9\x86t_\x00\x8a\xa7?\x83\xfd\xa4wQ%\xd0\x18\xdd\xa0\x1e\xeb\xc7\x9f`^\xbe\x9a\xea\xda\xa1Pm\xd0\x131f\x14\x9d\x1e\x1d\x85\xda\xe0\xff\x19\x02\xff\xe6\x01\xb5\x06\xa3\xa7L\xa4k\xc6fB\x87>\x80P\x14\x5c%%\x5c\x98\xfb\x02\xc5\xd3f\xe2((l\xb0\x9e\xaew\x82O\xdeq\xf6,\x8e\xb3g\x11B`\xcf;\x89\xe3l\x01BQ\xd0\xf5\xee\x89P\x94\xda\xdf\xf5\x8f(;\x0a\x0a)\x9e6\x93\x0bs_\xc0UR\x82P\x14B\x87>@\xd7\x8c\xcdDO\x99\xd8,\xe7\xa1\x05\xbb\xc3A\xa7. 
l\xf8\x10.\xa5mB\xce;\x85\xfc\xd5I\x0a\x1f\x9f@\xd8\x83C\x89\x99\xf3\xcc5\xa4\x82L\xd1 \x04\xda8\xef[\xa0.\xb1\x17\xd1)O\x22\xe7\x9d\xa2x\xea\xd3\xe8z\xf7\xf2\x0d5\xdan\xde\xc5S\x8fM\xa6d\xf9J\xca\xf7e\xfb\xf4\xe8\x13{\x11\x95\xf2$\xfa\xc4\x1b_4m\x0c->\x1f\xa0O\xec\x85~\xfd\x1a\xca\xf7\xed\xa74m\x13\xd5%f\xca?\xc8\xc2z\xe4(\x9d\x1e\x1fC\xf4\xd4\xc9>Y\xc7\xd9\x02\x10\x0a\x81%\xe6\xab\xe6\xaf\xde\xfe\xc0y\xf6\xac\xaf_P\x1b\xf4\x94\xae\xdf\xc8\x8f\xdbw\xf9\xde\x04\x83bLD\xa7<I\xd8\x83\xc3ZJ\xdb\x87V;!\x12\xf6\xe00:\xf4O\xe6\xf2\xf6\x9d\x94\xae\xdb\x88\xdbj\xc5\xb2n\x03W>\xd8G\xcc\xdc9t\xbc\xaf\x1f\x1e\xab\x15\xa1(\xc8_~\x89\xc7&\xe3\xba\xf8?\xc8_\xe5\xe1\xb1\xd9\x10\x8a\xc2\xc5e+\x00\x08\x8a1\xf2\xed\xb0\x91T\x97\xfc4\xe5\x8d\x9e6\x99\xc8\xc7\x1fEmh\xfeY\x80\x86\xe0w\x1f\xd0\x14\xd4\x06\x03\xc6\xa9S\xb8}\xff^:\xf6O\x06\xa1P}\xb1\x84\x0b\xb3\xe7rn\xf24<V\xab\xb7\xc76\x99\xf0X\xad\xe8\x13{\xa3K\xec\xe5Mw\xa1\xd4\xabS}\xb1\xc4Wv\xfb\xfe\xbd\x18\xa7Niu\xe7\xa1\x95\x03P\x87\xa0\x18\x13\xb7\xaeXF\x97\x8d\xeb|\xce\xc9_~\x89\xe3\xbb\xef@(DO\x9b\x02B!l\xc4p\x8c\xd3R\x08\x1b1\x1c\x84B\xe5\x91#\xde& \x14\xb4\xdd\xe2\xe8\xb2q\x1d\xb7\xaeXFPL\xdb\x1d\xe0n\xf5SbWC\x7fG\x22\xddvo\xe7\xca\xfb\x1fpq\xe9_\xf0\xd8l\x00\x9c\x9f5\x1b\xa1( 
\x04\x973\xb6aY\xbb\xde\xfb?\xde,\xbai\xde\x5c\xc2G\x8ehKj>\xb4i\x00\xea\x10>r\x04\x1d\x7fw\x9f\xd7\xd9\xb7\xd7\xe2\xb1V\x02P4\xf1\xc9zr\xc6\x19\xd3\x89\x1c\xf7D\x9b\xa4zch\x93&\xd0\x10\xd4\x06\x03\xc6\x19\xd3\x88\xcf9\x80\xfe\x8eD_\xaa#\x14\xf4w$\x12\x9fs\x00\xe3\x8ci\xed\xea<\xb4S\x06\x5c\x8d\xa0\x9bb\xe8\xbae3\xf2\x17_b~\xebmLO\xcd@\x7f\xe7o\xdb\x9b\x86\x0f\x01\x00f\xb3\xb9\xdd\xbf\x14\xd1\xdf\xf9[\xe2\xeeLoW\x9bW\xa3\xee\xa3*\x15\x80\xc3\xe1\xf8\xc5\x88\xfc\xd2Pi\xb5\xda\x0a\xb3\xd9|}\xc9_\x19j}\xbe\xa0R\xab\xd5G\xf3\xf2\xf2\xae'\xff\xabC\xad\xcfGU\xd1\xd1\xd1+\xcdf\xb3\xafM\xfc\x7f@qq1f\xb3\x19\x95J\xb5R5e\xca\x94\xa3Z\xad\xf6hFF\x06\xb5\xdf\xd3\xfd\xaa\xe1t:\xc9\xc8\xc8@\x08\xb1w\xc9\x92%\xf9*\x80\xe4\xe4\xe4\x89\x1a\x8d\xa6\x22--\xedW\x1d\x04\xa7\xd3IZZ\x1a\x0e\x87#_\xa3\xd1L\x84\xab\xf6\x9b\xbe\xf9\xe6\x9b\x84\xac\xac\xac\x8f\x9cNg\xe8\xf0\xe1\xc3IL\xf4\xef\xcc\xed\xffu\xe4\xe5\xe5\x91\x95\x95U\xe7\xfc}u\x9f\xce\xd6[L\xff\xe8\xa3\x8fn-((H?\x7f\xfe|\xff\xb0\xb00\x12\x13\x13\x89\x8d\x8d\x05\xfe\xb9>\x9ev:\x9d\x94\x94\x94\x00p\xfe\xfcy\xf2\xf2\xf2(//G\x08\xf1\xa6F\xa3y\xa9\xceyhd\xc7\xf1\xd0\xa1C\xfd\xcf\x9c9\xf3LEEE\x7f\x87\xc3\x11\xdaN\xbc\xdb\x0a\x17\x84\x10{\xf1~9~\xe1\xe77\xff\x17\xd7q\x00\x14\xc6\xb0\x7f\x82\x00\x00\x00\x00IEND\xaeB`\x82\x00\x008k\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe ImageReadyq\xc9e<\x00\x007\xf8IDATx\xda\xec\xddOllY~\x17\xf0[\x93\x8e\xf2\x8f\xc4o\xa4\xe1\x9fF\xc4~,\x185\x04\xd9#\x88h\x10#WGH\x84M\x9eGb\x93\x95\xab\x17,\xb2\x18\x9e{\xc5\xec\xde\xb5\xc4bv\xe3\xc7\x08\x09\x89\xc5+\xaf\x88X\xa4\xedeHP\xdb\x1a\x04\x1d\xfe\xa8\xed0R\x18\x02y6\x22L$4\x9a\xe7D\x84\x00I\xcc=\xed\xe3i\xf7\xeb\xaa{\xcf\xad\xbaUu\xef\xad\xcfG\xba\xaa\xf7\x5c\xd7\xf5\xe7\xdcr\xd5\xb7\xce\xb9\xe7w\x06\xb7\xb7\xb7\x19Pn\xe3+o\xed\x15\x17[7\xdf\xf8\xe0Hk\x00\xd05\x9f\xd1\x04\x90\xe4 n\x00 
\xf0A\xdfl|\xe5\xad\xad\xe2b\xb7\xd86cO\x1f\x00\x08|\xd03\xf9\x83\x7f\xeb\xe5\x03\xa0s\x06\xce\xe1\x83\xe96\xbe\xf2\xd6\xa3\xe2\xe2*\xfc\xf3\xc1\x8f\x1f\xdf|\xe3\x83+\xad\x03@W\xe8\xe1\x83r{\xaf\x85\xbd \xd7,\x00\x08|\xd0\x1f\x93\xc2\xdd^\xec\xf9\x03\x00\x81\x0f\xba\xac\x08u\xc3\xe2bs\xd2U\xd9]\xcf\x1f\x00\x08|\xd0qe\x134r\xcd\x03@W\x98\xb4\x01\x13\xc4R,/+v{\xfb\xe6\x1b\x1f\x9ci-\x00\xdaN\x0f\x1fL6J\xd8G\x89\x16\x00\x04>\xe8\xb0\x940\xf7$\xf6\x04\x02\x80\xc0\x07]R\x84\xb8Q\xf6\xe9R,\xd3\x8c\xb4\x18\x00\x02\x1ft\xcf\xc1\x82\xf6\x05\x00\x81\x0fV-\x96b\xd9\xae\xf3+\xb1G\x10\x00\x04>\xe8\x88Y\xc2\x9b^>\x00ZMY\x16\x88\xe2\xea\x19\xdf\x9b\xf1\xd7\x95h\x01\xa0\xb5\xf4\xf0\xc1\xc7\xe6\xe9\xa9\x1bi>\x00\x04>h\xbfyB\xdb\xbe\xf5u\x01\x10\xf8\xa0\xc5\xe2\xc4\x8b\xcd9o\xc6\xb9|\x00\x08|\xd0b\xa3\x96\xdc\x06\x00\x08|\xd0\xb4\xb8Z\xc6n\x037\xb5\xa9D\x0b\x00\x02\x1f\xb4S\xde\xe0m\x09|\x00\xb4\x8e\xb2,\xac\xb58\xd1\xe2*K_J-\xc5\xe3\x9bo|p\xa5u\x01h\x0b=|\xac\xbbQ\xc3a/\xc85+\x00\x02\x1f\xb4\xc7\x22f\xd6\xee)\xd1\x02\x80\xc0\x07-P\x84\xb2\xbdl\xfeR,\x13o:s.\x1f\x00\x02\x1f\xb4\xc2\x22C\x99\x9a|\x00\x08|\xb0J\xb1\x14\xcb\x93\x05\xde\xc5f\xecA\x04\x00\x81\x0fVd\x19=p#\xcd\x0c@\x1b(\xcb\xc2\xdaYP)\x96i\x94h\x01`\xe5\xf4\xf0\xb1\x8e\xf6\x96\x14\xf6\x02\xe7\xf2\x01 \xf0\xc1\x0a,3\x84\x8d\x94h\x01@\xe0\x83%*\xc2\xd7\xb0\xb8\xd8^\xe6]fw=\x8a\x00 \xf0\xc1\x92\x8cVp\x9f\x86u\x01X)\x936X\x1b\xb1\x14\xcb\xcb\x15\xdd\xfd\xdb7\xdf\xf8\xe0\xccQ\x00`\x15\xf4\xf0\xb1NFkz\xdf\x00\x08| \xf0-\xc1~\xeca\x04\x00\x81\x0f\x16\xa1\x08[!\xecm\xae\xf8a\x8c\x1c\x09\x00\x04>\xe8w\xd8\x12\xf8\x00\x10\xf8`\x116\xbe\xf2\xd6Nq\xb1\xdb\x82\x87\xb2\x19{\x1a\x01@\xe0\x83\x86\xb5\xa9,\x8a\xc0\x07\xc0\xd2)\xcbB\xaf\xc5U.\xbe\x97\xb8\xfb\xcd\xfd\xaf\xd5\xbc\x9b\xf0{\xaf\xb2\xf4s\x04\xbfx\xf3\x8d\x0f.\x1c\x1d\x00\x96E\x0f\x1f}7J\xd8\xe7\xba\xd8\xde)\xb6\xadb\x9b%\x88]\x14\x01n+\xde\xc6y\xc2\xfe\x0a1\x03 
\xf0A\x83\xca\xc2\xd5ivW\x10y\xab\xd8\xc6\xc5\xf6j\x9e;\x8a\xb71,\xfe\xf9\xc5b;.\xd9u\xdf\xfa\xba\x00\x08|\xd0\x80\x22T\x855l_\x1ff\x0d\xc3\xaf\xcf\x8b\xedq\x11\xce\xf6\x16\xb1\xfaE\x18\xae-\xb6Q\xf1\xcf\xcf\x16\xdbav\xd7\x83\xf8\xba\x91#\x04\xc0\xb2\xbc\xa1\x09\xe8\xb1\x87\xbd{\x97\xc5v\x14z\xe1\x96u\xe7\xb1\xc70\x0f[\x9c\x9d\x1b\xb6\xdd\x07\x8f\xed\xc8!\x02`\x19\xf4\xf0\xd1KqU\x8b\x10\xae\xc2\xd0j\x18\xb6\xddYf\xd8\x9b\x10\xfe\xee\x87{\x1f\xc7\xc7\xf4(\xf6@\x02\xc0\xc2\xe9\xe1\xa3\xcf\xc2\xb0\xedU\x9b\x1eP|<\xa3x\x0e\x9f\xf3\xf8\x00\x10\xf8`\xce`\xd5\xe6\xc7\x17\x86{_9R\x00,\x83!]\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00\x00\x81\x0f\x00@\xe0\x03\x80%\x19\x0c\x06\x8f\x8am\xa8%@\xe0\x03\xa0\x9fao\xa7\xb8\xb8\xd2\x12 \xf0\x01\xd0\xcf\xb0wP\x5c|Xl\x1bZ\x03\x16\xcbZ\xba\x00,;\xe8=*.\xc6\xc5\xf6\xe4\xfeg\xb7\xb7\xb7gZ\x06\x04>\x00\xfa\x11\xf6\xc2\x10\xeeI\xb1mj\x0dX\x1eC\xba\x00,+\xec\xdd\x0f\xe1\xbe\x1e\xf6\xce\xb5\x0e,\x96\x1e>\x00\x16\x1d\xf4>5\x84\x0b\x08|\x00\xf4'\xec\xa5\x0c\xe1\x9ei)X,C\xba\x00,*\xecM\x1b\xc2\x05\x96L\x0f\x1f\x00M\x07\xbd\xbaC\xb8gZ\x0d\x04>\x00\xba\x13\xf6\xcc\xc2\x85\x162\xa4\x0b@Sao\x94\xdd\xf5\xd6\xd5\x0a{j\xf0\xc1\xe2\xe9\xe1\x03`\xde\xa0\x17\x86p\x8f\x8am_k\x80\xc0\x07@\xff\xc2^\x18\xc2\x1d\x17\xdb\xf6\x8c7\xa1\x06\x1f,\x81!]\x00f\x0d{\xa3\xecn\x08w[k@\xbb\xe9\xe1\x03\xa0n\xd0kr\x08\xf7L\x8b\x82\xc0\x07@\xbb\xc2\xde\xbcC\xb8\xc0\x0a\x18\xd2\x05 5\xec\x8d\xb2\xe6\x87p\xcf\xb4,,\x9e\x1e>\x00\xaa\x82\x9eY\xb8 \xf0\x01\xd0\xe3\xb0\xb7\x95\xdd\x15R^\xc8\x10\xae\x1a|\xb0\x1c\x86t\x01\x98\x16\xf6\xf6\x8a\x8b\x8b\xcc\xf9z 
\xf0\x01\xd0\xcb\xb0\x17\x86p\xdf+\xb6\x8d\x05\xde\x8d\x1a|\xb0$\x86t\x01x\x18\xf4\xb6\xb2\x05\x0e\xe1\x02\xab\xa1\x87\x0f\x80\xfb\xb0\xb7\xec!\xdc3\xad\x0e\x02\x1f\x00\xcb\x0b{\xcb\x18\xc2\x05V\xc4\x90.\xc0z\x07\xbd\xadluC\xb8g\x8e\x00,\x87\x1e>\x80\xf5\x0d{f\xe1\x82\xc0\x07@\x8f\xc3\xde\xca\x87p\xd5\xe0\x83\xe51\xa4\x0b\xb0^A/\xac\x9a\x11\x82\x96^=X#z\xf8\x00\xd6'\xec\x0d\x8b\x8b\xab\x96\x84=5\xf8@\xe0\x03\xa0\xe1\xb0\x97\x17\x17\xefgf\xe1\xc2Z2\xa4\x0b\xd0\xef\xa0\x17\x86p\xc3,\xdc\xdd\x96=\xb43G\x07\x04>\x00\x9a\xb1\x15\xc3U\x98\x8d\xbb\x13\x7f\xb6\xabY@\xe0\x03\xa0'noo/b\xd8\xfb\x94xN_\xf0\xfa\xe52\x02\xe1\x99\xa3\x03\x02\x1f\x00\x8b\x0f\x83ge\xe1\xeb\xb5@8\xcc\xf4\x0c\x82\xc0\x07@\x7f\x03a\x11\xfe\xae\x16t\xdb\xc0\x12\x98\xa5\x0b@\xa98\xc3wSK\x80\xc0\x07@?\xc3^\x98\xe5{\xd0\xf0\xcd\xaa\xc1\x07\x02\x1f\x00-\x12\x96`+\xab\xddwXl7\x9a\x09\x04>\x00:(N\xda\xd8/\xd9\xe5\xf2\xf6\xf66/.G5o\xfaL\xeb\x82\xc0\x07@;\xe4\x15\xd7\x7f4\xd4[\x84\xbeP\xd8\xf9\xb9\xe6\x02\x81\x0f\x80\x0e\x19\x0c\x06\xa3\xac\xbc\x0c\xcb\xe9\xc3\x99\xb6\xc5\xbfC\xf8\xbbL\xbc\xf93-\x0c\xcb\xa5,\x0b\xc0]\xc0\xd9\xca\xeeV\xa5\x18\x16[\x98\xa8P\xba*E\x11p\x06=n\x8b\xf0\xfc\xf3\x8a\xdd&M\xe4\xd8\xcb\xee\x8a<[\xaf\x17\x04>\x80V\x84\x9a\x9d\xec\xe3\x82\xc2\xc36\x86\x94x\x0e\xdd\xfb\xf1\xbf\xe71L]\x15\xdbY\x5cAcQB\x98++\xc3rX\xdc\xff\xd5\x84\x10|\x15{\x06\xdf+\xbbq5\xf8@\xe0\x03Xt\xc8\x0b\x81d/\xebF]\xb9\xad\x07\xff\xde\xcd\x1e\xf46\x16\xcf\xe5>\x04\x9e4\x19\x00cOgY\x19\x96\xeb\xecn\xe6\xee\xb40wR\xdcF8\x9f\xef\xa9W\x1c\x08|\x00\xcb\x0ay\x8fb\xc0\x0b!f\xbbc\x0f\x7f\xab\xe2\xfa\xef\x87\xc0\xe2y\x86 v\x16\x02`\x9cD1\xab\xaa2,yq\xfb\xaf\xcan \x9c\xcf\x17\xc3\xf5\xa4\xe1p5\xf8`\x05L\xda\x00z\x1b\xf4\xe2\x0a\x11W\xc5\xf6\xa2\x83a/\x18\xd6\xd87\xf4X\x86\x12*\xef\x15\xcf\xfbU\xb1\x8d\x8bm\xaff\x9b\x85\xfb{R\xb2\xcby\x11\xe6\xc6\x897\x17\xee{R}\xbeW^\x9d 
\xf0\x014\x19\xf4\x9ee\xdd\x9e@\xb05\xe3\xefm<\x08\x7f\xe1\xbc\xba<\x0e\xd5V9\xaa\xb8>O}\x00\xb1\x17pR\xe0\xbc\xf0*\x85\xe53\xa4\x0b\xf4\xd1\xc1\x1cA\xef&\xfbxrD\xd8^\xad0\xa4l6t\x1b!\xf8>+B\xdfi\x08u\x93&M\xc4\xc9\x16e\xbd\xa0\xc7u'[\x84\xfd\x8b\xdb=\x8c\xf7\x7fO\x0f\x1f\x08|\x00\xf3\x09=KE\xc8\x08\xe7\xb0\xed'\xfeJ8\xa7\xec,n\x17U\xe7\xa7-\xd9\xdb\xd9\xc7%b\x86\xf1r\x9e\x1e\xcb0\x5c\xfb$\x9e\xef\x97\xdf\x0f\xcf\xc6\xf3\x1c\x8f*B\xf0\xc1\x8c\xc7#\x8fC\xc5\xf7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x11G%\x81/\x84\x9d\xfb\x99\xad'm~\x12\x0fz\xd4\xbe\xff8\xe3\xd0\xec\xf0\xc16K/`\xf8\x9d\x17q\xe8;O\x08\x92Gs\x06\xe10\xb4{\x15\xefC\x0f\x1f\x08|\x00\x8d\x04\xa5\x8b\x22\xcc\x84U\x1f\x1e\x0eQ\x1e\x17\xdb\xb8\xeb5\xe0b\xfd\xbbq\xdc\x1e\xd6\x13\x1ce\xf5'\xa6\x84\xe0\xf7\x0f\x8b\xed\xf3%\xfb\x5c\xc7\xf5r\xe7y\xcc\xaf\xe2\x04\x92\xf7\x17\x5c?\x10\x98\xc2\xa4\x0d\xa0\xafB/_\x18\x8a\x0c\xe7\x90}\xb6\x08\x1a\xa3>\x16\xfc\x0d\x01\xaa\xd8B\x0f\x5c\x08~\x8f\x8b\xed\xdd,}\x89\xb3\xa0\xaa\xc7\xed\xa0\xa1\xc7y\x16\x1f\x1b \xf0\x014\x16\x84Bo\xde\xa3\xd0;\xd5\xb2\xf3\xf2\x16\xf9\x9c\xaf^\x0b\x7f\xa1\x00\xf2u\xc9\xaf\x84`\xf8\x97J\xae?or\xd8;<6\xafL\x10\xf8\x00h6\xfc\x1d\x14\xdbV\xf1\xdf/\x17\xdb\xe9\x84\xdd>Wq3#-\x09\x02\x1f\x00\xdd\x08\x7fa\xf5\x8dp\x0e\xdd}\xaf_\x18\xea\x0e\xb3\x93\xcb\xce\xdd{>i\xbd\x5c@\xe0\x03\xa0\xdd\xc1\xef\xa3^\xbf\xec\xae\xa0\xf3N\xc9\xae!\x10\xe6Z\x0c\x04>\x00\xbak\xee\xf5r\x01\x81\x0f\x80\x96\x8ae\x5c\xca\x8aR_\x9a\x5c\x01\x02\x1f\x00\xddV\x15\xe6\x0e4\x11\x08|\x00tT\x5c/w\xb7d\x97\xd3>\xd6*\x04\x04>\x80u\x09{a\xbd\xdc\xbcb7\xbd{ \xf0\x01\xd0a!\xcc\x95\xad\xbb{\xa8\x0c\x0b\x08|\x00t\xd4`0\xd8\xca\xca{\xef\xc2j\x1c&j\x80\xc0\x07\xb0\x90 rTl\x17q\xb8\x91\xc5Q\x86\x05\x04>\x80\x95\x84\xbdaq\xf1\xb4\xd8\xb6\x8b\xed*\x96\x0ba1\xed\xfc\xa4d\x97\xb0^\xeex\x01\xf7;\x16\xe6A\xe0\x03x8\x84\x18z\x9f>\x8c\xb3HY\x5c;O\x92/\x22\xecew\xb5\xfeB\x98?\x13\xe6A\xe0\x03\xd6P\x11\x00\x0eb\x18x\xdd\x8b\x18\x16h\xa6\x9dGS\xda\xf9\xdeq\xd3eX\x1e\x84\xbd{B\x1f\x08|\xc0\x1a\x86\x90\xaa\xf2 
C\xad\xd4X;\x97\xf5\xee5\xbe^\xee\x84\xb0woC\xe8\x03\x81\x0fX/\x07Y\xf9\x04\x82\x91&ZJ;\x1f-\xa0\x0cKY\xa0\x13\xfa@\xe0\x03\xd6A\xecu*+\x0frl\xa5\x87F\xday\xab\xb8xV\xb2\xcbu\xd1\xce\xf9\x02\xeezXl\x97B\x1f\x08|\xc0z+\xebu\xba\xc9\xac\xf4\xd0\x94q\xc2qh\x5c,\xed\x22\xf4\x81\xc0\x07\xac\xb9Q\xc9uGj\xc1\xcd/\x96a)[/7\x94a9Y\xd4\xfd\xd7\x0c}J\xb6\x80\xc0\x07\xf4,\x88\x84\xb07mi\xaf\xd0\xbbg\xa5\x87f\x8c+\xae_x/\xaa\xd0\x07\x02\x1f\xb0\xbe\xf6J\xae\xd3\xbb\xd7L\xa8\xaeZ/\xf7y\xd1\xce\x17\xcbx,\xf1x\x8eb\x98\x9f&\x94l9q\xe4@\xe0\x03\xfa\x11D\xb6\xb2\xe9\xab=\xe8\xddk\xa6\x8d\xab\xca\xdd4^\x86%!\xf4\x85p9\xac\x08}\xbbj/\x82\xc0\x07\xf4CY\xef\xde\x89\xde\xbdF\xb4r\xbd\xdc\x18\xfa\xaa\x86\x91\xf7c\xef$ \xf0\x01=\x0d|z\xf7\xe6\x14g\xbc\xee\x97\xec\x12\xca\xb0\xac\xac\x9d\xe3Z\xbd\x87\x15\xbb}\xbdx\x1e{\x8e&\x08|@7\xc3H\x18j\x9c6k\xf4rY\xe7\x94\xf5\x5cU\x98\x1b\xad\xfa\x01\xc6\xba\x7f\xa7\x15\xbb\x8d\xe3\xf0? \xf0\x01\x1d3,\xfb\x80\xd7<s\x07\xea\xbd\xac\xbc\x0c\xcbi\x8b\x8aY\x87\xe0Y5s\xf7\xc4\xcc]\x10\xf8\x80\xee)=\x7fO\xf3\xcc\x15\xf6\xaa\xd6\xcb\x0dZsn\x5c\x8d\x99\xbb\x86\xf9A\xe0\x03:f8\xe5\xe7\x97\x0bX\xcbu\xddT\x95a9l[\x1b\xc7!\xfc\xbcb\xb7\xfdX\xb7\x11\x10\xf8\x80\xb6\x8b=P\xd3\x02\xc9\x99\x16\x9a\xabm\xb7\xb2\xf2\xde\xbb\xd6\x96\xbb\x89\x13H\xaa\xce\xe7;\xb2\xfc\x1a\x08|@7\x94}`\x0b|\xf3\xc9\xb3\xf22,\x07-/w3\xca\xca\x87v\xc3s\x1b;\xcc \xf0\x01\xed7\x14\xf8\x9a\x17\xd7\xcb-+\xc3r\x1eK\xa1\xb4\xd6\x83\xf3\xf9\xca8\xc7\x13\x04>\xa0\x03\xa6\xf5\xf0]*\xb6<\x97\xaa\xa1\xda\xbc\x0bO\xa2x\x0d\x84@7ih7\xcc\xe4\xfdb,\xe5\x02\x08|@\xcbM+\xafq\xa5if\x13'3l\x97\xecr\xdc\xa22,)\xc2y\x88\x0f\x87v\xc3D\x93\x1d\xf5\x19A\xe0\x03\xbacZ}8\x1f\xe6\xb3\x85\xbd\xaa2,K_/w^q\x16qxNz\xf5@\xe0\x03:\x1aN2\x81\xafQ\xa17\xacl\xa2\xc6Q\x17K\xdd\x84\x90\xa7W\x0f\x16\xeb\x0dM\x00,\xd0\xdb\xc5\xb6\x15\xb7a\xbc\x0ceZ\x9c\xbfW?@\x87\xb6{V\xb2\xcb\xb5\xde1@\xe0\x03\x96*N\xca8\xd3\x12\x8d\x19W\x5c\x7f\xa0\x89\x80i\x0c\xe9\x02\xb4\x5c,\xc3R\xb6^\xeey\x9c\xf1\x0a 
\xf0\x01t\xd4\xb8\xe2z\xbd{\x80\xc0\x07\xd0U\x83\xc1\xa0j\xbd\xdcc\x93\x1d\x00\x81\x0f\xa0\xbba/\xcct\xceKv\x09eX\xf4\xee\x01\x02\x1f@\x87\x85\xb0WV\x86%\xb7b\x09 \xf0\x01t\xd4`0\x08\xcb\xd2=-\xd9%\x94a9\xd2R\x80\xc0\x07\xd0]Uan\xa4\x89\x00\x81\x0f\xa0\xa3\x06\x83\xc1^V]\x86\xe5LK\x01\x02\x1f@w\xe9\xdd\x03\x04>\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xb0\x8b\xeb\xe5\x02\x02\x1f\x00\xd9\xf7\xd7\xcb-+\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00\x1d\x96g\xe5eX\x0e\x94a\x01\x04>\x80\x8e\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x04>\x80\xee\xca+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eX\x8e\x95a\x01\x04>\x80\xee\x86\xbd\xb0^n\xd9D\x8c0Q#\xd7R\x80\xc0\x07\xd0]a\xa8\xb6l\xa2\xc6\x912,\x80\xc0\x07\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\xaa\xdap'\xf6\x92\x02%\xde\xd0\x04\x00+S\x15\xe6\x94a\xf9d\xb8\x1b\x16\x17;q\x0ba\xf9\xfe\xbc\xc7\xb7\x8b\xedL\x0b\x81\xc0\x07\xd0\xc6\xf0\xf2\xa4d\x97\xb0^\xee\x89\x96\xfa\x84\xf7\xa7\xfc|G\xe0\x83r\x86t\x01Vc\x5cq\xbd2,\x13B\xf0\x94\x9f\x1b\xd2\x05\x81\x0f\xa0]\x06\x83A\x08se\xeb\xe5\x862,\x17Z\xeaS\xae\xa6\xfc|\xa8i@\xe0\x03hS\xd8\x0b\xbdQy\xc9.\xa1\x0c\x8b\xde\xbdz\x81\x0f\xa8\xe0\x1c>`\x19!g\x98\xdd\x9dd\x1f\xb6\xfb\x7f\x87\x1e\xae/\xaf\xe1yj!\xec\x95\x95a\xc9M\xd4\xa8\x1d\xf8v5\x0d\x08|\xc0\xea\x9dL\x099\xc3x\xdd\xba\x04\xdf0\xb9\xe0i\xc9.\xd7E\xd8S\x86\xa5~\xe0\x03*\x18\xd2\x05\x96\x15\xf8&\xd9[\xb3v\xa8\x0as#/\x15@\xe0\x03\xfa\x16\xf86c\xafW\xef\x15\xcf3\x84\xdb\xb2\xa1\xc7s\xeb\xe5V\xba*i\xdf\xa1\xe6\x01\x81\x0fX\xa1x\x9e\xde\xcd\x94\xabGk\xd2\x0cz\xf7\xe6\x7f\x1d]i\x05\x10\xf8\x80v\x1b\xafk\xd0\x19\x0c\x06yV^\x86\xe5\xb90\x03\x08|@\x1fL\xeb\xe1\xda(\x02QoC_,\xc3RVf%\xf4|\xe6^\x1e\x80\xc0\x07t^\xec\xc1\x9a\xb6RB\x9f\x03O\x08\xbaeeX\xac\x97\x0b\x08|@\xef\xc2\xcf$\x9b}\xec\xe5\x8b\x13\x09\xf6Kv\xb9,\xc2\xde\xd8\xcb\xa2\xd6\x17\x87\xc1\x94\xedL\xeb\x80\xc0\x07\xb4\xe3\xc3:L\xde\xb8\x9eru\xde\xc3\xa7\x5c\xf5\x9c\xac\xa8\x01\x08|@/M\x0bA\xbd\xea\xe5\x8b\xcf\xa5\xac\x0c\xcb\xa9^)@\xe0\x03z)\x0eaN\xed\xe5\x8b\x93\x1c\xba\x1e\xf6\xac\x97\x0b\x08|\xc0\xda\x9
b\x16\x866{\x12\x84\x0e\xb2\xf22,G\xca\xb0\x00\x02\x1f\xd0k\x15\xbd|\xcf\x06\x83\xc1VW\x9f[|\xec\xcfJv\x09\xcf\xdbz\xb9\x80\xc0\x07\xac\x85\xb2\x9e\xbcq\x87\x9fWU\x98\xcb\x95a\x01\x04>`-\xc4\x19\xbb\xd3\xea\xf2\xed\x0e\x06\x83\xce\x0d\xed\xc62,OJv9W\x86\x05\x10\xf8\x80u3*\xb9.\xef\xe0\xd0nU\xef\x9e\x89\x1a\x80\xc0\x07\xac\x978q\xe1p\xca\xd5au\x8aqW\x9eK\xec\x91\xdc.\xd9\xe5\xb8x\xbe\x17\x8e: \xf0\x01\xeb\x18\xfa\xf2\xe2\xe2r\xca\xd5\x9d\x18\xdaU\x86\x05\x10\xf8\x00\xaa\xed\xc5P4\xc9\xd7\x8b@\xb5\xd3\xf2\xc7\x1f\xc2^\xd9z\xb9G&j\x00\x02\x1f\xb0\xd6\xe2\xd0\xee\xa8d\x97q[\x1f{<\xcf\xf0i\xc9.\xd7\xb1\x17\x13@\xe0\x03\xd6>\xf4\x85Y\xbb\xcf\xa7\x5c\xdd\xe6s\xdf\xaa\xc2\xe8\xc8\xd1\x05\x04>\x80\x8fC_8\xcf\xed\xf8\xc1\x8f\xc20\xef;\xc5\xcf[\x19\x9a\x06\x83A\x18\x8a.[/\xf7\xdcz\xb9@\x1b\xbc\xa1\x09\x80\x96\x85\xbeQ\x9c\x04\xb1Ul\xa3\x96\xcfl\xad*\xc32rD\x01\x81\x0f`r\xe8\xdbk\xfbc,Bi\x9e\x95\xaf\x97\xfb\xdcz\xb9@[\x18\xd2\x05\xa8\x1f\xf6B\x0fdY\x99\x950\x14\x9dk)@\xe0\x03\xe8\xae0\x94[V\x86\xe5@\x19\x16@\xe0\x03\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05\x04>\x80n\xcb+\xae\xb7\xa2\x06 \xf0\x01t\xd5`0\x18e\xe5eXN\x95a\x01\x04>\x80\xee\x86\xbd\xaa\xf5r\x03\xbd{\x80\xc0\x07\xd0a!\xcc\x95\x95a9T\x86\x05\x10\xf8\x00:*\xae\x97[\xd6{w\x9dU\x17a\x06\x10\xf8\x00Z\xac\xaa\x0cK\xae\x0c\x0b \xf0\x01tT,\xc3\xf2\xa4d\x97seX\x00\x81\x0f\xa0\xdb\xaa\x86jsM\x04\x08|\x00\x1d\x15\xcb\xb0l\x97\xecr\xac\x0c\x0b \xf0\x01\xb4/\xc4\x8d\x8b\xed\xa2\xd8v*\xf6\x0beX\xcaz\xf7\xc2z\xb9\xca\xb0\x00\x02\x1f@\xcb\xc2\xde(\xbb[\x16-\xf4\xda\x9d\x15\xff/\x0blyV>Q\xe3\xc8D\x0d@\xe0\x03hW\xd8\x0b=z/\x1e\xfc(\x84\xb9\xaf\x17??\x8beW\x1e\xee\x1b\xfe\xff\xb4\xe4\xe6\xae\x8b\xb0\x97kU\xa0+\xde\xd0\x04\xc0\x1a\x84\xbd0<{6\xe5\xea\xb0T\xda\xcbb\x9f\xc3\xec\xe3^\xbbq\xc5M\x1e\xcc\xf88\xf6\xe2m_\x15\xdb\xeb\xbd\x83\x17)?s\xce 
\xf0\x01L\x16B\xd2F\xc5>\xcfB\x90+B\xd9?\xc9\xca\xd7\xcb\x0deXNfy\x10\xe1\xf7b\x99\x97I\x8fg714N|L\xaf\xfd\xffU\x0c\x8bU?\xbb\xb2:\x08\x08|\x00}\x11\x02\xdaVB\xe8\x0b\xd7\xff\x83\x8a}F\xf3<\x90\x22`\xddO\x18\x09\x8fi\xbb\xa1\xe77),>\x991@\x86UC^\x0f\x81I\xbd\x8f\xe1g\xcek\x04\x81\x0f`%\xc2\xf9vavnv7\xeb\xf6\xc9\x1c7\xf5\xbc\x89\x1e\xb1p\x1b\xb1\xa7\xef$K\xec\xd9[\xa2\xcd\xec\xd3k\x06'?\xc6\x09\x01\xf2|\xc2ng1\x1c\x9exu\x82\xc0\x07\xd0d\xe8\x0bAm/\x06\xad\xf1\x84PS\xe5\xff\x15\xdb\x7fn\xf0\xf1\x84\x9e\xb0a\x0c\xa2\xfb=n\xfaIa\xb1\xaa\xe4\x0d\xd00\xb3t\x81u\x0b~g\xc5\xb6U\xfc3L\xd2\xb8\xa9\xf1\xab?Xl\xff8\xd6\xf0{\xd4\xe0\xe3\x19\x15\x17\xef\xae\xd1!\x08=~CC\xbf \xf0\x01,#\xf8\xe5\xd9\xddy}\xc75\x7f\xf5\xbe\x86\xdfN\x83\x8f%\xf4v\xbdS3\x80vQX\x99D\xd8\x03\x81\x0f`\xa9\xa1\xefU\xeca\xfb\x0f3\x84\xbe\x0f\x8b\xd0\x977\xd5\xdbW<\x8eqq1\xecq\xe8{\x1e\xdb\x1a\x10\xf8\x00\x96+\xae\xbe\xf1Wf\xfc\xf5P\xca%L\xc0\xc8\x1b\x0a}a\xe6k\xe89\xbc\xecY3\xbfS<7\xcb\xd0\x81\xc0\x07\xb0\x92\xb0\x17z\xe7\xe6\x0dk\xa1\x94Kca&N.\x19f\x93g\xb7vM\xe8\xad\xfcr\xec\xbd\x04\x04>\x80\x95\x08A\xadl\xb6\xeeivW\x97\xaeJ\xa3\xe5E\xe2Ps\x08}\xc7\x1dn\xdb\x10\xf6\x86J\xaf\x80\xc0\x07\xb02q\xbd\xdc\xb2\x9e\xb9\x10\xf4FqF\xef;\x15\xc1o!%F:<\x83\xf72\x86\xbd\x0b\xaf4\x10\xf8\x00V)\x84\xb4\xb2\x957\xf2\xfb\xd9\xa4aH2\x06\xbf\xb7\xb3O\xf7\xba].2\xd8tp\x06\xaf\xb0\x07\x02\x1f\xc0\xea\xc5\xe2\xcbe+n\x9cO:\xef,\xd6\xf0\x1b\x15\xff|\x9c}\x5c\xc7o\xe1\x05\x84;4\x83\xf74Sc\x0f\x04>\x80\x96\xa8\x0aiyE\x00\xbb\x0au\xfc\x8a\xed\xd1\xb2&$t`\x06o\xa8\xb1\xb7'\xec\x81\xc0\x07\xb0r\xb1\x0c\xcbvEp9k\xe3co\xf1\x0c\xdew\xd5\xd8\x83v\xb3\x96.\xb0Na\xafj\x0d\xd70dz\xd0\xc2\xc7\x1d\x1e\xf3}\xcfY\xe8\xe9\xcb\x8b\xed\xab\xc5\xf6\xb7[\xf0\xf0\xdeQv\x05\x04>\x806\x09a\xael\xa2\xc6QK\x87$\xc3P\xeen\xcb\x1eS\x08\xc7{m\xed\x0d\x05\x04>`\x0d\xc52,\xcfJv\xb9\x8e\xeb\xeb\xb6\xd1N\xcb\x1e\xcf}\x8d=3q\xa1#\x9c\xc3\x07\xac\x8bq\xc5\xf5m^\xfak\xa3E\x8f%L\x1a\xd9\x11\xf6\xa0[\xf4\xf0\x01\xbd\x17\xcb\xb0\x94\x0d\x89\x9e\xb7uE\x88\xf8\xd8\xdb\
x14\xf6\x94]\x81\x0e\xd2\xc3\x07\xac\x83q\xc5\xf5m\xee\xdd{\xd4\x92\xc7q,\xecAw\xe9\xe1\x03zm0\x18T\xad\x97\xfb\xbc\xe5\xc3\x93!`\x9d\xc6\xe0\x17\xb6\xedU\x84=eW@\xe0\x03hk\xd8\x0b\x01)/\xd9\xe5\xa6\xe2\xfa\x95\x8b\xb3`\xcf*\x9e\xe3\xf7\x16\xf8\x10\x94]\x81\x1e0\xa4\x0b\xf4Y\xf2z\xb9\x1d\xb6\xc8\x19\xbc\xc2\x1e\xf4\x84\x1e>\xa0\x97\x06\x83A\x08B\xfb%\xbb\x842,G=x\xaa\x8b\x08|\xca\xae@\xcf\xe8\xe1\x03\xfa\xaa*\xcc\x8dz\xf2<\x9b\x0e|\xc2\x1e\x08|\x00\xed7\x18\x0c\xf6\xb2\xf22,\xa7=Z!\xa2\xc9\xc0\x17\xca\xael\x09{ \xf0\x01\xb4=\xecU\xad\x97\x1b\x1c\xf4\xe8)75kW\x8d=\x10\xf8\x00:\xa3\xaa\x0c\xcba\x11j\xaez\x12n\x87\x0d\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x85\x00\xb4\x95\x95\xf7\xde\x85\xf3\xd3\x8ez\xf4\x94\x9b\x18\xce}\xae\xc6\x1e\xf4\x9fY\xba@\x9f\xe4Yy\x19\x96\x83\x9e\xf5bm\xcd\xf9\xfb\xca\xae\xc0\x9a\xd0\xc3\x07\xf4B\x1c\xde,+\xc3r\xde\xc3p3k\x0f\xdf\x8d\xb0\x07\xebE\x0f\x1f\xd0\x17UC\xb5y\x0f\x9f\xf3\xee\x8caO\xd9\x15X3z\xf8\x80\xce\x1b\x0c\x06\xa3\xac|\xb6\xeaq\x8f\xca\xb0\xdc?\xe7Yz\xf7\xae\x85=XOz\xf8\x80\xae\x07\x9f\xaa2,\xad_/wFu\x03\x9f\xb2+\xb0\xc6\xf4\xf0\x01]\x17f\xe5\x96M\xd48\xeaK\x19\x96\xd7l\xd5\xd8\xf7T\xd8\x83\xf5\xa6\x87\x0f\xe8\xacX\x86\xe5Y\xc9.a\xbd\xdc\xbc\xa7O\x7f\x98\xb8\xdf\xb1\xb2+\x80\x1e>\xa0\xcb\xc6\x15\xd7\x1f\xf4\xf8\xb9\xa7\x0c\xe9\x1e\x0a{@\xa0\x87\x0f\xe8\xa4X\x86\xa5l\x96j(\xc3r\xd2\xd3\xe7\xbe\x95\x95\x0fc\x07\xca\xae\x00\x02\x1f\xd0yUaf]{\xf7\xc2$\x95Q_\xc3. 
\xf0\x01kb0\x18T\xad\x97\xfb\xbc\xe7\xa5GvJ\xc2\x9e\xb2+\xc0\xa78\x87\x0f\xe8Z\xd8\x0beX\xf2\x92]\xfaZ\x86\xe5\xa1\xe1\x84\x9f]\x0a{\xc04z\xf8\x80\xae\x09a\xae\xec\xfc\xb5|\x0d\xca\x8flM\x09{\xca\xae\x00\x13\xe9\xe1\x03:#\xae.\xf1\xb4d\x97P\x86\xe5\xa8\xe7m\x10z8\x1f\x0eg\x1f\x0b{@\x15=|@\x97T\x85\xb9\xd1\x1a\xb4\xc1\xc3\xf3\xf7\xd4\xd8\x03\x92\xe8\xe1\x03:a0\x18\xece\xd5eX\xce\xd6\xa0)\x86\xf1\xf2]a\x0fH\xa5\x87\x0f\xe8\x0a\xbd{wB\x0f\x9f\x1a{\x80\xc0\x07\xf4\xcb`0\xc8\xb3\xf22,\x87=]/w\x92\x835z\xae@C\x0c\xe9\x02m\x0f{[Yy\x11\xe5P\x86\xe5h]\xdaC\xd8\x03\x04>\xa0\x8f\xf2\xac\xbc\x0c\xcb\x81\x19\xaa\x00\x02\x1f\xd0Qq\xbd\xdc\xfd\x92].\x9d\xcb\x06 \xf0\x01\xdd\x96W\x5c\x7f\xa0\x89\x00\x04>\xa0\xa3\x06\x83\xc1(+/\xc3r\xbc&eX\x00\x04>\xa0\x97a/\xac&Q6\x11c\x1d\xd6\xcb\x05\x10\xf8\x80^\x0bC\xb5e\x135\x8e\xccV\x05\x10\xf8\x80\x8e\x8aeX\x9e\x95\xecr\x9d\xadQ\x19\x16\x00\x81\x0f\xe8\xa3\xaa0\xa7\x0c\x0b\x80\xc0\x07tU,\xc3\xf2\xa4d\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xee\x1aW\x5c\xaf\x0c\x0b\x80\xc0\x07t\xd5`0\x08a\xael\xbd\xdcP\x86\xe5BK\x01\x08|@7\xc3^(\xc3\x92\x97\xec\x12\xca\xb0\xe8\xdd\x03\x10\xf8\x80\x0e\x0ba\xaf\xac\x0cKn\xa2\x06\x80\xc0\x07t\xd4`0\xd8).\x9e\x96\xecr]\x84=eX\x00\x04>\xa0\xc3\xaa\xc2\xdcH\x13\x01\x08|@G\x0d\x06\x83\xbd\xac|\xbd\xdcs\xeb\xe5\x02\x08|@\xb7\xe9\xdd\x03\x10\xf8\x80\xbe\x1a\x0c\x06yV^\x86\xe5\xd0z\xb9\x00\x02\x1f\xd0\xdd\xb0\xb7\x95\x95\x97Y\x09eXL\xd4\x00\x10\xf8\x80\x0e\xcb\xb3\xf22,\xd6\xcb\x05\x10\xf8\x80\xae\x8a\xeb\xe5\xee\x97\xecrY\x84\xbd\xb1\x96\x02\x10\xf8\x80\xee\xca+\xae\xb7\xa2\x06\x80\xc0\x07t\xd5`0\x18e\xe5eXN\x95a\x01\x10\xf8\x80\xee\x86=\xeb\xe5\x02\x08|@\xcf\x850WV\x86\xe5H\x19\x16\x00\x81\x0f\xe8\xa8X\x86\xe5Y\xc9.\xd7\x992,\x00\x02\x1f\xd0iUa.W\x86\x05@\xe0\x03:*\x96ayR\xb2\xcb\xb92,\x00\x02\x1f\xd0mU\xbd{\xbd\x9e\xa8Q\x04\xde\xbdb\xbbM\xdc\x8eV\xf88\x0fj<\xce\x91\x975\x1d\xfd{\x1c\x15\xdb\xc5k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x985D\x14\x17\xdb%\xbb\x1c\xdf\xde\xde^\xf4\xb9\x0d\x8a\xe7wR\x5c\x9c'\xee\xfe4\x9e\xef\xb8\xec\x
e3T5\x83\xfa!=\xb2t\xf5\xfd(\xbcn_LxO\x0a#\x10\xef\xc7\xf5\xbd\x05>\x80\x86C\xc4:\x95a\x19\xd5\xd8w\x15a*\x1c\xa7\x8d\x05<\x17h\xcb\xfbQx\x8d\xefW\xec\xf6\xac\xcf=}\x02\x1f\xb0\xaa\x10q\xb4.\x135b\xb9\x99\xc3\xc4\xddw\x97\xf9\xa1S\xdc\xd7Nq\xf14q\xf7C\xa5s\xe8\xa8\xd4/\x97\xb9\xc0\x07\x90\x1e\x22\xb6*B\xc4u\x11\x1c\xf25k\x96p~\xdeu\x8d}\x97\xf9\xb8R\x5c\xae\xe11\xa3\x1f\xefG\xc3,\xbd\x07{W\xe0\x03H7\xae\xb8~\xb4n\x0d\x12{3S{\x19\xb6\x9711\x22L(\xa9\xf1\x01g\x15\x14\x10\xf8\x00\x92C\xc4\xf9\xba\xae\x97[s\x02\xc7Q<\x0frQ\xc7)\xdcvj\xef\xdesk\x1c\x83\xc0\x07\xf0\x89\xa0Rq\xfdh\xcd\xdb'\xf5\xf9\x87!\xa8E\xf6\xaaU-uw/L\xae\xc9\xbd\xac\xe9\xb0\xab\x1a\xfb\xde\x08|\x00\x15\xe2L\xb8\xb2\x10\xf1|\xddO\xfa\xaf9\x81\xe3\xd9\x22\xca\xb4\xc4\xdbL\x0d\x93#\xab\xa0\xd0\x83\xbf\xb9\xe4\x9eu\x81\x0f\xa0<D<\xaa\x08\x11z\x8a>\xf9\xa1\x92:\x81#_\xd0\xfd\xa7\x9c\xc4~\x1e\x87\xa1\xa1\xebFYu\xef\xdd\xa5\xc0\x070\x7f\x888\xd0St'\xb6Cj\x90\xdbo\xb2LK\xc2Rw\x0f\x03\xfa\xc8\xd1\xa2'\x7fsW\xc5Ex\xedO\xeb\xe9;\x0e\xd7\xf7\xf9=\xea\x0d/\x03\xa0\xa1\x10QV\xd4\xf4\xd2\xea\x0c\x9f\xfa\x00\x1a\xc7\x99\xb8)\xb3d\xf3\xf8a\xd5\x84\xd4\xe3p\xa4\xe6\x1e=\xfb\x9b\x0b\xab\xfa\x0cc\xed\xc9\xb0m\x15\xdbY\xb1]\xac\xc3\x97Q\x81\x0fhB^q\xbd\x92\x1e\xd3\xdb\xe5\xc3\x84\xfdB1\xe6\xd1\xbc\xa19.u\x972QC\xcd=\xfa\x1e\xfc.\xd6\xedy\x1b\xd2\x05\xe6\x92\xd0Ku\xaa\xa4G\xe9\x07\xcf\xf3\xd4P=O\x99\x96\x9a\xeb\xe5\x0a\xe8 \xf0\x01$\x87\x88uZ/wVy\x96V\x0abs\xce\xb6L\x9d\xa8\xa1\xe6\x1e\x08|\x00\x9fP5D\xe8<\xb0\x0a5W\xe08\x98\xa5\x97/\x9e\xb3\xb4\x9f\xb0\xab\x99\xd4 
\xf0\x01|\x22Dl\x15\x17\xcfJv\x09eG\x8e\xb4TR\xe8\x1bgiu\xc26fl\xd3\xd4\xdfQs\x0f\x04>\x80Z!\x22\x17\x1ejI\xed\xe5\xdb\x8f=v\xa9\xc1|\x94\xa5\xcd\x04Vs\x0f\x04>\x80O\x84\x88aV^\xcb\xed\x5c\x19\x96zjN\xe08J<N\xa9\x135\xd4\xdc\x83\xd5\xbd\x9fn\x85\xf7\xd4&\xebmN\xa2,\x0b0\x8b\xca\xde=M4\x93<\x06\xaf\xaa\xc9\x15\xa1L\xcb^B\x8f\x5cj\x19\x16\xe7Z\xae\xee\xc3>\x84\xf2\xf0A\x7f_\x1b.\xfc\x7f\xb7\x22\x9c_<\xd8\xce\x1c\xbbN\x1d\xeb\xbdx\xbc\xb7&\x1d\xe7b\x9fi\xc7\xf9d\xde\x11\x13\x81\x0f\xa8\xfb\xa6\x15\x02\xc9v\xc9.\xc7fy\xce&\xbc\xa1\xc7Zy/\x12C\xf7I\xc5\x87K\xca0\xb1\x9a{\xcb\xff\x1b\xda\x8a\xc1~\xaf\xe2oi\x92\x8d\x18\x14v\x1f\xdc^8\xffs\xdcD\xafz\xecez?q\xf7//\xea4\x80\xf8\xfa\xbd\xca\xd2f\x96W>\x8e\xd4\xe7U\xdc\xce`\x01\xcfe\x18\x8f\xf7\xfe\x1c\xc7\xf9Eq;\xf7K\xbf\xcd\x14\xfe\x0c\xe9\x02u\xdf\x84\x8f*z\x1f\x94a\x99/\xf4\x85\x0f\xed\x94\x09\x1c\x9b1\x1c\x96\x05\xc2\x94\x0fK\xc7k\x89_\x96\x8a-\xf4\xd6\xbc\xcc\xee&<m7t\xd3\xbb1\x10\x5c\xcd;,\x18\xbf\xac\xa5\x9eZ0\x9e\xa76d\xd5mg\xe9e\x84NZz\xbc\xc3P\xedY\x0c\x9a\xfb\x0d\xdc\xe4v\xfc2\x18\x8es\xed\xba\x9c\x02\x1fPG^\xf1&|d\xa2Fc\xed\x9c\xb4\xdf\xa47\xfd\xd8\x83\x94\xf2\x01\xa3\xe6\xde\xf2\x82\xdeU\xfc\xb0\xde^\xe0]\x85\xe1\xfb\xf7\x8b\xfb\x9akv|\xf1\x9a\x08_\x02.\x13v\x0d\xef\x05'\x0bh\xaf\xd0\xf3\x99\xb2\xde\xf3e\xd6\xd2\xd3G\xe2H\xc8E\x966a\xaa\xae\x8d\xf8\x85a\xa7\xce/\x09|@\xf2\xb7\xd5\xe2\xe2i\xc9.\xd7\x86\x06\x9b\x11C\xd8q\xe2\x1b\xff\xc1\x8c\x81Q\xcd\xbd\xe5\xfc\xdd\x1c\xc5\xa0\xb7\xb9\xc4\xbb}\x1az\x96\xe6\xec}\x1b%\xee\xb7[\xd1\xd3\x5c\xb7\xbd\xc2c\x1e\xa7>\xc66~\xc1|p\xcc7\x16x77u\xbf\xac\x09|@\xaa\xaa7aC\x83\xcd:\xc8\xd2V\xe08\x88a\xfc\xfe\xc3f\x98\xa5\xf5\xee\xa9\xb9\xb7\x1c\xabZ\xb3u\xb7Fp\x9a\xf4\xa5#<\xeew\x13w\xcf\x1f\xbe\x06\x1bx\x9fI\x09J\xef\xc6\xc7\xd8\xb6\xb0\x97W|1nJ\xed\x9eU\x81\x0fHy\x13\x1bf\xe5C\x13j\xb85,\x86\xb1<a\xd7\x8d\xd7\xf6K\xf9\x9dS\xc7ki\xe6i\xe7\xcb9\xef\xfbI\x0c 
\xb3\xbe\x06COUjA\xf0\xf1\xbc\x0dUc(\xf7<>\xb6\xb6\xbdO\x86\xc7\xff\xac\xad\xaf+\x81\x0fH\xfd\xd6]f\xa4\x89\x16\x12\xfa\x8e\x12?\xf4\xf7\xefkye\xd5\xe7\x0c\x99X\xb3\xfc\xe0~\x5cq<N\xb3\xbb\xde\xb4\xb7\x8b\xed\xb3a\xa6h\xdcv\xee\xff]\xfc\xfcq\xb1}9\xde\xd6M\x8d\x87\xf0\xacN\xa1\xee)\x7f\xdb)\xf77\xd7\xd0n\x8d\xa1\xdcV\xd6\x8cL\x98\xd0\xf6\xa9\xd0z\x7f\xcc\x1f\x1c\xef\xfb\xe3\xfcv\xbcn\xda\xb1\xbe\x99\xe5\x0b\x9b\xb2,@\xd5\x1bY\xe8!(;\xff\xe8\xb9:`\x0b\x15>DS\xcad\x84\xe3\x94r\xceV\xeex-]\xf8p\xde\x7f-\xb4\x84\x9f\x1d\xa5\x0eK\xc6c\x16\xb6\x93\x07%wR{\x93B\x10\x19\xce\x18X\xaf\xe2\x04\x84\xf7R^[\xc5\xbe'3\xbe\xbeB\xd8K\x19\xca\x1d\xb5\xf4\xf5\x9bZ\xf32|\x81;\x98v\xfe\xdd\x83\xe3|\xf6\xe0=8\xf4\x1c\xee=x\x0d\x8dgy\x80z\xf8\x80\xaao\xad\x07\x15\xdf\xb6s-\xb585&p\x84\x0f\x83\xaa\xe1\xb0\xcb6\x0e\x85\xad\xc11<\x89\x7f+a;,\xb6\xad\xe2g\xa3Y\xcfA\x0b\xbd\x86q\x82\xd4\xdbYz\xef\xdb\xce\x9c\x8f\xff4a\xd7\x99\x86vk\x0c\xe5\x1e\xb7\xf8T\x84Qb\xd8\x1b\xd6\x9dl\x11\x9esx\xbddw\xbd\x7f\xa7\x02\x1f\xb0\x08U\xb5\xdc\xac\x97\xbb\xbc\xde\x83\x9b\x06ng\xa4)W\x1a\x08B\xd0k\xeco&\x06\x87Q\x8d\xd7\xd0\xbc\x8f\xbf\xf1\xa1\xdd\x1aC\xb9\xd7YKOE\x88\xa7RT\xf5\xee\xdd\xc4\xb0\xf7j\x8e\xe3}Ul{\xb3~Q\x10\xf8\x80iob\xa1G\xa0l\xb6\xa7\xde\xa2%\xa91\x81\xa3\xcc\xf36\xcej\x5c\xa3cx\xb2\x88/G\xb1\xc7+\xa5\x07x\xaf\x81\xd7`\xeam\xd4\x99\xb5;\xce\xd2\x86r\xf7Z\xfc\xe5r\x98\xf2<W\xfd\xf8\x05>`\x9a\xaa0\xe7\xc4\xff\xe5\x06\x86\xd4\x09\x1c\x93\x5cg\x86\xde\xfb,\xe5\xd8n,q\x15\x8e\xa4\xa1\xdd\xf8xR\x86r\x0f[\xfee%\xa5]W>\x14-\xf0\x01\x93\xde\x88GY\xf9l\xcfS+4\xac\xc4\xac!\xfb\xc0\xd0{\xaf\xbf\x0c\x5cei\xe7\xd8\x0d\x1b\xb8\xbb<\xf1\x8bG\xe9\xd0n\x8d\xa1\xdc\xf3\x9e\x14t_y`\x15\xf8\x80Io\xc4Uo\xb0z\xf7V\xf3\xc1\x1eB\xf6q\xcd_Sso=\xa4|\x01\xdbi\xe05\x18\xbe8\x8cR\xc3a\xc9j\x1f)\xb3Z[Y\x82eF;\xab~\x00\x02\x1fP\xf7\x8d\xf8PY\x8f\x95\xcak\xee\xefX\xad\x87\x94\x1e\xa4GM\xdcQ\x1c^=L\xd8u\xe2\xd0n<?8\xa5\xa4\xcc\xa8G\xef5[\x02\x1f\xd0\x1a\xf1D\xeb\xb2\xde\xbbp.\x98\x89\x1a\xab\x0f\xe4u<\x9d\xf7\xdc-:!e\xc8~\xb7\xa9
;\x8b\xc3\xac)\xabp<\x99\xf0\xfaKy\x0f9\xeeY\xcft\xbe\xea\x07 \xf0\x01\xaf\xbf\x11+\xc3\xd2\xde@\x1ezF\x9e\xcex\x5c\xe9\xb1\x15Mj\x18ei\xa5Z\xc6\x0f^\xc3\xa3\x84\xe0\xd9\xda\x12,S\xa4\xb4\xfdf\xf1\xdc\xc7\x02\x1f\xd0\x860\x11\xbe\x85\x97\xcd\x98\x0b'O\x8f\xb5\xd4\xca\x03\xf9,\xb6\xe7Y\xf6\x0a\xa6\x84\xcc\xab\xc4`\x16\xc2N^c\xf9\xb1\xbd\x8e}\xb1<K\xdc/,\x818.9\xafq\xa1,\xad\x06\xa4\x86\x89\x5c\x13\xad4\x90\x8f\xb2\xf9\x86\xe4\xc2\x07\xee\xb8-\x1f\xa4\xc5c9[\xc2\xdd\x5c\x14\xcf\xf7\xa0\xa3\xc7;\x84\x82\x9d\xb8\x85\x7f\x0f\xe3U[Y\xda\x12^\xcb\x0a}\xe3\xc4\x952\x0e\xe2c\xaf\xaa\xb9w\xd8\xb5z\x91a\xe8\xb9h\x83\xeb\xc4\xe3\x12j\x9b\x0eC\x00^\xf6\x17h\x81\x0f\xb8\x0f\x13\xdb%\xbb\x1c+\xc3\xb2\xf2\x0f\xffy\x87e7\xe2m\x8cZ\xf2\xb4v\x1d\xd9O\x1d\xe3\xbd\x18\xec\x86m\x0au\x09\xc2k\xea\xaa\x22\xccmd\xe5\x85\xdc\x83.\x97`\x09\x8f\xfbE\xe2\xbe\xe1\xd8\xbe\x88\xeb\x94\x87\xedd\x19_\xc4\x0c\xe9\x82\x0f\x9a\xaa0q\x93)\xc3\xd2\x86\x0f\x93\x8d\x06ng\xdf\x04\x8e\xf6}\xd9*\xb609\xe1{10\xecw,\xec\xd5]\x85\xa3\xec}f\xd4\xd5\xe3\x18{\xeb\xcek\xfe\xdaf<\xe6Wq\xa8w\xa1\xa5[\x04>\xa0*L\x1c\x99\xa8\xb1\xd2@\x902Q#|X\xbe\x9bx\x93c\xad\xba\xfa/Y\xf1\x9c\xb6W\xf1\x03\xffI\xd7\x9fS\x8dU8\xa69\xe8A\x09\x96\x10zgY\x0d\xe7\xbe\xf7\xf3\xc3\xe25q\x11\xbf\x044~\x9e\x9f\xc0\x07\xeb\xfd\xc1\xb3U\x11&\xae{R\xe5\xbe\xcbR\x86r\x8fj,\xbd\xb6\x19\x87\x92X\xcd\xdf\x5c\xe8-\x0f\xc1\xe6Y\xd6L\xafm\xdb\xbe<\xce\x12xN\xfb0!,~1\x1ef\xb3/\x81\x18lg\x1f\xf7\xfa\xe5M\x06?\x81\x0f\xd6[\xd5\x9b\xac\xa1\xdc\xd5\x86\x83QV}\xae\xdb\xcd\x83P\x98z\xbc\x0ej,nO3\xc7\xf2Q\x9c\xa8\xf2\xf5\x1e\x06\xbd\x87\x81gT\xf3\xd7\xae\xb3\xfe\xac\xa6\xf10\xf4\x9d\xcfyS\x1b\xf1K\xc1US_\xd0\x04>X\xdf\x0f\xa0aE\x988\xb7$\xd7j\x03B\x96\xde\xbb\xf7*~\xd8\x84@\x91\xb2\xa6\xeaF\xa66\xdf2\x8fe\x18\x96\xbf\xca\xe6\x9b\xa8r\x19CD\x186=|\xb0\xbd\xfd`\xbb\x5c\xf5s\x8d3l\xeb,\xffw\xd1\xb7SF\xc2\xf3)\xb6a\x96\xb6\x1aIR\xf0\x8bC\xbds\x9d\xe3g\x96.\xac\xafq\xc5\xf5z\xf7V\xab\xaa\x08vp3!\xb8\x85\xe3\x96rN\xd8G+ \xacj\xf6uq\xbf\x835\x0a{gY\xfd^\xbd\x10\xeeNb 
:K\xbc\xafW-y\xbe\xfb5~%\xbc\x0e\xf7\xfa\xf8\xe52\x9c\x0e\x13\x8b-\x1fe\xf3\x9f\xa7\x19\x86z\xcf\xc2)\x01\xb3\x0e\x7f\xeb\xe1\x83\xf5\xecq\xa8Z/\xf7y\xd7ja\xf5\xec\xf8\x0c\x13?4?\xb5\xf2I<\xf1=\xb5ga\xac\xb5\x17z\x1c\x1f\xd5\x0c{7\xf1\xd8=\x0e=D\xe1\xbc\xcc.\x95C\x8a\xcfw\x96\xe06\xee\xeb)\x06\xe1\xef\xb1\xd8\xc2d\x8e\xd0\x03{<\xe7\xcd\x85\xd7\xd1\x8bx\xaa\x87\xc0\x07$\xbd)\xe7\x15\x1f:\xb9\x96Z\xa9\x94\xe1\xd6\xeb8Qc\xda\xef_'\xdc\x86\x09\x1c\x8buR#\xec\x85\xa1\xda\xad\xd0+\xd4\xe1\xd9\xaaG\xd9l%e6\xfa\xfe\xe5#\x04\xf7b\x0bA\xedq\x0c\xf5\xd7s\xdc\xdc\x8bX\xecZ\xe0\x03*\xdf\x94\xad\x97\xdb\xde@\x1ez_\xb7\x13v=(\xf9py\x95\xa5\x0f\xc9?3\x81c!\xc71|\xb8\xa7\x9c\xb3\x17\xbe`\xbd\x1dV\x04\xe9\xf2\xdf]\x0c \xfbs\xdc\xc4\xee:,\xff\x17{\xfc\xc2{l\xf8\x9b\xfbbv\xd7\xebw3\xc3M\xd5\xee\x15\x15\xf8`\xbd>\x84\xaa\xce\xaf)\xeb5b\xf1\xc7\xa7\xaa\xf7\xf5^\xe5\x84\x9ax}\xeaL\xc1\xb1\xd6o\x5c\x9e\xb8\xdf\xb0\xeb\xab\xd8\xc4\xd7m\x13\xaf\xa1|\x9d\xbe|\x84\xd3fb\xaf_x\xce\x875\x83\xdfFVs$F\xe0\x83\xf5R\x15\xe6F\x9ah\xe5\xc7'e\x080\xf5\x8d>\xf5x\xee\xce2D\xc4\xd4\x00\x14\xda2eh\xf3\xb0'\xe7\xca\xa6\x0c]\xa7\xce\x1e_\xbb\xca\x00qVo\x1e\x83_\x9d\xf3\xfc\xf6\xeb\x04d\x81\x0f\xd6\xebC\xa8l\x88\xe9\xd4z\xb9+=>\xc3,mH,y]\xe3\x9a\x138\x8e\x16Q\xdd\x7fM\xa5\x84\xe7\x9b>\x145\x8f\xc3\xb0UC\xd7\xd7q\xe2BJ\x98\xd9^\xd7\xf3Jc\xf0\x0b_\xd2\xdei\xf8\xb5&\xf0\xc1\x1a\x85\x89\x94\x9an\xca\xb0\xacV\xcaP\xfa,\xeb\x1a'O\xe0\xf0\x1ah\xcc0a\x9fq\x0f\xdeW\xc2)\x22)\xe1l\xf4\xe0=&e\xd8\xf2\xd9\xa2\xd7\x95my\xf0\x1b\xd7\x08}\x02\x1f\xf0\xa90W6\xc4t\xd8\x83u,\xbb\xfc\xc1\x19>4S&j\xd4^\xd7x\x86\x09\x1c;\x8e\xc8\xdc_\xaeR\x86s\xcf\x16p\xf7\xbbK~\xba!\x98T\x0e\xe5\xde\xf7H\xc7\xd7b^\xe3\xb6\xd7V\x0c})\xc3\xe0\xc9\x7f\xaf\x02\x1f\xf4\xff\x03h\xab\xe2\x03\x7fR\xf1^\xdas|\xee\xcd\xbc\xaeq\x9c\xc0q\x9a\xb8\xbb\xd7\xc2|R?\x80\xaf\x1a~\x1d\xed,\xf9u{\x94\xf0%\xe5S=\xd25\xd6|\xde\x8e\xf7\xb1\xceR\xfe\xde\x93\x0bz\x0b|\xb0\x1eo\x1aeo\x0a\x07\xca\xb0\xacT\xeaD\x8dy\x87[S\x7f\x7fw\xd6\xc2\xae\xd4\x0a\xe1MO\xd6\xd8Zb\xd8\x1b
\x16\x17OS\xde{\xa6\x8c\x1c\xa4\xbe\xbe\x9e\xc6\xfb\xf2\x1ai\x80\xc0\x07=\x960\x11\xe0|\xd6ezh\xec\xf8\xa4,\xb94\xf7\xba\xc6&p\xb4\xee\xd87\x1d\xd0\xf6\x96\xf4\xb8SK\xb0\x5cN+\xf1\x14\x83\xcc\xf3\xc4\xbb\x1c\xaf\xf9k\xf1\xba\xa9\x1b\x12\xf8\xa0\xdf\xaa\x86DrM\xb4R\xa9a;o\xf0\xf5\x90r\xd2\xfc\x86\xd7\xc6\xc25\x16\xf8bx\xdc_\xe2k6\xe5\x1c\xc5Q\xc2k:u2\xd1:\xbf\x16\x1b\x0b\xbb\x02\x1f\xf4\xb7\x07!\xbc\xe1\x96\x9dcs\xac\x0c\xcbJ\x8fO\x9e\xf8\xc1\xd9\xd8q\xaa9\x81\xe3\xa9\x09\x1c\x0b\xd5d\x8f\xdcxI\xaf\xd9\xf0\x98Sz\xa4+\xeb\x0b\xc6\xd7\xe2\xa8\xc6kq\xb8\x86\xef\x11\x8f\xb2\x1a\xe7\xe8\x09|\xb0\x9ea\xa2\xaa\x0c\x8b\xf5rW{|\xb6j\x04\xafF\x8fS\x1c\xc2\xb7\x02\xc7\xe2\xa4\x9ew\xb5\xd7\xd0k)\xbc>vk\xbe7\xcc\xfa\x9aMy=\x5cg\x89\x13\x7f\xe2\x17\x99\xd4\xa1\xdd\x935\x1c\xdaMy\x8d\xa4\xfe-\x0b|\xd0S\x07\x15\xdf\x0c\x8f\x94aY\xa9\xd4\x89\x1a\xcf\x17t\x9cRC\xe4\xf6:\xaco\xdap\xa0~\x95%\x0eU\xce\xdb\xb6\xb1\x17\xffY\xcd_\x9b\xb5\xd7v\x9c\xf8\x9a\x1d\xd5\x9c\x04\x96'\xb6\xd7F[\xbf\x80\x84 \xdat\x0fd\x8de\x16\xcf\x04>XS\xf1\x9bx\xd9\x87\xc0u\x1f*\xfcw\xf8\xf8\x84\x0f\x86\x94a\xb1\x85\xf5\xc2\xc6\x9e\x95\xd4%\x9cr\x138j;\xa9\xd1\xb6;3\xbe\x8e\xc2k\xe3\xc5\x92^\xb3)\xabi\xdc\x7fA9\xab\xf9Z\xac3\xb4\xfb\xa4\xa5K\x00\x86\xc7\xf4~\xf1\xd8\xce\x9a\x08~\xf1\xef-\xb4c\xca)\x1f\xc9!X\xe0\x83\xfe\xa9z\x03\xd0c\xb3\xc2\x9e\x80\x1ao\xd0G\x0b.\x97\x93g\xe9\x138\xd4\xe6\xab'\xb5\xbdB\xdb\x9e\xd5\x091!P\x14\xdbEV\xbfgo\xd6\xd7l\x08\xa4__\xe4\x17\x94\x9aC\xbbm\x9c\xb5{\x7f\xfcv\x1f\x04\xbf\xd1,\x8f3\x06\xc6\xd0\x1e)\x85\xd8\xcf\xeb\x8c\x00\x08|\xd0\xaf@1\xac\xf8&>wy\x0f\xe6R\xb5\xe2\xc9\xbd\xe4\xf3\xa0f\x15?(R\xefc\x7f\x9d\xeb\xa1\xcd\xd8\xb6\xa9\xe7V\x85\xd0\xf7^YH\x08\xa1+\xf4\xb2\xc5\xa0\xf7~I\x18\xb8i\xf8\xfd\xa4\xce\x17\x94\xd1\x9c_P\xf2,}h\xb75\xefa\xb1\x8d^\xef\xb1\x0f\xef\xc1\xa1\xf7\xf5{\xf1\xb8\xe61\xa8?\x9a\x16\xaa\x13\x8f\xef\x5c_\xde\x05>\xe8\x97\xaa7g\xbd{\xab\xfb`\xd8\xca\xd2{e\xf2e\x14\xc3\x8eC\xfb\xa9u\xbe\xf4\xf2\xd53\xaa\xb9\xff\xc3\x90p[l\xaf\xe2\xe5m\xf1\xb3\x0f\xb3\xbb^\xb6\xb2
 p3\xc3}\xa6\x84\xb0\x94\xf0q\xda@\x9d\xc8:C\xbb\xbb-:\xb7t/\xe1\xb8>\x8bA\xee\xfe\xd8\x86\xed\xaa\xe6\xf1}\xdda\xdd\xc2\xcc\x02\x1f\xf4'PT\xf5\x1e=_@u\x7f\x9a\x0b\xe3\xf7.\x97\x5c\x0c;\xf5\x83s;\x9e7FZ\x80\xb9*.\xde\x9d\xe3&\xea\x94\xe3\x08ao\x98\x18\xba\x92\xce\x19\xac\xb1\x9aFcA\xb3\xe6\xd0n\xbe\x80\xe2\xd5\x8b\x08|\xd3l\xceq\x9f\xc7\xb3\x9c\x87-\xf0A?\xc2^\xd5\x8c.eXV{|\xf6\xb2\xf4\xd2\x19K\xed\xb9\x88!!u\xf8\xf1\xa0%\x1f\xb2]\x09}\xa1W\xf4x\xc1ws\x1f\xf6R\xbf\xcc=Jx\xbd\x86}R{\xecF\x0d\xf7F\xe7Y\xfa\xb9\xa5\xe3\x15\xff]O\x1a\xce]\xb4\x10\xf6f\x0a\xd8\x02\x1f\xf4CU\x99\x8f\xdcz\xb9+\xfdPH\x1d\x0e=_Q1\xec\xd4\x90i\x02G\xfd\xd0\x17>\x9c\x9f/\xe8\xe6CP\xdfz-\xecU\x85\xf7\x94\x89\x04\xe3,\xad\x87\xb1\xf1s\x82g\x18\xda]\xe5\x17\xd9e\x7f\xf9yw\xd6\xb0'\xf0A?\x02E\x18\xa2)[V\xe9z\xda\x9a\x96,-Lm\xd6\xd8w\x15\xa1\xa4\xce\xda\xa6OL\xe0\xa8\xdd\xbe\xe1\xb8~9knRE8\xef\xf2\x9d\xe2v\x873|\x91\xdb\xa9x?\x09\x81\x22\xb5l\xd0hA\xed\x15B\xe4i\xe2\xee\xcfV\xb5\x22L\xfc\xbby\x9c-\xbe\x177\xdc\xfe\xe3y\xdf\xc7\x05>\xe8\xbe\xaa7\x81\x91&ZY\x18\x0f=\x00\xa9\x135\x8eW|\x8ee^#\x90\x8c\x1d\xdd\x99BLx=\x1c\xce\x11\xfc.c\xd0\xdb*9\xcf\xb3\xea5\xb4U\xf1zM\x0d\x15\xf9\x82\x8b\xb7\x8f\xba\xf0z\x0cm\x10{\xdd>\x9b\xdd\x9d\xb3y\xde\xd0M\xdf\xc4/a!\xe8\x8d\x9ah\xebAq#\xfe\x12!\xda\xf8\xca[gY\x8de\x8a\xa2\xf3\x9bo|0\x5cQ\xa0\x08\xe7\x86\xbdW\xf6\xd8B/\x80#\x0b\xad\xfb2\x10\xfev\xc3\xdf\xe6N\xc9{\xceu\x0cp\xe1}\xe9\xc4\xea8\x9d9\xb6\x8f\xe2q\xbd?\xbe\x8f\x12>WBP|\x15\x8f\xf5\xd9\x22\xbe\xfc\xbd\xe1\xd0@\xa7\xe9\xdd\x83\x0e\x8a=~jb\xf6\xf3\xd8~?\xb8\xb5\xe9q\x19\xd2\x85\xee~\x8b\xcc\xb3\xf2s\xc3\x0e\xf5\x08\x00 \xf0Aw\xc3\xdeVV~\x82\x7f8\xff\xc3D\x0d\x00\x04>\xe8\xb0<+/\x9bp\xa0\x0c\x0b\x00\x02\x1ftT,\x89QV\x86e\xd9+5\x00 \xf0\x01\x0d\xcb+\xae\xb7^.\x00\x02\x1ftU,\x8aZ6\xbd\xffxE+5\x00 
\xf0\x01\x0d\x84\xbd\xaa%\xba\xac\x97\x0b\x80\xc0\x07\x1d\x17\x86j\xcb&j\x1c)\xc3\x02\x80\xc0\x07\x1d\x95\xb0DW\xa8\xc8\xaf\x0c\x0b\x00\x02\x1ftXU\x98S\x86\x05\x00\x81\x0f\xba*\x96ayR\xb2\xcby\x5c\xa6\x09\x00\x04>\xe8\xa8q\xc5\xf5\xca\xb0\x00 \xf0AW\x0d\x06\x83\x10\xe6\xca\xd6\xcb\x0deX.\xb4\x14\x00\x02\x1ft3\xec\x852,y\xc9.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb0\x10\xf6\xca\xca\xb0\xe4&j\x00 \xf0AG\x0d\x06\x83\x9d\xe2\xe2i\xc9.\xd7E\xd8S\x86\x05\x00\x81\x0f:\xac*\xcc\x8d4\x11\x00\x02\x1ft\xd4`0\xd8\xcb\xca\xd7\xcb=\xb7^.\x00\x02\x1ft\x9b\xde=\x00\x04>\xe8\xab\xc1`\x90g\xe5eX\x0e\xad\x97\x0b\x80\xc0\x07\xdd\x0d{[Yy\x99\x95P\x86\xc5D\x0d\x00\x04>\xe8\xb0<+/\xc3b\xbd\x5c\x00\x04>\xe8\xaa\xb8^\xee~\xc9.\x97E\xd8\x1bk)\x00\x04>\xe8\xae\xbc\xe2z+j\x00 \xf0AW\x0d\x06\x83QV^\x86\xe5X\x19\x16\x00\x04>\xe8n\xd8KY/7\xd7R\x00\x08|\xd0]a\xa8\xb6\xac\x0c\xcb\x912,\x00\x08|\xd0Q\xb1\x0c\xcb\xb3\x92]\xae3eX\x00\x10\xf8\xa0\xd3\xaa\xc2\x5c\xae\x0c\x0b\x00\x02\x1ftT,\xc3\xf2\xa4d\x97seX\x00\x10\xf8\xa0\xdb\xaaz\xf7\x94a\x01@\xe0\x83\xae\x1a\x0c\x06!\xccm\x97\xec\x12\xca\xb0\x5ch)\x00\x04>\xe8f\xd8K)\xc3\xa2w\x0f\x00\x81\x0f:,\x84\xbd\xb2\xf5r\x8fL\xd4\x00@\xe0\x83\x8e\x8aeX\x9e\x96\xecr]\x84\xbd\x5cK\x01 \xf0Aw\x8d+\xae\x1fi\x22\x00\x04>\xe8\xa8\xc1`\xb0\x97\x95\xaf\x97{n\xbd\x5c\x00\x04>\xe8\xb6\xaa2,#M\x04\x80\xc0\x07\x1d5\x18\x0c\xf2\xac|\xbd\xdc\xe7\xd6\xcb\x05@\xe0\x83\xee\x86\xbdP\x86\xa5\xac\xccJ(\xc3\x92k)\x00\x04>\xe8\xae0\x94[V\x86\xe5@\x19\x16\x00\x04>\xe8\xa8\xb8^\xee~\xc9.\x97\xd6\xcb\x05@\xe0\x83n\xcb+\xae\xb7\xa2\x06\x00\x02\x1ft\xd5`0\x18e\xe5eXN\x95a\x01@\xe0\x83\xee\x86=\xeb\xe5\x02 \xf0A\xcf\x850WV\x86\xe5H\x19\x16\x00\x04>\xe8\xa8\xb8^\xee\xb3\x92]\xae\xb3\xea\x22\xcc\x00 \xf0A\x8bU\x85\xb9\x5c\x19\x16\x00\x04>\xe8\xa8X\x86\xe5I\xc9.\xe7\xca\xb0\x00 
\xf0A\xb7U\xf5\xee\x99\xa8\x01\xc0J\xbc\xa1\x09`~\xb1\x0c\xcbv\xc9.\xc7\xb7\xb7\xb7\x17Z\x0a\xa0{\xbe\xf9\xf8\x0b\xe3\xf8\xcf\xfcK/\xbf}\xd5\xc5\xe7\xa0\x87\x0f\xe6\xf4'\xff\xf8\x07\xc2\x17\xa7\xb2\xde=eX\x00\xba\xed$\xbb[9\xe9e\x11\xfe\xce\x8am$\xf0\xc1\x9ay\xfc\x9d\x1f\xdb\xca\xca\xd7\xcb=2Q\x03\xa0\xbb\xbe\xf4\xf2\xdb!\xf0]\xc7\xff\x86\xa2\xfa/\x8a\xd0wUly\xb1=\x12\xf8\xa0\xe7\xfe\xdc\x1f\xbe\x91\xfd\xea{\xbf\xf2\xf9\x92]\xae\x8b\xb0\x97k)\x80\xce{}$'\xd4[\x0de\xb8\xbe\x17\x86|\x8bmG\xe0\x83\x9e\xfa\xdc\xcb\x1f\xac\xda\xc5P.@?\x8c\xb3\xbbSt&\x09\xc3\xbd\x1f\x16\xa1\xef\xa2\xad\xc3\xbd\x02\x1f\xcc\xe8\xa7\xfe\xe0G\xb3\xf3_>+\xdb%\x94a9\xd1R\x00\xdd\xf7\xa5\x97\xdf\x0e\xa7\xe6T\xbd\xa7\x87\xc9{a\xb8\xf7U\x1c\xee\xdd\x12\xf8\xa0\xe3~\xf7\xdf\x7f\xb7j\x97\x91V\x02\xe8\x95\xd4\x95\x92\xc2y\xdda\xb87L\xf28)\xb6\xa1\xc0\x07\x1d\xf4\xd7\xbe\xfb(\xfb\xd6\xc5\xaf\x97\xed\xf2\xdcz\xb9\x00\xfd\xf2\xa5\x97\xdf\x0e\xe5\xb5\xcek\xfeZ(\xc8\xff~\x9c\xe41Z\xd5$\x0f\x81\x0fj\xfa\xf1?\xfeL\xf6\xad\xb3\xd2\xb0\x17\xce\xf1\xc8\xb5\x14@/\x8dg\xfc\xbd0\xc9\xe3E\xb1]\xc5I\x1e[\x02\x1f\xb4\xd8\x9b\xbf\xf3\xe3\xd9w\xbe\xf3\x9d\xb2]\xac\x97\x0b\xd0S_z\xf9\xed\x10\xf8\xae\xe7\xb8\x890\xdc\xfb\xb0\xa6\xdf\x9e\xc0\x07-\xf3\x17\xfe\xef\x0fe\xbf\xfaK\xff\xa2l\x97\xcb\x22\xec\x1di)\x80^\x1b7t;\xa1\xa6\xdf{q\xb8\xf7`\x91\xc3\xbd\x02\x1f\xd4\xf0c\xd7\x95\x7f2\xca\xb0\x00\xf4_\xd3_\xec\xc3p\xef\xd7\xb3\x05\xd6\xf4\x13\xf8 
\xd1_\xfd\xbd\x9f\xa8*\xc3rz{{{\xa6\xa5\x00\xfa-\x96h9^\xd0\xcd\xdf\xd7\xf4kt\x09\xb77\x1c6\xa8\x16&j\xbc\xfa\xd6\xff,\xdd\xe7\x1f\xfd\xdc\xdf\xfc\xc56L\xbd\x07`).b8[\x940\xdc\xbb[|\xae\x84\xde\xc4\xb0\x8d\x8b\xa0y5\xeb\x8d\x0dnoo\x1d2\x886\xbe\xf2\xd6Y\xfc#\xfb\x84P\x86\xe5W\xfe\xd9/O\xfd\xbd|\xefg\xb2\x9f\xb9\xf8\xef\x1a\x10\x80E:\x8e\xc1\xef\xac\xee/\x1a\xd2\x85\x09~\xf4\xf7>\xf7\xfd\x7f\x87\xf5r\xcb\xca\xb0\xec\xbe\xf5\xd3\xd9\xdf\xf8\xad\xefh4\x00\x16-\xf4(\xbe\x7f\xbf\x84[\x9dI\x1e\x02\x1f\xbc\xe6\xc7~\xeb/g\x8f>\xfc\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf\xfc\xf6\x8f\x94\x96a\xf9\xf2\x9f\xfa\xe1\xec\x87\x7f\xf7\x8f4\x1c\x00\xcb\xf2\xd1\x12n\xd9]M\xbf\xa3\x94\x9a~\x86t\xe1\x81\xcf\xff\xec/\xfc\xceO|g\xe3O\xdf\xff\xffs\x7f\xf1e\xf6\xaf~\xf1\x9fO\xdd\xff\xe7\x7f\xf6\xed\xec\x17\xfe\xd3ok8\x00V\xed4\xbb\x1b\xee\x9d\xb8\xde\xafI\x1b\x10\xbd\xb9\xfd\xd5\xf1Od\x1f\x87\xbd\xe03\xbf}S\xfa;?\x9b\xfd/\x0d\x07@\x1b\x84%\xdc\x9e|\xf3\xf1\x17BQ\xe8\xfbI\x1e\xdf_\x04@\x0f\x1f\xc4\xb0\x97M\x98m\xb5\xf1#\x7f\x94\xfd\x89\x1f\xfd\xcd\xec_\xbe\x7f\xfa\xa9\xdf\xf9\xfb?\xf7\xb7\xb2\xbf\xfb\xeb\xffM\xe3\x01\xd0F\xa1\xc7\x22\xf4\xf6\x1d\x855\x80\x05>\x84\xbd)a\xef\xa1?\xfb\xd9?\xc8\xfe\xcf\xef\xff\x9b\xec_\xff\xbb\x7f\xfb\xd1\xff77\xb7\xb2\x7f\xba\xf1#\xce\xdd\x03\xa0\x0b\xc1\xef@\xe0C\xd8\xabQG\xe9\xef\xfc\xe4\xb7\xb3_\xfa\x8f\x17\xd9;_\xfc\xf3\xca\xb0\x00\xd0f\x9f\x18\xda\x15\xf8\x10\xf6jz\xe7\x07\xbf\x95\xfd\xf5\xef\xfe\xa6\x06\x04\xa0\x8d&N\xde\x10\xf8\x10\xf6f\xf0\xf7\xfe\xf0\xd7\xb3\x9f\xfe\xfd\xff\xaa!\x01h\x830l\x1b>\xd7\x8e\xa6\xad\xc6!\xf0!\xec\x09}\x00t\xd3ev7l{\xf2pF\xee$\xca\xb2 \xec\xcd\xe8\xbf\xfc\xc0\xe7\xb2\x9f\xce\x04>\x00\x96\xae\xf6\x12k\x02\x1f\xc2\xde\x0c\xde\xbe\xfd\x1f\xd9\xcf\xff\xde\xafiT\x00\x96%\x0c\xdb\xdeO\xc2\xb8\xaa\xfb\xcb\x02\x1f\xc2^M\x9f\x1f\xfc\xef_\xfe\xf9\xdf\xfd\xb5\xafiU\x80\xb56,\xb6gK\xb8\x9f\xf3\x18\xf2\xc6\xf3\xdc\x88\xc0\x87\xb0W\xcf\xf1\xaf^<\x1fiU\x80\xf5\xf6\xcd\xc7_8X\xf0]\x84a\xdb\x8f\x8a&7qc&m 
\xec\xd5\xf8\xe3\xfb\x8d\xcb\xaf\x09{\x00\xc2\xdeVq\xf1r\x017=qY\xb4&\xe8\xe1C\xd8\x13\xf6\x00\xa8\xa7\xe9\xde\xbd0l{\xf4z\xed\xbc&\xe9\xe1C\xd8\x13\xf6\x00H\xf4\xcd\xc7_xT\x5c\x5c\x15\xdb\xc6\x9c7u\xbf\xd6m>\xcb$\x8c\xba\xf4\xf0!\xec\x09{\x00\xa4\xdb\x9b3\xec\x85a\xdb<K\xa8\x9d'\xf0\x81\xb0\x07\xc0j\xe43\xfe^X\xf2\xec\xa8N\xed<\x81\x0f\x84=\x00\x96\xec\x9b\x8f\xbf0,.6k\xfc\xca\x5c\xb5\xf3\x04>\x10\xf6\x00X\xbe\xd4\xcf\x86\x8f\x96<\x9b\xb7v^\x93L\xda@\xd8\x13\xf6\x00\xa8\x90X\x8a\xa5\xd1\xdayM\xd2\xc3\x87\xb0'\xec\x01Pm\xda\xe7C\x98\x841\x8eA\xefU[\x1f\xbc\xc0\x87\xb0'\xec\x01P\xed\xf5\xda{\x8d,y&\xf0\x81\xb0\x07@\x0b|\xf3\xf1\x17\xc2gD(\xc5\xb2\xd4\xday\x02\x1f\xc2\x9e\xb0\x07\xc0\xf2\x0c\x8b\xed\xddl\x01K\x9e-\x8bI\x1b\x08{\x00\xd0s\x9f\xd1\x04\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{Z\x15\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00\x10\xf8\x10\xf6\x00\x00\x81\x0fa\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=a\x0f\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00@\xe0C\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x13\xf6\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x10\xf8\x84=\x00\x00\x81O\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>aO\xd8\x03\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\x04>a\x0f\x00@\xe0\x13\xf6\x00\x00\xd66\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xe3\xc0'\xec\x01\x00\xf48\xf0\x09{\x00\x00=\x0e|\xc2\x1e\x00@\x8f\x03\x9f\xb0\x07\x00\xd0\xa2\xc0W\x84\xb3G\xc2\x1e\x00@\x8f\x03_\xe1\xa8\x08i[\xc2\x1e\x00@\x0f\x03_\xec\xdd\x0b\x01m\xd4\xc0m\x09{\x00\x00m\x0b|\x85\x83x9W\xb8\x12\xf6\x00\x00\xda\x1b\xf8\xee\x83\xd5f\x11\xda\xf6\x84=\x00\x80\x1e\x05\xbe\x18\xf06'\x84?a\x0f\x00\xa0\x0f\x81oB\xc0{Rg\xf2\x86\xb0\x07\x00\xd0\xe2\xc0\x17\x83\xdd\x93\x84\x10(\xec\x01\x00t1\xf0\x95\x04\xbb\xca\xe0%\xec\x01\x00t;\xf0\x95N\xde\x10\xf6\x00\x00:\x10\xf8\x8a\xd0\x16B\xd6f\xc9.\x07\xc2\x1e\x00@\x87\x03_V=l\xbb\xfb\xfa\xe4\x0da\x0f\x00\xa0#\x81/\x06\xb9\xdd\x84]\x0f\x84=\x00\x80\x0e\x06\xbel\xcap\xed\x04#a\x0f\x00`u\x06\xb7\xb7\xb73\xfdb\x11\xe0^\x15\x17\x1b\x89\xbb_\x16
\xdb\xb6\xb0\x07\x00\xb0|3\xf5\xf0\xc5\xc9\x1a\x1b5~E\xd8\x03\x00\xe8R\xe0\xcbfX:M\xd8\x03\x00X\x8d\xdaC\xbaq\xb2\xc6Ka\x0f\x00\xa0\x1bf\xe9\xe1\xcb\x85=\x00\x80\xee\xa8\xd5\xc3\xf7\xe6\xf6W\x1f\x15\x17WY\xbd\xf3\xf7\x84=\x00\x80\x15\xaa\xdb\xc3\xb7'\xec\x01\x00\xf4;\xf0\x1d,\xe9q\x09{\x00\x00\xcb\x0e|on\x7fu'k\xae\xbc\x8a\xb0\x07\x00\xd0\xb6\xc0\x97-\xa7wO\xd8\x03\x00XE\xe0\x8b\x935\xf6\x96\xf1\x80\x8a\xfb\x1a:,\x00\x00\xcdI\x9a\xa5\x1bW\xd6x\xb1\xc4\xc7u]lG\xc56\xfe\x8d\xcb\xaf\xbdr\x98\x00\x00\x16\x1f\xf8\xae\x8a\x8b\xcd\x15<\xbe\x9bb;\x09\xe1\xaf\x08~\x17\x0e\x17\x00\xc0\x02\x02_\x1cb}\xbf\x05\x8f\xf52\xbb\xeb\xf5;\xd1\xeb\x07\x00\x90.\xe5\x1c\xbeQK\x1ek\x98!\x1c\x86\x95\xaf\x8a\x10:\x8e\xb3\x86\x01\x00\xa8P\xda\xc3\x17'k|\xaf\xc5\x8f?\xf4\xfa\xe5\xbfq\xf9\xb5\x13\x87\x12\x00`\xb2\xaa\x1e\xbeQ\xcb\xc3\xde\x91\xb0\x07\x00P\xee\x8d\x8a\xeb\x0fZ\xf6xM\xe2\x00\x00h*\xf0\xc5\xc9\x1a\x9b-y\x9c&l\x00\x004\x1d\xf8\xb2v\x0c\xe7\x1egz\xf3\x00\x00\xe62q\xd2\xc6\x9b\xdb_\xdd*.^\xae\xe81)\xba\x0c\x00\xd0\xa0i=|\xa3\x15<\x96\xe3\x18\xf2\xce\x1c\x16\x00\x80\xfe\x04>\xbdy\x00\x00\xcb\x0e|on\x7fu/[\xfcd\x8d\xd3\x18\xf2\x94T\x01\x00Xv\xe0\xcb\x16\xd7\xbb\x17z\xf3\xc61\xe8]iz\x00\x80\xe5\xf8\xc4\xa4\x8d\x05M\xd68\xcf\x14H\x06\x00X\x99\xd7{\xf8F\x0d\xddn(\x90<\x8eA\xefJ3\x03\x00\xf4'\xf0\x85\xde\xbc0d;\xd6\xb4\x00\x00-\x0b|on\x7f5\x84\xbdY&k\xe8\xcd\x03\x00\xe8B\xe0\xcb\xea\xf7\xeeY\xee\x0c\x00\xa0\x03>\x9a\xb4Qc\xb2F\xe8\xcd\x0b\x93/,w\x06\x00\xd0\x11\xf7=|\x07\x15\xfb\xe9\xcd\x03\x00\xe8x\xe0\x1bM\xb9\xderg\x00\x00]\x0f|q\xb2\xc6\xc6\x83\x9fY\xee\x0c\x00\xa0O\x81/\xfb\xb8wOo\x1e\x00@O\x03_\x08x{z\xf3\x00\x00\xfa\xe9\xff\x0b0\x00\xb2\x10\xef\xec0\x8f}\x9d\x00\x00\x00\x00IEND\xaeB`\x82\x00\x006\xc9\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x02|\x00\x00\x02|\x08\x06\x00\x00\x00d\xed|V\x00\x00\x00\x09pHYs\x00\x00\x17\x12\x00\x00\x17\x12\x01g\x9f\xd2R\x00\x00\x00\x19tEXtSoftware\x00Adobe 
ImageReadyq\xc9e<\x00\x006VIDATx\xda\xec\xddO\x8c$Y~\x17\xf0\x88\xf5X\xfe\x87]\xbd\xd2\xf2O+\xdc\xd9\x1cX\x0d\x18U\xad\xc0bA\xac\xaa\xc6B\xc2\x5c\xdc5\x12\x17\x9f*\xe7\xc0\xc1\x87\xa5kN\xec\xad\xa3%\x0e\xbem5#$$\x0e\x93u\xc2\xe2\xe0\xa9>\x1a\x1bu\x95\x16\xc1\x98?\xda*\xb3\x92Y\x0c\xee*\xc4\xb2\x96\xd0j\xbbla\x0c\xd8n\xe2M\xbd\xda\xa9\xe9\xc9|\x11\x91\x19\x99\x19\x11\xf9\xf9H\xa1\xec\xae\x8c\xca\x8c|\x99\x95\xf9\xcd\xf7\xe2\xfd^\xfe\xea\xd5\xab\x0cH\xdb\xfa\xca\x97\xf6\xcb\x8b\xd1\xf5{\x1f\x1ei\x0d\x00\xfa\xe63\x9a\x00j9\x8c\x1b\x00\xf4N\xae\x87\x0f\xd2\xb6\xbe\xf2\xa5Qy\xf1\x22\xfe\xf7\xed\xeb\xf7><\xd1*\x00\xf4\x89\x1e>\xa8V\xdc\xf9\xb7^>\x00zG\x0f\x1f$l}\xe5K\xf7\xca\x8b\xcb\xf0\xcf;?~p\xfd\xde\x87\x97Z\x07\x80\xbe\xd0\xc3\x07i\xfb\xaf\x85\xbd\xa0\xd0,\x00\x08|0\x1c\xd3\xc2\xdd~\xec\xf9\x03\x00\x81\x0f\xfa\xac\x0cu{\xe5\xc5\xfdiWe7=\x7f\x00 \xf0A\xcf\xa5&h\x14\x9a\x07\x80\xbe0i\x03\xa6x\xad\x14\xcb,o]\xbf\xf7\xe1\xa9\xd6\x02\xa0\xeb\xf4\xf0\xc1t\xe3\x1a\xfb(\xd1\x02\x80\xc0\x07=V'\xcc=\x8c=\x81\x00 
\xf0A\x9f\x94!n\x9c}\xba\x14\xcb,c-\x06\x80\xc0\x07\xfds\xb8\xa4}\x01@\xe0\x83u\x8b\xa5X\xb6\x9b\xfcJ\xec\x11\x04\x00\x81\x0fzb\x9e\xf0\xa6\x97\x0f\x80NS\x96\x05\xa2\xb8z\xc6\xf7\xe6\xfcu%Z\x00\xe8,=|\xf0\xb1Ez\xea\xc6\x9a\x0f\x00\x81\x0f\xbao\x91\xd0v`}]\x00\x04>\xe8\xb08\xf1\xe2\xfe\x827\xe3\x5c>\x00\x04>\xe8\xb0qGn\x03\x00\x04>h[\x5c-c\xb7\x85\x9b\xba\xafD\x0b\x00\x02\x1ftS\xd1\xe2m\x09|\x00t\x8e\xb2,l\xb48\xd1\xe22\xab\xbf\x94Z\x1d\x0f\xae\xdf\xfb\xf0R\xeb\x02\xd0\x15z\xf8\xd8t\xe3\x96\xc3^PhV\x00\x04>\xe8\x8ee\xcc\xac\xddW\xa2\x05\x00\x81\x0f:\xa0\x0ce\xfb\xd9\xe2\xa5X\xa6\xdet\xe6\x5c>\x00\x04>\xe8\x84e\x8625\xf9\x00\xe8\x0c\x936\xd8H\xb1\x14\xcb\x8b%\xdf\xcd\xdb\xd7\xef}x\xa2\xb5\x01X7=|l\xaaU\xf4\xc0\x8d53\x00]\xa0\x87\x8f\x8d\xb3\xa4R,\xb3(\xd1\x02\xc0\xda\xe9\xe1c\x13\xed\xaf(\xec\x05\xce\xe5\x03@\xe0\x835Xe\x08\x1b+\xd1\x02\x80\xc0\x07+T\x86\xaf\xbd\xf2b{\x95w\x99\xdd\xf4(\x02\x80\xc0\x07+2^\xc3}\x1a\xd6\x05`\xadL\xda`c\xac\xa8\x14\xcb,o]\xbf\xf7\xe1\xa9g\x01\x80u\xd0\xc3\xc7&\x19o\xe8}\x03 \xf0\x81\xc0\xb7\x02\x07\xb1\x87\x11\x00\x04>X\x862l\x85\xb0w\x7f\xcd\x871\xf6L\x00 \xf0\xc1\xb0\xc3\x96\xc0\x07\x80\xc0\x07\xcb\xb0\xf5\x95/\xed\x94\x17\xbb\x1d8\x94\xfb\xb1\xa7\x11\x00\x04>hY\x97\xca\xa2\x08|\x00\xac\x9c\xb2,\x0cZ\x5c\xe5\xe2{5w\xbf\xbe\xfd\xb5\x86w\x13~\xefeV\xff\x1c\xc1/^\xbf\xf7\xe1\xb9g\x07\x80U\xd1\xc3\xc7\xd0\x8dk\xecsUn\xef\x94\xdb\xa8\xdc\xe6\x09b\xe7e\x80\x1b\xc5\xdb8\xab\xb1\xbfB\xcc\x00\x08|\xd0\xa2T\xb8z\x96\xdd\x14D\x1e\x95\xdb\xa4\xdc^.rG\xf16\xf6\xca\x7f~\xb1\xdc\x8e\x13\xbb\x1eX_\x17\x00\x81\x0fZP\x86\xaa\xb0\x86\xed\xeb\xc3\xaca\xf8\xf5i\xb9=(\xc3\xd9\xfe2V\xbf\x08\xc3\xb5\xe56.\xff\xf9\xd9r{\x92\xdd\xf4 
\xben\xec\x19\x02`U\xde\xd0\x04\x0c\xd8\xdd\xde\xbd\x8br;\x0a\xbdp\xab\xba\xf3\xd8cX\x84-\xce\xce\x0d\xdb\xee\x9dc;\xf2\x14\x01\xb0\x0az\xf8\x18\xa4\xb8\xaaE\x08Wah5\x0c\xdb\xee\xac2\xecM\x09\x7f\xb7\xc3\xbd\x0f\xe21\xdd\x8b=\x90\x00\xb0tz\xf8\x18\xb20l{\xd9\xa5\x03\x8a\xc73\x8e\xe7\xf09\x8f\x0f\x00\x81\x0f\x16\x0cV]>\xbe0\xdc\xfb\xd23\x05\xc0*\x18\xd2\x05\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x10\xf8\x00\x00\x04>\x00X\x91<\xcf\xef\x95\xdb\x9e\x96\x00\x81\x0f\x80a\x86\xbd\x9d\xf2\xe2RK\x80\xc0\x07\xc00\xc3\xdeay\xf1\x8dr\xdb\xd2\x1a\xb0\x5c\xd6\xd2\x05`\xd5A\xef^y1)\xb7\x87\xb7?{\xf5\xea\xd5\xa9\x96\x01\x81\x0f\x80a\x84\xbd0\x84{Rn\xf7\xb5\x06\xac\x8e!]\x00V\x15\xf6n\x87p_\x0f{gZ\x07\x96K\x0f\x1f\x00\xcb\x0ez\x9f\x1a\xc2\x05\x04>\x00\x86\x13\xf6\xea\x0c\xe1\x9ej)X.C\xba\x00,+\xec\xcd\x1a\xc2\x05VL\x0f\x1f\x00m\x07\xbd\xa6C\xb8\xa7Z\x0d\x04>\x00\xfa\x13\xf6\xcc\xc2\x85\x0e2\xa4\x0b@[ao\x9c\xdd\xf4\xd65\x0a{j\xf0\xc1\xf2\xe9\xe1\x03`\xd1\xa0\x17\x86p\x8f\xca\xed@k\x80\xc0\x07\xc0\xf0\xc2^\x18\xc2\x9d\x94\xdb\xf6\x9c7\xa1\x06\x1f\xac\x80!]\x00\xe6\x0d{\xe3\xecf\x08w[k@\xb7\xe9\xe1\x03\xa0i\xd0ks\x08\xf7T\x8b\x82\xc0\x07@\xb7\xc2\xde\xa2C\xb8\xc0\x1a\x18\xd2\x05\xa0n\xd8\x1bg\xed\x0f\xe1\x9ejYX>=|\x00T\x05=\xb3pA\xe0\x03`\xc0ao\x94\xdd\x14R^\xca\x10\xae\x1a|\xb0\x1a\x86t\x01\x98\x15\xf6\xf6\xcb\x8b\xf3\xcc\xf9z 
\xf0\x010\xc8\xb0\x17\x86p?(\xb7\xad%\xde\x8d\x1a|\xb0\x22\x86t\x01\xb8\x1b\xf4F\xd9\x12\x87p\x81\xf5\xd0\xc3\x07\xc0m\xd8[\xf5\x10\xee\xa9V\x07\x81\x0f\x80\xd5\x85\xbdU\x0c\xe1\x02kbH\x17`\xb3\x83\xde([\xdf\x10\xee\xa9g\x00VC\x0f\x1f\xc0\xe6\x86=\xb3pA\xe0\x03`\xc0ao\xedC\xb8j\xf0\xc1\xea\x18\xd2\x05\xd8\xac\xa0\x17V\xcd\x08AK\xaf\x1el\x10=|\x00\x9b\x13\xf6\xf6\xca\x8b\xcb\x8e\x84=5\xf8@\xe0\x03\xa0\xe5\xb0W\x94\x17\xcf3\xb3pa#\x19\xd2\x05\x18v\xd0\x0bC\xb8a\x16\xeen\xc7\x0e\xed\xd4\xb3\x03\x02\x1f\x00\xed\x18\xc5p\x15f\xe3\xee\xc4\x9f\xedj\x16\x10\xf8\x00\x18\x88W\xaf^\x9d\xc7\xb0\xf7)\xf1\x9c\xbe\xe0\xf5\xcbU\x04\xc2S\xcf\x0e\x08|\x00,?\x0c\x9e\xa6\xc2\xd7k\x81p/\xd33\x08\x02\x1f\x00\xc3\x0d\x84e\xf8\xbb\x5c\xd2m\x03+`\x96.\x00Iq\x86\xef}-\x01\x02\x1f\x00\xc3\x0c{a\x96\xefa\xcb7\xab\x06\x1f\x08|\x00tHX\x82-U\xbb\xefI\xb9]k&\x10\xf8\x00\xe8\xa18i\xe3 \xb1\xcb\xc5\xabW\xaf\x8a\xf2r\xdc\xf0\xa6O\xb5.\x08|\x00tCQq\xfdGC\xbde\xe8\x0b\x85\x9d\x9fj.\x10\xf8\x00\xe8\x91<\xcf\xc7Y\xba\x0c\xcb\xb3\xbb3m\xcb\x7f\x87\xf0wQ\xf3\xe6O\xb50\xac\x96\xb2,\x007\x01g\x94\xdd\xacJ\xb1Wna\xa2BrU\x8a2\xe0\xe4\x03n\x8b\xf0\xf8\x8b\x8a\xdd\xa6M\xe4\xd8\xcfn\x8a<[\xaf\x17\x04>\x80N\x84\x9a\x9d\xec\xe3\x82\xc2{]\x0c)\xf1\x1c\xba\xe7\xf1\xbfg1L]\x96\xdbi\x5cAcYB\x98K\x95ayR\xde\xff\xe5\x94\x10|\x19{\x06?H\xdd\xb8\x1a| 
\xf0\x01,;\xe4\x85@\xb2\x9f\xf5\xa3\xae\xdc\xe8\xce\xbfw\xb3;\xbd\x8d\xe5c\xb9\x0d\x81'm\x06\xc0\xd8\xd3\x99*\xc3r\x95\xdd\xcc\xdc\x9d\x15\xe6N\xca\xdb\x08\xe7\xf3=\xf2\x8a\x03\x81\x0f`U!\xef^\x0cx!\xc4l\xf7\xec\xf0G\x15\xd7\x7f?\x04\x96\x8f3\x04\xb1\xd3\x10\x00\xe3$\x8ayU\x95a)\xca\xdb\x7f\x99\xba\x81p>_\x0c\xd7\xd3\x86\xc3\xd5\xe0\x8350i\x03\x18l\xd0\x8b+D\x5c\x96\xdb\xfb=\x0c{\xc1^\x83}C\x8fe(\xa1\xf2A\xf9\xb8_\x96\xdb\xa4\xdc\xf6\x1b\xb6Y\xb8\xbf\x87\x89]\xce\xca07\xa9ys\xe1\xbe\xa7\xd5\xe7{\xe9\xd5\x09\x02\x1f@\x9bA\xefq\xd6\xef\x09\x04\xa39\x7fo\xebN\xf8\x0b\xe7\xd5\x15q\xa8\xb6\xcaQ\xc5\xf5E\xdd\x03\x88\xbd\x80\xd3\x02\xe7\xb9W)\xac\x9e!]`\x88\x0e\x17\x08z\xd7\xd9\xc7\x93#\xc2\xf6r\x8d!\xe5~K\xb7\x11\x82\xef\xe32\xf4=\x0b\xa1n\xda\xa4\x898\xd9\x22\xd5\x0bz\xdct\xb2E\xd8\xbf\xbc\xdd'\xf1\xfeo\xe9\xe1\x03\x81\x0f`1\xa1g\xa9\x0c\x19\xe1\x1c\xb6\x83\x9a\xbf\x12\xce);\x8d\xdby\xd5\xf9i+\xf6V\xf6q\x89\x98\xbdx\xb9H\x8fe\x18\xae}\x18\xcf\xf7+n\x87g\xe3y\x8eG\x15!\xf8p\xce\xe7\xa3\x88C\xc5\xb7\xe7\xf3\xe9\xe1\x03\x81\x0f\xa0\x15G\x89\xc0\x17\xc2\xce\xed\xcc\xd6\x93.?\x88;=j\xdf?\xce84\xbbwg\x9b\xa7\x170\xfc\xce\xfbq\xe8\xbb\xa8\x11$\x8f\x16\x0c\xc2ah\xf72\xde\x87\x1e>\x10\xf8\x00Z\x09J\xe7e\x98\x09\xab>\xdc\x1d\xa2<.\xb7I\xdfk\xc0\xc5\xfaw\x93\xb8\xdd\xad'8\xce\x9aOL\x09\xc1\xef\x1f\x96\xdb\xe7\x13\xfb\x5c\xc5\xf5r\x179\xe6\x97q\x02\xc9\xf3%\xd7\x0f\x04f0i\x03\x18\xaa\xd0\xcb\x17\x86\x22\xc39d\x9f-\x83\xc6x\x88\x05\x7fC\x80*\xb7\xd0\x03\x17\x82\xdf\x83r{7\xab\xbf\xc4YP\xd5\xe3v\xd8\xd2q\x9e\xc6c\x03\x04>\x80\xd6\x82P\xe8\xcd\xbb\x17z\xa7:v^\xde2\x1f\xf3\xe5k\xe1/\x14@\xbeJ\xfcJ\x08\x86\x7f)q\xfdY\x9b\xc3\xde\xe1\xd8\xbc2A\xe0\x03\xa0\xdd\xf0wXn\xa3\xf2\xbfo\x97\xdb\xb3)\xbb}\xae\xe2f\xc6Z\x12\x04>\x00\xfa\x11\xfe\xc2\xea\x1b\xe1\x1c\xba\xdb^\xbf0\xd4\x1df'\xa7\xce\xdd{:m\xbd\x5c@\xe0\x03\xa0\xdb\xc1\xef\xa3^\xbf\xec\xa6\xa0\xf3Nb\xd7\x10\x08\x0b-\x06\x02\x1f\x00\xfd\xb5\xf0z\xb9\x80\xc0\x07@G\xc52.\xa9\xa2\xd4\x17&W\x80\xc0\x07@\xbfU\x85\xb9CM\x04\x02\x1f\x00=\x15\xd
7\xcb\xddM\xec\xf2l\x88\xb5\x0a\x01\x81\x0f`S\xc2^X/\xb7\xa8\xd8M\xef\x1e\x08|\x00\xf4X\x08s\xa9uw\x9f(\xc3\x02\x02\x1f\x00=\x95\xe7\xf9(K\xf7\xde\x85\xd58L\xd4\x00\x81\x0f`)A\xe4\xa8\xdc\xce\xe3p#\xcb\xa3\x0c\x0b\x08|\x00k\x09{{\xe5\xc5\xa3r\xdb.\xb7\xcbX.\x84\xe5\xb4\xf3\xc3\xc4.a\xbd\xdc\xc9\x12\xeew\x22\xcc\x83\xc0\x07pw\x081\xf4>}#\xce\x22ey\xed<M\xb1\x8c\xb0\x97\xdd\xd4\xfa\x0ba\xfeT\x98\x07\x81\x0f\xd8@e\x008\x8ca\xe0u\xef\xc7\xb0@;\xed<\x9e\xd1\xce\xb7\x8e\xdb.\xc3r'\xec\xdd\x12\xfa@\xe0\x0360\x84T\x95\x07\xd9\xd3J\xad\xb5s\xaaw\xaf\xf5\xf5r\xa7\x84\xbd[[B\x1f\x08|\xc0f9\xcc\xd2\x13\x08\xc6\x9ah%\xed|\xb4\x842,\xa9@'\xf4\x81\xc0\x07l\x82\xd8\xeb\x94*\x0frl\xa5\x87V\xdayT^<N\xecrU\xb6s\xb1\x84\xbb\xde+\xb7\x0b\xa1\x0f\x04>`\xb3\xa5z\x9d\xae3+=\xb4eR\xe3yh],\xed\x22\xf4\x81\xc0\x07l\xb8q\xe2\xba#\xb5\xe0\x16\x17\xcb\xb0\xa4\xd6\xcb\x0deXN\x96u\xff\x0dC\x9f\x92- \xf0\x01\x03\x0b\x22!\xec\xcdZ\xda+\xf4\xeeY\xe9\xa1\x1d\x93\x8a\xeb\x97\xde\x8b*\xf4\x81\xc0\x07l\xae\xfd\xc4uz\xf7\xda\x09\xd5U\xeb\xe5>-\xdb\xf9|\x15\xc7\x12\x9f\xcfq\x0c\xf3\xb3\x84\x92-'\x9e9\x10\xf8\x80a\x04\x91Q6{\xb5\x07\xbd{\xed\xb4qU\xb9\x9b\xd6\xcb\xb0\xd4\x08}!\x5c\xeeU\x84\xbe]\xb5\x17A\xe0\x03\x86!\xd5\xbbw\xa2w\xaf\x15\x9d\x5c/7\x86\xbe\xaaa\xe4\x83\xd8;\x09\x08|\xc0@\x03\x9f\xde\xbd\x05\xc5\x19\xaf\x07\x89]B\x19\x96\xb5\xb5s\x5c\xab\xf7I\xc5n_+\x1f\xc7\xbeg\x13\x04>\xa0\x9fa$\x0c5\xce\x9a5z\xb1\xaas\xca\x06\xae*\xcc\x8d\xd7}\x80\xb1\xee\xdf\xb3\x8a\xdd&q\xf8\x1f\x10\xf8\x80\x9e\xd9K}\xc0k\x9e\x85\x03\xf5~\x96.\xc3\xf2\xacC\xc5\xacC\xf0\xac\x9a\xb9{b\xe6.\x08|@\xff$\xcf\xdf\xd3<\x0b\x85\xbd\xaa\xf5r\x83\xce\x9c\x1b\xd7`\xe6\xaea~\x10\xf8\x80\x9e\xd9\x9b\xf1\xf3\x8b%\xac\xe5\xbai\xaa\xca\xb0<\xe9Z\x1b\xc7!\xfc\xa2b\xb7\x83X\xb7\x11\x10\xf8\x80\xae\x8b=P\xb3\x02\xc9\xa9\x16Z\xa8mGY\xba\xf7\xae\xb3\xe5n\xe2\x04\x92\xaa\xf3\xf9\x8e,\xbf\x06\x02\x1f\xd0\x0f\xa9\x0fl\x81o1E\x96.\xc3r\xd8\xf1r7\xe3,=\xb4\x1b\x1e\xdb\xc4\xd3\x0c\x02\x1f\xd0}{\x02_\xfb\xe2z\xb9\xa92,g\xb1\x14Jg\xdd9\x9f/\xc59\x9e 
\xf0\x01=0\xab\x87\xefB\xb1\xe5\x85T\x0d\xd5\x16}x\x10\xe5k \x04\xbaiC\xbba&\xef\x17c)\x17@\xe0\x03:nVy\x8dKM3\x9f8\x99a;\xb1\xcbq\x87\xca\xb0\xd4\x11\xceC\xbc;\xb4\x1b&\x9a\xec\xa8\xcf\x08\x02\x1f\xd0\x1f\xb3\xea\xc3\xf90\x9f/\xecU\x95aY\xf9z\xb9\x8b\x8a\xb3\x88\xc3c\xd2\xab\x07\x02\x1f\xd0\xd3p\x92\x09|\xad\x0a\xbda\xa9\x89\x1aG},u\x13B\x9e^=X\xae74\x01\xb0Do\x95\xdb(n{\xf12\x94iq\xfe^\xf3\x00\x1d\xda\xeeqb\x97+\xbdc\x80\xc0\x07\xacT\x9c\x94q\xaa%Z3\xa9\xb8\xfeP\x13\x01\xb3\x18\xd2\x05\xe8\xb8X\x86%\xb5^\xeeY\x9c\xf1\x0a \xf0\x01\xf4\xd4\xa4\xe2z\xbd{\x80\xc0\x07\xd0Wy\x9eW\xad\x97{l\xb2\x03 \xf0\x01\xf47\xec\x85\x99\xceEb\x97P\x86E\xef\x1e \xf0\x01\xf4X\x08{\xa92,\x85\x15K\x00\x81\x0f\xa0\xa7\xf2<\x0f\xcb\xd2=J\xec\x12\xca\xb0\x1ci)@\xe0\x03\xe8\xaf\xaa07\xd6D\x80\xc0\x07\xd0Sy\x9e\xefg\xd5eXN\xb5\x14 \xf0\x01\xf4\x97\xde=@\xe0\x03\x18\xaa<\xcf\x8b,]\x86\xe5I\x1f\xd7\xcb\x05\x04>\x00\xb2\xef\xaf\x97\x9b*\xb3\x12\xca\xb0\x98\xa8\x01\x08|\x00=Vd\xe92,\x87\xca\xb0\x00\x02\x1f@O\xc5\xf5r\x0f\x12\xbb\x5c\x94ao\xa2\xa5\x00\x81\x0f\xa0\xbf\x8a\x8a\xeb\xad\xa8\x01\x08|\x00}\x95\xe7\xf98K\x97a9V\x86\x05\x10\xf8\x00\xfa\x1b\xf6\xc2z\xb9\xa9\x89\x18a\xa2F\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pmj\xa2\xc6\x912,\x80\xc0\x07\xd0S\xb1\x0c\xcb\xe3\xc4.W\x992,Um\xb8\x13{I\x81\x8474\x01\xc0\xdaT\x859eX>\x19\xee\xf6\xca\x8b\x9d\xb8\x85\xb0|{\xde\xe3[\xe5v\xaa\x85@\xe0\x03\xe8bxy\x98\xd8%\xac\x97{\xa2\xa5>\xe1\xf9\x8c\x9f\xef\x08|\x90fH\x17`=&\x15\xd7+\xc32%\x04\xcf\xf8\xb9!]\x10\xf8\x00\xba%\xcf\xf3\x10\xe6R\xeb\xe5\x862,\xe7Z\xeaS.g\xfc|O\xd3\x80\xc0\x07\xd0\xa5\xb0\x17z\xa3\x8a\xc4.\xa1\x0c\x8b\xde\xbdf\x81\x0f\xa8\xe0\x1c>`\x15!g/\xbb9\xc9>l\xb7\xff\x0e=\x5coo\xe0yj!\xec\xa5\xca\xb0\x14&j4\x0e|\xbb\x9a\x06\x04>`\xfdNf\x84\x9c\xbdx\xdd\xa6\x04\xdf0\xb9\xe0Qb\x97\xab2\xec)\xc3\xd2<\xf0\x01\x15\x0c\xe9\x02\xab\x0a|\xd3\xecoX;T\x85\xb9\xb1\x97\x0a 
\xf0\x01C\x0b|\xf7c\xaf\xd7\xe0\x95\x8f3\x84\xdb\xd4\xd0\xe3\x99\xf5r+]&\xdawO\xf3\x80\xc0\x07\xacQ<O\xefz\xc6\xd5\xe3\x0di\x06\xbd{\x8b\xbf\x8e.\xb5\x02\x08|@\xb7M65\xe8\xe4y^d\xe92,O\x85\x19@\xe0\x03\x86`V\x0f\xd7V\x19\x88\x06\x1b\xfab\x19\x96T\x99\x95\xd0\xf3Yxy\x00\x02\x1f\xd0{\xb1\x07k\xd6J\x09C\x0e<!\xe8\xa6\xca\xb0X/\x17\x10\xf8\x80\xc1\x85\x9fi\xee\x0f\xb1\x97/N$8H\xecrQ\x86\xbd\x89\x97E\xa3/\x0e\xf9\x8c\xedT\xeb\x80\xc0\x07t\xe3\xc3:L\xde\xb8\x9aqu1\xc0\x87\x5c\xf5\x98\xac\xa8\x01\x08|\xc0 \xcd\x0aA\x83\xea\xe5\x8b\x8f%U\x86\xe5\x99^)@\xe0\x03\x06)\x0ea\xce\xec\xe5\x8b\x93\x1c\xfa\x1e\xf6\xac\x97\x0b\x08|\xc0\xc6\x9b\x15\x86\xee\x0f$\x08\x1df\xe92,G\xca\xb0\x00\x02\x1f0h\x15\xbd|\x8f\xf3<\x1f\xf5\xf5\xb1\xc5c\x7f\x9c\xd8%<n\xeb\xe5\x02\x02\x1f\xb0\x11R=y\x93\x1e?\xae\xaa0W(\xc3\x02\x08|\xc0F\x883vg\xd5\xe5\xdb\xcd\xf3\xbcwC\xbb\xb1\x0c\xcb\xc3\xc4.g\xca\xb0\x00\x02\x1f\xb0i\xc6\x89\xeb\x8a\x1e\x0e\xedV\xf5\xee\x99\xa8\x01\x08|\xc0f\x89\x13\x17\x9e\xcc\xb8:\xacN1\xe9\xcbc\x89=\x92\xdb\x89]\x8e\xcb\xc7{\xeeY\x07\x04>`\x13C_Q^\x5c\xcc\xb8\xba\x17C\xbb\xca\xb0\x00\x02\x1f@\xb5\xfd\x18\x8a\xa6\xf9Z\x19\xa8v:~\xfc!\xec\xa5\xd6\xcb=2Q\x03\x10\xf8\x80\x8d\x16\x87v\xc7\x89]&]=\xf6x\x9e\xe1\xa3\xc4.W\xb1\x17\x13@\xe0\x036>\xf4\x85Y\xbbOg\x5c\xdd\xe5s\xdf\xaa\xc2\xe8\xd8\xb3\x0b\x08|\x00\x1f\x87\xbep\x9e\xdb\xf1\x9d\x1f\x85a\xdew\xca\x9fw24\xe5y\x1e\x86\xa2S\xeb\xe5\x9eY/\x17\xe8\x8274\x01\xd0\xb1\xd07\x8e\x93 F\xe56\xee\xf8\xcc\xd6\xaa2,c\xcf( \xf0\x01L\x0f}\xfb]?\xc62\x94\x16Yz\xbd\xdc\xa7\xd6\xcb\x05\xba\xc2\x90.@\xf3\xb0\x17z SeV\xc2Pt\xa1\xa5\x00\x81\x0f\xa0\xbf\xc2Pn\xaa\x0c\xcb\xa12,\x80\xc0\x07\xd0Sq\xbd\xdc\x83\xc4.\x17\xd6\xcb\x05\x04>\x80~+*\xae\xb7\xa2\x06 
\xf0\x01\xf4U\x9e\xe7\xe3,]\x86\xe5\x992,\x80\xc0\x07\xd0\xdf\xb0W\xb5^n\xa0w\x0f\x10\xf8\x00z,\x84\xb9T\x19\x96'\xca\xb0\x00\x02\x1f@O\xc5\xf5rS\xbdwWYu\x11f\x00\x81\x0f\xa0\xc3\xaa\xca\xb0\x14\xca\xb0\x00\x02\x1f@O\xc52,\x0f\x13\xbb\x9c)\xc3\x02\x08|\x00\xfdV5T[h\x22@\xe0\x03\xe8\xa9X\x86e;\xb1\xcb\xb12,\x80\xc0\x07\xd0\xbd\x107)\xb7\xf3r\xdb\xa9\xd8/\x94aI\xf5\xee\x85\xf5r\x95a\x01\x04>\x80\x8e\x85\xbdqv\xb3,Z\xe8\xb5;-\xff\x9f\x0alE\x96\x9e\xa8qd\xa2\x06 \xf0\x01t+\xec\x85\x1e\xbd\xf7\xef\xfc(\x84\xb9\xaf\x95??\x8deW\xee\xee\x1b\xfe\xff(qsWe\xd8+\xb4*\xd0\x17oh\x02`\x03\xc2^\x18\x9e=\x9dquX*\xedE\xb9\xcf\x93\xec\xe3^\xbbI\xc5M\x1e\xcey\x1c\xfb\xf1\xb6/\xcb\xed\xf5\xde\xc1\xf3:?s\xce \xf0\x01L\x17B\xd2V\xc5>\x8fC\x90+C\xd9?\xc9\xd2\xeb\xe5\x862,'\xf3\x1cD\xf8\xbdX\xe6e\xda\xf1\xec\xd6\x0c\x8dS\x8f\xe9\xb5\xff\xbf\x8ca\xb1\xeag\x97V\x07\x01\x81\x0f`(B@\x1b\xd5\x08}\xe1\xfa\x7fP\xb1\xcfx\x91\x03)\x03\xd6\xed\x84\x91pL\xdb-=\xbeia\xf1\xe1\x9c\x012\xac\x1a\xf2z\x08\xac\xd5\xfb\x18~\xe6\xbcF\x10\xf8\x00\xd6\x22\x9co\x17f\xe7f7\xb3n\x1f.pSO\xdb\xe8\x11\x0b\xb7\x11{\xfaN\xb2\x9a={+t?\xfb\xf4\x9a\xc1\xb5\x8fqJ\x80<\x9b\xb2\xdbi\x0c\x87'^\x9d \xf0\x01\xb4\x19\xfaBP\xdb\x8fAk2%\xd4T\xf9\x7f\xe5\xf6\x9f[<\x9e\xd0\x13\xb6\x17\x83\xe8\xc1\x80\x9b~ZX\xac*y\x03\xb4\xcc,]`\xd3\x82\xdfi\xb9\x8d\xca\x7f\x86I\x1a\xd7\x0d~\xf5\x07\xcb\xed\x1f\xc7\x1a~\xf7Z<\x9eqy\xf1\xee\x06=\x05\xa1\xc7o\xcf\xd0/\x08|\x00\xab\x08~Evs^\xdfq\xc3_\xbd\xad\xe1\xb7\xd3\xe2\xb1\x84\xde\xaew\x1a\x06\xd0>\x0a+\x93\x08{ \xf0\x01\xac4\xf4\xbd\x8c=l\xffa\x8e\xd0\xf7\x8d2\xf4\x15m\xf5\xf6\x95\xc71)/\xf6\x06\x1c\xfa\x9e\xc6\xb6\x06\x04>\x80\xd5\x8a\xabo\xfc\x959\x7f=\x94r\x09\x130\x8a\x96B_\x98\xf9\x1az\x0e/\x06\xd6\xcc\xef\x94\x8f\xcd2t 
\xf0\x01\xac%\xec\x85\xde\xb9E\xc3Z(\xe5\xd2Z\x98\x89\x93K\xf6\xb2\xe9\xb3[\xfb&\xf4V\xbe\x1d{/\x01\x81\x0f`-BPK\xcd\xd6}\x96\xdd\xd4\xa5\xab\xd2jy\x918\xd4\x1cB\xdfq\x8f\xdb6\x84\xbd=\xa5W@\xe0\x03X\x9b\xb8^n\xaag.\x04\xbdq\x9c\xd1\xfbNE\xf0[J\x89\x91\x1e\xcf\xe0\xbd\x88a\xef\xdc+\x0d\x04>\x80u\x0a!-\xb5\xf2Fq;\x9b4\x0cI\xc6\xe0\xf7V\xf6\xe9^\xb7\x8be\x06\x9b\x1e\xce\xe0\x15\xf6@\xe0\x03X\xbfX|9\xb5\xe2\xc6\xd9\xb4\xf3\xceb\x0d\xbfq\xf9\xcf\x07\xd9\xc7u\xfc\x96^@\xb8G3x\x9fej\xec\x81\xc0\x07\xd0\x11U!\xad\xa8\x08`\x97\xa1\x8e_\xb9\xdd[\xd5\x84\x84\x1e\xcc\xe0\x0d5\xf6\xf6\x85=\x10\xf8\x00\xd6.\x96a\xd9\xae\x08.\xa7]<\xf6\x0e\xcf\xe0}W\x8d=\xe86k\xe9\x02\x9b\x14\xf6\xaa\xd6p\x0dC\xa6\x87\x1d<\xeep\xcc\xb7=g\xa1\xa7\xaf(\xb7\xaf\x96\xdb\xdf\xee\xc0\xe1\xbd\xa3\xec\x0a\x08|\x00]\x12\xc2\x5cj\xa2\xc6QG\x87$\xc3P\xeen\xc7\x8e)\x84\xe3\xfd\xae\xf6\x86\x02\x02\x1f\xb0\x81b\x19\x96\xc7\x89]\xae\xe2\xfa\xba]\xb4\xd3\xb1\xe3\xb9\xad\xb1g&.\xf4\x84s\xf8\x80M1\xa9\xb8\xbe\xcbK\x7fmu\xe8X\xc2\xa4\x91\x1da\x0f\xfaE\x0f\x1f0x\xb1\x0cKjH\xf4\xac\xab+B\xc4c\xefR\xd8Sv\x05zH\x0f\x1f\xb0\x09&\x15\xd7w\xb9w\xef^G\x8e\xe3X\xd8\x83\xfe\xd2\xc3\x07\x0cZ\x9e\xe7U\xeb\xe5>\xed\xf8\xf0d\x08X\xcfb\xf0\x0b\xdb\xf6:\xc2\x9e\xb2+ \xf0\x01t5\xec\x85\x80T$v\xb9\xae\xb8~\xed\xe2,\xd8\xd3\x8a\xc7\xf8\xbd%\x1e\x82\xb2+0\x00\x86t\x81!\xab\xbd^n\x8f-s\x06\xaf\xb0\x07\x03\xa1\x87\x0f\x18\xa4<\xcfC\x10:H\xec\x12\xca\xb0\x1c\x0d\xe0\xa1.#\xf0)\xbb\x02\x03\xa3\x87\x0f\x18\xaa\xaa07\x1e\xc8\xe3l;\xf0\x09{ \xf0\x01t_\x9e\xe7\xfbY\xba\x0c\xcb\xb3\x01\xad\x10\xd1f\xe0\x0beWF\xc2\x1e\x08|\x00]\x0f{U\xeb\xe5\x06\x87\x03z\xc8m\xcd\xdaUc\x0f\x04>\x80\xde\xa8*\xc3\xf2\xa4\x0c5\x97\x03\x09\xb7{-\xddT(\xbb\xb2#\xec\x81\xc0\x07\xd0\x87\x004\xca\xd2\xbdw\xe1\xfc\xb4\xa3\x01=\xe46\x86s\x9f\xaa\xb1\x07\xc3g\x96.0$E\x96.\xc3r8\xb0^\xac\xd1\x82\xbf\xaf\xec\x0al\x08=|\xc0 
\xc4\xe1\xcdT\x19\x96\xb3\x01\x86\x9by{\xf8\xae\x85=\xd8,z\xf8\x80\xa1\xa8\x1a\xaa-\x06\xf8\x98w\xe7\x0c{\xca\xae\xc0\x86\xd1\xc3\x07\xf4^\x9e\xe7\xe3,=[\xf5x@eXn\x1f\xf3<\xbd{W\xc2\x1el&=|@\xdf\x83OU\x19\x96\xce\xaf\x97;\xa7\xa6\x81O\xd9\x15\xd8`z\xf8\x80\xbe\x0b\xb3rS\x135\x8e\x86R\x86\xe55\xa3\x06\xfb>\x13\xf6`\xb3\xe9\xe1\x03z+\x96ay\x9c\xd8%\xac\x97[\x0c\xf4\xe1\xef\xd5\xdc\xefX\xd9\x15@\x0f\x1f\xd0g\x93\x8a\xeb\x0f\x07\xfc\xd8\xeb\x0c\xe9>\x11\xf6\x80@\x0f\x1f\xd0K\xb1\x0cKj\x96j(\xc3r2\xd0\xc7>\xca\xd2\xc3\xd8\x81\xb2+\x80\xc0\x07\xf4^U\x98\xd9\xd4\xde\xbd0Ie<\xd4\xb0\x0b\x08|\xc0\x86\xc8\xf3\xbcj\xbd\xdc\xa7\x03/=\xb2\x93\x08{\xca\xae\x00\x9f\xe2\x1c>\xa0oa/\x94a)\x12\xbb\x0c\xb5\x0c\xcb]{S~v!\xec\x01\xb3\xe8\xe1\x03\xfa&\x84\xb9\xd4\xf9k\xc5\x06\x94\x1f\x19\xcd\x08{\xca\xae\x00S\xe9\xe1\x03z#\xae.\xf1(\xb1K(\xc3r4\xf06\x08=\x9cw\x87\xb3\x8f\x85=\xa0\x8a\x1e>\xa0O\xaa\xc2\xdcx\x03\xda\xe0\xee\xf9{j\xec\x01\xb5\xe8\xe1\x03z!\xcf\xf3\xfd\xac\xba\x0c\xcb\xe9\x064\xc5^\xbc|W\xd8\x03\xea\xd2\xc3\x07\xf4\x85\xde\xbd\x1b\xa1\x87O\x8d=@\xe0\x03\x86%\xcf\xf3\x22K\x97ay2\xd0\xf5r\xa79\xdc\xa0\xc7\x0a\xb4\xc4\x90.\xd0\xf5\xb07\xca\xd2E\x94C\x19\x96\xa3Mi\x0fa\x0f\x10\xf8\x80!*\xb2t\x19\x96C3T\x01\x04>\xa0\xa7\xe2z\xb9\x07\x89].\x9c\xcb\x06 \xf0\x01\xfdVT\x5c\x7f\xa8\x89\x00\x04>\xa0\xa7\xf2<\x1fg\xe92,\xc7\x1bR\x86\x05@\xe0\x03\x06\x19\xf6\xc2j\x12\xa9\x89\x18\x9b\xb0^.\x80\xc0\x07\x0cZ\x18\xaaMM\xd482[\x15@\xe0\x03z*\x96ay\x9c\xd8\xe5*\xdb\xa02,\x00\x02\x1f0DUaN\x19\x16\x00\x81\x0f\xe8\xabX\x86\xe5ab\x97\xb0^\xee\x89\x96\x02\x10\xf8\x80\xfe\x9aT\x5c\xaf\x0c\x0b\x80\xc0\x07\xf4U\x9e\xe7!\xcc\xa5\xd6\xcb\x0deX\xce\xb5\x14\x80\xc0\x07\xf43\xec\x852,Eb\x97P\x86E\xef\x1e\x80\xc0\x07\xf4X\x08{\xa92,\x85\x89\x1a\x00\x02\x1f\xd0Sy\x9e\xef\x94\x17\x8f\x12\xbb\x5c\x95aO\x19\x16\x00\x81\x0f\xe8\xb1\xaa07\xd6D\x00\x02\x1f\xd0Sy\x9e\xefg\xe9\xf5r\xcf\xac\x97\x0b 
\xf0\x01\xfd\xa6w\x0f@\xe0\x03\x86*\xcf\xf3\x22K\x97ayb\xbd\x5c\x00\x81\x0f\xe8o\xd8\x1be\xe92+\xa1\x0c\x8b\x89\x1a\x00\x02\x1f\xd0cE\x96.\xc3b\xbd\x5c\x00\x81\x0f\xe8\xab\xb8^\xeeAb\x97\x8b2\xecM\xb4\x14\x80\xc0\x07\xf4WQq\xbd\x155\x00\x04>\xa0\xaf\xf2<\x1fg\xe92,\xcf\x94a\x01\x10\xf8\x80\xfe\x86=\xeb\xe5\x02\x08|\xc0\xc0\x850\x97*\xc3r\xa4\x0c\x0b\x80\xc0\x07\xf4T,\xc3\xf28\xb1\xcbU\xa6\x0c\x0b\x80\xc0\x07\xf4ZU\x98+\x94a\x01\x10\xf8\x80\x9e\x8aeX\x1e&v9S\x86\x05@\xe0\x03\xfa\xad\xaawo\x90\x135\xca\xa0\xbb_n\xafjn\xe3.\x04\xf3\x06\xc7kr\x0d}\xfd\xbb\x1c\x97\xdb\xf9k\xaf\xe7\x93\xf8\xc5T\xe0\x03\x98\xf3\xcd5\x04\x83\xed\xc4.\xc7\xaf^\xbd:\x1f\xe2c/\x1f\xd7IyqVs\xf7\xa2\x03\x87\x5c\xf7\x18Bal\xe7[\xd2\xc7\xf7\xa3Iy\xf1\xfe\x94\xf7\xa40\x02\xf1<\xae\xef-\xf0\x014|sU\x86\xa5~\x88\xba\xbf\xce^\xbe\xd8\xbb\xb1[sw\xbd{\xf4\xf1\xfd(\xfc-\x1eT\xec\xf6x\xc8=}\x02\x1f\xb0\xcc\xb0\x93Z/\xf7h\xe8\x135b\x11\xe9g-\x87\xc3e\x98\xd4\xdcOal\xfa\xea\xb0\x07\x7f\x87\x02\x1f\xd0\xbbo\xd3\xa3\xf2\xe2Qb\x97\xab28\x14\x1b\xd2\x1cu?h\xd6\xd2\xcb\x17\xef\xf3~\x8d]\x15\xc6\xa6\xaf\xefG{\x15_>\xef\xda\x1dj;\x08|\xc02L*\xae\x1foJC\xc4b\xd2Ok\xee\xbe\x8e\x10\x5c\xf7>\x15\xc6\x06\x81\x0f\xe0\xfb\xdf\xa6\xf7+\xbe%\x9fm\xe0\xb0`\x08U\xd75\xf6[i/_\x83\xde\xbdM\xea\x91\x05\x81\x0f\xa0\x86\xaa\x19\x9c\xe3Mk\x90x\xaeb\xdd\x99\xadE\x9c\xf0\xb2\xaa 
Z\x87\xa1\x5c\xfa\xec\xb2\xc1\xbe\xd7Cm\x04\x81\x0fhM\x9c\x09\x97\xea1z\xba\xc1\xc3\x82!\xf0]\xd5\xd8\xef\xfe*\x02V,\x99S\xa7w\xef,\x96\x98\x81\xbe~\xe1\x0a\xef9g\x0d\xfeN\x05>\x80D\x80\xb8W\x11T\xc27\xe7b\x83?t^6x\xfc\x87\xcb\xec\xe5\xabQ2\xe7\xae\xb1W7\x030\xce\xaa{\xef.\x04>\x80z\xdf\x8cS3\xe1\x0e7}\xbd\xdc\xb8\x84\xdcE\x8d]\xb7\xb2\xe5\xf6\xf2\x1df\xf5f->5Q\x83\x81\xfc\xed\x85\xd7\xf1^6\xbb\xa7\xef8\x5c?\xe4\xf7\xa87\xbc\x0c\x80E\xc5\xb2\x07\xa9\xa2\xa6\x17\xd6\xcb\xfdD\xd8z^g\xbf\xb2][\xafUX\xa3'\xf6\xd6F\xf7\xc82\xc8\xd0\x17V\xf5\x09K\x08\xee\x94\x97a\x1b\x95\xdbi\xb9\x9do\xc2\x97Q\x81\x0fhCU0p\xd2\xff\xc7\x1f:\xa7\xe5\x07N\xe8e\xa8\xaa\xf7u\xdb\xcb\xd7v\xe8\xaa\xdb\xbb\xb7\xf1=\xb2\x0c:\xf8\x9do\xda\xe36\xa4\x0b,$\x96\xf6H\x85\x17\xab3|\xda\xb8n8k\xf3\x5c\xbe\x06\xbd{gzdA\xe0\x03\xb8\x1b \x8a\xc4.Vg\x98\x22\x9eOt\x5cc\xd7\xb6\xcf\xe5\xab:\xcf\xf2V\xe1Y\x02\x81\x0f\xe0VUi\x0f\xab3\xa4CU\x9d\x9a_\xad\xf4\xf2\xc5\xe5\xee\x0ej\xecz\xacG\x16\x04>\x80\xbb\x01\xe2qb\x97Ps\xeeHKM\x17\x83p\x9d\xf6i\xab\x97\xaf\xa8\xb1\x8f\x1eY\x10\xf8\x00>\xa1*\xac\x14N\xfa\xaf\xd5\x86u{\xf9F\x0b\x86\xf3:\xbd{G\x9e3\x10\xf8\x00n\x03\xc4^y\xf10\xb1\x8b\x93\xfek\x88\xe1\xaaN\x8fZ\xe8\xe5+\x16\xb8\xab:\xbfk\xbd\x5cX\xcf\xfb\xe9(\xbc\xa7\xc6\xf7\xd5\xa5Q\x96\x05\x98Ge\xef\x9e&\xaa\x1d\xfa&5\x96\xa4\x0b\x0e\xc2~M\xcf\x89\xacQ#\xf1\xd6\xd8\xb3\xb1\x96\x0f\xfbp~fx\x8enk\xc3\x85\xff\xa7f\xbd\x87\x1e\xe1\xf3;\xdb\xa9\xf3d{\xf5\x5c\xef\xc7\xe7{4\xedy.\xf7\x99\xf5<\x9f,\xda\xfb.\xf0\x01M\xdf\xb4B0\xd8N\xec\xe2\xa4\xff\xe6B/\xdf\x075\xf6+\xe6\x08fu\xc2\xb7\xd29\xab\xfd\x1b\x1a\xc5\xe7q\xbf\xe2oi\x9a\xad\x18\x14v\xef\xdc^\xa8\xeb8i\xa3W=~Ax^s\xf7\xb7\x97\xb5\xcer\x0cG\x97Y\xbdY\xe5\x95\xc7Q\xf7q\x95\xb7\x93/\xe1\xb1\xec\xc5\xe7\xfb`\x81\xe7\xf9\xfd\xf2vn\x97~\x9b+\xfc\x19\xd2\x05\x9a\xbe\x09\x1fU\xf4>8\xe9\xbf\xa1\xf8aUgq\xf7\x83&\xe7\xf2\xc5\x0f\x9a\xdd\x1a\xbbz\xceV\xf4e\xa9\xdcBo\xcd\x8b\xecf\xc2\xd3vK7\xbd\x1b\x03\xc1\xe5\xa2\xc3\x821\xf8?\xad\xb9\xfbd\x89k>O\xb2\xfa\xcb\xff\x9dt\xf4\xf9\x0eC\
xb5\xa71h\x1e\xb4p\x93\xe1\xf5\xf2~\x08\xc2\xa1\xb7\xbfi\xdb\x0b|@\x13E\xc5\x9b\xb0\x93\xfe\x17k\xdb6\xf7\xab\xbb\xef\x13C\x82+\x09z\x97\xf1\xc3z{\x89w\x15N\x0bx\x1e\x96\xe4[0\xf4\x85/\x00u\xd7|>YB{\x85\x9e\xcf\x875v\xbd\xc8:z\xfaH\x1c\x099\xaf\xf9\x85\xab\xa9\xad\xf8\x85aG\xe0\x03\x96\xf2m\xb5\xbcx\x94\xd8\xc5I\xff\x8b}\xc8\x86\x9e\x80:\xc5\x98k\xf5\xf2\xd5\xec\xddS:g\xf9\x7f7G1\xe8\xdd_\xe1\xdd>\x0a=K\x0b\xf6\xbe\x8dk\xee\xb7[\xde\xcfa\x8b\xed\x15\x8eyR\xf7\x18\xbb\xf8\x05\xf3\xces\xbe\xb5\xc4\xbb\xb9nz\x1a\x86\xc0\x07\xd4U\xf5&lXpqE\x8b\xfb\xd5\xdaG\x8f\xec\xd2\xadk\xcd\xd6\xdd\x06\xc1i\xda\x17\x90p\xdc\xef\xd6}=.R6h\xca\xfbL\x9d\xa0\xf4n<\xc6\xae\x85\xbd\xa2\xe2\x8bq[\x1a\xf7\xac\x0a|@\x9d7\xb1\xbd,\xdd[t\xd6\xd5\xf3h\xfa$\x0e\xad>\xa9\xb1k\xb2\x97\xaff\xef\x9e\xd29\xab\xb1\xc8\xdf\xc5\xc5\x82\xf7\xfd0\x06\x90y_\x8f\xa1\xa7\xaa\xce\xb9\xa5[\x8b\x84\xcb;\xaf\xdb\xbaC\xb9g\xf1\xd8\xba\xf6>\x19\x8e\xffqW_W\x02\x1fP\xf7[w\xcaX\x13\xb5\xa6n1\xe6b\xce\xebn\xe9\x91]M\x88\x0f=\xa8\xa9\xa1\xfa\xf0\x5c?\xcbnz\xd3\xde*\xb7\xcf\x86\x99\xa2q\xdb\xb9\xfdw\xf9\xf3\x07\xe5\xf6v\xbc\xad\xeb\x06\x87\xf0\xb8\x0c\x22;\x0b<\x84q\xcd\xfb[hh\xb7\xc1P\xeeu\x17\xdfojLh\xfbTh\xbd}\xce\xef<\xdf\xb7\xcf\xf3[\xf1\xbaY\xcf\xf5\xf5<_\xb0\x05>\xa0\xea\x8d,\x84\x87\xd4\xf9GO\x9d\xf4\xdfz@\xa8\xf3\xc11\xb5\x97\xaff\xef\xdeq\x17\x87\xc3\x06\xecdJh\x09\x1f\xe6_,\x9f\x87{\xe5\xb6\x1fz\xac\xc29Y\xb3\x86\xd8\xc3\xdfX\xf8\x90/\xb7\x10v\xc2\xf3\xfe\xa4\xc1\xfd\x1f-\xf0z\xbcl\x10\xb0\x16\x19\xda\x0da\xaf\xceP\xee\xb8\xa3\xef7U\xeb\x8a\xdf\xba\x88!o\xef\xf69\x9f\xf2<\x9f\xc6\xeb\xc2c\xbdw'\xe8\xd7\xfd\x02.\xf0\x01s}k=\xac\xf8\xb6]h\xa9\xd6C_h\xd3\xab:\x1f~\xd3>t+~G\xe9\x9c\xd5?\x9f'\xb1\xdd\xafcP\x1b\xc5\x0f\xf3\xf39o\xefe|\x8d\xbc\x95\xd5\xef}\xdbY\xf0\xf8\x9f\xd5\xd8u\xae\xa1\xdd\x06C\xb9\xc7\x1d>u\xa4N(\x0eao\xaf\xe9d\x8b;A\xffA|\x1e\x04>\xa0uG\x15\xdf\xba\x9d\xf4\xbf<u\x82\xf4\xe1\xdd\x99\x985{\xf7<g\xeb\x0b\x04!\xe8\xb5\xd6\xfe18\x8ck\xee~\xd8\xc2\xf1\xb7>\xb4\xdb`(\xf7\xaa\xab_T\xe2\xdf]U\xef\xdeu\
x0c{/\x17x\xbe/co\xf0\x5c_\x14\x04>`\xd6\x9bX\xe8\x11H\x15\x0b\xbd\xe8\xe2\x89\xd3C\x11'TT\x9d0\xbf\xf5\xda\x87`UH\xf4\x9c\xad\xef\xf9<YF\xd0\x8e=^u\xca\xf9\xec/x?/\x1b\xdcF\x93\xa1\xddIVo(w\xbf\xc3_T\xf6\xea<\xceu\x1f\xbf\xc0\x07\xccR\x15\x0c\x0c\x0b._Qc\x9f\xc3\x18\xd0\xc3\x07\xec\xae\xe7\xcc\xebd\xd6\x97\x83\x15\xae\xc2Qkh7\x1eO\x9d\xa1\xdc'\x1d?\xe7\xb4N\xbb\xae}(Z\xe0\x03\xa6\xbd\x11\x8f+\xc2\x83\xb5WW \xb6qe/_|\xbe\xaa>\xf4=g\xc3}\x9d\x5cf\xf5\xce\xb1\xdbk\xe1\xee\xc2\xeb\xacN\xb9\x98\xe4\xd0n\x83\xa1\xdc\xb3\x81\x14t_{`\x15\xf8\x80io\xc4Uo\xb0z\x8aVg\x5c\xf3C85\xfcn\xa2\xc6\xf0\xd5\x09\xf3;\x8b\xdeI\x1c\x96\x1c\xd7\xdc=\xb5\xdek\x9dY\xad\x9d,\xc12\xa7\x9du\x1f\x80\xc0\x074}#\xb6\xf6\xea\x0a\xc5\xb6\xae:G\xab\xea\x83\xf3\xc8s6xuz\x90\xee\xb5\xf4\x9a\x0c\xf7U\xa7,\xcc\xd4\xa1\xddx~p\x9d\x02\xc5\xe3\x01\xbdnG\x02\x1f\xd0\x19\xf1<\xb0TO\x90\xb5W\xd7\x17\xc2\xaf\xe7\xfc]\xcf\xd9f\xa83!`\xb7\xad;\x8b\xc3\xacuV\xe1x8\xe5\xdc\xc1:\xaf\xc7\xe3\x81\xad\xdeS\x08|@\x97(\xc3\xd2A\x0d\x8a1O\x0d\x8b\x9e\xb3\x8dx\x8d\xac\xe3\x1c\xb1q\xcd/\x22\x93;_*\xc75\x82ggK\xb0\xccP\xa7\xed\xef\x97\x8f}\x22\xf0\x01kWc\xc6\x9c\xb5W\xd7\x1f\xc6\x9b\xf6\xf2Y\xe3\x98e\x86\xcc\xcb\x9a\xc1,\x84\x9d\xa2\xc1\xf2c\xfb=\xfb\x92rZs\xbf\xb0:\xce$q^\xe3R\xbd\xe1%\x0b\xdc\x09\x14)\x85&Z\xeb\x87\xeb\xcb8\xeb\xf1\xfd\x06\xbf6\xee\xf0\x17\x8c\xd3\x15\xdc\xcdy\xd9n\xbd\x9c\xac\x12C\xc1N\xdc\xc2\xbf\xf7\xe2U\xa3\xac\xde\x12^\xabz]Nj\xae\x94q\x18\x8f\xbd\xaa\xe6\xde\x93\xbe-\xfb\x17\xbeT\x95mpU\xf3y\x09\x93\xab\xf6B\x00^\xf5\x17h\x81\x0f\xb8\x1df\xd9N\xecr\xac\xa4Gg>\x5c\x8b\x9a\x1f,]_\xe3x\xd73\xfa\xa9\x80\xb7\x1f\x83\xdd^\x97B]\xcd/\x16\x97\x15an+K\xcf$\x0f\xfa\x5c\x82\xa5h\xf0e,<\xb7\xef\xc7\xbf\xe5\xb0\x9d\xac\xa2G\xd3\x90.\xf8\xa0\xa9\x1afQ\xd2\xa3[\xea>\x17\x85\xa6\xea\xc7\x97\xadr\x0b\xc3\xee\xdf\x8b\x81\xe1\xa0ga\xaf\xe9*\x1c\xa9\xf7\x99q\x9f\xbf\x8ce\xf5&\xb1|*\xf8\x85\xb0\x1c\x87z\x97Z\xbaE\xe0\x03\x8a\x8ao\xe6GN\xfa\xef\x94Z\xcf\x85\xe7\xac\xdb_\xb2\xe29m/\xe3\x07\xfe\xc3\xbe?\xa6\x06\xabp\xcc\xf
c\x223\x80\x12,!\xf4^\xcc\xf1{\xb7\xbd\x9f\xdf(_\x13\xe7\xf1K@\xeb\xe7\xf9\x09|\xb0\xd9\x1f<\xa3\xf2\xe2Qb\x97\xab\x81T\xb9\x87\xae\xfc\xcd\x85\x1e\xda\x10l\x1eg\xf5\xd6\x90\xed\xdb\x97\xc7y\x02\xcf\xb3!L\x08\x8b_\xb2\xf6\xe6l\x83[\xdb\xd9\xc7\xbd~E\x9b\xc1O\xe0\x83\xcdV\xf5&k(\x17\xda\x09z\xf7\xe2D\x95\xaf\x0d0\xe8\xdd\x0d<\xe3\x86\xbfv\x95\x0dg5\x8d\xbb\xa1\xefl\xc1\x9b\xda\x8a_\x0a.\xe3\xb9~\x0b3i\x036\xf7\x03(\xbc)\xa5N\x9cW\xd2\x03\xda\xf9[\x0b\xe7f\x9d.\x18\xf4B\xafQ\x08\x13\xe7\xd9'\x87\xf5O\xef\xfc;\x9c\x8b\xbb\xbd\xe6\xc0\x13\x86$\xc3\xca0\x075\x7f\xe5|h\xa7\x1f\xdc\x86\xbe\x18\xd4\x1e/xs\x1f\x05\xbf8\x13z\xbc\xc8\x0cf\x81\x0f6\xd7\xa4\xe2z\xbd{,\xf3C1\x17\xf6\x92B\x0f\xd1I\x0cD\xa75\xef\xebeG\x1e\xefA\x83_\x09+q\xec\x0f\xf1\xcbe8\x1d&\x16[\x0eA|\xd1\xf34C\x90?\x0d\xa7\x04\xcc;\xfc-\xf0\xc1f\xf68T\xad\x97\xfb\xb4o\xb5\xb0\xa0\x83\x7fg\xf7\x1a\x86\xbd\xeb\x18\x0e&}\x9c\xc0\x10\x1f\xef<\xc1\xed\xa3\x19\xaaC\x5c\xef9>\xa6\xfd8\xa22n\x18\x86_\x17^G\xa1\x9cK6O\xe8s\x0e\x1fl\xe6\x87PQ\xf1\xa1Sh)X\xd8I\x83\xb0\x17f\xb8\x8eB\xafP\x8f\x83\xcfQ6_I\x99\xd0F\x93!\xbf\x10B/m\xb9\x85\xc0\xf7\xa0\xdc\x9ed7\xe7.\xce\xeb\xfd8\xc4+\xf0\x01\x95o\xca\xd6\xcb\x85\xe5~\xb1\x0a\x1f\xeeu\x8aK\x87/Xo\x85\x15A\xfa\xfcw\x17\x03\xc8\x22\xbdW\xbbq\xe4a\xd0B\x98\x8f\xa1~T\xfe\xf7\x8b\xe5v\x9c5_21\x98\xc4*\x0b\x02\x1f0\xf5M\xb9\xea\xfc\x9aP\x86\xe5HK\xc1\xc2\x8a\x9a\xfb\xed\xf5}\x15\x9b8j0i\xa3\xcd\x9a\x86\x98\x9e\x87\xbf\xf3\xd8\xeb\x17\x1e\xf3\x93\x86\xc1o+k8\x12#\xf0\xc1f\xa9\x0ascM\x04\x0b\x07\xa0\xd0\xdbUgh\xf3\xc9@\xce\x95\xad3t\xfd\xacf\x88\xd9\xb8\xca\x00\xa1g7\xd6;\x0d\xc1\xef\xb8\xc1\xaf\x1e4\x09\xc8\x02\x1fl\xd6\x87Pj\x88\xe9\x99\xf5r\xa1\x15u\xce\xaf\xba\x1eBQ\xf38\x0c[5t\x1dF\x0e\xf6k\x86\x99\xed\xb6\xea\xce\xf54\xf8\x85/\xdd\xef\xb4\xfcZ\x13\xf8`\x83\xc2^\xd5z\xb9\x812,\xd0\x8e\xbd\x1a\xfbL\x06\xf0\xbe\x12N\x11\xa9\x13\xce\xc6w\xdec\xea\x0c[>^\xf6\xba\xb2\x1d\x0f~\x93\x06\xa1O\xe0\x03>\x15\xe6RCLO\x86X\x12\x01\xd6\xf4\xe5\xaa\xcep\xee\xe9\x12\xee~w\xc5\x0f7\x04\x93\xca\xa1\
xdc\xdb\x91\x838)\xa5hp\xdb\x1b+\x86\xbe:\xc3\xe0\xb5\x83\xb1\xc0\x07\xc3\xff\x00\x1ae\xe9\xde\xbb\xdb\xda_\xc0\xe2\xea~\x00_\xb6\xfcw\xbe\xd2\x1e\xb1\xf2\xfe\xea\xac\xeaq\xfd\xfa{O\x9c\x14Vg\xad\xd9\xedx\x1f\x9b\xacN8\xae]\xd0[\xe0\x83\xcdx\xd3H\xbd)\x1c*\xc3\x02\xab\xb5\x84\xc9\x1a\xa3\x15\x86\xbd\xbd\xf2\xe2Q\x9d\xf7\x9e\x19#\x07\xe3\x9aw\xf5(\xde\x97\xd7H\x0b\x04>\x18\xb0\xf8f\x99*\xc3r6\xef2=\xc0B\x7f\x9bm\x07\xb4\xfd\x15\x1dw\xdd\x12,\x17\xb3J<\xc5 \xf3\xb4\xe6]N\xe2}n\xaa\xab\xb6nH\xe0\x83a\xab\x1a\x12)4\x11\xacEk\x81/\x86\xc7\x83\x15\x1dw\x08{u\xceQ\x1c\xd7x\xef\xa9\x13f\xeeo\xf8\xfbTkaW\xe0\x83\xe1\xf6 \x847\xdc\xd496\xc7\xca\xb0\xc0\xda\xb4\xd9#7Y\xd1{J8\xe6\x875v\xad\xac/\x18O#\x19\xd7\xbc\xeb\x8d\x1c\xda\x8d=\x9b[m\xdd\x9e\xc0\x07\xc3}\xa3H\xf5\xeeY/\x17\x96\xa3\xeeyW\xfb-\xfd\xad\x87\xbf\xe3\xdd\x86\xef\x0d\xf3\xdc\xcf\xa8f\xb0\xbc\xcajN\x02\x8b_8\xeb\x0e\xed\x9el\xe0\xd0n\x9d\xd7\xc8\x99\xc0\x07\x9b\xed\xb0\xe2\x9b\xe1\x912,\xd0\xbe\xd8sUk\xa8r\xd1\xb5cc/\xfe\xe3\x86\xbf6\xefl\xdeIV\xaf\xb7i\xdcp\x12XQ\xb3\xbd\xb6\xb2\x8e\x96j\x09A\xb4\xed\x1e\xc8\x18n\xeb|)?\x15\xf8`C\xc5o\xe2\xa9\x0f\x81\xab!T\xf8\x87\x0e\xab\xbb<X1o9\x95\xd8\xb3\xf7\xfe\x8a\xdeS\xea\xac\xa6\x11<mz\x9aH\xc3\xa1\xdd\x87qX\xb9k\xc21=/\x8f\xed\xb4\x8d\xe0\x17\xc3^h\xc7:\xe7J\xd6\x0e\xc1\x02\x1f\x0cO\xd5\x1b\x80\x155`\xb9\xea\xd6\x8f\x0b\xbdV\xa7MBL\x08\x14\xe5v\x9e5\xef\xd9\x9b7|\x84@\xfa\xb5\x1a\xbb\xce}\x9aH\xc3\xa1\xdd.\xce\xda\xbd}\xfev\xef\x04\xbf\xf1<\xc7\x19\x03ch\x8f\xed\x1a\xbb\x9f5\x19\xa9\x11\xf8`@\xe2\x9b\xc5n\xc5\x1b\xc4\x89\x96\x82\xe5\x89\x1f\xc2u\xcf\xad\x0a\xa1\xef\x83TH\x08\xa1+\xf4\xb2\xc5\xa0\xf7<\x11\x06\xae\xdb|\x1c\x0dJ\xb0\x04\xe3\x05\xeby\x86\xb0Xwh\xb73\xefa\xb1\x8d^\x9f\xc8\x12\xde\x83C\xef\xeb\xf7\xe2\xf3Z\xc4\xa0~oV\xa8\xae\xf9\xfc.\xf4\xe5\xfd\x0d\x7f\x9a0(Uo\xcez\xf7`5\xc6\xe5\xf6\xa2\xc1\xfe\xbb\xb7A\xa1\xfc\xe0\xbf\x0doMfh^\xc7\xfb\xfc\xa0\xc5\xc7P\xd4\x0c\x1f\xcf\x16\xfd\x22\x19\xc2b<'\xf1y\x9d\xb6\x0a\x01iV\x9d\xbf\x15\xdb\xaf\xf9\xbc>\x8e\xe1\xee\xf6\
xe7!\xdc\xde_\xe0~\x9f4-\xcc\xac\x87\x0f\x06\x22\x9eg\x93z\x03y\xba\x84\xea\xfe\xc0\xf4\x00sY^\xbc\xbb\xc0M4\x0d{{5CW\xads\x06\x1b\xac\xa6q\x1b4\xdbh\xb3\xd3\xac\xfe\xd0n\xb1\x84\xe2\xd5\xcb\x08|\xb3,\x12\xf6\x8e\xe79\x0f[\xe0\x83a\x84\xbd\xaa\x19]\xca\xb0\xc0\xeaC_\xe8\x81:^\xf2\xdd\xdc\x86\xbd\xba_\xe6*\xcf+\x8b\xef'u{\xec\xc6-/\xcdXd\xf5\x86\xa6\xd7>kw\xc6p\xee\xb2\x85\xb07W\xc0\x16\xf8`\x18\x8e*z\x04\x0a\xeb\xe5\xc2ZB_\xf8p~\xba\xa4\x9b\x0f\xe7\x09\x8e^\x0b{U\xe7\x0e\xd6\x99H0\xc9\xea\xf50\xb6~Np\xc3Y\xbb\xbbq\xb6\xf2\xba\x8cV|\x7f\xef\xce\x1b\xf6\x04>\x18\x808\x8b.\xb5\xac\xd2UG\xceu\x81M\x0d}\xe1t\x8b\xb7\xb3\xf6&U\x84\xf3\xbf\xde)owo\x8e/r;\x15\xef'!P\xd4\xe9\xb5jm(wJ{\x85\x10\xf9\xac\xe6\xee\x8f\xe7-m\xd3\xc2q\x86\xa0\xfd [~/n\xb8\xfd\x07\x8b\xbe\x8f\x0b|\xd0\x7fUo\x02cM\x04k\x0f}!\xc4\x8c\xca\xed\xc9\x02\xc1\xef\x22\x06\xbd\xd0\xab7\x99\xb1O\xd5\xd0\xee(\x11\xf6FY\xfd\x922\xc5\x92\x8b\xb7\x8f\x1b\xb4\xd3d\x8d\xcf\xebe\xecu\xfblvs\xce\xe6YK7\x1d\x1e\xfb\xd3\x18\xf4\xc6m\xb4u^\xde\x88\xbfD\x88\xb6\xbe\xf2\xa5\xd3\xac\xc12E\xd1\xd9\xf5{\x1f\xee\xad\xe3xc\xfd\xae\xd4\xac\xbc0\xe4\xb2\xe7\x99\x85n\x89\x7f\xbb\xe1os'\xf1\x9es\x15\x03\x5cx_:\xb1:No\x9e\xdb{\xf1y\xbd}~\xef\xd5\xf8\x5c\x09A\xf1e|\xaeO\x971\xc1NY\x16\xe87\xbd{\xd0C\xb1\xc7OM\xcca>\xb7\xdf\x0fn]:.C\xba\xd0\xdfo\x91E\x96\x9e\xda\xffD\x8f\x00\x00\x02\x1f\xf47\xec\x8d\xb2t\x11\xe5p\xfe\x87\x89\x1a\x00\x08|\xd0cE\x96.\x9bp\xa8\x0c\x0b\x00\x02\x1f\xf4T\xac\x80\x9f*\xc3r\x91\x98\xc1\x07\x80\xc0\x07\xf4@Qq\xbd\xf5r\x01\x10\xf8\xa0\xafbQ\xd4\xd4\xf4\xfe\xe3\xb8\x1e%\x00\x08|\xd0\xc3\xb0\x17j9\xa5&bX/\x17\x00\x81\x0fz.\x0c\xd5\xa6&j\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xec\x12*\xf2+\xc3\x02\x80\xc0\x07=V\x15\xe6\x94a\x01@\xe0\x83\xbe\x8aeX\x1e&v9\x8b\xcb4\x01\x80\xc0\x07=5\xa9\xb8^\x19\x16\x00\x04>\xe8\xab<\xcfC\x98K\xad\x97\x1b\xca\xb0\x9ck)\x00\x04>\xe8g\xd8\x0beX\x8a\xc4.\xa1\x0c\x8b\xde=\x00\x04>\xe8\xb1\x10\xf6ReX\x0a\x135\x00\x10\xf8\xa0\xa7\xf2<\xdf)/\x1e%v\xb9*\xc3\x9e2,\x00\x08|\xd0cUan\xac\x89\x00
\x10\xf8\xa0\xa7\xf2<\xdf\xcf\xd2\xeb\xe5\x9eY/\x17\x00\x81\x0f\xfaM\xef\x1e\x00\x02\x1f\x0cU\x9e\xe7E\x96.\xc3\xf2\xc4z\xb9\x00\x08|\xd0\xdf\xb07\xca\xd2eVB\x19\x16\x135\x00\x10\xf8\xa0\xc7\x8a,]\x86\xc5z\xb9\x00\x08|\xd0Wq\xbd\xdc\x83\xc4.\x17e\xd8\x9bh)\x00\x04>\xe8\xaf\xa2\xe2z+j\x00 \xf0A_\xe5y>\xce\xd2eX\x8e\x95a\x01@\xe0\x83\xfe\x86\xbd:\xeb\xe5\x16Z\x0a\x00\x81\x0f\xfa+\x0c\xd5\xa6\xca\xb0\x1c)\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<N\xecr\x95)\xc3\x02\x80\xc0\x07\xbdV\x15\xe6\x0aeX\x00\x10\xf8\xa0\xa7b\x19\x96\x87\x89]\xce\x94a\x01@\xe0\x83~\xab\xea\xddS\x86\x05\x00\x81\x0f\xfa*\xcf\xf3\x10\xe6\xb6\x13\xbb\x842,\xe7Z\x0a\x00\x81\x0f\xfa\x19\xf6\xea\x94a\xd1\xbb\x07\x80\xc0\x07=\x16\xc2^j\xbd\xdc#\x135\x00\x10\xf8\xa0\xa7b\x19\x96G\x89]\xae\xca\xb0Wh)\x00\x04>\xe8\xafI\xc5\xf5cM\x04\x80\xc0\x07=\x95\xe7\xf9~\x96^/\xf7\xccz\xb9\x00\x08|\xd0oUeX\xc6\x9a\x08\x00\x81\x0fz*\xcf\xf3\x22K\xaf\x97\xfb\xd4z\xb9\x00\x08|\xd0\xdf\xb0\x17\xca\xb0\xa4\xca\xac\x842,\x85\x96\x02@\xe0\x83\xfe\x0aC\xb9\xa92,\x87\xca\xb0\x00 \xf0AO\xc5\xf5r\x0f\x12\xbb\x5cX/\x17\x00\x81\x0f\xfa\xad\xa8\xb8\xde\x8a\x1a\x00\x08|\xd0Wy\x9e\x8f\xb3t\x19\x96g\xca\xb0\x00 
\xf0A\x7f\xc3\x9e\xf5r\x01\x10\xf8`\xe0B\x98K\x95a9R\x86\x05\x00\x81\x0fz*\xae\x97\xfb8\xb1\xcbUV]\x84\x19\x00\x04>\xe8\xb0\xaa0W(\xc3\x02\x80\xc0\x07=\x15\xcb\xb0<L\xecr\xa6\x0c\x0b\x00\x02\x1f\xf4[U\xef\x9e\x89\x1a\x00\xac\xc5\x1b\x9a\x00\x16\x17\xcb\xb0l'v9~\xf5\xea\xd5\xb9\x96\x02\xe8\x9f\xaf?\xf8\xc2$\xfe\xb3\xf8\xf2\x8bo]\xf6\xf11\xe8\xe1\x83\x05\xfd\xc9?\xfe\x81\xf0\xc5)\xd5\xbb\xa7\x0c\x0b@\xbf\x9dd7+'\xbd(\xc3\xdfi\xb9\x8d\x05>\xd80\x0f\xbe\xf3c\xa3,\xbd^\xee\x91\x89\x1a\x00\xfd\xf5\xe5\x17\xdf\x0a\x81\xef*\xfe7\x14\xd5\x7f\xbf\x0c}\x97\xe5V\x94\xdb=\x81\x0f\x06\xee\xcf\xfd\xe1\x1b\xd9\xaf}\xf0\xab\x9fO\xecrU\x86\xbdBK\x01\xf4\xde\xeb#9\xa1\xdej(\xc3\xf5\xbd0\xe4[n;\x02\x1f\x0c\xd4\xe7^\xfc`\xd5.\x86r\x01\x86a\x92\xdd\x9c\xa23M\x18\xee\xfdF\x19\xfa\xce\xbb:\xdc+\xf0\xc1\x9c~\xea\x0f~4;\xfb\x95\xd3\xd4.\xa1\x0c\xcb\x89\x96\x02\xe8\xbf/\xbf\xf8V85\xa7\xea==L\xde\x0b\xc3\xbd/\xe3p\xefH\xe0\x83\x9e\xfb\xdd\x7f\xff\xdd\xaa]\xc6Z\x09`P\xea\xae\x94\x14\xce\xeb\x0e\xc3\xbda\x92\xc7I\xb9\xed\x09|\xd0C\x7f\xed\xbb\xf7\xb2o\x9e\xffFj\x97\xa7\xd6\xcb\x05\x18\x96/\xbf\xf8V(\xafu\xd6\xf0\xd7BA\xfe\xe7q\x92\xc7x]\x93<\x04>h\xe8\xc7\xff\xf83\xd97O\x93a/\x9c\xe3Qh)\x80A\x9a\xcc\xf9{a\x92\xc7\xfb\xe5v\x19'y\x8c\x04>\xe8\xb07\x7f\xe7\xc7\xb3\xef|\xe7;\xa9]\xac\x97\x0b0P_~\xf1\xad\x10\xf8\xae\x16\xb8\x890\xdc{\xb7\xa6\xdf\xbe\xc0\x07\x1d\xf3\x17\xfe\xef\x0fe\xbf\xf6\xcb\xff\x22\xb5\xcbE\x19\xf6\x8e\xb4\x14\xc0\xa0MZ\xba\x9dP\xd3\xef\x838\xdc{\xb8\xcc\xe1^\x81\x0f\x1a\xf8\xb1\xab\xca?\x19eX\x00\x86\xaf\xed/\xf6a\xb8\xf7k\xd9\x12k\xfa\x09|P\xd3_\xfd\xbd\x9f\xa8*\xc3\xf2\xec\xd5\xabW\xa7Z\x0a`\xd8b\x89\x96\xe3%\xdd\xfcmM\xbfV\x97p{\xc3\xd3\x06\xd5\xc2D\x8d\x97\xdf\xfc\x9f\xc9}\xfe\xd1\xcf\xfd\xcd_\xea\xc2\xd4{\x00V\xe2<\x86\xb3e\x09\xc3\xbd\xbb\xe5\xe7J\xe8M\x0c\xdb\xa4\x0c\x9a\x97\xf3\xdeX\xfe\xea\xd5+O\x19D[_\xf9\xd2i\xfc#\xfb\x84P\x86\xe5W\xff\xd9\xaf\xcc\xfc\xbdb\xffg\xb2\x9f9\xff\xef\x1a\x10\x80e:\x8e\xc1\xef\xb4\xe9/\x1a\xd2\x85)~\xf4\xf7>\xf7\xfd\x7f\x87\xf5rSeXv\xbf\xf4\x
d3\xd9\xdf\xf8\xed\xefh4\x00\x96-\xf4(>\xbf]\xc2\xad\xc9$\x0f\x81\x0f^\xf3c\xbf\xfd\x97\xb3{\xdf\xf8\xa9\xec\x87\xbe\xfb\x93\x1f\xfd\xff\xcf|\xfbG\x92eX\xde\xfeS?\x9c\xfd\xf0\xef\xfe\x91\x86\x03`U>Z\xc2-\xbb\xa9\xe9wT\xa7\xa6\x9f!]\xb8\xe3\xf3?\xfb\x0b\xbf\xf3\x13\xdf\xd9\xfa\xd3\xb7\xff\xff\xdc_|\x91\xfd\xab_\xfa\xe73\xf7\xff\xf9\x9f}+\xfb\x85\xff\xf4m\x0d\x07\xc0\xba=\xcbn\x86{\xa7\xae\xf7k\xd2\x06Don\x7fu\xf2\x13\xd9\xc7a/\xf8\xcc\xb7\xaf\x93\xbf\xf3\xb3\xd9\xff\xd2p\x00tAX\xc2\xed\xe1\xd7\x1f|!\x14\x85\xbe\x9d\xe4\xf1\xfdE\x00\xf4\xf0A\x0c{\xd9\x94\xd9V[?\xf2G\xd9\x9f\xf8\xd1\xdf\xca\xfe\xe5\xf3g\x9f\xfa\x9d\xbf\xffs\x7f+\xfb\xbb\xbf\xf1\xdf4\x1e\x00]\x14z,Bo\xdfQX\x03X\xe0C\xd8\x9b\x11\xf6\xee\xfa\xb3\x9f\xfd\x83\xec\xff\xfc\xfe\xbf\xc9\xfe\xf5\xbf\xfb\xb7\x1f\xfd\xff\xfe\xfdQ\xf6O\xb7~\xc4\xb9{\x00\xf4!\xf8\x1d\x0a|\x08{\x0d\xea(\xfd\x9d\x9f\xfcV\xf6\xcb\xff\xf1<{\xe7\x8b\x7f^\x19\x16\x00\xba\xec\x13C\xbb\x02\x1f\xc2^C\xef\xfc\xe07\xb3\xbf\xfe\xdd\xdf\xd2\x80\x00t\xd1\xd4\xc9\x1b\x02\x1f\xc2\xde\x1c\xfe\xde\x1f\xfeF\xf6\xd3\xbf\xff_5$\x00]\x10\x86m\xc3\xe7\xda\xd1\xac\xd58\x04>\x84=\xa1\x0f\x80~\xba\xc8n\x86mO\xee\xce\xc8\x9dFY\x16\x84\xbd9\xfd\x97\x1f\xf8\x5c\xf6\xd3\x99\xc0\x07\xc0\xca5^bM\x0f\x1f\xc2\xde\x1c\xdez\xf5?\xb2\x9f\xff\xbd_\xd7\xa8\x00\xacJ\x18\xb6\xbd\x9d\x84q\xd9\xf4\x97\xf5\xf0!\xec5\xf4\xf9\xfc\x7f\xff\xca\xcf\xff\xee\xaf\xff\xa2V\x05\xd8h{\xe5\xf6x\x05\xf7s\x16C\xded\x91\x1b\xd1\xc3\x87\xb0\xd7\xcc\xf1o^\xfc\xe2X\xab\x02l\xb6\xaf?\xf8B\x98\x05\xfbp\x89w\x11\x86m?*\x9a\xdc\xc6\x8d\x09|\x08{\xc2\x1e\x00\xcd\xc2\xde\xa8\xbcx\xb1\x84\x9b\x9e\xba,Z\x1b\x0c\xe9\x22\xec\x09{\x004s\xd8\xf2\xed\x85a\xdb\xa3\xd7k\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@M_\x7f\xf0\x85{\xe5\xc5e\xb9m-xS\xb7k\xdd\x16\xf3L\xc2hJ\x0f\x1f\xc2\x9e\xb0\x07@}\xfb\x0b\x86\xbd0l[d5j\xe7\x09| 
\xec\x01\xb0\x1e\xc5\x9c\xbf\x17\x96<;jR;O\xe0\x03a\x0f\x80\x15\xfb\xfa\x83/\xec\x95\x17\xf7\x1b\xfc\xcaB\xb5\xf3\x04>\x10\xf6\x00X\xbd\xba\x9f\x0d\x1f-y\xb6h\xed\xbc6\x99\xb4\x81\xb0'\xec\x01P\xa1f)\x96Vk\xe7\xb5I\x0f\x1f\xc2\x9e\xb0\x07@\xb5Y\x9f\x0fa\x12\xc6$\x06\xbd\x97]=x\x81\x0faO\xd8\x03\xa0\xda\xeb\xb5\xf7ZY\xf2L\xe0\x03a\x0f\x80\x0e\xf8\xfa\x83/\x84\xcf\x88P\x8ae\xa5\xb5\xf3\x04>\x84=a\x0f\x80\xd5\xd9+\xb7w\xb3%,y\xb6*&m \xec\x01\xc0\xc0\x09|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00\x08| \xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0\x81\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x80\xc0\x07\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x09{\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00\x08|\x08{\xc2\x1e\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00 \xf0\x09|\xc2\x9e\xb0\x07\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x1f\xc2\x1e\x00 
\xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0\x87\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x02\x1f\xc2\x1e\x00 \xf0!\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0'\xec\x01\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00\x08|\x08{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\xc2\x1e\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x00\x02\x9f\xb0\x07\x00 \xf0\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x09{\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\x80\xc0'\xec\x01\x00\x08|\xc2\x1e\x00\xc0\xe6\x06>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x06\x1c\xf8\x84=\x00\x80\x01\x07>a\x0f\x00`\xc0\x81O\xd8\x03\x00\x18p\xe0\x13\xf6\x00\x00\x16\xf3\x99\x96\xc3\xd9=a\x0f\x00`\xc0\x81\xaftT\x86\xb4\x91\xb0\x07\x000\xc0\xc0\x17{\xf7B@\x1b\xb7p[\xc2\x1e\x00@\xd7\x02_\xe90^.\x14\xae\x84=\x00\x80\xee\x06\xbe\xdb`u\xbf\x0cm\xfb\xc2\x1e\x00\xc0\x80\x02_\x0cx\xf7\xa7\x84?a\x0f\x00`\x08\x81oJ\xc0{\xd8d\xf2\x86\xb0\x07\x00\xd0\xe1\xc0\x17\x83\xdd\xc3\x1a!P\xd8\x03\x00\xe8c\xe0K\x04\xbb\xca\xe0%\xec\x01\x00\xf4;\xf0%'o\x08{\x00\x00=\x08|eh\x0b!\xeb~b\x97Ca\x0f\x00\xa0\xc7\x81/\xab\x1e\xb6\xdd}}\xf2\x86\xb0\x07\x00\xd0\x93\xc0\x17\x83\xdcn\x8d]\x0f\x85=\x00\x80\x1e\x06\xbel\xc6p\xed\x14ca\x0f\x00`}\xf2W\xaf^\xcd\xf5\x8be\x80{Y^l\xd5\xdc\xfd\xa2\xdc\xb6\x85=\x00\x80\xd5\x9b\xab\x87/N\xd6\xd8j\xf0+\xc2\x1e\x00@\x9f\x02_6\xc7\xd2i\xc2\x1e\x00\xc0z4\x1e\xd2\x8d\x935^\x08{\x00\x00\xfd0O\x0f_!\xec\x01\x00\xf4G\xa3\x1e\xbe7\xb7\xbfz\xaf\xbc\xb8\xcc\x9a\x9d\xbf'\xec\x01\x00\xacQ\xd3\x1e\xbe}a\x0f\x00`\xd8\x
81\xefpE\xc7%\xec\x01\x00\xb4\xa4\xf6\x90\xee\x9b\xdb_\xdd)/\xbe!\xec\x01\x00\xf4K\x93\x1e\xbeU\xf4\xee\x09{\x00\x00\xeb\x08|q\xb2\xc6\xfe*\x0e\xa8\xbc\xaf=O\x0b\x00@{j\x0d\xe9\xc6\x955\xde_\xe1q]\x95\xdbQ\xb9M~\xf3\xe2\x17_z\x9a\x00\x00\x96\x1f\xf8.\xcb\x8b\xfbk8\xbe\xebr;\x09\xe1\xaf\x0c~\xe7\x9e.\x00\x80%\x04\xbe8\xc4\xfa\xbc\x03\xc7z\x91\xdd\xf4\xfa\x9d\xe8\xf5\x03\x00h7\xf0M\xca\x8b\x83\x0e\x1d\xb3^?\x00\x80\xb6\x02_\x9c\xac\xf1\xbd\x0e\x1f\x7f\xe8\xf5+\xca\xe0w\xe2\xa9\x04\x00\x98\xaej\x96\xee\xb8\xe3a\xefH\xd8\x03\x00H{\xa3\xe2\xfa\xc3\x8e\x1d\xaf\xe1\x5c\x00\x80\xb6\x02_\x9c\xacq\xbf#\xc7i\xc2\x06\x00@\xdb\x81/\xeb\xc6p\xeeq\xa67\x0f\x00`!S'm\xbc\xb9\xfd\xd5Qy\xf1bM\xc7\xa4\xe82\x00@\x8bf\xf5\xf0\x8d\xd7p,\xc71\xe4\x9dzZ\x00\x00\x86\x13\xf8\xf4\xe6\x01\x00\xac:\xf0\xbd\xb9\xfd\xd5\xfdl\xf9\x935\x9e\xc5\x90\xa7\xa4\x0a\x00\xc0\xaa\x03_\xb6\xbc\xde\xbd\xd0\x9b7\x89A\xefR\xd3\x03\x00\xac\xc6'&m,i\xb2\xc6Y\xa6@2\x00\xc0\xda\xbc\xde\xc37n\xe9vC\x81\xe4I\x0cz\x97\x9a\x19\x00`8\x81/\xf4\xe6\x85!\xdb\x89\xa6\x05\x00\xe8X\xe0{s\xfb\xab!\xec\xcd3YCo\x1e\x00@\x1f\x02_\xd6\xbcw\xcfrg\x00\x00=\xf0\xd1\xa4\x8d\x06\x935Bo^\x98|a\xb93\x00\x80\x9e\xb8\xed\xe1;\xac\xd8Oo\x1e\x00@\xcf\x03\xdfx\xc6\xf5\x96;\x03\x00\xe8{\xe0\x8b\x935\xb6\xee\xfc\xccrg\x00\x00C\x0a|\xd9\xc7\xbd{z\xf3\x00\x00\x06\x1a\xf8B\xc0\xdb\xd7\x9b\x07\x000L\xff_\x80\x01\x00e|\xfb\xc4\xd4o\x058\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = b"\x00\x11\x0bF\x95g\x00p\x00a\x00r\x00a\x00m\x00e\x00t\x00r\x00i\x00c\x00f\x00i\x00t\x00t\x00i\x00n\x00g\x00\x06\x07\x03}\xc3\x00i\x00m\x00a\x00g\x00e\x00s\x00\x1c\x053\xe8'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00z\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x10\x0a1\xdeg\x00m\x00o\x00d\x00e\x00l\x00-\x00v\x00i\x00e\x00w\x00e\x00r\x00.\x00p\x00n\x00g\x00\x1c\x053\xf0'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00x\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g\x00\x1c\x053\xf4'\x00a\x00x\x00i\x00s\x00_\x00r\x00o\x00a\x00t\x00i\x00o\x00n\x00_\x00y\x00_\x00a\x00x\x00i\x00s\x00_\x00i\x00c\x00o\x00n\x00.\x00p\x00n\x00g"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00(\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00:\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x9e\x00\x00\x00\x00\x00\x01\x00\x00F6\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00~\xa5\x00\x00\x00x\x00\x00\x00\x00\x00\x01\x00\x006|"
def qInitResources():
    # Register the embedded resource data with the Qt resource system
    # (format version 0x01) so it becomes reachable via ":/..." paths.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the same resource data; counterpart of qInitResources().
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Import-time side effect: register the resources as soon as this module loads.
qInitResources()
| true
| true
|
7907aa1c3d3561e8015c2ad6df4b0d971630b5ab
| 124,342
|
py
|
Python
|
tests/model_forms/tests.py
|
KaushikSathvara/django
|
3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946
|
[
"BSD-3-Clause",
"0BSD"
] | 5
|
2021-11-08T13:23:05.000Z
|
2022-01-08T09:14:23.000Z
|
tests/model_forms/tests.py
|
KaushikSathvara/django
|
3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946
|
[
"BSD-3-Clause",
"0BSD"
] | 3
|
2020-01-21T17:58:28.000Z
|
2022-03-30T14:16:15.000Z
|
tests/model_forms/tests.py
|
KaushikSathvara/django
|
3b9fe906bf28d2e748ce4d9a1af5fbcd5df48946
|
[
"BSD-3-Clause",
"0BSD"
] | 2
|
2021-10-13T10:49:28.000Z
|
2021-11-30T12:33:33.000Z
|
import datetime
import os
from decimal import Decimal
from unittest import mock, skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict,
modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from .models import (
Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book,
Category, Character, Colour, ColourfulItem, CustomErrorMessage, CustomFF,
CustomFieldForExclusionModel, DateTimePost, DerivedBook, DerivedPost, Dice,
Document, ExplicitPK, FilePathModel, FlexibleDatePost, Homepage,
ImprovedArticle, ImprovedArticleWithParentLink, Inventory,
NullableUniqueCharFieldModel, Number, Person, Photo, Post, Price, Product,
Publication, PublicationDefaults, StrictAssignmentAll,
StrictAssignmentFieldSpecific, Student, StumpJoke, TextFile, Triple,
Writer, WriterProfile, test_images,
)
if test_images:
from .models import ImageFile, NoExtensionImageFile, OptionalImageFile
class ImageFileForm(forms.ModelForm):
    """ModelForm over ImageFile exposing all model fields."""
    class Meta:
        model = ImageFile
        fields = '__all__'
class OptionalImageFileForm(forms.ModelForm):
    """ModelForm over OptionalImageFile exposing all model fields."""
    class Meta:
        model = OptionalImageFile
        fields = '__all__'
class NoExtensionImageFileForm(forms.ModelForm):
    """ModelForm over NoExtensionImageFile exposing all model fields."""
    class Meta:
        model = NoExtensionImageFile
        fields = '__all__'
class ProductForm(forms.ModelForm):
    """ModelForm over Product exposing all model fields."""
    class Meta:
        model = Product
        fields = '__all__'
class PriceForm(forms.ModelForm):
    """ModelForm over Price exposing all model fields."""
    class Meta:
        model = Price
        fields = '__all__'
class BookForm(forms.ModelForm):
    """ModelForm over Book exposing all model fields."""
    class Meta:
        model = Book
        fields = '__all__'
class DerivedBookForm(forms.ModelForm):
    """ModelForm over DerivedBook exposing all model fields."""
    class Meta:
        model = DerivedBook
        fields = '__all__'
class ExplicitPKForm(forms.ModelForm):
    """ModelForm over ExplicitPK, limited to the explicit primary key and desc."""
    class Meta:
        model = ExplicitPK
        fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
    """ModelForm over Post exposing all model fields."""
    class Meta:
        model = Post
        fields = '__all__'
class DerivedPostForm(forms.ModelForm):
    """ModelForm over DerivedPost exposing all model fields."""
    class Meta:
        model = DerivedPost
        fields = '__all__'
class CustomWriterForm(forms.ModelForm):
    """ModelForm over Writer that overrides the name field as required=False."""
    # Declared field replaces the auto-generated one for the model's name field.
    name = forms.CharField(required=False)
    class Meta:
        model = Writer
        fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
    """ModelForm over Category exposing all model fields; used as a base class in tests."""
    class Meta:
        model = Category
        fields = '__all__'
class ArticleForm(forms.ModelForm):
    """ModelForm over Article exposing all model fields."""
    class Meta:
        model = Article
        fields = '__all__'
class RoykoForm(forms.ModelForm):
    """ModelForm over Writer exposing all model fields."""
    class Meta:
        model = Writer
        fields = '__all__'
class ArticleStatusForm(forms.ModelForm):
    """ModelForm over ArticleStatus exposing all model fields."""
    class Meta:
        model = ArticleStatus
        fields = '__all__'
class InventoryForm(forms.ModelForm):
    """ModelForm over Inventory exposing all model fields."""
    class Meta:
        model = Inventory
        fields = '__all__'
class SelectInventoryForm(forms.Form):
    """Plain Form selecting Inventory rows by their barcode (to_field_name)."""
    items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
    """ModelForm over CustomFieldForExclusionModel limited to name and markup."""
    class Meta:
        model = CustomFieldForExclusionModel
        fields = ['name', 'markup']
class TextFileForm(forms.ModelForm):
    """ModelForm over TextFile exposing all model fields."""
    class Meta:
        model = TextFile
        fields = '__all__'
class BigIntForm(forms.ModelForm):
    """ModelForm over BigInt exposing all model fields."""
    class Meta:
        model = BigInt
        fields = '__all__'
class ModelFormWithMedia(forms.ModelForm):
    """ModelForm over TextFile that also declares form Media (js/css) assets."""
    class Media:
        js = ('/some/form/javascript',)
        css = {
            'all': ('/some/form/css',)
        }
    class Meta:
        model = TextFile
        fields = '__all__'
class CustomErrorMessageForm(forms.ModelForm):
    """ModelForm over CustomErrorMessage overriding name1's 'invalid' error message."""
    name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})
    class Meta:
        fields = '__all__'
        model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
    """
    Core ModelForm behaviour: how Meta.fields / Meta.exclude determine the
    generated form fields, Meta inheritance and overriding, required/blank
    interaction, and how model-field defaults populate form data.
    """
    def test_base_form(self):
        self.assertEqual(list(BaseCategoryForm.base_fields), ['name', 'slug', 'url'])
    def test_no_model_class(self):
        class NoModelModelForm(forms.ModelForm):
            pass
        with self.assertRaisesMessage(ValueError, 'ModelForm has no model class specified.'):
            NoModelModelForm()
    def test_empty_fields_to_fields_for_model(self):
        """
        An argument of fields=() to fields_for_model should return an empty dictionary
        """
        field_dict = fields_for_model(Person, fields=())
        self.assertEqual(len(field_dict), 0)
    def test_empty_fields_on_modelform(self):
        """
        No fields on a ModelForm should actually result in no fields.
        """
        class EmptyPersonForm(forms.ModelForm):
            class Meta:
                model = Person
                fields = ()
        form = EmptyPersonForm()
        self.assertEqual(len(form.fields), 0)
    def test_empty_fields_to_construct_instance(self):
        """
        No fields should be set on a model instance if construct_instance receives fields=().
        """
        form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
        self.assertTrue(form.is_valid())
        instance = construct_instance(form, Person(), fields=())
        self.assertEqual(instance.name, '')
    def test_blank_with_null_foreign_key_field(self):
        """
        #13776 -- ModelForm's with models having a FK set to null=False and
        required=False should be valid.
        """
        class FormForTestingIsValid(forms.ModelForm):
            class Meta:
                model = Student
                fields = '__all__'
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.fields['character'].required = False
        char = Character.objects.create(username='user', last_action=datetime.datetime.today())
        data = {'study': 'Engineering'}
        data2 = {'study': 'Engineering', 'character': char.pk}
        # form is valid because required=False for field 'character'
        f1 = FormForTestingIsValid(data)
        self.assertTrue(f1.is_valid())
        f2 = FormForTestingIsValid(data2)
        self.assertTrue(f2.is_valid())
        obj = f2.save()
        self.assertEqual(obj.character, char)
    def test_blank_false_with_null_true_foreign_key_field(self):
        """
        A ModelForm with a model having ForeignKey(blank=False, null=True)
        and the form field set to required=False should allow the field to be
        unset.
        """
        class AwardForm(forms.ModelForm):
            class Meta:
                model = Award
                fields = '__all__'
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.fields['character'].required = False
        character = Character.objects.create(username='user', last_action=datetime.datetime.today())
        award = Award.objects.create(name='Best sprinter', character=character)
        data = {'name': 'Best tester', 'character': ''}  # remove character
        form = AwardForm(data=data, instance=award)
        self.assertTrue(form.is_valid())
        award = form.save()
        self.assertIsNone(award.character)
    def test_blank_foreign_key_with_radio(self):
        # A blank-able FK rendered as RadioSelect keeps the empty choice.
        class BookForm(forms.ModelForm):
            class Meta:
                model = Book
                fields = ['author']
                widgets = {'author': forms.RadioSelect()}
        writer = Writer.objects.create(name='Joe Doe')
        form = BookForm()
        self.assertEqual(list(form.fields['author'].choices), [
            ('', '---------'),
            (writer.pk, 'Joe Doe'),
        ])
    def test_non_blank_foreign_key_with_radio(self):
        # A non-blank FK rendered as RadioSelect omits the empty choice.
        class AwardForm(forms.ModelForm):
            class Meta:
                model = Award
                fields = ['character']
                widgets = {'character': forms.RadioSelect()}
        character = Character.objects.create(
            username='user',
            last_action=datetime.datetime.today(),
        )
        form = AwardForm()
        self.assertEqual(
            list(form.fields['character'].choices),
            [(character.pk, 'user')],
        )
    def test_save_blank_false_with_required_false(self):
        """
        A ModelForm with a model with a field set to blank=False and the form
        field set to required=False should allow the field to be unset.
        """
        obj = Writer.objects.create(name='test')
        form = CustomWriterForm(data={'name': ''}, instance=obj)
        self.assertTrue(form.is_valid())
        obj = form.save()
        self.assertEqual(obj.name, '')
    def test_save_blank_null_unique_charfield_saves_null(self):
        # Empty strings must be stored as NULL (backend-dependent) so that
        # multiple blank rows don't violate the unique constraint.
        form_class = modelform_factory(model=NullableUniqueCharFieldModel, fields='__all__')
        empty_value = '' if connection.features.interprets_empty_strings_as_nulls else None
        data = {
            'codename': '',
            'email': '',
            'slug': '',
            'url': '',
        }
        form = form_class(data=data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(form.instance.codename, empty_value)
        self.assertEqual(form.instance.email, empty_value)
        self.assertEqual(form.instance.slug, empty_value)
        self.assertEqual(form.instance.url, empty_value)
        # Save a second form to verify there isn't a unique constraint violation.
        form = form_class(data=data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(form.instance.codename, empty_value)
        self.assertEqual(form.instance.email, empty_value)
        self.assertEqual(form.instance.slug, empty_value)
        self.assertEqual(form.instance.url, empty_value)
    def test_missing_fields_attribute(self):
        message = (
            "Creating a ModelForm without either the 'fields' attribute "
            "or the 'exclude' attribute is prohibited; form "
            "MissingFieldsForm needs updating."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            class MissingFieldsForm(forms.ModelForm):
                class Meta:
                    model = Category
    def test_extra_fields(self):
        # Extra declared fields are appended after the model-derived ones.
        class ExtraFields(BaseCategoryForm):
            some_extra_field = forms.BooleanField()
        self.assertEqual(list(ExtraFields.base_fields),
                         ['name', 'slug', 'url', 'some_extra_field'])
    def test_extra_field_model_form(self):
        with self.assertRaisesMessage(FieldError, 'no-field'):
            class ExtraPersonForm(forms.ModelForm):
                """ ModelForm with an extra field """
                age = forms.IntegerField()
                class Meta:
                    model = Person
                    fields = ('name', 'no-field')
    def test_extra_declared_field_model_form(self):
        # Listing a declared (non-model) field in Meta.fields is allowed.
        class ExtraPersonForm(forms.ModelForm):
            """ ModelForm with an extra field """
            age = forms.IntegerField()
            class Meta:
                model = Person
                fields = ('name', 'age')
    def test_extra_field_modelform_factory(self):
        with self.assertRaisesMessage(FieldError, 'Unknown field(s) (no-field) specified for Person'):
            modelform_factory(Person, fields=['no-field', 'name'])
    def test_replace_field(self):
        class ReplaceField(forms.ModelForm):
            url = forms.BooleanField()
            class Meta:
                model = Category
                fields = '__all__'
        self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
    def test_replace_field_variant_2(self):
        # Should have the same result as before,
        # but 'fields' attribute specified differently
        class ReplaceField(forms.ModelForm):
            url = forms.BooleanField()
            class Meta:
                model = Category
                fields = ['url']
        self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
    def test_replace_field_variant_3(self):
        # Should have the same result as before,
        # but 'fields' attribute specified differently
        class ReplaceField(forms.ModelForm):
            url = forms.BooleanField()
            class Meta:
                model = Category
                fields = []  # url will still appear, since it is explicit above
        self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
    def test_override_field(self):
        class WriterForm(forms.ModelForm):
            book = forms.CharField(required=False)
            class Meta:
                model = Writer
                fields = '__all__'
        wf = WriterForm({'name': 'Richard Lockridge'})
        self.assertTrue(wf.is_valid())
    def test_limit_nonexistent_field(self):
        expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
        with self.assertRaisesMessage(FieldError, expected_msg):
            class InvalidCategoryForm(forms.ModelForm):
                class Meta:
                    model = Category
                    fields = ['nonexistent']
    def test_limit_fields_with_string(self):
        expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
        with self.assertRaisesMessage(TypeError, expected_msg):
            class CategoryForm(forms.ModelForm):
                class Meta:
                    model = Category
                    fields = ('url')  # note the missing comma
    def test_exclude_fields(self):
        class ExcludeFields(forms.ModelForm):
            class Meta:
                model = Category
                exclude = ['url']
        self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug'])
    def test_exclude_nonexistent_field(self):
        # Excluding an unknown field is silently ignored.
        class ExcludeFields(forms.ModelForm):
            class Meta:
                model = Category
                exclude = ['nonexistent']
        self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug', 'url'])
    def test_exclude_fields_with_string(self):
        expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
        with self.assertRaisesMessage(TypeError, expected_msg):
            class CategoryForm(forms.ModelForm):
                class Meta:
                    model = Category
                    exclude = ('url')  # note the missing comma
    def test_exclude_and_validation(self):
        # This Price instance generated by this form is not valid because the quantity
        # field is required, but the form is valid because the field is excluded from
        # the form. This is for backwards compatibility.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                exclude = ('quantity',)
        form = PriceFormWithoutQuantity({'price': '6.00'})
        self.assertTrue(form.is_valid())
        price = form.save(commit=False)
        msg = "{'quantity': ['This field cannot be null.']}"
        with self.assertRaisesMessage(ValidationError, msg):
            price.full_clean()
        # The form should not validate fields that it doesn't contain even if they are
        # specified using 'fields', not 'exclude'.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                fields = ('price',)
        form = PriceFormWithoutQuantity({'price': '6.00'})
        self.assertTrue(form.is_valid())
        # The form should still have an instance of a model that is not complete and
        # not saved into a DB yet.
        self.assertEqual(form.instance.price, Decimal('6.00'))
        self.assertIsNone(form.instance.quantity)
        self.assertIsNone(form.instance.pk)
    def test_confused_form(self):
        class ConfusedForm(forms.ModelForm):
            """ Using 'fields' *and* 'exclude'. Not sure why you'd want to do
            this, but uh, "be liberal in what you accept" and all.
            """
            class Meta:
                model = Category
                fields = ['name', 'url']
                exclude = ['url']
        self.assertEqual(list(ConfusedForm.base_fields),
                         ['name'])
    def test_mixmodel_form(self):
        class MixModelForm(BaseCategoryForm):
            """ Don't allow more than one 'model' definition in the
            inheritance hierarchy.  Technically, it would generate a valid
            form, but the fact that the resulting save method won't deal with
            multiple objects is likely to trip up people not familiar with the
            mechanics.
            """
            class Meta:
                model = Article
                fields = '__all__'
        # MixModelForm is now an Article-related thing, because MixModelForm.Meta
        # overrides BaseCategoryForm.Meta.
        self.assertEqual(
            list(MixModelForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_article_form(self):
        self.assertEqual(
            list(ArticleForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_bad_form(self):
        # First class with a Meta class wins...
        class BadForm(ArticleForm, BaseCategoryForm):
            pass
        self.assertEqual(
            list(BadForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_invalid_meta_model(self):
        class InvalidModelForm(forms.ModelForm):
            class Meta:
                pass  # no model
        # Can't create new form
        msg = 'ModelForm has no model class specified.'
        with self.assertRaisesMessage(ValueError, msg):
            InvalidModelForm()
        # Even if you provide a model instance
        with self.assertRaisesMessage(ValueError, msg):
            InvalidModelForm(instance=Category)
    def test_subcategory_form(self):
        class SubCategoryForm(BaseCategoryForm):
            """ Subclassing without specifying a Meta on the class will use
            the parent's Meta (or the first parent in the MRO if there are
            multiple parent classes).
            """
            pass
        self.assertEqual(list(SubCategoryForm.base_fields), ['name', 'slug', 'url'])
    def test_subclassmeta_form(self):
        class SomeCategoryForm(forms.ModelForm):
            checkbox = forms.BooleanField()
            class Meta:
                model = Category
                fields = '__all__'
        class SubclassMeta(SomeCategoryForm):
            """ We can also subclass the Meta inner class to change the fields
            list.
            """
            class Meta(SomeCategoryForm.Meta):
                exclude = ['url']
        self.assertHTMLEqual(
            str(SubclassMeta()),
            """<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th>
<td><input type="checkbox" name="checkbox" id="id_checkbox" required></td></tr>"""
        )
    def test_orderfields_form(self):
        # Meta.fields order controls the rendered field order.
        class OrderFields(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['url', 'name']
        self.assertEqual(list(OrderFields.base_fields),
                         ['url', 'name'])
        self.assertHTMLEqual(
            str(OrderFields()),
            """<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>
<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>"""
        )
    def test_orderfields2_form(self):
        class OrderFields2(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['slug', 'url', 'name']
                exclude = ['url']
        self.assertEqual(list(OrderFields2.base_fields), ['slug', 'name'])
    def test_default_populated_on_optional_field(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(max_length=255, required=False)
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        # Empty data uses the model field default.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, 'di')
        self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
        # Blank data doesn't use the model field default.
        mf2 = PubForm({'mode': ''})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.mode, '')
    def test_default_not_populated_on_non_empty_value_in_cleaned_data(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(max_length=255, required=False)
            mocked_mode = None
            def clean(self):
                self.cleaned_data['mode'] = self.mocked_mode
                return self.cleaned_data
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        pub_form = PubForm({})
        pub_form.mocked_mode = 'de'
        pub = pub_form.save(commit=False)
        self.assertEqual(pub.mode, 'de')
        # Default should be populated on an empty value in cleaned_data.
        default_mode = 'di'
        for empty_value in pub_form.fields['mode'].empty_values:
            with self.subTest(empty_value=empty_value):
                pub_form = PubForm({})
                pub_form.mocked_mode = empty_value
                pub = pub_form.save(commit=False)
                self.assertEqual(pub.mode, default_mode)
    def test_default_not_populated_on_optional_checkbox_input(self):
        class PubForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = ('active',)
        # Empty data doesn't use the model default because CheckboxInput
        # doesn't have a value in HTML form submission.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertIs(m1.active, False)
        self.assertIsInstance(mf1.fields['active'].widget, forms.CheckboxInput)
        self.assertIs(m1._meta.get_field('active').get_default(), True)
    def test_default_not_populated_on_checkboxselectmultiple(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple)
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        # Empty data doesn't use the model default because an unchecked
        # CheckboxSelectMultiple doesn't have a value in HTML form submission.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, '')
        self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
    def test_default_not_populated_on_selectmultiple(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(required=False, widget=forms.SelectMultiple)
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        # Empty data doesn't use the model default because an unselected
        # SelectMultiple doesn't have a value in HTML form submission.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, '')
        self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
    def test_prefixed_form_with_default_field(self):
        class PubForm(forms.ModelForm):
            prefix = 'form-prefix'
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        mode = 'de'
        self.assertNotEqual(mode, PublicationDefaults._meta.get_field('mode').get_default())
        mf1 = PubForm({'form-prefix-mode': mode})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, mode)
    def test_renderer_kwarg(self):
        custom = object()
        self.assertIs(ProductForm(renderer=custom).renderer, custom)
    def test_default_splitdatetime_field(self):
        class PubForm(forms.ModelForm):
            datetime_published = forms.SplitDateTimeField(required=False)
            class Meta:
                model = PublicationDefaults
                fields = ('datetime_published',)
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1))
        mf2 = PubForm({'datetime_published_0': '2010-01-01', 'datetime_published_1': '0:00:00'})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1))
    def test_default_filefield(self):
        class PubForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = ('file',)
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.file.name, 'default.txt')
        mf2 = PubForm({}, {'file': SimpleUploadedFile('name', b'foo')})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.file.name, 'name')
    def test_default_selectdatewidget(self):
        class PubForm(forms.ModelForm):
            date_published = forms.DateField(required=False, widget=forms.SelectDateWidget)
            class Meta:
                model = PublicationDefaults
                fields = ('date_published',)
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.date_published, datetime.date.today())
        mf2 = PubForm({'date_published_year': '2010', 'date_published_month': '1', 'date_published_day': '1'})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.date_published, datetime.date(2010, 1, 1))
class FieldOverridesByFormMetaForm(forms.ModelForm):
    """
    ModelForm over Category overriding widgets, labels, help_texts,
    error_messages, and field_classes entirely through Meta options.
    """
    class Meta:
        model = Category
        fields = ['name', 'url', 'slug']
        widgets = {
            'name': forms.Textarea,
            'url': forms.TextInput(attrs={'class': 'url'})
        }
        labels = {
            'name': 'Title',
        }
        help_texts = {
            'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
        }
        error_messages = {
            'slug': {
                'invalid': (
                    "Didn't you read the help text? "
                    "We said letters, numbers, underscores and hyphens only!"
                )
            }
        }
        field_classes = {
            'url': forms.URLField,
        }
class TestFieldOverridesByFormMeta(SimpleTestCase):
    """Each Meta override on FieldOverridesByFormMetaForm takes effect."""
    def test_widget_overrides(self):
        form = FieldOverridesByFormMetaForm()
        self.assertHTMLEqual(
            str(form['name']),
            '<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" required></textarea>',
        )
        self.assertHTMLEqual(
            str(form['url']),
            '<input id="id_url" type="text" class="url" name="url" maxlength="40" required>',
        )
        self.assertHTMLEqual(
            str(form['slug']),
            '<input id="id_slug" type="text" name="slug" maxlength="20" required>',
        )
    def test_label_overrides(self):
        form = FieldOverridesByFormMetaForm()
        self.assertHTMLEqual(
            str(form['name'].label_tag()),
            '<label for="id_name">Title:</label>',
        )
        self.assertHTMLEqual(
            str(form['url'].label_tag()),
            '<label for="id_url">The URL:</label>',
        )
        self.assertHTMLEqual(
            str(form['slug'].label_tag()),
            '<label for="id_slug">Slug:</label>',
        )
    def test_help_text_overrides(self):
        form = FieldOverridesByFormMetaForm()
        self.assertEqual(
            form['slug'].help_text,
            'Watch out! Letters, numbers, underscores and hyphens only.',
        )
    def test_error_messages_overrides(self):
        form = FieldOverridesByFormMetaForm(data={
            'name': 'Category',
            'url': 'http://www.example.com/category/',
            'slug': '!%#*@',
        })
        form.full_clean()
        error = [
            "Didn't you read the help text? "
            "We said letters, numbers, underscores and hyphens only!",
        ]
        self.assertEqual(form.errors, {'slug': error})
    def test_field_type_overrides(self):
        # Meta.field_classes replaces the form field class without touching the model field.
        form = FieldOverridesByFormMetaForm()
        self.assertIs(Category._meta.get_field('url').__class__, models.CharField)
        self.assertIsInstance(form.fields['url'], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    # Declared form field shadows the model's url field.
    url = forms.CharField(required=False)
    class Meta:
        # 'url' is deliberately absent from fields: the declared field above
        # replaces the model field, so model-level url validation is skipped.
        fields = ('name', 'slug')
        model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    # Declared form field shadows the model's url field.
    url = forms.CharField(required=False)
    class Meta:
        # Excluding 'url' means the declared field above fully replaces the
        # model field, so model-level url validation is skipped.
        exclude = ['url']
        model = Category
class ValidationTest(SimpleTestCase):
    """Validation behavior when a declared form field replaces a model field."""
    def test_validates_with_replaced_field_not_specified(self):
        """A replaced field left out of Meta.fields skips model validation."""
        form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertIs(form.is_valid(), True)
    def test_validates_with_replaced_field_excluded(self):
        """A replaced field listed in Meta.exclude skips model validation."""
        form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertIs(form.is_valid(), True)
    def test_notrequired_overrides_notblank(self):
        """An empty CustomWriterForm validates — per the test name, the form's
        required=False overrides the model's blank restriction (CustomWriterForm
        is defined elsewhere in this file)."""
        form = CustomWriterForm({})
        self.assertIs(form.is_valid(), True)
class UniqueTest(TestCase):
    """
    unique/unique_together validation.
    """
    @classmethod
    def setUpTestData(cls):
        # Shared author used by the Book-related uniqueness tests below.
        cls.writer = Writer.objects.create(name='Mike Royko')
    def test_simple_unique(self):
        """A unique field rejects duplicates unless editing the same instance."""
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertTrue(form.is_valid())
        obj = form.save()
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
        # Binding the form to the existing instance exempts it from the check.
        form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
        self.assertTrue(form.is_valid())
    def test_unique_together(self):
        """ModelForm test of unique_together constraint"""
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertTrue(form.is_valid())
        form.save()
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
    def test_unique_together_exclusion(self):
        """
        Forms don't validate unique_together constraints when only part of the
        constraint is included in the form's fields. This allows using
        form.save(commit=False) and then assigning the missing field(s) to the
        model instance.
        """
        class BookForm(forms.ModelForm):
            class Meta:
                model = DerivedBook
                fields = ('isbn', 'suffix1')
        # The unique_together is on suffix1/suffix2 but only suffix1 is part
        # of the form. The fields must have defaults, otherwise they'll be
        # skipped by other logic.
        self.assertEqual(DerivedBook._meta.unique_together, (('suffix1', 'suffix2'),))
        for name in ('suffix1', 'suffix2'):
            with self.subTest(name=name):
                field = DerivedBook._meta.get_field(name)
                self.assertEqual(field.default, 0)
        # The form fails validation with "Derived book with this Suffix1 and
        # Suffix2 already exists." if the unique_together validation isn't
        # skipped.
        DerivedBook.objects.create(isbn='12345')
        form = BookForm({'isbn': '56789', 'suffix1': '0'})
        self.assertTrue(form.is_valid(), form.errors)
    def test_multiple_field_unique_together(self):
        """
        When the same field is involved in multiple unique_together
        constraints, we need to make sure we don't remove the data for it
        before doing all the validation checking (not just failing after
        the first one).
        """
        class TripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'
        Triple.objects.create(left=1, middle=2, right=3)
        form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
        self.assertFalse(form.is_valid())
        form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
        self.assertTrue(form.is_valid())
    @skipUnlessDBFeature('supports_nullable_unique_constraints')
    def test_unique_null(self):
        """NULL values don't collide under a nullable unique constraint."""
        title = 'I May Be Wrong But I Doubt It'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
        # With author omitted (NULL), two identical titles may coexist.
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
    def test_inherited_unique(self):
        """A unique field on a parent model is enforced on child-model forms."""
        title = 'Boss'
        Book.objects.create(title=title, author=self.writer, special_id=1)
        form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
    def test_inherited_unique_together(self):
        """A parent model's unique_together is enforced on child-model forms."""
        title = 'Boss'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
    def test_abstract_inherited_unique(self):
        """A unique field inherited from an abstract base is enforced."""
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({
            'title': 'Other', 'author': self.writer.pk, 'isbn': isbn,
            'suffix1': '1', 'suffix2': '2',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
    def test_abstract_inherited_unique_together(self):
        """unique_together inherited from an abstract base is enforced."""
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({
            'title': 'Other',
            'author': self.writer.pk,
            'isbn': '9876',
            'suffix1': '0',
            'suffix2': '0'
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(
            form.errors['__all__'],
            ['Derived book with this Suffix1 and Suffix2 already exists.'],
        )
    def test_explicitpk_unspecified(self):
        """Test for primary_key being in the form and failing validation."""
        form = ExplicitPKForm({'key': '', 'desc': ''})
        self.assertFalse(form.is_valid())
    def test_explicitpk_unique(self):
        """Ensure keys and blank character strings are tested for uniqueness."""
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertTrue(form.is_valid())
        form.save()
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertFalse(form.is_valid())
        # Oracle-style backends treat '' as NULL, which changes which
        # uniqueness checks can fire.
        if connection.features.interprets_empty_strings_as_nulls:
            self.assertEqual(len(form.errors), 1)
            self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
        else:
            self.assertEqual(len(form.errors), 3)
            self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
            self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
            self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
    def test_unique_for_date(self):
        """unique_for_date/month/year constraints are validated on the form."""
        p = Post.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        # Editing the original instance doesn't trip the constraints.
        data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
        form = PostForm(data, instance=p)
        self.assertTrue(form.is_valid())
        # 'posted' itself is still required.
        form = PostForm({'title': "Django 1.0 is released"})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['posted'], ['This field is required.'])
    def test_unique_for_date_in_exclude(self):
        """
        If the date for unique_for_* constraints is excluded from the
        ModelForm (in this case 'posted' has editable=False, then the
        constraint should be ignored.
        """
        class DateTimePostForm(forms.ModelForm):
            class Meta:
                model = DateTimePost
                fields = '__all__'
        DateTimePost.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.datetime(2008, 9, 3, 10, 10, 1),
        )
        # 'title' has unique_for_date='posted'
        form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        # 'slug' has unique_for_year='posted'
        form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertTrue(form.is_valid())
        # 'subtitle' has unique_for_month='posted'
        form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertTrue(form.is_valid())
    def test_inherited_unique_for_date(self):
        """unique_for_* constraints on a parent model apply to child forms."""
        p = Post.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
        form = DerivedPostForm(data, instance=p)
        self.assertTrue(form.is_valid())
    def test_unique_for_date_with_nullable_date(self):
        """unique_for_* checks are skipped when the date value is NULL."""
        class FlexDatePostForm(forms.ModelForm):
            class Meta:
                model = FlexibleDatePost
                fields = '__all__'
        p = FlexibleDatePost.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = FlexDatePostForm({'title': "Django 1.0 is released"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'slug': "Django 1.0"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally"})
        self.assertTrue(form.is_valid())
        data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0"}
        form = FlexDatePostForm(data, instance=p)
        self.assertTrue(form.is_valid())
    def test_override_unique_message(self):
        """Meta.error_messages can override the 'unique' message."""
        class CustomProductForm(ProductForm):
            class Meta(ProductForm.Meta):
                error_messages = {
                    'slug': {
                        'unique': "%(model_name)s's %(field_label)s not unique.",
                    }
                }
        Product.objects.create(slug='teddy-bear-blue')
        form = CustomProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ["Product's Slug not unique."])
    def test_override_unique_together_message(self):
        """Meta.error_messages can override the 'unique_together' message."""
        class CustomPriceForm(PriceForm):
            class Meta(PriceForm.Meta):
                error_messages = {
                    NON_FIELD_ERRORS: {
                        'unique_together': "%(model_name)s's %(field_labels)s not unique.",
                    }
                }
        Price.objects.create(price=6.00, quantity=1)
        form = CustomPriceForm({'price': '6.00', 'quantity': '1'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."])
    def test_override_unique_for_date_message(self):
        """Meta.error_messages can override the 'unique_for_date' message."""
        class CustomPostForm(PostForm):
            class Meta(PostForm.Meta):
                error_messages = {
                    'title': {
                        'unique_for_date': (
                            "%(model_name)s's %(field_label)s not unique "
                            "for %(date_field_label)s date."
                        ),
                    }
                }
        Post.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."])
class ModelFormBasicTests(TestCase):
    """Core ModelForm behavior: rendering, initial data, saving, and M2M."""
    def create_basic_data(self):
        # Categories and writers shared by the rendering/saving tests below.
        self.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment')
        self.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test')
        self.c3 = Category.objects.create(name='Third test', slug='third-test', url='third')
        self.w_royko = Writer.objects.create(name='Mike Royko')
        self.w_woodward = Writer.objects.create(name='Bob Woodward')
    def test_base_form(self):
        """A bare ModelForm renders every model field as table/ul rows."""
        self.assertEqual(Category.objects.count(), 0)
        f = BaseCategoryForm()
        self.assertHTMLEqual(
            str(f),
            """<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>"""
        )
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" required></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" required></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" required></li>"""
        )
        self.assertHTMLEqual(
            str(f["name"]),
            """<input id="id_name" type="text" name="name" maxlength="20" required>""")
    def test_auto_id(self):
        """auto_id=False omits id attributes and <label> tags."""
        f = BaseCategoryForm(auto_id=False)
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li>Name: <input type="text" name="name" maxlength="20" required></li>
<li>Slug: <input type="text" name="slug" maxlength="20" required></li>
<li>The URL: <input type="text" name="url" maxlength="40" required></li>"""
        )
    def test_initial_values(self):
        """Initial data comes from the `initial` dict or a bound instance."""
        self.create_basic_data()
        # Initial values can be provided for model forms
        f = ArticleForm(
            auto_id=False,
            initial={
                'headline': 'Your headline here',
                'categories': [str(self.c1.id), str(self.c2.id)]
            })
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s" selected>It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # When the ModelForm is passed an instance, that instance's current values are
        # inserted as 'initial' data in each Field.
        f = RoykoForm(auto_id=False, instance=self.w_royko)
        self.assertHTMLEqual(
            str(f),
            '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" required><br>
<span class="helptext">Use both first and last names.</span></td></tr>'''
        )
        art = Article.objects.create(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=self.w_royko,
            article='Hello.'
        )
        art_id_1 = art.id
        f = ArticleForm(auto_id=False, instance=art)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # Saving a form bound to an instance updates that same row.
        f = ArticleForm({
            'headline': 'Test headline',
            'slug': 'test-headline',
            'pub_date': '1984-02-06',
            'writer': str(self.w_royko.pk),
            'article': 'Hello.'
        }, instance=art)
        self.assertEqual(f.errors, {})
        self.assertTrue(f.is_valid())
        test_art = f.save()
        self.assertEqual(test_art.id, art_id_1)
        test_art = Article.objects.get(id=art_id_1)
        self.assertEqual(test_art.headline, 'Test headline')
    def test_m2m_initial_callable(self):
        """
        Regression for #10349: A callable can be provided as the initial value for an m2m field
        """
        self.maxDiff = 1200
        self.create_basic_data()
        # Set up a callable initial value
        def formfield_for_dbfield(db_field, **kwargs):
            if db_field.name == 'categories':
                kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2]
            return db_field.formfield(**kwargs)
        # Create a ModelForm, instantiate it, and check that the output is as expected
        ModelForm = modelform_factory(
            Article,
            fields=['headline', 'categories'],
            formfield_callback=formfield_for_dbfield,
        )
        form = ModelForm()
        self.assertHTMLEqual(
            form.as_ul(),
            """<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" required></li>
<li><label for="id_categories">Categories:</label>
<select multiple name="categories" id="id_categories">
<option value="%d" selected>Entertainment</option>
<option value="%d" selected>It's a test</option>
<option value="%d">Third test</option>
</select></li>"""
            % (self.c1.pk, self.c2.pk, self.c3.pk))
    def test_basic_creation(self):
        """save() on a valid bound form creates and returns the instance."""
        self.assertEqual(Category.objects.count(), 0)
        f = BaseCategoryForm({
            'name': 'Entertainment',
            'slug': 'entertainment',
            'url': 'entertainment',
        })
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['name'], 'Entertainment')
        self.assertEqual(f.cleaned_data['slug'], 'entertainment')
        self.assertEqual(f.cleaned_data['url'], 'entertainment')
        c1 = f.save()
        # Testing whether the same object is returned from the
        # ORM... not the fastest way...
        self.assertEqual(Category.objects.count(), 1)
        self.assertEqual(c1, Category.objects.all()[0])
        self.assertEqual(c1.name, "Entertainment")
    def test_save_commit_false(self):
        # If you call save() with commit=False, then it will return an object that
        # hasn't yet been saved to the database. In this case, it's up to you to call
        # save() on the resulting model instance.
        f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
        self.assertTrue(f.is_valid())
        c1 = f.save(commit=False)
        self.assertEqual(c1.name, "Third test")
        self.assertEqual(Category.objects.count(), 0)
        c1.save()
        self.assertEqual(Category.objects.count(), 1)
    def test_save_with_data_errors(self):
        # If you call save() with invalid data, you'll get a ValueError.
        f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
        self.assertEqual(f.errors['name'], ['This field is required.'])
        self.assertEqual(
            f.errors['slug'],
            ['Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.']
        )
        # cleaned_data only holds the fields that validated.
        self.assertEqual(f.cleaned_data, {'url': 'foo'})
        msg = "The Category could not be created because the data didn't validate."
        with self.assertRaisesMessage(ValueError, msg):
            f.save()
        f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
        with self.assertRaisesMessage(ValueError, msg):
            f.save()
    def test_multi_fields(self):
        """FK/M2M/choices fields render as selects reflecting DB contents."""
        self.create_basic_data()
        self.maxDiff = None
        # ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
        # fields with the 'choices' attribute are represented by a ChoiceField.
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(
            str(f),
            '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" required></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>
<tr><th>Writer:</th><td><select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article" required></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # Add some categories and test the many-to-many form output.
        new_art = Article.objects.create(
            article="Hello.", headline="New headline", slug="new-headline",
            pub_date=datetime.date(1988, 1, 4), writer=self.w_royko)
        new_art.categories.add(Category.objects.get(name='Entertainment'))
        self.assertSequenceEqual(new_art.categories.all(), [self.c1])
        f = ArticleForm(auto_id=False, instance=new_art)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
    def test_subset_fields(self):
        # You can restrict a form to a subset of the complete list of fields
        # by providing a 'fields' argument. If you try to save a
        # model created with such a form, you need to ensure that the fields
        # that are _not_ on the form have default values, or are allowed to have
        # a value of None. If a field isn't specified on a form, the object created
        # from the form can't provide a value for that field!
        class PartialArticleForm(forms.ModelForm):
            class Meta:
                model = Article
                fields = ('headline', 'pub_date')
        f = PartialArticleForm(auto_id=False)
        self.assertHTMLEqual(
            str(f),
            '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>''')
        class PartialArticleFormWithSlug(forms.ModelForm):
            class Meta:
                model = Article
                fields = ('headline', 'slug', 'pub_date')
        w_royko = Writer.objects.create(name='Mike Royko')
        art = Article.objects.create(
            article="Hello.", headline="New headline", slug="new-headline",
            pub_date=datetime.date(1988, 1, 4), writer=w_royko)
        f = PartialArticleFormWithSlug({
            'headline': 'New headline',
            'slug': 'new-headline',
            'pub_date': '1988-01-04'
        }, auto_id=False, instance=art)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>'''
        )
        self.assertTrue(f.is_valid())
        new_art = f.save()
        self.assertEqual(new_art.id, art.id)
        new_art = Article.objects.get(id=art.id)
        self.assertEqual(new_art.headline, 'New headline')
    def test_m2m_editing(self):
        """Saving a form sets, clears, or defers (commit=False) M2M data."""
        self.create_basic_data()
        form_data = {
            'headline': 'New headline',
            'slug': 'new-headline',
            'pub_date': '1988-01-04',
            'writer': str(self.w_royko.pk),
            'article': 'Hello.',
            'categories': [str(self.c1.id), str(self.c2.id)]
        }
        # Create a new article, with categories, via the form.
        f = ArticleForm(form_data)
        new_art = f.save()
        new_art = Article.objects.get(id=new_art.id)
        art_id_1 = new_art.id
        self.assertSequenceEqual(new_art.categories.order_by('name'), [self.c1, self.c2])
        # Now, submit form data with no categories. This deletes the existing categories.
        form_data['categories'] = []
        f = ArticleForm(form_data, instance=new_art)
        new_art = f.save()
        self.assertEqual(new_art.id, art_id_1)
        new_art = Article.objects.get(id=art_id_1)
        self.assertSequenceEqual(new_art.categories.all(), [])
        # Create a new article, with no categories, via the form.
        f = ArticleForm(form_data)
        new_art = f.save()
        art_id_2 = new_art.id
        self.assertNotIn(art_id_2, (None, art_id_1))
        new_art = Article.objects.get(id=art_id_2)
        self.assertSequenceEqual(new_art.categories.all(), [])
        # Create a new article, with categories, via the form, but use commit=False.
        # The m2m data won't be saved until save_m2m() is invoked on the form.
        form_data['categories'] = [str(self.c1.id), str(self.c2.id)]
        f = ArticleForm(form_data)
        new_art = f.save(commit=False)
        # Manually save the instance
        new_art.save()
        art_id_3 = new_art.id
        self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
        # The instance doesn't have m2m data yet
        new_art = Article.objects.get(id=art_id_3)
        self.assertSequenceEqual(new_art.categories.all(), [])
        # Save the m2m data on the form
        f.save_m2m()
        self.assertSequenceEqual(new_art.categories.order_by('name'), [self.c1, self.c2])
    def test_custom_form_fields(self):
        # Here, we define a custom ModelForm. Because it happens to have the same fields as
        # the Category model, we can just call the form's save() to apply its changes to an
        # existing Category instance.
        class ShortCategory(forms.ModelForm):
            name = forms.CharField(max_length=5)
            slug = forms.CharField(max_length=5)
            url = forms.CharField(max_length=3)
            class Meta:
                model = Category
                fields = '__all__'
        cat = Category.objects.create(name='Third test')
        form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
        self.assertEqual(form.save().name, 'Third')
        self.assertEqual(Category.objects.get(id=cat.id).name, 'Third')
    def test_runtime_choicefield_populated(self):
        self.maxDiff = None
        # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
        # at runtime, based on the data in the database when the form is displayed, not
        # the data in the database when the form is instantiated.
        self.create_basic_data()
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> </li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # Rows created after the form was instantiated still show up.
        c4 = Category.objects.create(name='Fourth', url='4th')
        w_bernstein = Writer.objects.create(name='Carl Bernstein')
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
<option value="%s">Fourth</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk))
    def test_recleaning_model_form_instance(self):
        """
        Re-cleaning an instance that was added via a ModelForm shouldn't raise
        a pk uniqueness error.
        """
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author
                fields = '__all__'
        form = AuthorForm({'full_name': 'Bob'})
        self.assertTrue(form.is_valid())
        obj = form.save()
        obj.name = 'Alice'
        obj.full_clean()
    def test_validate_foreign_key_uses_default_manager(self):
        """FK choices come from the default manager, so archived rows are rejected."""
        class MyForm(forms.ModelForm):
            class Meta:
                model = Article
                fields = '__all__'
        # Archived writers are filtered out by the default manager.
        w = Writer.objects.create(name='Randy', archived=True)
        data = {
            'headline': 'My Article',
            'slug': 'my-article',
            'pub_date': datetime.date.today(),
            'writer': w.pk,
            'article': 'lorem ipsum',
        }
        form = MyForm(data)
        self.assertIs(form.is_valid(), False)
        self.assertEqual(
            form.errors,
            {'writer': ['Select a valid choice. That choice is not one of the available choices.']},
        )
    def test_validate_foreign_key_to_model_with_overridden_manager(self):
        """Replacing the field queryset with _base_manager admits archived FKs."""
        class MyForm(forms.ModelForm):
            class Meta:
                model = Article
                fields = '__all__'
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                # Allow archived authors.
                self.fields['writer'].queryset = Writer._base_manager.all()
        w = Writer.objects.create(name='Randy', archived=True)
        data = {
            'headline': 'My Article',
            'slug': 'my-article',
            'pub_date': datetime.date.today(),
            'writer': w.pk,
            'article': 'lorem ipsum',
        }
        form = MyForm(data)
        self.assertIs(form.is_valid(), True)
        article = form.save()
        self.assertEqual(article.writer, w)
class ModelMultipleChoiceFieldTests(TestCase):
    """
    Behavior of forms.ModelMultipleChoiceField: cleaning of pk lists,
    choices rendering, query efficiency, validators, hidden-initial
    change detection, and to_field_name support.
    """
    @classmethod
    def setUpTestData(cls):
        # Three shared categories used as the field's queryset in most tests.
        cls.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment')
        cls.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test')
        cls.c3 = Category.objects.create(name='Third', slug='third-test', url='third')

    def test_model_multiple_choice_field(self):
        """
        clean() accepts lists/tuples of pks (int or str) and raises
        ValidationError for empty, unknown, or malformed values.
        """
        f = forms.ModelMultipleChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test"),
            (self.c3.pk, 'Third')])
        # The field is required by default, so None and [] are invalid.
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean([])
        self.assertCountEqual(f.clean([self.c1.id]), [self.c1])
        self.assertCountEqual(f.clean([self.c2.id]), [self.c2])
        # String pks and mixed int/str pks are accepted equally.
        self.assertCountEqual(f.clean([str(self.c1.id)]), [self.c1])
        self.assertCountEqual(
            f.clean([str(self.c1.id), str(self.c2.id)]),
            [self.c1, self.c2],
        )
        self.assertCountEqual(
            f.clean([self.c1.id, str(self.c2.id)]),
            [self.c1, self.c2],
        )
        self.assertCountEqual(
            f.clean((self.c1.id, str(self.c2.id))),
            [self.c1, self.c2],
        )
        with self.assertRaises(ValidationError):
            f.clean(['100'])
        with self.assertRaises(ValidationError):
            f.clean('hello')
        with self.assertRaises(ValidationError):
            f.clean(['fail'])
        # Invalid types that require TypeError to be caught (#22808).
        with self.assertRaises(ValidationError):
            f.clean([['fail']])
        with self.assertRaises(ValidationError):
            f.clean([{'foo': 'bar'}])
        # Add a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        # Note, we are using an id of 1006 here since tests that run before
        # this may create categories with primary keys up to 6. Use
        # a number that will not conflict.
        c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
        self.assertCountEqual(f.clean([c6.id]), [c6])
        # Delete a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='6th').delete()
        with self.assertRaises(ValidationError):
            f.clean([c6.id])

    def test_model_multiple_choice_required_false(self):
        """
        With required=False, empty input cleans to an empty queryset, but
        pks outside the queryset are still rejected.
        """
        f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
        self.assertIsInstance(f.clean([]), EmptyQuerySet)
        self.assertIsInstance(f.clean(()), EmptyQuerySet)
        with self.assertRaises(ValidationError):
            f.clean(['0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c3.id), '0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c1.id), '0'])
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Third')
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        self.assertSequenceEqual(f.clean([self.c2.id]), [self.c2])
        with self.assertRaises(ValidationError):
            f.clean([self.c3.id])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c2.id), str(self.c3.id)])
        # label_from_instance customizes the human-readable choice labels.
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "multicategory " + str(obj)
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'multicategory Entertainment'),
            (self.c2.pk, "multicategory It's a test"),
            (self.c3.pk, 'multicategory Third')])

    def test_model_multiple_choice_number_of_queries(self):
        """
        ModelMultipleChoiceField does O(1) queries instead of O(n) (#10156).
        """
        persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
        self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])

    def test_model_multiple_choice_run_validators(self):
        """
        ModelMultipleChoiceField run given validators (#14144).
        """
        for i in range(30):
            Writer.objects.create(name="Person %s" % i)

        self._validator_run = False

        def my_validator(value):
            self._validator_run = True

        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(), validators=[my_validator])
        f.clean([p.pk for p in Writer.objects.all()[8:9]])
        self.assertTrue(self._validator_run)

    def test_model_multiple_choice_show_hidden_initial(self):
        """
        Test support of show_hidden_initial by ModelMultipleChoiceField.
        """
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(show_hidden_initial=True, queryset=Writer.objects.all())

        person1 = Writer.objects.create(name="Person 1")
        person2 = Writer.objects.create(name="Person 2")

        # Submitted data identical to the hidden initial: nothing changed.
        form = WriterForm(
            initial={'persons': [person1, person2]},
            data={
                'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person1.pk), str(person2.pk)],
            },
        )
        self.assertTrue(form.is_valid())
        self.assertFalse(form.has_changed())

        # One selection removed relative to initial: has_changed() is True.
        form = WriterForm(
            initial={'persons': [person1, person2]},
            data={
                'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person2.pk)],
            },
        )
        self.assertTrue(form.is_valid())
        self.assertTrue(form.has_changed())

    def test_model_multiple_choice_field_22745(self):
        """
        #22745 -- Make sure that ModelMultipleChoiceField with
        CheckboxSelectMultiple widget doesn't produce unnecessary db queries
        when accessing its BoundField's attrs.
        """
        class ModelMultipleChoiceForm(forms.Form):
            categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple)

        form = ModelMultipleChoiceForm()
        field = form['categories']  # BoundField
        template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
        with self.assertNumQueries(1):
            template.render(Context({'field': field}))

    def test_show_hidden_initial_changed_queries_efficiently(self):
        """
        has_changed() for a hidden-initial m2m field resolves all submitted
        pks with a single query (asserted via assertNumQueries).
        """
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(
                show_hidden_initial=True, queryset=Writer.objects.all())

        writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
        writer_pks = tuple(x.pk for x in writers)
        form = WriterForm(data={'initial-persons': writer_pks})
        with self.assertNumQueries(1):
            self.assertTrue(form.has_changed())

    def test_clean_does_deduplicate_values(self):
        """Duplicate submitted pks collapse to a single query parameter."""
        class PersonForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(queryset=Person.objects.all())

        person1 = Person.objects.create(name='Person 1')
        form = PersonForm(data={})
        queryset = form.fields['persons'].clean([str(person1.pk)] * 50)
        sql, params = queryset.query.sql_with_params()
        self.assertEqual(len(params), 1)

    def test_to_field_name_with_initial_data(self):
        """
        With to_field_name='slug', the bound field's value() yields slugs
        rather than primary keys.
        """
        class ArticleCategoriesForm(forms.ModelForm):
            categories = forms.ModelMultipleChoiceField(Category.objects.all(), to_field_name='slug')

            class Meta:
                model = Article
                fields = ['categories']

        article = Article.objects.create(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=Writer.objects.create(name='Test writer'),
            article='Hello.',
        )
        article.categories.add(self.c2, self.c3)
        form = ArticleCategoriesForm(instance=article)
        self.assertCountEqual(form['categories'].value(), [self.c2.slug, self.c3.slug])
class ModelOneToOneFieldTests(TestCase):
    """
    ModelForms over OneToOneField relations, parent links, nullable
    relations, and multi-table-inherited (subclassed) models.
    """
    def test_modelform_onetoonefield(self):
        class ImprovedArticleForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticle
                fields = '__all__'

        class ImprovedArticleWithParentLinkForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticleWithParentLink
                fields = '__all__'

        # The plain OneToOneField becomes a form field; the parent-link
        # variant contributes no editable form fields.
        self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
        self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])

    def test_modelform_subclassed_model(self):
        class BetterWriterForm(forms.ModelForm):
            class Meta:
                # BetterWriter model is a subclass of Writer with an additional `score` field
                model = BetterWriter
                fields = '__all__'

        bw = BetterWriter.objects.create(name='Joe Better', score=10)
        # model_to_dict() exposes the inherited parent pointer (writer_ptr)
        # and honors its fields/exclude arguments.
        self.assertEqual(sorted(model_to_dict(bw)), ['id', 'name', 'score', 'writer_ptr'])
        self.assertEqual(sorted(model_to_dict(bw, fields=[])), [])
        self.assertEqual(sorted(model_to_dict(bw, fields=['id', 'name'])), ['id', 'name'])
        self.assertEqual(sorted(model_to_dict(bw, exclude=[])), ['id', 'name', 'score', 'writer_ptr'])
        self.assertEqual(sorted(model_to_dict(bw, exclude=['id', 'name'])), ['score', 'writer_ptr'])

        form = BetterWriterForm({'name': 'Some Name', 'score': 12})
        self.assertTrue(form.is_valid())
        bw2 = form.save()
        self.assertEqual(bw2.score, 12)

    def test_onetoonefield(self):
        class WriterProfileForm(forms.ModelForm):
            class Meta:
                # WriterProfile has a OneToOneField to Writer
                model = WriterProfile
                fields = '__all__'

        self.w_royko = Writer.objects.create(name='Mike Royko')
        self.w_woodward = Writer.objects.create(name='Bob Woodward')

        form = WriterProfileForm()
        self.assertHTMLEqual(
            form.as_p(),
            '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" required></p>''' % (
                self.w_woodward.pk, self.w_royko.pk,
            )
        )

        data = {
            'writer': str(self.w_woodward.pk),
            'age': '65',
        }
        form = WriterProfileForm(data)
        instance = form.save()
        self.assertEqual(str(instance), 'Bob Woodward is 65')

        # Re-rendering bound to the saved instance pre-selects the writer
        # and fills in the saved age.
        form = WriterProfileForm(instance=instance)
        self.assertHTMLEqual(
            form.as_p(),
            '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="">---------</option>
<option value="%s" selected>Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" value="65" id="id_age" min="0" required></p>''' % (
                self.w_woodward.pk, self.w_royko.pk,
            )
        )

    def test_assignment_of_none(self):
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author
                fields = ['publication', 'full_name']

        publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
        author = Author.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertTrue(form.is_valid())
        self.assertIsNone(form.cleaned_data['publication'])
        author = form.save()
        # author object returned from form still retains original publication object
        # that's why we need to retrieve it from database again
        new_author = Author.objects.get(pk=author.pk)
        self.assertIsNone(new_author.publication)

    def test_assignment_of_none_null_false(self):
        # Author1's publication relation presumably disallows null (see the
        # test name) — so clearing it must fail validation.
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author1
                fields = ['publication', 'full_name']

        publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
        author = Author1.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
    """
    ModelForm behavior for FileField and ImageField: clearing, required vs.
    optional handling, max_length, callable upload_to, and the automatic
    width/height dimension fields of ImageField.
    """
    def test_clean_false(self):
        """
        If the ``clean`` method on a non-required FileField receives False as
        the data (meaning clear the field value), it returns False, regardless
        of the value of ``initial``.
        """
        f = forms.FileField(required=False)
        self.assertIs(f.clean(False), False)
        self.assertIs(f.clean(False, 'initial'), False)

    def test_clean_false_required(self):
        """
        If the ``clean`` method on a required FileField receives False as the
        data, it has the same effect as None: initial is returned if non-empty,
        otherwise the validation catches the lack of a required value.
        """
        f = forms.FileField(required=True)
        self.assertEqual(f.clean(False, 'initial'), 'initial')
        with self.assertRaises(ValidationError):
            f.clean(False)

    def test_full_clear(self):
        """
        Integration happy-path test that a model FileField can actually be set
        and cleared via a ModelForm.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'

        form = DocumentForm()
        self.assertIn('name="myfile"', str(form))
        self.assertNotIn('myfile-clear', str(form))
        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        self.assertEqual(doc.myfile.name, 'something.txt')
        # The clear checkbox is only rendered once a file is present.
        form = DocumentForm(instance=doc)
        self.assertIn('myfile-clear', str(form))
        form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
        doc = form.save(commit=False)
        self.assertFalse(doc.myfile)

    def test_clear_and_file_contradiction(self):
        """
        If the user submits a new file upload AND checks the clear checkbox,
        they get a validation error, and the bound redisplay of the form still
        includes the current file and the clear checkbox.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'

        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        form = DocumentForm(
            instance=doc,
            files={'myfile': SimpleUploadedFile('something.txt', b'content')},
            data={'myfile-clear': 'true'},
        )
        # assertFalse instead of assertTrue(not ...): same check, clearer
        # intent and a more useful failure message.
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['myfile'],
                         ['Please either submit a file or check the clear checkbox, not both.'])
        rendered = str(form)
        self.assertIn('something.txt', rendered)
        self.assertIn('myfile-clear', rendered)

    def test_render_empty_file_field(self):
        """A FileField with no stored file renders as a bare file input."""
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'

        doc = Document.objects.create()
        form = DocumentForm(instance=doc)
        self.assertHTMLEqual(
            str(form['myfile']),
            '<input id="id_myfile" name="myfile" type="file">'
        )

    def test_file_field_data(self):
        """Upload, re-upload, max_length, and edit-without-reupload paths."""
        # Test conditions when files is either not given or empty.
        f = TextFileForm(data={'description': 'Assistance'})
        self.assertFalse(f.is_valid())
        f = TextFileForm(data={'description': 'Assistance'}, files={})
        self.assertFalse(f.is_valid())

        # Upload a file and ensure it all works as expected.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        instance.file.delete()

        # If the previous file has been deleted, the file name can be reused
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')

        # Check if the max_length attribute has been inherited from the model.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')},
        )
        self.assertFalse(f.is_valid())

        # Edit an instance that already has the file defined in the model. This will not
        # save the file again, but leave it exactly as it is.
        f = TextFileForm({'description': 'Assistance'}, instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')

        # Delete the current file since this is not done by Django.
        instance.file.delete()

        # Override the file by uploading a new one.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test2.txt', b'hello world')},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test2.txt')

        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()

    def test_filefield_required_false(self):
        """A FileField made non-required accepts and preserves empty values."""
        # Test the non-required FileField
        f = TextFileForm(data={'description': 'Assistance'})
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, '')

        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test3.txt', b'hello world')},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test3.txt')

        # Instance can be edited w/out re-uploading the file and existing file should be preserved.
        f = TextFileForm({'description': 'New Description'}, instance=instance)
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.file.name, 'tests/test3.txt')

        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()

    def test_custom_file_field_save(self):
        """
        Regression for #11149: save_form_data should be called only once
        """
        class CFFForm(forms.ModelForm):
            class Meta:
                model = CustomFF
                fields = '__all__'

        # It's enough that the form saves without error -- the custom save routine will
        # generate an AssertionError if it is called more than once during save.
        form = CFFForm(data={'f': None})
        form.save()

    def test_file_field_multiple_save(self):
        """
        Simulate a file upload and check how many times Model.save() gets
        called. Test for bug #639.
        """
        class PhotoForm(forms.ModelForm):
            class Meta:
                model = Photo
                fields = '__all__'

        # Grab an image for testing.
        filename = os.path.join(os.path.dirname(__file__), 'test.png')
        with open(filename, "rb") as fp:
            img = fp.read()

        # Fake a POST QueryDict and FILES MultiValueDict.
        data = {'title': 'Testing'}
        files = {"image": SimpleUploadedFile('test.png', img, 'image/png')}

        form = PhotoForm(data=data, files=files)
        p = form.save()

        try:
            # Check the savecount stored on the object (see the model).
            self.assertEqual(p._savecount, 1)
        finally:
            # Delete the "uploaded" file to avoid clogging /tmp.
            p = Photo.objects.get()
            p.image.delete(save=False)

    def test_file_path_field_blank(self):
        """FilePathField(blank=True) includes the empty option."""
        class FPForm(forms.ModelForm):
            class Meta:
                model = FilePathModel
                fields = '__all__'

        form = FPForm()
        self.assertEqual([name for _, name in form['path'].field.choices], ['---------', 'models.py'])

    @skipUnless(test_images, "Pillow not installed")
    def test_image_field(self):
        # ImageField and FileField are nearly identical, but they differ slightly when
        # it comes to validation. This specifically tests that #6302 is fixed for
        # both file fields and image fields.
        with open(os.path.join(os.path.dirname(__file__), 'test.png'), 'rb') as fp:
            image_data = fp.read()
        with open(os.path.join(os.path.dirname(__file__), 'test2.png'), 'rb') as fp:
            image_data2 = fp.read()

        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Edit an instance that already has the (required) image defined in the model. This will not
        # save the image again, but leave it exactly as it is.
        f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.height, 16)
        self.assertEqual(instance.width, 16)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        # Override the file by uploading a new one.
        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()
        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)},
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()

        # Test the non-required ImageField
        # Note: In Oracle, we expect a null ImageField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_imagefield_repr = ''
        else:
            expected_null_imagefield_repr = None

        f = OptionalImageFileForm(data={'description': 'Test'})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, expected_null_imagefield_repr)
        self.assertIsNone(instance.width)
        self.assertIsNone(instance.height)

        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test3.png', image_data)},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Editing the instance without re-uploading the image should not affect
        # the image or its width/height properties.
        f = OptionalImageFileForm({'description': 'New Description'}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Delete the current file since this is not done by Django.
        instance.image.delete()
        instance.delete()

        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test4.png', image_data2)}
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test4.png')
        self.assertEqual(instance.width, 48)
        self.assertEqual(instance.height, 32)
        instance.delete()
        # Test callable upload_to behavior that's dependent on the value of another field in the model
        f = ImageFileForm(
            data={'description': 'And a final one', 'path': 'foo'},
            files={'image': SimpleUploadedFile('test4.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'foo/test4.png')
        instance.delete()

        # Editing an instance that has an image without an extension shouldn't
        # fail validation. First create:
        f = NoExtensionImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/no_extension')
        # Then edit:
        f = NoExtensionImageFileForm(data={'description': 'Edited image'}, instance=instance)
        self.assertTrue(f.is_valid())
class ModelOtherFieldTests(SimpleTestCase):
    """ModelForm validation for miscellaneous model field types."""

    def test_big_integer_field(self):
        # BigIntegerField enforces the signed 64-bit range at both ends.
        bif = BigIntForm({'biggie': '-9223372036854775808'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '-9223372036854775809'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(
            bif.errors,
            {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']}
        )
        bif = BigIntForm({'biggie': '9223372036854775807'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '9223372036854775808'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})

    def test_url_on_modelform(self):
        "Check basic URL field validation on model forms"
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'

        self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())

        self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())

    def test_modelform_non_editable_field(self):
        """
        When explicitly including a non-editable field in a ModelForm, the
        error message should be explicit.
        """
        # 'created', non-editable, is excluded by default
        self.assertNotIn('created', ArticleForm().fields)

        msg = "'created' cannot be specified for Article model form as it is a non-editable field"
        with self.assertRaisesMessage(FieldError, msg):
            class InvalidArticleForm(forms.ModelForm):
                class Meta:
                    model = Article
                    fields = ('headline', 'created')

    def test_http_prefixing(self):
        """
        If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613)
        """
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'

        form = HomepageForm({'url': 'example.com'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com')

        form = HomepageForm({'url': 'example.com/test'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class OtherModelFormTests(TestCase):
    """
    Assorted ModelForm behaviors: form media, choice validation,
    to_field_name, prefetch_related on field querysets, excluded custom
    fields, and callable field defaults.
    """
    def test_media_on_modelform(self):
        # Similar to a regular Form class you can define custom media to be used on
        # the ModelForm.
        f = ModelFormWithMedia()
        self.assertHTMLEqual(
            str(f.media),
            '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet">
<script src="/some/form/javascript"></script>'''
        )

    def test_choices_type(self):
        # Choices on CharField and IntegerField
        f = ArticleForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('42')

        f = ArticleStatusForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('z')

    def test_prefetch_related_queryset(self):
        """
        ModelChoiceField should respect a prefetch_related() on its queryset.
        """
        blue = Colour.objects.create(name='blue')
        red = Colour.objects.create(name='red')
        multicolor_item = ColourfulItem.objects.create()
        multicolor_item.colours.add(blue, red)
        red_item = ColourfulItem.objects.create()
        red_item.colours.add(red)

        class ColorModelChoiceField(forms.ModelChoiceField):
            def label_from_instance(self, obj):
                # Touches the m2m, which should be served by the prefetch.
                return ', '.join(c.name for c in obj.colours.all())

        field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours'))
        with self.assertNumQueries(3):  # would be 4 if prefetch is ignored
            self.assertEqual(tuple(field.choices), (
                ('', '---------'),
                (multicolor_item.pk, 'blue, red'),
                (red_item.pk, 'red'),
            ))

    def test_foreignkeys_which_use_to_field(self):
        apple = Inventory.objects.create(barcode=86, name='Apple')
        pear = Inventory.objects.create(barcode=22, name='Pear')
        core = Inventory.objects.create(barcode=87, name='Core', parent=apple)

        # Choice values use the barcode (the to_field), not the pk.
        field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), (
            ('', '---------'),
            (86, 'Apple'),
            (87, 'Core'),
            (22, 'Pear')))

        form = InventoryForm(instance=core)
        self.assertHTMLEqual(str(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected>Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
        data = model_to_dict(core)
        data['parent'] = '22'
        form = InventoryForm(data=data, instance=core)
        core = form.save()
        self.assertEqual(core.parent.name, 'Pear')

        class CategoryForm(forms.ModelForm):
            description = forms.CharField()

            class Meta:
                model = Category
                fields = ['description', 'url']

        self.assertEqual(list(CategoryForm.base_fields), ['description', 'url'])
        self.assertHTMLEqual(
            str(CategoryForm()),
            '''<tr><th><label for="id_description">Description:</label></th>
<td><input type="text" name="description" id="id_description" required></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>'''
        )
        # to_field_name should also work on ModelMultipleChoiceField ##################

        field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
        self.assertSequenceEqual(field.clean([86]), [apple])

        form = SelectInventoryForm({'items': [87, 22]})
        self.assertTrue(form.is_valid())
        self.assertEqual(len(form.cleaned_data), 1)
        self.assertSequenceEqual(form.cleaned_data['items'], [core, pear])

    def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
        # Per the test name, a custom model field can exclude itself from the
        # form even when named explicitly; only 'name' remains.
        self.assertEqual(list(CustomFieldForExclusionForm.base_fields), ['name'])
        self.assertHTMLEqual(
            str(CustomFieldForExclusionForm()),
            '''<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="10" required></td></tr>'''
        )

    def test_iterable_model_m2m(self):
        class ColourfulItemForm(forms.ModelForm):
            class Meta:
                model = ColourfulItem
                fields = '__all__'

        colour = Colour.objects.create(name='Blue')
        form = ColourfulItemForm()
        self.maxDiff = 1024
        self.assertHTMLEqual(
            form.as_p(),
            """<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" required></p>
<p><label for="id_colours">Colours:</label>
<select multiple name="colours" id="id_colours" required>
<option value="%(blue_pk)s">Blue</option>
</select></p>"""
            % {'blue_pk': colour.pk})

    def test_callable_field_default(self):
        class PublicationDefaultsForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = ('title', 'date_published', 'mode', 'category')

        self.maxDiff = 2000
        form = PublicationDefaultsForm()
        today_str = str(datetime.date.today())
        self.assertHTMLEqual(
            form.as_p(),
            """
            <p><label for="id_title">Title:</label>
            <input id="id_title" maxlength="30" name="title" type="text" required></p>
            <p><label for="id_date_published">Date published:</label>
            <input id="id_date_published" name="date_published" type="text" value="{0}" required>
            <input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}"></p>
            <p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
            <option value="di" selected>direct</option>
            <option value="de">delayed</option></select>
            <input id="initial-id_mode" name="initial-mode" type="hidden" value="di"></p>
            <p><label for="id_category">Category:</label> <select id="id_category" name="category">
            <option value="1">Games</option>
            <option value="2">Comics</option>
            <option value="3" selected>Novel</option></select>
            <input id="initial-id_category" name="initial-category" type="hidden" value="3">
            """.format(today_str)
        )
        # Re-submitting the rendered defaults counts as "not changed".
        empty_data = {
            'title': '',
            'date_published': today_str,
            'initial-date_published': today_str,
            'mode': 'di',
            'initial-mode': 'di',
            'category': '3',
            'initial-category': '3',
        }
        bound_form = PublicationDefaultsForm(empty_data)
        self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
    """Custom validation error messages from the form and the model are used."""
    def test_custom_error_messages(self):
        # name1's message is declared on CustomErrorMessageForm itself;
        # name2's presumably comes from the model field definition
        # (see CustomErrorMessage in models.py — not visible here).
        data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
        errors = CustomErrorMessageForm(data).errors
        self.assertHTMLEqual(
            str(errors['name1']),
            '<ul class="errorlist"><li>Form custom error message.</li></ul>'
        )
        self.assertHTMLEqual(
            str(errors['name2']),
            '<ul class="errorlist"><li>Model custom error message.</li></ul>'
        )
    def test_model_clean_error_messages(self):
        # Errors raised from Model.clean() end up on the named field, or in
        # __all__ (non-field errors) when no field is given.
        data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages.</li></ul>'
        )
        data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>'
        )
        data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'], ['Global error message.'])
class CustomCleanTests(TestCase):
    """Interactions between ModelForm.clean() overrides and saving."""
    def test_override_clean(self):
        """
        Regression for #12596: Calling super from ModelForm.clean() should be
        optional.
        """
        class TripleFormWithCleanOverride(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'
            def clean(self):
                # Deliberately does NOT call super().clean().
                if not self.cleaned_data['left'] == self.cleaned_data['right']:
                    raise ValidationError('Left and right should be equal')
                return self.cleaned_data
        form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
        self.assertTrue(form.is_valid())
        # form.instance.left will be None if the instance was not constructed
        # by form.full_clean().
        self.assertEqual(form.instance.left, 1)
    def test_model_form_clean_applies_to_model(self):
        """
        Regression test for #12960. Make sure the cleaned_data returned from
        ModelForm.clean() is applied to the model instance.
        """
        class CategoryForm(forms.ModelForm):
            class Meta:
                model = Category
                fields = '__all__'
            def clean(self):
                self.cleaned_data['name'] = self.cleaned_data['name'].upper()
                return self.cleaned_data
        data = {'name': 'Test', 'slug': 'test', 'url': '/test'}
        form = CategoryForm(data)
        category = form.save()
        # The upper-cased value from clean() must reach the saved instance.
        self.assertEqual(category.name, 'TEST')
class ModelFormInheritanceTests(SimpleTestCase):
    """How declared fields combine and cancel across form base classes."""
    def test_form_subclass_inheritance(self):
        """Model fields and fields from a plain Form base are merged."""
        class Form(forms.Form):
            age = forms.IntegerField()
        class ModelForm(forms.ModelForm, Form):
            class Meta:
                model = Writer
                fields = '__all__'
        self.assertEqual(list(ModelForm().fields), ['name', 'age'])
    def test_field_removal(self):
        """Setting a field to None in a base class removes it, MRO-dependent."""
        class ModelForm(forms.ModelForm):
            class Meta:
                model = Writer
                fields = '__all__'
        class Mixin:
            age = None
        class Form(forms.Form):
            age = forms.IntegerField()
        class Form2(forms.Form):
            foo = forms.IntegerField()
        self.assertEqual(list(ModelForm().fields), ['name'])
        # Mixin's age=None shadows Form's declared age when it comes first.
        self.assertEqual(list(type('NewForm', (Mixin, Form), {})().fields), [])
        self.assertEqual(list(type('NewForm', (Form2, Mixin, Form), {})().fields), ['foo'])
        self.assertEqual(list(type('NewForm', (Mixin, ModelForm, Form), {})().fields), ['name'])
        self.assertEqual(list(type('NewForm', (ModelForm, Mixin, Form), {})().fields), ['name'])
        # When Mixin comes last, the declared age survives.
        self.assertEqual(list(type('NewForm', (ModelForm, Form, Mixin), {})().fields), ['name', 'age'])
        self.assertEqual(list(type('NewForm', (ModelForm, Form), {'age': None})().fields), ['name'])
    def test_field_removal_name_clashes(self):
        """
        Form fields can be removed in subclasses by setting them to None
        (#22510).
        """
        class MyForm(forms.ModelForm):
            media = forms.CharField()
            class Meta:
                model = Writer
                fields = '__all__'
        class SubForm(MyForm):
            media = None
        # 'media' the form field is removed, but the class attribute
        # (forms' Media machinery) still exists on both classes.
        self.assertIn('media', MyForm().fields)
        self.assertNotIn('media', SubForm().fields)
        self.assertTrue(hasattr(MyForm, 'media'))
        self.assertTrue(hasattr(SubForm, 'media'))
class StumpJokeForm(forms.ModelForm):
    """ModelForm over all fields of StumpJoke (used by LimitChoicesToTests)."""
    class Meta:
        model = StumpJoke
        fields = '__all__'
# A plain form field that carries a `queryset` attribute but no
# `limit_choices_to` support — regression fixture for #23795.
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
    queryset = 42
class StumpJokeWithCustomFieldForm(forms.ModelForm):
    """Form declaring only the custom field above; no model fields included."""
    custom = CustomFieldWithQuerysetButNoLimitChoicesTo()
    class Meta:
        model = StumpJoke
        fields = ()
class LimitChoicesToTests(TestCase):
    """
    Tests the functionality of ``limit_choices_to``.
    """
    @classmethod
    def setUpTestData(cls):
        # One character whose last_action is in the future and one in the
        # past; the callable limit_choices_to on StumpJoke's relations
        # presumably filters on last_action (see models.py), so only
        # threepwood is expected in the limited querysets below.
        cls.threepwood = Character.objects.create(
            username='threepwood',
            last_action=datetime.datetime.today() + datetime.timedelta(days=1),
        )
        cls.marley = Character.objects.create(
            username='marley',
            last_action=datetime.datetime.today() - datetime.timedelta(days=1),
        )
    def test_limit_choices_to_callable_for_fk_rel(self):
        """
        A ForeignKey can use limit_choices_to as a callable (#2554).
        """
        stumpjokeform = StumpJokeForm()
        self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood])
    def test_limit_choices_to_callable_for_m2m_rel(self):
        """
        A ManyToManyField can use limit_choices_to as a callable (#2554).
        """
        stumpjokeform = StumpJokeForm()
        # Check the M2M field (has_fooled_today), not the FK field: the
        # previous version duplicated the FK assertion and never exercised
        # the many-to-many relation. The expected limited queryset matches
        # test_fields_for_model_applies_limit_choices_to below.
        self.assertSequenceEqual(stumpjokeform.fields['has_fooled_today'].queryset, [self.threepwood])
    def test_custom_field_with_queryset_but_no_limit_choices_to(self):
        """
        A custom field with a `queryset` attribute but no `limit_choices_to`
        works (#23795).
        """
        f = StumpJokeWithCustomFieldForm()
        # The queryset attribute is left untouched (no filtering applied).
        self.assertEqual(f.fields['custom'].queryset, 42)
    def test_fields_for_model_applies_limit_choices_to(self):
        """fields_for_model() honors limit_choices_to on the model field."""
        fields = fields_for_model(StumpJoke, ['has_fooled_today'])
        self.assertSequenceEqual(fields['has_fooled_today'].queryset, [self.threepwood])
    def test_callable_called_each_time_form_is_instantiated(self):
        """The limit_choices_to callable is re-evaluated per instantiation."""
        field = StumpJokeForm.base_fields['most_recently_fooled']
        with mock.patch.object(field, 'limit_choices_to') as today_callable_dict:
            StumpJokeForm()
            self.assertEqual(today_callable_dict.call_count, 1)
            StumpJokeForm()
            self.assertEqual(today_callable_dict.call_count, 2)
            StumpJokeForm()
            self.assertEqual(today_callable_dict.call_count, 3)
    @isolate_apps('model_forms')
    def test_limit_choices_to_no_duplicates(self):
        """
        A limit_choices_to that spans multi-valued relations must not yield
        duplicate choices, whether expressed as a Q object or a dict, on a
        ForeignKey or a ManyToManyField.
        """
        # Both characters match the joint (jokes__funny, jokes_today__funny)
        # condition through several rows each — duplicates would appear
        # without proper de-duplication.
        joke1 = StumpJoke.objects.create(
            funny=True,
            most_recently_fooled=self.threepwood,
        )
        joke2 = StumpJoke.objects.create(
            funny=True,
            most_recently_fooled=self.threepwood,
        )
        joke3 = StumpJoke.objects.create(
            funny=True,
            most_recently_fooled=self.marley,
        )
        StumpJoke.objects.create(funny=False, most_recently_fooled=self.marley)
        joke1.has_fooled_today.add(self.marley, self.threepwood)
        joke2.has_fooled_today.add(self.marley)
        joke3.has_fooled_today.add(self.marley, self.threepwood)
        class CharacterDetails(models.Model):
            character1 = models.ForeignKey(
                Character,
                models.CASCADE,
                limit_choices_to=models.Q(
                    jokes__funny=True,
                    jokes_today__funny=True,
                ),
                related_name='details_fk_1',
            )
            character2 = models.ForeignKey(
                Character,
                models.CASCADE,
                limit_choices_to={
                    'jokes__funny': True,
                    'jokes_today__funny': True,
                },
                related_name='details_fk_2',
            )
            character3 = models.ManyToManyField(
                Character,
                limit_choices_to=models.Q(
                    jokes__funny=True,
                    jokes_today__funny=True,
                ),
                related_name='details_m2m_1',
            )
        class CharacterDetailsForm(forms.ModelForm):
            class Meta:
                model = CharacterDetails
                fields = '__all__'
        form = CharacterDetailsForm()
        self.assertCountEqual(
            form.fields['character1'].queryset,
            [self.marley, self.threepwood],
        )
        self.assertCountEqual(
            form.fields['character2'].queryset,
            [self.marley, self.threepwood],
        )
        self.assertCountEqual(
            form.fields['character3'].queryset,
            [self.marley, self.threepwood],
        )
    def test_limit_choices_to_m2m_through(self):
        """limit_choices_to works on an M2M with a through model (Dice)."""
        class DiceForm(forms.ModelForm):
            class Meta:
                model = Dice
                fields = ['numbers']
        # value=0 is excluded by the field's limit_choices_to
        # (presumably value__gte=1 or similar — defined in models.py).
        Number.objects.create(value=0)
        n1 = Number.objects.create(value=1)
        n2 = Number.objects.create(value=2)
        form = DiceForm()
        self.assertCountEqual(form.fields['numbers'].queryset, [n1, n2])
class FormFieldCallbackTests(SimpleTestCase):
    """modelform_factory(): widgets, fields/exclude, and formfield_callback."""
    def test_baseform_with_widgets_in_meta(self):
        """Regression for #13095: Using base forms with widgets defined in Meta should not raise errors."""
        widget = forms.Textarea()
        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"
        Form = modelform_factory(Person, form=BaseForm)
        self.assertIsInstance(Form.base_fields['name'].widget, forms.Textarea)
    def test_factory_with_widget_argument(self):
        """ Regression for #15315: modelform_factory should accept widgets
        argument
        """
        widget = forms.Textarea()
        # Without a widget should not set the widget to textarea
        Form = modelform_factory(Person, fields="__all__")
        self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
        # With a widgets argument, the specified widget is used.
        Form = modelform_factory(Person, fields="__all__", widgets={'name': widget})
        self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
    def test_modelform_factory_without_fields(self):
        """ Regression for #19733 """
        message = (
            "Calling modelform_factory without defining 'fields' or 'exclude' "
            "explicitly is prohibited."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            modelform_factory(Person)
    def test_modelform_factory_with_all_fields(self):
        """ Regression for #19733 """
        form = modelform_factory(Person, fields="__all__")
        self.assertEqual(list(form.base_fields), ["name"])
    def test_custom_callback(self):
        """A custom formfield_callback is used if provided"""
        callback_args = []
        def callback(db_field, **kwargs):
            callback_args.append((db_field, kwargs))
            return db_field.formfield(**kwargs)
        widget = forms.Textarea()
        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"
        modelform_factory(Person, form=BaseForm, formfield_callback=callback)
        id_field, name_field = Person._meta.fields
        # The callback sees every model field, with Meta widgets in kwargs.
        self.assertEqual(callback_args, [(id_field, {}), (name_field, {'widget': widget})])
    def test_bad_callback(self):
        # A bad callback provided by user still gives an error
        with self.assertRaises(TypeError):
            modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable')
    def test_inherit_after_custom_callback(self):
        """Subclassing a callback-built form keeps the callback's widgets."""
        def callback(db_field, **kwargs):
            if isinstance(db_field, models.CharField):
                return forms.CharField(widget=forms.Textarea)
            return db_field.formfield(**kwargs)
        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                fields = '__all__'
        NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback)
        class InheritedForm(NewForm):
            pass
        for name in NewForm.base_fields:
            self.assertEqual(
                type(InheritedForm.base_fields[name].widget),
                type(NewForm.base_fields[name].widget)
            )
class LocalizedModelFormTest(TestCase):
    """Meta.localized_fields controls per-field localization flags."""
    def test_model_form_applies_localize_to_some_fields(self):
        class PartiallyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = ('left', 'right',)
                fields = '__all__'
        f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        # Only the named fields are localized; 'middle' is untouched.
        self.assertTrue(f.fields['left'].localize)
        self.assertFalse(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)
    def test_model_form_applies_localize_to_all_fields(self):
        class FullyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = '__all__'
                fields = '__all__'
        f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        self.assertTrue(f.fields['left'].localize)
        self.assertTrue(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)
    def test_model_form_refuses_arbitrary_string(self):
        # Any plain string other than '__all__' is rejected at class
        # definition time, with a hint about the missing comma.
        msg = (
            "BrokenLocalizedTripleForm.Meta.localized_fields "
            "cannot be a string. Did you mean to type: ('foo',)?"
        )
        with self.assertRaisesMessage(TypeError, msg):
            class BrokenLocalizedTripleForm(forms.ModelForm):
                class Meta:
                    model = Triple
                    localized_fields = "foo"
class CustomMetaclass(ModelFormMetaclass):
    """ModelForm metaclass that discards the collected ``base_fields``."""

    def __new__(cls, name, bases, attrs):
        # Let ModelFormMetaclass build the class normally, then wipe the
        # fields it gathered so every class created here starts empty.
        form_class = super().__new__(cls, name, bases, attrs)
        form_class.base_fields = {}
        return form_class
# ModelForm built via CustomMetaclass, so its base_fields end up empty.
class CustomMetaclassForm(forms.ModelForm, metaclass=CustomMetaclass):
    pass
class CustomMetaclassTestCase(SimpleTestCase):
    """modelform_factory() must respect a custom metaclass on the base form."""
    def test_modelform_factory_metaclass(self):
        new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
        # CustomMetaclass empties base_fields on every class it creates.
        self.assertEqual(new_cls.base_fields, {})
class StrictAssignmentTests(SimpleTestCase):
    """
    Should a model do anything special with __setattr__() or descriptors which
    raise a ValidationError, a model form should catch the error (#24706).
    """
    def test_setattr_raises_validation_error_field_specific(self):
        """
        A model ValidationError using the dict form should put the error
        message into the correct key of form.errors.
        """
        form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title'])
        form = form_class(data={'title': 'testing setattr'}, files=None)
        # This line turns on the ValidationError; it avoids the model erroring
        # when its own __init__() is called when creating form.instance.
        form.instance._should_error = True
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {
            'title': ['Cannot set attribute', 'This field cannot be blank.']
        })
    def test_setattr_raises_validation_error_non_field(self):
        """
        A model ValidationError not using the dict form should put the error
        message into __all__ (i.e. non-field errors) on the form.
        """
        form_class = modelform_factory(model=StrictAssignmentAll, fields=['title'])
        form = form_class(data={'title': 'testing setattr'}, files=None)
        # This line turns on the ValidationError; it avoids the model erroring
        # when its own __init__() is called when creating form.instance.
        form.instance._should_error = True
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {
            '__all__': ['Cannot set attribute'],
            'title': ['This field cannot be blank.']
        })
class ModelToDictTests(TestCase):
    """Behavior of forms.models.model_to_dict()."""
    def test_many_to_many(self):
        """Data for a ManyToManyField is a list rather than a lazy QuerySet."""
        blue = Colour.objects.create(name='blue')
        red = Colour.objects.create(name='red')
        item = ColourfulItem.objects.create()
        item.colours.set([blue])
        data = model_to_dict(item)['colours']
        self.assertEqual(data, [blue])
        item.colours.set([red])
        # If data were a QuerySet, it would be reevaluated here and give "red"
        # instead of the original value.
        self.assertEqual(data, [blue])
| 39.955656
| 127
| 0.610276
|
import datetime
import os
from decimal import Decimal
from unittest import mock, skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict,
modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from .models import (
Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book,
Category, Character, Colour, ColourfulItem, CustomErrorMessage, CustomFF,
CustomFieldForExclusionModel, DateTimePost, DerivedBook, DerivedPost, Dice,
Document, ExplicitPK, FilePathModel, FlexibleDatePost, Homepage,
ImprovedArticle, ImprovedArticleWithParentLink, Inventory,
NullableUniqueCharFieldModel, Number, Person, Photo, Post, Price, Product,
Publication, PublicationDefaults, StrictAssignmentAll,
StrictAssignmentFieldSpecific, Student, StumpJoke, TextFile, Triple,
Writer, WriterProfile, test_images,
)
# Image forms are only defined when image support is available (test_images
# presumably reflects Pillow availability — see .models; confirm there).
if test_images:
    from .models import ImageFile, NoExtensionImageFile, OptionalImageFile
    class ImageFileForm(forms.ModelForm):
        class Meta:
            model = ImageFile
            fields = '__all__'
    class OptionalImageFileForm(forms.ModelForm):
        class Meta:
            model = OptionalImageFile
            fields = '__all__'
    class NoExtensionImageFileForm(forms.ModelForm):
        class Meta:
            model = NoExtensionImageFile
            fields = '__all__'
class ProductForm(forms.ModelForm):
    """All-fields form over Product."""
    class Meta:
        model = Product
        fields = '__all__'
class PriceForm(forms.ModelForm):
    """All-fields form over Price."""
    class Meta:
        model = Price
        fields = '__all__'
class BookForm(forms.ModelForm):
    """All-fields form over Book."""
    class Meta:
        model = Book
        fields = '__all__'
class DerivedBookForm(forms.ModelForm):
    """All-fields form over DerivedBook (a Book subclass model)."""
    class Meta:
        model = DerivedBook
        fields = '__all__'
class ExplicitPKForm(forms.ModelForm):
    """Form over ExplicitPK exposing the explicit primary key field."""
    class Meta:
        model = ExplicitPK
        fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
    """All-fields form over Post."""
    class Meta:
        model = Post
        fields = '__all__'
class DerivedPostForm(forms.ModelForm):
    """All-fields form over DerivedPost (a Post subclass model)."""
    class Meta:
        model = DerivedPost
        fields = '__all__'
class CustomWriterForm(forms.ModelForm):
    """Writer form where 'name' is optional on the form (required=False)."""
    name = forms.CharField(required=False)
    class Meta:
        model = Writer
        fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
    """All-fields form over Category; base class for several tests."""
    class Meta:
        model = Category
        fields = '__all__'
class ArticleForm(forms.ModelForm):
    """All-fields form over Article."""
    class Meta:
        model = Article
        fields = '__all__'
class RoykoForm(forms.ModelForm):
    """All-fields form over Writer."""
    class Meta:
        model = Writer
        fields = '__all__'
class ArticleStatusForm(forms.ModelForm):
    """All-fields form over ArticleStatus."""
    class Meta:
        model = ArticleStatus
        fields = '__all__'
class InventoryForm(forms.ModelForm):
    """All-fields form over Inventory."""
    class Meta:
        model = Inventory
        fields = '__all__'
class SelectInventoryForm(forms.Form):
    """Plain Form selecting Inventory items keyed by barcode, not pk."""
    items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
    """Form naming a model field ('markup') alongside 'name'."""
    class Meta:
        model = CustomFieldForExclusionModel
        fields = ['name', 'markup']
class TextFileForm(forms.ModelForm):
    """All-fields form over TextFile."""
    class Meta:
        model = TextFile
        fields = '__all__'
class BigIntForm(forms.ModelForm):
    """All-fields form over BigInt."""
    class Meta:
        model = BigInt
        fields = '__all__'
class ModelFormWithMedia(forms.ModelForm):
    """TextFile form declaring form media (JS and CSS assets)."""
    class Media:
        js = ('/some/form/javascript',)
        css = {
            'all': ('/some/form/css',)
        }
    class Meta:
        model = TextFile
        fields = '__all__'
class CustomErrorMessageForm(forms.ModelForm):
    """CustomErrorMessage form overriding name1's 'invalid' error message."""
    name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})
    class Meta:
        fields = '__all__'
        model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
    """
    Core ModelForm construction behavior: Meta.fields / Meta.exclude
    handling, declared-field overrides, and how model field defaults
    interact with various widgets on unbound/empty submissions.
    """
    def test_base_form(self):
        self.assertEqual(list(BaseCategoryForm.base_fields), ['name', 'slug', 'url'])
    def test_no_model_class(self):
        class NoModelModelForm(forms.ModelForm):
            pass
        with self.assertRaisesMessage(ValueError, 'ModelForm has no model class specified.'):
            NoModelModelForm()
    def test_empty_fields_to_fields_for_model(self):
        """An argument of fields=() to fields_for_model should return an empty dictionary."""
        field_dict = fields_for_model(Person, fields=())
        self.assertEqual(len(field_dict), 0)
    def test_empty_fields_on_modelform(self):
        """No fields on a ModelForm should actually result in no fields."""
        class EmptyPersonForm(forms.ModelForm):
            class Meta:
                model = Person
                fields = ()
        form = EmptyPersonForm()
        self.assertEqual(len(form.fields), 0)
    def test_empty_fields_to_construct_instance(self):
        """No fields should be set on a model instance if construct_instance receives fields=()."""
        form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
        self.assertTrue(form.is_valid())
        instance = construct_instance(form, Person(), fields=())
        self.assertEqual(instance.name, '')
    def test_blank_with_null_foreign_key_field(self):
        """
        A ModelForm with a model having ForeignKey(blank=False, null=True)
        and the form field set to required=False should allow the field to be
        unset.
        """
        class FormForTestingIsValid(forms.ModelForm):
            class Meta:
                model = Student
                fields = '__all__'
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.fields['character'].required = False
        char = Character.objects.create(username='user', last_action=datetime.datetime.today())
        data = {'study': 'Engineering'}
        data2 = {'study': 'Engineering', 'character': char.pk}
        f1 = FormForTestingIsValid(data)
        self.assertTrue(f1.is_valid())
        f2 = FormForTestingIsValid(data2)
        self.assertTrue(f2.is_valid())
        obj = f2.save()
        self.assertEqual(obj.character, char)
    def test_blank_false_with_null_true_foreign_key_field(self):
        """
        A ModelForm with a model having ForeignKey(blank=False, null=True)
        and the form field set to required=False should allow the field to be
        unset.
        """
        class AwardForm(forms.ModelForm):
            class Meta:
                model = Award
                fields = '__all__'
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.fields['character'].required = False
        character = Character.objects.create(username='user', last_action=datetime.datetime.today())
        award = Award.objects.create(name='Best sprinter', character=character)
        data = {'name': 'Best tester', 'character': ''}  # simulate unchecked checkbox
        form = AwardForm(data=data, instance=award)
        self.assertTrue(form.is_valid())
        award = form.save()
        self.assertIsNone(award.character)
    def test_blank_foreign_key_with_radio(self):
        # A nullable FK rendered as radios includes the empty choice.
        class BookForm(forms.ModelForm):
            class Meta:
                model = Book
                fields = ['author']
                widgets = {'author': forms.RadioSelect()}
        writer = Writer.objects.create(name='Joe Doe')
        form = BookForm()
        self.assertEqual(list(form.fields['author'].choices), [
            ('', '---------'),
            (writer.pk, 'Joe Doe'),
        ])
    def test_non_blank_foreign_key_with_radio(self):
        # A required FK rendered as radios omits the empty choice.
        class AwardForm(forms.ModelForm):
            class Meta:
                model = Award
                fields = ['character']
                widgets = {'character': forms.RadioSelect()}
        character = Character.objects.create(
            username='user',
            last_action=datetime.datetime.today(),
        )
        form = AwardForm()
        self.assertEqual(
            list(form.fields['character'].choices),
            [(character.pk, 'user')],
        )
    def test_save_blank_false_with_required_false(self):
        """
        A ModelForm with a model with a field set to blank=False and the form
        field set to required=False should allow the field to be unset.
        """
        obj = Writer.objects.create(name='test')
        form = CustomWriterForm(data={'name': ''}, instance=obj)
        self.assertTrue(form.is_valid())
        obj = form.save()
        self.assertEqual(obj.name, '')
    def test_save_blank_null_unique_charfield_saves_null(self):
        form_class = modelform_factory(model=NullableUniqueCharFieldModel, fields='__all__')
        empty_value = '' if connection.features.interprets_empty_strings_as_nulls else None
        data = {
            'codename': '',
            'email': '',
            'slug': '',
            'url': '',
        }
        form = form_class(data=data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(form.instance.codename, empty_value)
        self.assertEqual(form.instance.email, empty_value)
        self.assertEqual(form.instance.slug, empty_value)
        self.assertEqual(form.instance.url, empty_value)
        # Save a second form to verify there isn't a unique constraint
        # violation (blanks were stored as NULL, and NULLs don't collide).
        form = form_class(data=data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(form.instance.codename, empty_value)
        self.assertEqual(form.instance.email, empty_value)
        self.assertEqual(form.instance.slug, empty_value)
        self.assertEqual(form.instance.url, empty_value)
    def test_missing_fields_attribute(self):
        message = (
            "Creating a ModelForm without either the 'fields' attribute "
            "or the 'exclude' attribute is prohibited; form "
            "MissingFieldsForm needs updating."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            class MissingFieldsForm(forms.ModelForm):
                class Meta:
                    model = Category
    def test_extra_fields(self):
        class ExtraFields(BaseCategoryForm):
            some_extra_field = forms.BooleanField()
        self.assertEqual(list(ExtraFields.base_fields),
                         ['name', 'slug', 'url', 'some_extra_field'])
    def test_extra_field_model_form(self):
        # Listing an unknown name in Meta.fields is an error even when a
        # declared form field of another name exists.
        with self.assertRaisesMessage(FieldError, 'no-field'):
            class ExtraPersonForm(forms.ModelForm):
                age = forms.IntegerField()
                class Meta:
                    model = Person
                    fields = ('name', 'no-field')
    def test_extra_declared_field_model_form(self):
        # A declared (non-model) field may be listed in Meta.fields.
        class ExtraPersonForm(forms.ModelForm):
            age = forms.IntegerField()
            class Meta:
                model = Person
                fields = ('name', 'age')
    def test_extra_field_modelform_factory(self):
        with self.assertRaisesMessage(FieldError, 'Unknown field(s) (no-field) specified for Person'):
            modelform_factory(Person, fields=['no-field', 'name'])
    def test_replace_field(self):
        class ReplaceField(forms.ModelForm):
            url = forms.BooleanField()
            class Meta:
                model = Category
                fields = '__all__'
        self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
    def test_replace_field_variant_2(self):
        # Should have the same result as before,
        # but 'fields' attribute specified differently
        class ReplaceField(forms.ModelForm):
            url = forms.BooleanField()
            class Meta:
                model = Category
                fields = ['url']
        self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
    def test_replace_field_variant_3(self):
        # Should have the same result as before,
        # but 'fields' attribute specified differently
        class ReplaceField(forms.ModelForm):
            url = forms.BooleanField()
            class Meta:
                model = Category
                fields = [] # url will still appear, since it is explicit above
        self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
    def test_override_field(self):
        class WriterForm(forms.ModelForm):
            book = forms.CharField(required=False)
            class Meta:
                model = Writer
                fields = '__all__'
        wf = WriterForm({'name': 'Richard Lockridge'})
        self.assertTrue(wf.is_valid())
    def test_limit_nonexistent_field(self):
        expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
        with self.assertRaisesMessage(FieldError, expected_msg):
            class InvalidCategoryForm(forms.ModelForm):
                class Meta:
                    model = Category
                    fields = ['nonexistent']
    def test_limit_fields_with_string(self):
        expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
        with self.assertRaisesMessage(TypeError, expected_msg):
            class CategoryForm(forms.ModelForm):
                class Meta:
                    model = Category
                    fields = ('url') # note the missing comma
    def test_exclude_fields(self):
        class ExcludeFields(forms.ModelForm):
            class Meta:
                model = Category
                exclude = ['url']
        self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug'])
    def test_exclude_nonexistent_field(self):
        # Unlike Meta.fields, an unknown name in exclude is silently ignored.
        class ExcludeFields(forms.ModelForm):
            class Meta:
                model = Category
                exclude = ['nonexistent']
        self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug', 'url'])
    def test_exclude_fields_with_string(self):
        expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
        with self.assertRaisesMessage(TypeError, expected_msg):
            class CategoryForm(forms.ModelForm):
                class Meta:
                    model = Category
                    exclude = ('url') # note the missing comma
    def test_exclude_and_validation(self):
        # This Price instance generated by this form is not valid because the quantity
        # field is required, but the form is valid because the field is excluded from
        # the form. This is for backwards compatibility.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                exclude = ('quantity',)
        form = PriceFormWithoutQuantity({'price': '6.00'})
        self.assertTrue(form.is_valid())
        price = form.save(commit=False)
        msg = "{'quantity': ['This field cannot be null.']}"
        with self.assertRaisesMessage(ValidationError, msg):
            price.full_clean()
        # The form should not validate fields that it doesn't contain even if they are
        # specified using 'fields', not just 'exclude'.
        class PriceFormWithoutQuantity(forms.ModelForm):
            class Meta:
                model = Price
                fields = ('price',)
        form = PriceFormWithoutQuantity({'price': '6.00'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.instance.price, Decimal('6.00'))
        self.assertIsNone(form.instance.quantity)
        self.assertIsNone(form.instance.pk)
    def test_confused_form(self):
        # When Meta.fields and Meta.exclude both name a field, exclude wins.
        class ConfusedForm(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['name', 'url']
                exclude = ['url']
        self.assertEqual(list(ConfusedForm.base_fields),
                         ['name'])
    def test_mixmodel_form(self):
        # Subclassing with a different Meta.model replaces the field set.
        class MixModelForm(BaseCategoryForm):
            class Meta:
                model = Article
                fields = '__all__'
        self.assertEqual(
            list(MixModelForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_article_form(self):
        self.assertEqual(
            list(ArticleForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_bad_form(self):
        # With multiple ModelForm bases, the first base's fields are used.
        class BadForm(ArticleForm, BaseCategoryForm):
            pass
        self.assertEqual(
            list(BadForm.base_fields),
            ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
        )
    def test_invalid_meta_model(self):
        class InvalidModelForm(forms.ModelForm):
            class Meta:
                pass
        msg = 'ModelForm has no model class specified.'
        with self.assertRaisesMessage(ValueError, msg):
            InvalidModelForm()
        # Even if you provide a model instance
        with self.assertRaisesMessage(ValueError, msg):
            InvalidModelForm(instance=Category)
    def test_subcategory_form(self):
        class SubCategoryForm(BaseCategoryForm):
            pass
        self.assertEqual(list(SubCategoryForm.base_fields), ['name', 'slug', 'url'])
    def test_subclassmeta_form(self):
        class SomeCategoryForm(forms.ModelForm):
            checkbox = forms.BooleanField()
            class Meta:
                model = Category
                fields = '__all__'
        class SubclassMeta(SomeCategoryForm):
            class Meta(SomeCategoryForm.Meta):
                exclude = ['url']
        self.assertHTMLEqual(
            str(SubclassMeta()),
            """<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th>
<td><input type="checkbox" name="checkbox" id="id_checkbox" required></td></tr>"""
        )
    def test_orderfields_form(self):
        # Meta.fields order determines the rendered field order.
        class OrderFields(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['url', 'name']
        self.assertEqual(list(OrderFields.base_fields),
                         ['url', 'name'])
        self.assertHTMLEqual(
            str(OrderFields()),
            """<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>
<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>"""
        )
    def test_orderfields2_form(self):
        class OrderFields2(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['slug', 'url', 'name']
                exclude = ['url']
        self.assertEqual(list(OrderFields2.base_fields), ['slug', 'name'])
    def test_default_populated_on_optional_field(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(max_length=255, required=False)
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        # Empty data uses the model field default.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, 'di')
        self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
        # Blank data doesn't use the model field default.
        mf2 = PubForm({'mode': ''})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.mode, '')
    def test_default_not_populated_on_non_empty_value_in_cleaned_data(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(max_length=255, required=False)
            mocked_mode = None
            def clean(self):
                self.cleaned_data['mode'] = self.mocked_mode
                return self.cleaned_data
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        pub_form = PubForm({})
        pub_form.mocked_mode = 'de'
        pub = pub_form.save(commit=False)
        self.assertEqual(pub.mode, 'de')
        # Default should be populated on an empty value in cleaned_data.
        default_mode = 'di'
        for empty_value in pub_form.fields['mode'].empty_values:
            with self.subTest(empty_value=empty_value):
                pub_form = PubForm({})
                pub_form.mocked_mode = empty_value
                pub = pub_form.save(commit=False)
                self.assertEqual(pub.mode, default_mode)
    def test_default_not_populated_on_optional_checkbox_input(self):
        class PubForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = ('active',)
        # Empty data doesn't use the model default because an unchecked
        # CheckboxInput doesn't have a value in HTML form submission.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertIs(m1.active, False)
        self.assertIsInstance(mf1.fields['active'].widget, forms.CheckboxInput)
        self.assertIs(m1._meta.get_field('active').get_default(), True)
    def test_default_not_populated_on_checkboxselectmultiple(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple)
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        # CheckboxSelectMultiple doesn't have a value in HTML form submission.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, '')
        self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
    def test_default_not_populated_on_selectmultiple(self):
        class PubForm(forms.ModelForm):
            mode = forms.CharField(required=False, widget=forms.SelectMultiple)
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        # SelectMultiple doesn't have a value in HTML form submission.
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, '')
        self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
    def test_prefixed_form_with_default_field(self):
        class PubForm(forms.ModelForm):
            prefix = 'form-prefix'
            class Meta:
                model = PublicationDefaults
                fields = ('mode',)
        mode = 'de'
        self.assertNotEqual(mode, PublicationDefaults._meta.get_field('mode').get_default())
        mf1 = PubForm({'form-prefix-mode': mode})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.mode, mode)
    def test_renderer_kwarg(self):
        custom = object()
        self.assertIs(ProductForm(renderer=custom).renderer, custom)
    def test_default_splitdatetime_field(self):
        class PubForm(forms.ModelForm):
            datetime_published = forms.SplitDateTimeField(required=False)
            class Meta:
                model = PublicationDefaults
                fields = ('datetime_published',)
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1))
        mf2 = PubForm({'datetime_published_0': '2010-01-01', 'datetime_published_1': '0:00:00'})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1))
    def test_default_filefield(self):
        class PubForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = ('file',)
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.file.name, 'default.txt')
        mf2 = PubForm({}, {'file': SimpleUploadedFile('name', b'foo')})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.file.name, 'name')
    def test_default_selectdatewidget(self):
        class PubForm(forms.ModelForm):
            date_published = forms.DateField(required=False, widget=forms.SelectDateWidget)
            class Meta:
                model = PublicationDefaults
                fields = ('date_published',)
        mf1 = PubForm({})
        self.assertEqual(mf1.errors, {})
        m1 = mf1.save(commit=False)
        self.assertEqual(m1.date_published, datetime.date.today())
        mf2 = PubForm({'date_published_year': '2010', 'date_published_month': '1', 'date_published_day': '1'})
        self.assertEqual(mf2.errors, {})
        m2 = mf2.save(commit=False)
        self.assertEqual(m2.date_published, datetime.date(2010, 1, 1))
class FieldOverridesByFormMetaForm(forms.ModelForm):
    # ModelForm exercising every per-field override hook available on Meta:
    # widgets, labels, help_texts, error_messages and field_classes.
    class Meta:
        model = Category
        fields = ['name', 'url', 'slug']
        # Replace the default widgets: Textarea for name, TextInput with a
        # custom CSS class for url.
        widgets = {
            'name': forms.Textarea,
            'url': forms.TextInput(attrs={'class': 'url'})
        }
        labels = {
            'name': 'Title',
        }
        help_texts = {
            'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
        }
        # Custom message for the SlugField's 'invalid' validation error.
        error_messages = {
            'slug': {
                'invalid': (
                    "Didn't you read the help text? "
                    "We said letters, numbers, underscores and hyphens only!"
                )
            }
        }
        # Swap the model-derived CharField for a URLField on the form.
        field_classes = {
            'url': forms.URLField,
        }
class TestFieldOverridesByFormMeta(SimpleTestCase):
    """Verify that the Meta-level override hooks on
    FieldOverridesByFormMetaForm (widgets, labels, help_texts,
    error_messages, field_classes) actually take effect."""

    def test_widget_overrides(self):
        form = FieldOverridesByFormMetaForm()
        self.assertHTMLEqual(
            str(form['name']),
            '<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" required></textarea>',
        )
        self.assertHTMLEqual(
            str(form['url']),
            '<input id="id_url" type="text" class="url" name="url" maxlength="40" required>',
        )
        self.assertHTMLEqual(
            str(form['slug']),
            '<input id="id_slug" type="text" name="slug" maxlength="20" required>',
        )

    def test_label_overrides(self):
        form = FieldOverridesByFormMetaForm()
        self.assertHTMLEqual(
            str(form['name'].label_tag()),
            '<label for="id_name">Title:</label>',
        )
        self.assertHTMLEqual(
            str(form['url'].label_tag()),
            '<label for="id_url">The URL:</label>',
        )
        self.assertHTMLEqual(
            str(form['slug'].label_tag()),
            '<label for="id_slug">Slug:</label>',
        )

    def test_help_text_overrides(self):
        form = FieldOverridesByFormMetaForm()
        self.assertEqual(
            form['slug'].help_text,
            'Watch out! Letters, numbers, underscores and hyphens only.',
        )

    def test_error_messages_overrides(self):
        # The slug value must contain characters a SlugField rejects so the
        # custom 'invalid' error message is triggered. The previous source
        # had a truncated string literal here ('slug': '!% — a syntax
        # error); restored to a complete, deliberately-invalid slug.
        form = FieldOverridesByFormMetaForm(data={
            'name': 'Category',
            'url': 'http://www.example.com/category/',
            'slug': '!%#*q',
        })
        form.full_clean()
        error = [
            "Didn't you read the help text? "
            "We said letters, numbers, underscores and hyphens only!",
        ]
        self.assertEqual(form.errors, {'slug': error})

    def test_field_type_overrides(self):
        form = FieldOverridesByFormMetaForm()
        # Model field stays a CharField; only the *form* field is a URLField.
        self.assertIs(Category._meta.get_field('url').__class__, models.CharField)
        self.assertIsInstance(form.fields['url'], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
    # `url` is declared on the form but deliberately left out of Meta.fields,
    # so model validation for `url` must be skipped.
    url = forms.CharField(required=False)
    class Meta:
        fields = ('name', 'slug')
        model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
    # Same idea as IncompleteCategoryFormWithFields, but the model field is
    # removed via Meta.exclude instead of being omitted from Meta.fields.
    url = forms.CharField(required=False)
    class Meta:
        exclude = ['url']
        model = Category
class ValidationTest(SimpleTestCase):
    """Validation of forms whose declared fields replace or omit model fields."""

    def test_validates_with_replaced_field_not_specified(self):
        # `url` is overridden on the form but absent from Meta.fields, so the
        # form validates without a url value.
        data = {'name': 'some name', 'slug': 'some-slug'}
        self.assertIs(IncompleteCategoryFormWithFields(data=data).is_valid(), True)

    def test_validates_with_replaced_field_excluded(self):
        # Same as above, but the model field is removed via Meta.exclude.
        data = {'name': 'some name', 'slug': 'some-slug'}
        self.assertIs(IncompleteCategoryFormWithExclude(data=data).is_valid(), True)

    def test_notrequired_overrides_notblank(self):
        # A form-level required=False wins over the model's blank=False.
        self.assertIs(CustomWriterForm({}).is_valid(), True)
class UniqueTest(TestCase):
    """ModelForm validation of unique, unique_together and unique_for_date
    constraints, including inherited/abstract models and message overrides."""
    @classmethod
    def setUpTestData(cls):
        # Shared author used by the Book-related tests below.
        cls.writer = Writer.objects.create(name='Mike Royko')
    def test_simple_unique(self):
        """A unique field rejects duplicates unless editing the same instance."""
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertTrue(form.is_valid())
        obj = form.save()
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
        # Re-binding the same instance must not trip the uniqueness check.
        form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
        self.assertTrue(form.is_valid())
    def test_unique_together(self):
        """unique_together violations surface as non-field (__all__) errors."""
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertTrue(form.is_valid())
        form.save()
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
    def test_unique_together_exclusion(self):
        """unique_together checks involving fields that are not on the form
        (and fall back to defaults) are skipped."""
        class BookForm(forms.ModelForm):
            class Meta:
                model = DerivedBook
                fields = ('isbn', 'suffix1')
        # Preconditions: the constraint spans suffix1/suffix2, both default 0.
        self.assertEqual(DerivedBook._meta.unique_together, (('suffix1', 'suffix2'),))
        for name in ('suffix1', 'suffix2'):
            with self.subTest(name=name):
                field = DerivedBook._meta.get_field(name)
                self.assertEqual(field.default, 0)
        # The form fails validation with "Derived book with this Suffix1 and
        # Suffix2 already exists." if the unique_together validation isn't
        # skipped for the excluded suffix2 field.
        DerivedBook.objects.create(isbn='12345')
        form = BookForm({'isbn': '56789', 'suffix1': '0'})
        self.assertTrue(form.is_valid(), form.errors)
    def test_multiple_field_unique_together(self):
        """A three-field unique_together only fires on an exact triple match."""
        class TripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'
        Triple.objects.create(left=1, middle=2, right=3)
        form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
        self.assertFalse(form.is_valid())
        form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
        self.assertTrue(form.is_valid())
    @skipUnlessDBFeature('supports_nullable_unique_constraints')
    def test_unique_null(self):
        """NULL values don't count as duplicates for unique constraints."""
        title = 'I May Be Wrong But I Doubt It'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
        # With author omitted (NULL), the same title can be saved twice.
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
    def test_inherited_unique(self):
        """A unique field on a concrete parent is enforced on the child form."""
        title = 'Boss'
        Book.objects.create(title=title, author=self.writer, special_id=1)
        form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
    def test_inherited_unique_together(self):
        """A unique_together on a concrete parent is enforced on the child form."""
        title = 'Boss'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
    def test_abstract_inherited_unique(self):
        """A unique field inherited from an abstract base is enforced."""
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({
            'title': 'Other', 'author': self.writer.pk, 'isbn': isbn,
            'suffix1': '1', 'suffix2': '2',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
    def test_abstract_inherited_unique_together(self):
        """A unique_together inherited from an abstract base is enforced."""
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({
            'title': 'Other',
            'author': self.writer.pk,
            'isbn': '9876',
            'suffix1': '0',
            'suffix2': '0'
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(
            form.errors['__all__'],
            ['Derived book with this Suffix1 and Suffix2 already exists.'],
        )
    def test_explicitpk_unspecified(self):
        """An explicit (non-auto) primary key field is required."""
        form = ExplicitPKForm({'key': '', 'desc': ''})
        self.assertFalse(form.is_valid())
    def test_explicitpk_unique(self):
        """Duplicate explicit primary keys are rejected; the exact set of
        errors depends on how the backend treats empty strings."""
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertTrue(form.is_valid())
        form.save()
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertFalse(form.is_valid())
        if connection.features.interprets_empty_strings_as_nulls:
            # Oracle-style backends: '' is NULL, so only the pk clashes.
            self.assertEqual(len(form.errors), 1)
            self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
        else:
            self.assertEqual(len(form.errors), 3)
            self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
            self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
            self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
    def test_unique_for_date(self):
        """unique_for_date/_year/_month are enforced per-field and skipped
        when editing the conflicting instance itself."""
        p = Post.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        # Editing the clashing instance itself is allowed.
        data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
        form = PostForm(data, instance=p)
        self.assertTrue(form.is_valid())
        # The date field itself is required for the check to run.
        form = PostForm({'title': "Django 1.0 is released"})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['posted'], ['This field is required.'])
    def test_unique_for_date_in_exclude(self):
        """unique_for_date checks are skipped when the date field is excluded
        (here via a DateTimeField model variant)."""
        class DateTimePostForm(forms.ModelForm):
            class Meta:
                model = DateTimePost
                fields = '__all__'
        DateTimePost.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.datetime(2008, 9, 3, 10, 10, 1),
        )
        form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertTrue(form.is_valid())
        form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertTrue(form.is_valid())
    def test_inherited_unique_for_date(self):
        """unique_for_date constraints on a parent apply to child model forms."""
        p = Post.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
        form = DerivedPostForm(data, instance=p)
        self.assertTrue(form.is_valid())
    def test_unique_for_date_with_nullable_date(self):
        """unique_for_date checks are skipped when the date value is NULL."""
        class FlexDatePostForm(forms.ModelForm):
            class Meta:
                model = FlexibleDatePost
                fields = '__all__'
        p = FlexibleDatePost.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = FlexDatePostForm({'title': "Django 1.0 is released"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'slug': "Django 1.0"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally"})
        self.assertTrue(form.is_valid())
        data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0"}
        form = FlexDatePostForm(data, instance=p)
        self.assertTrue(form.is_valid())
    def test_override_unique_message(self):
        """Meta.error_messages can override the 'unique' message with
        model_name/field_label interpolation."""
        class CustomProductForm(ProductForm):
            class Meta(ProductForm.Meta):
                error_messages = {
                    'slug': {
                        'unique': "%(model_name)s's %(field_label)s not unique.",
                    }
                }
        Product.objects.create(slug='teddy-bear-blue')
        form = CustomProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ["Product's Slug not unique."])
    def test_override_unique_together_message(self):
        """Meta.error_messages under NON_FIELD_ERRORS can override the
        'unique_together' message."""
        class CustomPriceForm(PriceForm):
            class Meta(PriceForm.Meta):
                error_messages = {
                    NON_FIELD_ERRORS: {
                        'unique_together': "%(model_name)s's %(field_labels)s not unique.",
                    }
                }
        Price.objects.create(price=6.00, quantity=1)
        form = CustomPriceForm({'price': '6.00', 'quantity': '1'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."])
    def test_override_unique_for_date_message(self):
        """Meta.error_messages can override the 'unique_for_date' message."""
        class CustomPostForm(PostForm):
            class Meta(PostForm.Meta):
                error_messages = {
                    'title': {
                        'unique_for_date': (
                            "%(model_name)s's %(field_label)s not unique "
                            "for %(date_field_label)s date."
                        ),
                    }
                }
        Post.objects.create(
            title="Django 1.0 is released", slug="Django 1.0",
            subtitle="Finally", posted=datetime.date(2008, 9, 3),
        )
        form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."])
class ModelFormBasicTests(TestCase):
    """Core ModelForm behavior: rendering, initial values, save()/commit=False,
    m2m handling, field subsets and foreign-key queryset validation."""
    def create_basic_data(self):
        # Helper (not setUpTestData): only tests that need these fixtures
        # call it, keeping the others free of database rows.
        self.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment')
        self.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test')
        self.c3 = Category.objects.create(name='Third test', slug='third-test', url='third')
        self.w_royko = Writer.objects.create(name='Mike Royko')
        self.w_woodward = Writer.objects.create(name='Bob Woodward')
    def test_base_form(self):
        """An unbound ModelForm renders as table, ul and per-field widgets."""
        self.assertEqual(Category.objects.count(), 0)
        f = BaseCategoryForm()
        self.assertHTMLEqual(
            str(f),
            """<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>"""
        )
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" required></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" required></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" required></li>"""
        )
        self.assertHTMLEqual(
            str(f["name"]),
            """<input id="id_name" type="text" name="name" maxlength="20" required>""")
    def test_auto_id(self):
        """auto_id=False suppresses id attributes and <label for=...> tags."""
        f = BaseCategoryForm(auto_id=False)
        self.assertHTMLEqual(
            str(f.as_ul()),
            """<li>Name: <input type="text" name="name" maxlength="20" required></li>
<li>Slug: <input type="text" name="slug" maxlength="20" required></li>
<li>The URL: <input type="text" name="url" maxlength="40" required></li>"""
        )
    def test_initial_values(self):
        self.create_basic_data()
        # Initial values can be provided for model forms
        f = ArticleForm(
            auto_id=False,
            initial={
                'headline': 'Your headline here',
                'categories': [str(self.c1.id), str(self.c2.id)]
            })
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s" selected>It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # When the ModelForm is passed an instance, that instance's current
        # values are used as the field values when rendering.
        f = RoykoForm(auto_id=False, instance=self.w_royko)
        self.assertHTMLEqual(
            str(f),
            '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" required><br>
<span class="helptext">Use both first and last names.</span></td></tr>'''
        )
        art = Article.objects.create(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=self.w_royko,
            article='Hello.'
        )
        art_id_1 = art.id
        f = ArticleForm(auto_id=False, instance=art)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        # Binding data plus an instance updates that same row on save().
        f = ArticleForm({
            'headline': 'Test headline',
            'slug': 'test-headline',
            'pub_date': '1984-02-06',
            'writer': str(self.w_royko.pk),
            'article': 'Hello.'
        }, instance=art)
        self.assertEqual(f.errors, {})
        self.assertTrue(f.is_valid())
        test_art = f.save()
        self.assertEqual(test_art.id, art_id_1)
        test_art = Article.objects.get(id=art_id_1)
        self.assertEqual(test_art.headline, 'Test headline')
    def test_m2m_initial_callable(self):
        """A callable 'initial' on an m2m field is evaluated at render time."""
        self.maxDiff = 1200
        self.create_basic_data()
        def formfield_for_dbfield(db_field, **kwargs):
            if db_field.name == 'categories':
                kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2]
            return db_field.formfield(**kwargs)
        ModelForm = modelform_factory(
            Article,
            fields=['headline', 'categories'],
            formfield_callback=formfield_for_dbfield,
        )
        form = ModelForm()
        self.assertHTMLEqual(
            form.as_ul(),
            """<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" required></li>
<li><label for="id_categories">Categories:</label>
<select multiple name="categories" id="id_categories">
<option value="%d" selected>Entertainment</option>
<option value="%d" selected>It's a test</option>
<option value="%d">Third test</option>
</select></li>"""
            % (self.c1.pk, self.c2.pk, self.c3.pk))
    def test_basic_creation(self):
        """save() on a valid bound form creates the row and returns it."""
        self.assertEqual(Category.objects.count(), 0)
        f = BaseCategoryForm({
            'name': 'Entertainment',
            'slug': 'entertainment',
            'url': 'entertainment',
        })
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['name'], 'Entertainment')
        self.assertEqual(f.cleaned_data['slug'], 'entertainment')
        self.assertEqual(f.cleaned_data['url'], 'entertainment')
        c1 = f.save()
        self.assertEqual(Category.objects.count(), 1)
        self.assertEqual(c1, Category.objects.all()[0])
        self.assertEqual(c1.name, "Entertainment")
    def test_save_commit_false(self):
        """save(commit=False) builds the instance without touching the DB."""
        f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
        self.assertTrue(f.is_valid())
        c1 = f.save(commit=False)
        self.assertEqual(c1.name, "Third test")
        self.assertEqual(Category.objects.count(), 0)
        c1.save()
        self.assertEqual(Category.objects.count(), 1)
    def test_save_with_data_errors(self):
        """save() on an invalid form raises ValueError with a clear message."""
        f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
        self.assertEqual(f.errors['name'], ['This field is required.'])
        self.assertEqual(
            f.errors['slug'],
            ['Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.']
        )
        self.assertEqual(f.cleaned_data, {'url': 'foo'})
        msg = "The Category could not be created because the data didn't validate."
        with self.assertRaisesMessage(ValueError, msg):
            f.save()
        f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
        with self.assertRaisesMessage(ValueError, msg):
            f.save()
    def test_multi_fields(self):
        """Rendering of a form covering FK, m2m and choice fields, both
        unbound and bound to an instance with an m2m relation."""
        self.create_basic_data()
        self.maxDiff = None
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(
            str(f),
            '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" required></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>
<tr><th>Writer:</th><td><select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article" required></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        new_art = Article.objects.create(
            article="Hello.", headline="New headline", slug="new-headline",
            pub_date=datetime.date(1988, 1, 4), writer=self.w_royko)
        new_art.categories.add(Category.objects.get(name='Entertainment'))
        self.assertSequenceEqual(new_art.categories.all(), [self.c1])
        f = ArticleForm(auto_id=False, instance=new_art)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
    def test_subset_fields(self):
        # A form restricted to a subset of model fields renders only those
        # fields, and an object created from such a form can't get a value
        # from the form for any field left out of it.
        class PartialArticleForm(forms.ModelForm):
            class Meta:
                model = Article
                fields = ('headline', 'pub_date')
        f = PartialArticleForm(auto_id=False)
        self.assertHTMLEqual(
            str(f),
            '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>''')
        class PartialArticleFormWithSlug(forms.ModelForm):
            class Meta:
                model = Article
                fields = ('headline', 'slug', 'pub_date')
        w_royko = Writer.objects.create(name='Mike Royko')
        art = Article.objects.create(
            article="Hello.", headline="New headline", slug="new-headline",
            pub_date=datetime.date(1988, 1, 4), writer=w_royko)
        f = PartialArticleFormWithSlug({
            'headline': 'New headline',
            'slug': 'new-headline',
            'pub_date': '1988-01-04'
        }, auto_id=False, instance=art)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>'''
        )
        self.assertTrue(f.is_valid())
        new_art = f.save()
        self.assertEqual(new_art.id, art.id)
        new_art = Article.objects.get(id=art.id)
        self.assertEqual(new_art.headline, 'New headline')
    def test_m2m_editing(self):
        """m2m data is saved on save(), cleared with empty data, and deferred
        until save_m2m() when commit=False."""
        self.create_basic_data()
        form_data = {
            'headline': 'New headline',
            'slug': 'new-headline',
            'pub_date': '1988-01-04',
            'writer': str(self.w_royko.pk),
            'article': 'Hello.',
            'categories': [str(self.c1.id), str(self.c2.id)]
        }
        f = ArticleForm(form_data)
        new_art = f.save()
        new_art = Article.objects.get(id=new_art.id)
        art_id_1 = new_art.id
        self.assertSequenceEqual(new_art.categories.order_by('name'), [self.c1, self.c2])
        # An empty categories list clears the relation on save.
        form_data['categories'] = []
        f = ArticleForm(form_data, instance=new_art)
        new_art = f.save()
        self.assertEqual(new_art.id, art_id_1)
        new_art = Article.objects.get(id=art_id_1)
        self.assertSequenceEqual(new_art.categories.all(), [])
        # Without an instance, saving creates a brand-new row.
        f = ArticleForm(form_data)
        new_art = f.save()
        art_id_2 = new_art.id
        self.assertNotIn(art_id_2, (None, art_id_1))
        new_art = Article.objects.get(id=art_id_2)
        self.assertSequenceEqual(new_art.categories.all(), [])
        form_data['categories'] = [str(self.c1.id), str(self.c2.id)]
        f = ArticleForm(form_data)
        new_art = f.save(commit=False)
        # Manually save the instance
        new_art.save()
        art_id_3 = new_art.id
        self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
        # The instance doesn't have m2m data yet
        new_art = Article.objects.get(id=art_id_3)
        self.assertSequenceEqual(new_art.categories.all(), [])
        # save_m2m() applies the deferred m2m data.
        f.save_m2m()
        self.assertSequenceEqual(new_art.categories.order_by('name'), [self.c1, self.c2])
    def test_custom_form_fields(self):
        # A custom ModelForm with the same field names as the model can
        # apply its changes via save() to an existing Category instance.
        class ShortCategory(forms.ModelForm):
            name = forms.CharField(max_length=5)
            slug = forms.CharField(max_length=5)
            url = forms.CharField(max_length=3)
            class Meta:
                model = Category
                fields = '__all__'
        cat = Category.objects.create(name='Third test')
        form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
        self.assertEqual(form.save().name, 'Third')
        self.assertEqual(Category.objects.get(id=cat.id).name, 'Third')
    def test_runtime_choicefield_populated(self):
        self.maxDiff = None
        # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
        # at runtime, based on the data in the database when the form is displayed, not
        # the data in the database when the form is instantiated.
        self.create_basic_data()
        f = ArticleForm(auto_id=False)
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> </li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
        c4 = Category.objects.create(name='Fourth', url='4th')
        w_bernstein = Writer.objects.create(name='Carl Bernstein')
        self.assertHTMLEqual(
            f.as_ul(),
            '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
<option value="%s">Fourth</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk))
    def test_recleaning_model_form_instance(self):
        """An instance saved from a form can be mutated and re-cleaned."""
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author
                fields = '__all__'
        form = AuthorForm({'full_name': 'Bob'})
        self.assertTrue(form.is_valid())
        obj = form.save()
        obj.name = 'Alice'
        obj.full_clean()
    def test_validate_foreign_key_uses_default_manager(self):
        """FK choices are validated against the related model's default
        manager, so filtered-out rows are rejected."""
        class MyForm(forms.ModelForm):
            class Meta:
                model = Article
                fields = '__all__'
        # Archived writers are filtered out by the default manager.
        w = Writer.objects.create(name='Randy', archived=True)
        data = {
            'headline': 'My Article',
            'slug': 'my-article',
            'pub_date': datetime.date.today(),
            'writer': w.pk,
            'article': 'lorem ipsum',
        }
        form = MyForm(data)
        self.assertIs(form.is_valid(), False)
        self.assertEqual(
            form.errors,
            {'writer': ['Select a valid choice. That choice is not one of the available choices.']},
        )
    def test_validate_foreign_key_to_model_with_overridden_manager(self):
        """Overriding the field's queryset widens the set of valid FK choices."""
        class MyForm(forms.ModelForm):
            class Meta:
                model = Article
                fields = '__all__'
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                # Allow archived authors.
                self.fields['writer'].queryset = Writer._base_manager.all()
        w = Writer.objects.create(name='Randy', archived=True)
        data = {
            'headline': 'My Article',
            'slug': 'my-article',
            'pub_date': datetime.date.today(),
            'writer': w.pk,
            'article': 'lorem ipsum',
        }
        form = MyForm(data)
        self.assertIs(form.is_valid(), True)
        article = form.save()
        self.assertEqual(article.writer, w)
class ModelMultipleChoiceFieldTests(TestCase):
    """Cleaning, choice generation, and query behavior of
    forms.ModelMultipleChoiceField."""

    @classmethod
    def setUpTestData(cls):
        cls.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment')
        cls.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test')
        cls.c3 = Category.objects.create(name='Third', slug='third-test', url='third')

    def test_model_multiple_choice_field(self):
        f = forms.ModelMultipleChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test"),
            (self.c3.pk, 'Third')])
        # Required field: None and the empty list are both invalid.
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean([])
        # Both ints and strings of pks are accepted, in lists or tuples.
        self.assertCountEqual(f.clean([self.c1.id]), [self.c1])
        self.assertCountEqual(f.clean([self.c2.id]), [self.c2])
        self.assertCountEqual(f.clean([str(self.c1.id)]), [self.c1])
        self.assertCountEqual(
            f.clean([str(self.c1.id), str(self.c2.id)]),
            [self.c1, self.c2],
        )
        self.assertCountEqual(
            f.clean([self.c1.id, str(self.c2.id)]),
            [self.c1, self.c2],
        )
        self.assertCountEqual(
            f.clean((self.c1.id, str(self.c2.id))),
            [self.c1, self.c2],
        )
        # Nonexistent pks and non-list values are rejected.
        with self.assertRaises(ValidationError):
            f.clean(['100'])
        with self.assertRaises(ValidationError):
            f.clean('hello')
        with self.assertRaises(ValidationError):
            f.clean(['fail'])
        # Invalid types that require TypeError to be caught (#22808).
        with self.assertRaises(ValidationError):
            f.clean([['fail']])
        with self.assertRaises(ValidationError):
            f.clean([{'foo': 'bar'}])
        # Add a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        # Note, we are using an id of 1006 here since tests that run before
        # this may create categories with primary keys up to 6. Use
        # a number that will not conflict.
        c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
        self.assertCountEqual(f.clean([c6.id]), [c6])
        # Delete a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='6th').delete()
        with self.assertRaises(ValidationError):
            f.clean([c6.id])

    def test_model_multiple_choice_required_false(self):
        f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
        # Empty input on a non-required field yields an empty queryset.
        self.assertIsInstance(f.clean([]), EmptyQuerySet)
        self.assertIsInstance(f.clean(()), EmptyQuerySet)
        with self.assertRaises(ValidationError):
            f.clean(['0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c3.id), '0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c1.id), '0'])
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Third')
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        self.assertSequenceEqual(f.clean([self.c2.id]), [self.c2])
        # Excluded objects are no longer valid choices.
        with self.assertRaises(ValidationError):
            f.clean([self.c3.id])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c2.id), str(self.c3.id)])
        # label_from_instance customizes the rendered labels.
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "multicategory " + str(obj)
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'multicategory Entertainment'),
            (self.c2.pk, "multicategory It's a test"),
            (self.c3.pk, 'multicategory Third')])

    def test_model_multiple_choice_number_of_queries(self):
        # clean() should resolve all selected pks with a single query.
        persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
        self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])

    def test_model_multiple_choice_run_validators(self):
        # Custom validators passed to the field are invoked by clean().
        for i in range(30):
            Writer.objects.create(name="Person %s" % i)
        self._validator_run = False

        def my_validator(value):
            self._validator_run = True

        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(), validators=[my_validator])
        f.clean([p.pk for p in Writer.objects.all()[8:9]])
        self.assertTrue(self._validator_run)

    def test_model_multiple_choice_show_hidden_initial(self):
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(show_hidden_initial=True, queryset=Writer.objects.all())

        person1 = Writer.objects.create(name="Person 1")
        person2 = Writer.objects.create(name="Person 2")
        # Submitting the same selection as the hidden initial: unchanged.
        form = WriterForm(
            initial={'persons': [person1, person2]},
            data={
                'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person1.pk), str(person2.pk)],
            },
        )
        self.assertTrue(form.is_valid())
        self.assertFalse(form.has_changed())
        # Dropping one selection: the form reports a change.
        form = WriterForm(
            initial={'persons': [person1, person2]},
            data={
                'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person2.pk)],
            },
        )
        self.assertTrue(form.is_valid())
        self.assertTrue(form.has_changed())

    def test_model_multiple_choice_field_22745(self):
        """Rendering a CheckboxSelectMultiple-backed field should evaluate
        its queryset once (#22745)."""
        class ModelMultipleChoiceForm(forms.Form):
            categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple)

        form = ModelMultipleChoiceForm()
        field = form['categories']  # BoundField
        template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
        with self.assertNumQueries(1):
            template.render(Context({'field': field}))

    def test_show_hidden_initial_changed_queries_efficiently(self):
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(
                show_hidden_initial=True, queryset=Writer.objects.all())

        writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
        writer_pks = tuple(x.pk for x in writers)
        form = WriterForm(data={'initial-persons': writer_pks})
        # has_changed() should need only one query even for many pks.
        with self.assertNumQueries(1):
            self.assertTrue(form.has_changed())

    def test_clean_does_deduplicate_values(self):
        class PersonForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(queryset=Person.objects.all())

        person1 = Person.objects.create(name='Person 1')
        form = PersonForm(data={})
        # 50 copies of the same pk should collapse to one SQL parameter.
        queryset = form.fields['persons'].clean([str(person1.pk)] * 50)
        sql, params = queryset.query.sql_with_params()
        self.assertEqual(len(params), 1)

    def test_to_field_name_with_initial_data(self):
        class ArticleCategoriesForm(forms.ModelForm):
            categories = forms.ModelMultipleChoiceField(Category.objects.all(), to_field_name='slug')

            class Meta:
                model = Article
                fields = ['categories']

        article = Article.objects.create(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=Writer.objects.create(name='Test writer'),
            article='Hello.',
        )
        article.categories.add(self.c2, self.c3)
        form = ArticleCategoriesForm(instance=article)
        # With to_field_name='slug', initial values are slugs, not pks.
        self.assertCountEqual(form['categories'].value(), [self.c2.slug, self.c3.slug])
class ModelOneToOneFieldTests(TestCase):
    """ModelForm behavior for OneToOneField relations, parent links, and
    nullable foreign keys."""

    def test_modelform_onetoonefield(self):
        class ImprovedArticleForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticle
                fields = '__all__'

        class ImprovedArticleWithParentLinkForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticleWithParentLink
                fields = '__all__'

        # A plain OneToOneField shows up as a form field; a parent_link
        # OneToOneField is excluded entirely.
        self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
        self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])

    def test_modelform_subclassed_model(self):
        class BetterWriterForm(forms.ModelForm):
            class Meta:
                # BetterWriter model is a subclass of Writer with an additional `score` field
                model = BetterWriter
                fields = '__all__'

        bw = BetterWriter.objects.create(name='Joe Better', score=10)
        self.assertEqual(sorted(model_to_dict(bw)), ['id', 'name', 'score', 'writer_ptr'])
        self.assertEqual(sorted(model_to_dict(bw, fields=[])), [])
        self.assertEqual(sorted(model_to_dict(bw, fields=['id', 'name'])), ['id', 'name'])
        self.assertEqual(sorted(model_to_dict(bw, exclude=[])), ['id', 'name', 'score', 'writer_ptr'])
        self.assertEqual(sorted(model_to_dict(bw, exclude=['id', 'name'])), ['score', 'writer_ptr'])

        form = BetterWriterForm({'name': 'Some Name', 'score': 12})
        self.assertTrue(form.is_valid())
        bw2 = form.save()
        self.assertEqual(bw2.score, 12)

    def test_onetoonefield(self):
        class WriterProfileForm(forms.ModelForm):
            class Meta:
                # WriterProfile has a OneToOneField to Writer
                model = WriterProfile
                fields = '__all__'

        self.w_royko = Writer.objects.create(name='Mike Royko')
        self.w_woodward = Writer.objects.create(name='Bob Woodward')
        form = WriterProfileForm()
        self.assertHTMLEqual(
            form.as_p(),
            '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" required></p>''' % (
                self.w_woodward.pk, self.w_royko.pk,
            )
        )

        data = {
            'writer': str(self.w_woodward.pk),
            'age': '65',
        }
        form = WriterProfileForm(data)
        instance = form.save()
        self.assertEqual(str(instance), 'Bob Woodward is 65')

        # Rebinding the form to the saved instance preselects the writer.
        form = WriterProfileForm(instance=instance)
        self.assertHTMLEqual(
            form.as_p(),
            '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="">---------</option>
<option value="%s" selected>Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" value="65" id="id_age" min="0" required></p>''' % (
                self.w_woodward.pk, self.w_royko.pk,
            )
        )

    def test_assignment_of_none(self):
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author
                fields = ['publication', 'full_name']

        publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
        author = Author.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertTrue(form.is_valid())
        self.assertIsNone(form.cleaned_data['publication'])
        author = form.save()
        # author object returned from form still retains original publication object
        # that's why we need to retrieve it from database again
        new_author = Author.objects.get(pk=author.pk)
        self.assertIsNone(new_author.publication)

    def test_assignment_of_none_null_false(self):
        # On a null=False relation, clearing the value fails validation.
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author1
                fields = ['publication', 'full_name']

        publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
        author = Author1.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
    """FileField/ImageField handling on ModelForms: clearing, re-upload,
    required/optional behavior, and dimension-field updates."""

    def test_clean_false(self):
        """
        If the ``clean`` method on a non-required FileField receives False as
        the data (meaning clear the field value), it returns False, regardless
        of the existing initial value.
        """
        f = forms.FileField(required=False)
        self.assertIs(f.clean(False), False)
        self.assertIs(f.clean(False, 'initial'), False)

    def test_clean_false_required(self):
        """
        If the ``clean`` method on a required FileField receives False as the
        data, it has the same effect as None: initial is returned if non-empty,
        otherwise the validation catches the lack of a required value.
        """
        f = forms.FileField(required=True)
        self.assertEqual(f.clean(False, 'initial'), 'initial')
        with self.assertRaises(ValidationError):
            f.clean(False)

    def test_full_clear(self):
        """
        Integration happy-path test that a model FileField can actually be set
        and cleared via a ModelForm.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'

        form = DocumentForm()
        self.assertIn('name="myfile"', str(form))
        # No clear checkbox until there is a file to clear.
        self.assertNotIn('myfile-clear', str(form))
        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        self.assertEqual(doc.myfile.name, 'something.txt')
        form = DocumentForm(instance=doc)
        self.assertIn('myfile-clear', str(form))
        form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
        doc = form.save(commit=False)
        self.assertFalse(doc.myfile)

    def test_clear_and_file_contradiction(self):
        """
        If the user submits a new file upload AND checks the clear checkbox,
        they get a validation error, and the file isn't cleared.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'

        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        form = DocumentForm(
            instance=doc,
            files={'myfile': SimpleUploadedFile('something.txt', b'content')},
            data={'myfile-clear': 'true'},
        )
        self.assertTrue(not form.is_valid())
        self.assertEqual(form.errors['myfile'],
                         ['Please either submit a file or check the clear checkbox, not both.'])
        rendered = str(form)
        self.assertIn('something.txt', rendered)
        self.assertIn('myfile-clear', rendered)

    def test_render_empty_file_field(self):
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'

        doc = Document.objects.create()
        form = DocumentForm(instance=doc)
        self.assertHTMLEqual(
            str(form['myfile']),
            '<input id="id_myfile" name="myfile" type="file">'
        )

    def test_file_field_data(self):
        # Test conditions when files is either not given or empty.
        f = TextFileForm(data={'description': 'Assistance'})
        self.assertFalse(f.is_valid())
        f = TextFileForm(data={'description': 'Assistance'}, files={})
        self.assertFalse(f.is_valid())

        # Upload a file and ensure it all works as expected.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        instance.file.delete()

        # If the previous file has been deleted, the file name can be reused.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')

        # Check if the max_length attribute has been inherited from the model.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')},
        )
        self.assertFalse(f.is_valid())

        # Edit an instance that already has the file defined in the model. This will not
        # save the file again, but leave it exactly as it is.
        f = TextFileForm({'description': 'Assistance'}, instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')

        # Delete the current file since this is not done by Django.
        instance.file.delete()

        # Override the file by uploading a new one.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test2.txt', b'hello world')},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test2.txt')

        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()

    def test_filefield_required_false(self):
        # Test the non-required FileField: saving with no file stores ''.
        f = TextFileForm(data={'description': 'Assistance'})
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, '')

        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test3.txt', b'hello world')},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test3.txt')

        # Instance can be edited w/out re-uploading the file and existing file
        # should be preserved.
        f = TextFileForm({'description': 'New Description'}, instance=instance)
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.file.name, 'tests/test3.txt')

        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()

    def test_custom_file_field_save(self):
        """
        Regression for #11149: save_form_data should be called only once
        (the CustomFF field's save_form_data is expected to
        generate an AssertionError if it is called more than once during save).
        """
        class CFFForm(forms.ModelForm):
            class Meta:
                model = CustomFF
                fields = '__all__'

        form = CFFForm(data={'f': None})
        form.save()

    def test_file_field_multiple_save(self):
        """
        Simulate a file upload and check how many times Model.save() gets
        called. Test for bug #639.
        """
        class PhotoForm(forms.ModelForm):
            class Meta:
                model = Photo
                fields = '__all__'

        # Grab an image for testing.
        filename = os.path.join(os.path.dirname(__file__), 'test.png')
        with open(filename, "rb") as fp:
            img = fp.read()

        # Fake a POST QueryDict and FILES MultiValueDict.
        data = {'title': 'Testing'}
        files = {"image": SimpleUploadedFile('test.png', img, 'image/png')}

        form = PhotoForm(data=data, files=files)
        p = form.save()

        try:
            # Check the savecount stored on the object (see the model).
            self.assertEqual(p._savecount, 1)
        finally:
            # Delete the "uploaded" file to avoid clogging /tmp.
            p = Photo.objects.get()
            p.image.delete(save=False)

    def test_file_path_field_blank(self):
        """FilePathField(blank=True) includes the empty option."""
        class FPForm(forms.ModelForm):
            class Meta:
                model = FilePathModel
                fields = '__all__'

        form = FPForm()
        self.assertEqual([name for _, name in form['path'].field.choices], ['---------', 'models.py'])

    @skipUnless(test_images, "Pillow not installed")
    def test_image_field(self):
        # ImageField and FileField are nearly identical, but they differ slightly when
        # it comes to validation. This specifically tests that #6302 is fixed for
        # both file fields and image fields.

        with open(os.path.join(os.path.dirname(__file__), 'test.png'), 'rb') as fp:
            image_data = fp.read()
        with open(os.path.join(os.path.dirname(__file__), 'test2.png'), 'rb') as fp:
            image_data2 = fp.read()

        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        # width/height dimension fields are populated on save.
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Edit an instance that already has the (required) image defined in
        # the model. This will not save the image again, but leave it exactly
        # as it is.
        f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.height, 16)
        self.assertEqual(instance.width, 16)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        # Override the file by uploading a new one.
        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()

        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)},
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)

        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()

        # Test the non-required ImageField
        # Note: In Oracle, we expect a null ImageField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_imagefield_repr = ''
        else:
            expected_null_imagefield_repr = None

        f = OptionalImageFileForm(data={'description': 'Test'})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, expected_null_imagefield_repr)
        self.assertIsNone(instance.width)
        self.assertIsNone(instance.height)

        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test3.png', image_data)},
            instance=instance,
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Editing the instance without re-uploading the image should not affect
        # the image or its width/height properties.
        f = OptionalImageFileForm({'description': 'New Description'}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)

        # Delete the current file since this is not done by Django.
        instance.image.delete()
        instance.delete()

        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test4.png', image_data2)}
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test4.png')
        self.assertEqual(instance.width, 48)
        self.assertEqual(instance.height, 32)
        instance.delete()
        # Test callable upload_to behavior that's dependent on the value of another field in the model
        f = ImageFileForm(
            data={'description': 'And a final one', 'path': 'foo'},
            files={'image': SimpleUploadedFile('test4.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'foo/test4.png')
        instance.delete()

        # Editing an instance that has an image without an extension shouldn't
        # fail validation. First create:
        f = NoExtensionImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)},
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/no_extension')
        # Then edit:
        f = NoExtensionImageFileForm(data={'description': 'Edited image'}, instance=instance)
        self.assertTrue(f.is_valid())
class ModelOtherFieldTests(SimpleTestCase):
    """Validation of other model-derived form fields: BigIntegerField,
    URLField, and non-editable fields."""

    def test_big_integer_field(self):
        # Bounds are the signed 64-bit range: -2**63 .. 2**63 - 1.
        bif = BigIntForm({'biggie': '-9223372036854775808'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '-9223372036854775809'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(
            bif.errors,
            {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']}
        )
        bif = BigIntForm({'biggie': '9223372036854775807'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '9223372036854775808'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})

    def test_url_on_modelform(self):
        # Check basic URL field validation on model forms.
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'

        self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())

        self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())

    def test_modelform_non_editable_field(self):
        # 'created', non-editable, is excluded by default
        self.assertNotIn('created', ArticleForm().fields)

        # Explicitly requesting a non-editable field raises FieldError.
        msg = "'created' cannot be specified for Article model form as it is a non-editable field"
        with self.assertRaisesMessage(FieldError, msg):
            class InvalidArticleForm(forms.ModelForm):
                class Meta:
                    model = Article
                    fields = ('headline', 'created')

    def test_http_prefixing(self):
        """
        If the http:// prefix is omitted on form input, the field adds it
        again during cleaning.
        """
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'

        form = HomepageForm({'url': 'example.com'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com')

        form = HomepageForm({'url': 'example.com/test'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class OtherModelFormTests(TestCase):
    """Assorted ModelForm behaviors: media, choices, prefetching,
    to_field_name, and callable defaults."""

    def test_media_on_modelform(self):
        # Similar to a regular Form class you can define custom media to be used on
        # the ModelForm.
        f = ModelFormWithMedia()
        self.assertHTMLEqual(
            str(f.media),
            '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet">
<script src="/some/form/javascript"></script>'''
        )

    def test_choices_type(self):
        # Choices on CharField and IntegerField
        f = ArticleForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('42')

        f = ArticleStatusForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('z')

    def test_prefetch_related_queryset(self):
        """
        ModelChoiceField should respect a prefetch_related() on its queryset.
        """
        blue = Colour.objects.create(name='blue')
        red = Colour.objects.create(name='red')
        multicolor_item = ColourfulItem.objects.create()
        multicolor_item.colours.add(blue, red)
        red_item = ColourfulItem.objects.create()
        red_item.colours.add(red)

        class ColorModelChoiceField(forms.ModelChoiceField):
            def label_from_instance(self, obj):
                return ', '.join(c.name for c in obj.colours.all())

        field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours'))
        with self.assertNumQueries(3):  # would be 4 if prefetch is ignored
            self.assertEqual(tuple(field.choices), (
                ('', '---------'),
                (multicolor_item.pk, 'blue, red'),
                (red_item.pk, 'red'),
            ))

    def test_foreignkeys_which_use_to_field(self):
        apple = Inventory.objects.create(barcode=86, name='Apple')
        pear = Inventory.objects.create(barcode=22, name='Pear')
        core = Inventory.objects.create(barcode=87, name='Core', parent=apple)

        # With to_field_name='barcode', option values are barcodes, not pks.
        field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), (
            ('', '---------'),
            (86, 'Apple'),
            (87, 'Core'),
            (22, 'Pear')))

        form = InventoryForm(instance=core)
        self.assertHTMLEqual(str(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected>Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
        data = model_to_dict(core)
        data['parent'] = '22'
        form = InventoryForm(data=data, instance=core)
        core = form.save()
        self.assertEqual(core.parent.name, 'Pear')

        class CategoryForm(forms.ModelForm):
            description = forms.CharField()

            class Meta:
                model = Category
                fields = ['description', 'url']

        self.assertEqual(list(CategoryForm.base_fields), ['description', 'url'])

        self.assertHTMLEqual(
            str(CategoryForm()),
            '''<tr><th><label for="id_description">Description:</label></th>
<td><input type="text" name="description" id="id_description" required></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>'''
        )
        # to_field_name should also work on ModelMultipleChoiceField ##################

        field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
        self.assertSequenceEqual(field.clean([86]), [apple])

        form = SelectInventoryForm({'items': [87, 22]})
        self.assertTrue(form.is_valid())
        self.assertEqual(len(form.cleaned_data), 1)
        self.assertSequenceEqual(form.cleaned_data['items'], [core, pear])

    def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
        # A model field whose formfield() returns None is dropped even when
        # named in Meta.fields.
        self.assertEqual(list(CustomFieldForExclusionForm.base_fields), ['name'])
        self.assertHTMLEqual(
            str(CustomFieldForExclusionForm()),
            '''<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="10" required></td></tr>'''
        )

    def test_iterable_model_m2m(self):
        class ColourfulItemForm(forms.ModelForm):
            class Meta:
                model = ColourfulItem
                fields = '__all__'

        colour = Colour.objects.create(name='Blue')
        form = ColourfulItemForm()
        self.maxDiff = 1024
        self.assertHTMLEqual(
            form.as_p(),
            """<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" required></p>
<p><label for="id_colours">Colours:</label>
<select multiple name="colours" id="id_colours" required>
<option value="%(blue_pk)s">Blue</option>
</select></p>"""
            % {'blue_pk': colour.pk})

    def test_callable_field_default(self):
        class PublicationDefaultsForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = ('title', 'date_published', 'mode', 'category')

        self.maxDiff = 2000
        form = PublicationDefaultsForm()
        today_str = str(datetime.date.today())
        # Callable defaults render both the visible value and a hidden
        # "initial-*" input used for has_changed() comparisons.
        self.assertHTMLEqual(
            form.as_p(),
            """
<p><label for="id_title">Title:</label>
<input id="id_title" maxlength="30" name="title" type="text" required></p>
<p><label for="id_date_published">Date published:</label>
<input id="id_date_published" name="date_published" type="text" value="{0}" required>
<input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}"></p>
<p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
<option value="di" selected>direct</option>
<option value="de">delayed</option></select>
<input id="initial-id_mode" name="initial-mode" type="hidden" value="di"></p>
<p><label for="id_category">Category:</label> <select id="id_category" name="category">
<option value="1">Games</option>
<option value="2">Comics</option>
<option value="3" selected>Novel</option></select>
<input id="initial-id_category" name="initial-category" type="hidden" value="3">
""".format(today_str)
        )
        empty_data = {
            'title': '',
            'date_published': today_str,
            'initial-date_published': today_str,
            'mode': 'di',
            'initial-mode': 'di',
            'category': '3',
            'initial-category': '3',
        }
        bound_form = PublicationDefaultsForm(empty_data)
        self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
    """Custom error messages defined on the form, on the model field, and
    raised from Model.clean() all surface through form.errors."""

    def test_custom_error_messages(self):
        # NOTE(review): the first statement of this test was corrupted in the
        # source ("data = {'name1': '@ata).errors" fused two lines and left
        # `errors` unbound, so the test crashed with NameError). Reconstructed
        # from the upstream Django test: submit invalid values for both
        # fields, then inspect the bound form's errors.
        data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
        errors = CustomErrorMessageForm(data).errors
        # Form-level error_messages override wins for name1.
        self.assertHTMLEqual(
            str(errors['name1']),
            '<ul class="errorlist"><li>Form custom error message.</li></ul>'
        )
        # Model-field-level error_messages is used for name2.
        self.assertHTMLEqual(
            str(errors['name2']),
            '<ul class="errorlist"><li>Model custom error message.</li></ul>'
        )

    def test_model_clean_error_messages(self):
        # ValidationError raised from Model.clean() with a dict of messages.
        data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages.</li></ul>'
        )
        # ValidationError raised with the simpler message/params syntax.
        data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>'
        )
        # A non-field ValidationError lands in form.errors['__all__'].
        data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'], ['Global error message.'])
class CustomCleanTests(TestCase):
    """Overriding clean() on a ModelForm affects both validation and the
    data that ends up on the model instance."""

    def test_override_clean(self):
        """
        Regression for #12596: Calling super from ModelForm.clean() should be
        optional.
        """
        class TripleFormWithCleanOverride(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'

            def clean(self):
                if not self.cleaned_data['left'] == self.cleaned_data['right']:
                    raise ValidationError('Left and right should be equal')
                return self.cleaned_data

        form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
        self.assertTrue(form.is_valid())
        # form.instance.left will be None if the instance was not constructed
        # by form.full_clean().
        self.assertEqual(form.instance.left, 1)

    def test_model_form_clean_applies_to_model(self):
        """
        Regression test for #12960. Make sure the cleaned_data returned from
        ModelForm.clean() is applied to the model instance.
        """
        class CategoryForm(forms.ModelForm):
            class Meta:
                model = Category
                fields = '__all__'

            def clean(self):
                self.cleaned_data['name'] = self.cleaned_data['name'].upper()
                return self.cleaned_data

        data = {'name': 'Test', 'slug': 'test', 'url': '/test'}
        form = CategoryForm(data)
        category = form.save()
        self.assertEqual(category.name, 'TEST')
class ModelFormInheritanceTests(SimpleTestCase):
    """Field collection when ModelForms are mixed with plain Forms and
    fields are removed via subclassing (set to None)."""

    def test_form_subclass_inheritance(self):
        class Form(forms.Form):
            age = forms.IntegerField()

        class ModelForm(forms.ModelForm, Form):
            class Meta:
                model = Writer
                fields = '__all__'

        # Model fields come first, then inherited plain-form fields.
        self.assertEqual(list(ModelForm().fields), ['name', 'age'])

    def test_field_removal(self):
        class ModelForm(forms.ModelForm):
            class Meta:
                model = Writer
                fields = '__all__'

        class Mixin:
            # Setting a field name to None on a mixin removes that field.
            age = None

        class Form(forms.Form):
            age = forms.IntegerField()

        class Form2(forms.Form):
            foo = forms.IntegerField()

        self.assertEqual(list(ModelForm().fields), ['name'])
        # MRO order decides whether the None shadows the field declaration.
        self.assertEqual(list(type('NewForm', (Mixin, Form), {})().fields), [])
        self.assertEqual(list(type('NewForm', (Form2, Mixin, Form), {})().fields), ['foo'])
        self.assertEqual(list(type('NewForm', (Mixin, ModelForm, Form), {})().fields), ['name'])
        self.assertEqual(list(type('NewForm', (ModelForm, Mixin, Form), {})().fields), ['name'])
        self.assertEqual(list(type('NewForm', (ModelForm, Form, Mixin), {})().fields), ['name', 'age'])
        self.assertEqual(list(type('NewForm', (ModelForm, Form), {'age': None})().fields), ['name'])

    def test_field_removal_name_clashes(self):
        """
        Form fields can be removed in subclasses by setting them to None
        (#22510), even when the name clashes with a class attribute such as
        Form.media.
        """
        class MyForm(forms.ModelForm):
            media = forms.CharField()

            class Meta:
                model = Writer
                fields = '__all__'

        class SubForm(MyForm):
            media = None

        self.assertIn('media', MyForm().fields)
        self.assertNotIn('media', SubForm().fields)
        # The `media` property of the Form class itself is untouched.
        self.assertTrue(hasattr(MyForm, 'media'))
        self.assertTrue(hasattr(SubForm, 'media'))
# Module-level form fixture used by the limit_choices_to tests below.
class StumpJokeForm(forms.ModelForm):
    class Meta:
        model = StumpJoke
        fields = '__all__'
# A form field carrying a `queryset` attribute but no limit_choices_to
# machinery; apply_limit_choices_to_to_formfield() must leave it untouched.
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
    queryset = 42  # sentinel value asserted verbatim by the test
# Form fixture pairing the custom field above with an empty model field set.
class StumpJokeWithCustomFieldForm(forms.ModelForm):
    custom = CustomFieldWithQuerysetButNoLimitChoicesTo()

    class Meta:
        model = StumpJoke
        fields = ()
class LimitChoicesToTests(TestCase):
    """Behavior of ForeignKey/ManyToManyField limit_choices_to on form fields."""

    @classmethod
    def setUpTestData(cls):
        # One character whose last_action is in the future (selectable by the
        # callable limit_choices_to) and one in the past (excluded).
        cls.threepwood = Character.objects.create(
            username='threepwood',
            last_action=datetime.datetime.today() + datetime.timedelta(days=1),
        )
        cls.marley = Character.objects.create(
            username='marley',
            last_action=datetime.datetime.today() - datetime.timedelta(days=1),
        )

    def test_limit_choices_to_callable_for_fk_rel(self):
        stumpjokeform = StumpJokeForm()
        self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood])

    def test_limit_choices_to_callable_for_m2m_rel(self):
        # NOTE(review): this inspects the fk field 'most_recently_fooled',
        # identical to the fk test above; presumably the m2m field
        # 'has_fooled_today' was intended — confirm against the model.
        stumpjokeform = StumpJokeForm()
        self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood])

    def test_custom_field_with_queryset_but_no_limit_choices_to(self):
        # A field without limit_choices_to support keeps its queryset as-is.
        f = StumpJokeWithCustomFieldForm()
        self.assertEqual(f.fields['custom'].queryset, 42)

    def test_fields_for_model_applies_limit_choices_to(self):
        fields = fields_for_model(StumpJoke, ['has_fooled_today'])
        self.assertSequenceEqual(fields['has_fooled_today'].queryset, [self.threepwood])

    def test_callable_called_each_time_form_is_instantiated(self):
        # A callable limit_choices_to must be re-evaluated per form instance.
        field = StumpJokeForm.base_fields['most_recently_fooled']
        with mock.patch.object(field, 'limit_choices_to') as today_callable_dict:
            StumpJokeForm()
            self.assertEqual(today_callable_dict.call_count, 1)
            StumpJokeForm()
            self.assertEqual(today_callable_dict.call_count, 2)
            StumpJokeForm()
            self.assertEqual(today_callable_dict.call_count, 3)

    @isolate_apps('model_forms')
    def test_limit_choices_to_no_duplicates(self):
        # Multi-join limit_choices_to conditions must not yield duplicate
        # choices (regression-style check using Q objects and dict lookups).
        joke1 = StumpJoke.objects.create(
            funny=True,
            most_recently_fooled=self.threepwood,
        )
        joke2 = StumpJoke.objects.create(
            funny=True,
            most_recently_fooled=self.threepwood,
        )
        joke3 = StumpJoke.objects.create(
            funny=True,
            most_recently_fooled=self.marley,
        )
        StumpJoke.objects.create(funny=False, most_recently_fooled=self.marley)
        joke1.has_fooled_today.add(self.marley, self.threepwood)
        joke2.has_fooled_today.add(self.marley)
        joke3.has_fooled_today.add(self.marley, self.threepwood)

        class CharacterDetails(models.Model):
            character1 = models.ForeignKey(
                Character,
                models.CASCADE,
                limit_choices_to=models.Q(
                    jokes__funny=True,
                    jokes_today__funny=True,
                ),
                related_name='details_fk_1',
            )
            character2 = models.ForeignKey(
                Character,
                models.CASCADE,
                limit_choices_to={
                    'jokes__funny': True,
                    'jokes_today__funny': True,
                },
                related_name='details_fk_2',
            )
            character3 = models.ManyToManyField(
                Character,
                limit_choices_to=models.Q(
                    jokes__funny=True,
                    jokes_today__funny=True,
                ),
                related_name='details_m2m_1',
            )

        class CharacterDetailsForm(forms.ModelForm):
            class Meta:
                model = CharacterDetails
                fields = '__all__'

        form = CharacterDetailsForm()
        self.assertCountEqual(
            form.fields['character1'].queryset,
            [self.marley, self.threepwood],
        )
        self.assertCountEqual(
            form.fields['character2'].queryset,
            [self.marley, self.threepwood],
        )
        self.assertCountEqual(
            form.fields['character3'].queryset,
            [self.marley, self.threepwood],
        )

    def test_limit_choices_to_m2m_through(self):
        # limit_choices_to on an m2m with a through model: value 0 is excluded.
        class DiceForm(forms.ModelForm):
            class Meta:
                model = Dice
                fields = ['numbers']

        Number.objects.create(value=0)
        n1 = Number.objects.create(value=1)
        n2 = Number.objects.create(value=2)
        form = DiceForm()
        self.assertCountEqual(form.fields['numbers'].queryset, [n1, n2])
class FormFieldCallbackTests(SimpleTestCase):
    """modelform_factory(): widgets, fields/exclude rules, formfield_callback."""

    def test_baseform_with_widgets_in_meta(self):
        # Meta.widgets on the base form survives modelform_factory().
        widget = forms.Textarea()

        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"

        Form = modelform_factory(Person, form=BaseForm)
        self.assertIsInstance(Form.base_fields['name'].widget, forms.Textarea)

    def test_factory_with_widget_argument(self):
        widget = forms.Textarea()
        # Without a widget should not set the widget to textarea
        Form = modelform_factory(Person, fields="__all__")
        self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
        # With a widgets mapping, the widget IS set to Textarea
        Form = modelform_factory(Person, fields="__all__", widgets={'name': widget})
        self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)

    def test_modelform_factory_without_fields(self):
        # Omitting both 'fields' and 'exclude' is an explicit error.
        message = (
            "Calling modelform_factory without defining 'fields' or 'exclude' "
            "explicitly is prohibited."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            modelform_factory(Person)

    def test_modelform_factory_with_all_fields(self):
        form = modelform_factory(Person, fields="__all__")
        self.assertEqual(list(form.base_fields), ["name"])

    def test_custom_callback(self):
        # formfield_callback receives every db field plus the kwargs derived
        # from Meta (e.g. the widget override).
        callback_args = []

        def callback(db_field, **kwargs):
            callback_args.append((db_field, kwargs))
            return db_field.formfield(**kwargs)

        widget = forms.Textarea()

        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"

        modelform_factory(Person, form=BaseForm, formfield_callback=callback)
        id_field, name_field = Person._meta.fields
        self.assertEqual(callback_args, [(id_field, {}), (name_field, {'widget': widget})])

    def test_bad_callback(self):
        # A bad callback provided by user still gives an error
        with self.assertRaises(TypeError):
            modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable')

    def test_inherit_after_custom_callback(self):
        # Subclassing a callback-built form keeps the callback's widgets.
        def callback(db_field, **kwargs):
            if isinstance(db_field, models.CharField):
                return forms.CharField(widget=forms.Textarea)
            return db_field.formfield(**kwargs)

        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                fields = '__all__'

        NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback)

        class InheritedForm(NewForm):
            pass

        for name in NewForm.base_fields:
            self.assertEqual(
                type(InheritedForm.base_fields[name].widget),
                type(NewForm.base_fields[name].widget)
            )
class LocalizedModelFormTest(TestCase):
    """Meta.localized_fields: per-field, '__all__', and invalid-string cases."""

    def test_model_form_applies_localize_to_some_fields(self):
        class PartiallyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = ('left', 'right',)
                fields = '__all__'

        f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        # Only the listed fields get localize=True.
        self.assertTrue(f.fields['left'].localize)
        self.assertFalse(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)

    def test_model_form_applies_localize_to_all_fields(self):
        class FullyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = '__all__'
                fields = '__all__'

        f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        self.assertTrue(f.fields['left'].localize)
        self.assertTrue(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)

    def test_model_form_refuses_arbitrary_string(self):
        # Any string other than '__all__' (a likely forgotten-tuple typo)
        # raises at class-definition time.
        msg = (
            "BrokenLocalizedTripleForm.Meta.localized_fields "
            "cannot be a string. Did you mean to type: ('foo',)?"
        )
        with self.assertRaisesMessage(TypeError, msg):
            class BrokenLocalizedTripleForm(forms.ModelForm):
                class Meta:
                    model = Triple
                    localized_fields = "foo"
# A ModelForm metaclass that wipes base_fields; used to verify that
# modelform_factory() respects a custom metaclass on the supplied form.
class CustomMetaclass(ModelFormMetaclass):
    def __new__(cls, name, bases, attrs):
        new = super().__new__(cls, name, bases, attrs)
        new.base_fields = {}
        return new
# Form fixture built with the field-wiping metaclass above.
class CustomMetaclassForm(forms.ModelForm, metaclass=CustomMetaclass):
    pass
class CustomMetaclassTestCase(SimpleTestCase):
    def test_modelform_factory_metaclass(self):
        # The factory-built class goes through CustomMetaclass, so its
        # base_fields have been emptied.
        new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
        self.assertEqual(new_cls.base_fields, {})
class StrictAssignmentTests(SimpleTestCase):
    """A model raising ValidationError from __setattr__ surfaces the error
    through form validation instead of crashing form construction."""

    def test_setattr_raises_validation_error_field_specific(self):
        form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title'])
        form = form_class(data={'title': 'testing setattr'}, files=None)
        # This line turns on the ValidationError; it avoids the model erroring
        # when its own __init__() is called when creating form.instance.
        form.instance._should_error = True
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {
            'title': ['Cannot set attribute', 'This field cannot be blank.']
        })

    def test_setattr_raises_validation_error_non_field(self):
        form_class = modelform_factory(model=StrictAssignmentAll, fields=['title'])
        form = form_class(data={'title': 'testing setattr'}, files=None)
        # This line turns on the ValidationError; it avoids the model erroring
        # when its own __init__() is called when creating form.instance.
        form.instance._should_error = True
        self.assertFalse(form.is_valid())
        # Non-field errors land under '__all__'.
        self.assertEqual(form.errors, {
            '__all__': ['Cannot set attribute'],
            'title': ['This field cannot be blank.']
        })
class ModelToDictTests(TestCase):
    def test_many_to_many(self):
        """model_to_dict() snapshots m2m values as a list, not a lazy QuerySet."""
        blue = Colour.objects.create(name='blue')
        red = Colour.objects.create(name='red')
        item = ColourfulItem.objects.create()
        item.colours.set([blue])
        data = model_to_dict(item)['colours']
        self.assertEqual(data, [blue])
        item.colours.set([red])
        # If data were a QuerySet, it would be reevaluated here and give "red"
        # instead of the original value.
        self.assertEqual(data, [blue])
| true
| true
|
7907acab9221c3960723660abcc48d9c5b644763
| 8,945
|
py
|
Python
|
src/streamlink/plugins/youtube.py
|
nxkbd/streamlink
|
0ba7767c024a6d6086d570e342680dc40c05a57b
|
[
"BSD-2-Clause"
] | null | null | null |
src/streamlink/plugins/youtube.py
|
nxkbd/streamlink
|
0ba7767c024a6d6086d570e342680dc40c05a57b
|
[
"BSD-2-Clause"
] | null | null | null |
src/streamlink/plugins/youtube.py
|
nxkbd/streamlink
|
0ba7767c024a6d6086d570e342680dc40c05a57b
|
[
"BSD-2-Clause"
] | null | null | null |
import re
from streamlink.compat import urlparse, parse_qsl
from streamlink.plugin import Plugin, PluginError
from streamlink.plugin.api import http, validate
from streamlink.plugin.api.utils import parse_query
from streamlink.stream import HTTPStream, HLSStream
from streamlink.compat import parse_qsl
from streamlink.stream.ffmpegmux import MuxedStream
API_KEY = "AIzaSyBDBi-4roGzWJN4du9TuDMLd_jVTcVkKz4"
API_BASE = "https://www.googleapis.com/youtube/v3"
API_SEARCH_URL = API_BASE + "/search"
API_VIDEO_INFO = "http://youtube.com/get_video_info"
HLS_HEADERS = {
"User-Agent": "Mozilla/5.0"
}
def parse_stream_map(stream_map):
    """Split a comma-separated stream map into a list of parsed query dicts.

    An empty or None input yields an empty list.
    """
    if stream_map:
        return [parse_query(entry) for entry in stream_map.split(",")]
    return []
def parse_fmt_list(formatsmap):
    """Parse a YouTube "fmt_list" string into an itag -> quality-name map.

    The input looks like "ITAG/WIDTHxHEIGHT,...". Only the height is used
    to build names such as "720p". Returns {} for an empty/None input.
    """
    formats = {}
    if not formatsmap:
        return formats
    # Iterate with `entry` instead of the original `format`, which shadowed
    # the builtin; the unused width is discarded explicitly.
    for entry in formatsmap.split(","):
        parts = entry.split("/")
        _width, height = parts[1].split("x")
        formats[int(parts[0])] = "{0}p".format(height)
    return formats
# Schema for the query dict parsed out of a get_video_info response:
# format lists, progressive/adaptive stream maps, optional HLS playlist URL,
# and the mandatory "status" flag. An "s" key marks a ciphered (protected)
# stream signature.
_config_schema = validate.Schema(
    {
        validate.optional("fmt_list"): validate.all(
            validate.text,
            validate.transform(parse_fmt_list)
        ),
        validate.optional("url_encoded_fmt_stream_map"): validate.all(
            validate.text,
            validate.transform(parse_stream_map),
            [{
                "itag": validate.all(
                    validate.text,
                    validate.transform(int)
                ),
                "quality": validate.text,
                "url": validate.url(scheme="http"),
                validate.optional("s"): validate.text,
                validate.optional("stereo3d"): validate.all(
                    validate.text,
                    validate.transform(int),
                    validate.transform(bool)
                ),
            }]
        ),
        validate.optional("adaptive_fmts"): validate.all(
            validate.text,
            validate.transform(parse_stream_map),
            [{
                validate.optional("s"): validate.text,
                # "type" is a MIME string like "video/mp4; codecs=..."; keep
                # only ["video", "mp4"].
                "type": validate.all(
                    validate.text,
                    validate.transform(lambda t: t.split(";")[0].split("/")),
                    [validate.text, validate.text]
                ),
                "url": validate.all(
                    validate.url(scheme="http")
                )
            }]
        ),
        validate.optional("hlsvp"): validate.text,
        validate.optional("live_playback"): validate.transform(bool),
        "status": validate.text
    }
)
# Schema for the YouTube Data API v3 search response; reduces it to the
# list under "items", each carrying an id.videoId.
_search_schema = validate.Schema(
    {
        "items": [{
            "id": {
                "videoId": validate.text
            }
        }]
    },
    validate.get("items")
)
_channelid_re = re.compile(r'meta itemprop="channelId" content="([^"]+)"')
_livechannelid_re = re.compile(r'meta property="og:video:url" content="([^"]+)')
_url_re = re.compile(r"""
http(s)?://(\w+\.)?youtube.com
(?:
(?:
/(watch.+v=|embed/|v/)
(?P<video_id>[0-9A-z_-]{11})
)
|
(?:
/(user|channel)/(?P<user>[^/?]+)
)
|
(?:
/c/(?P<liveChannel>[^/?]+)/live
)
)
""", re.VERBOSE)
class YouTube(Plugin):
    """Streamlink plugin resolving YouTube URLs to HTTP, HLS and
    DASH-muxed (separate audio/video) streams."""

    # itag -> stream name for high-quality video-only DASH formats.
    adp_video = {
        137: "1080p",
        303: "1080p60",  # HFR
        299: "1080p60",  # HFR
        264: "1440p",
        308: "1440p60",  # HFR
        266: "2160p",
        315: "2160p60",  # HFR
        138: "2160p",
        302: "720p60",  # HFR
    }
    # itag -> bitrate for audio-only DASH formats; used to pick the best
    # audio track to mux with video-only streams.
    adp_audio = {
        140: 128,
        141: 256,
        171: 128,
        249: 48,
        250: 64,
        251: 160,
    }

    @classmethod
    def can_handle_url(self, url):
        # NOTE(review): first parameter of this classmethod is named `self`
        # rather than the conventional `cls`; left unchanged.
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, stream):
        """Rank streams: 3D variants slightly below their 2D quality,
        high-frame-rate variants slightly above."""
        match_3d = re.match(r"(\w+)_3d", stream)
        match_hfr = re.match(r"(\d+p)(\d+)", stream)
        if match_3d:
            weight, group = Plugin.stream_weight(match_3d.group(1))
            weight -= 1
            group = "youtube_3d"
        elif match_hfr:
            weight, group = Plugin.stream_weight(match_hfr.group(1))
            weight += 1
            group = "high_frame_rate"
        else:
            weight, group = Plugin.stream_weight(stream)
        return weight, group

    def _find_channel_video(self):
        # Scrape the channel id from the page, then ask the API for its
        # currently-live video.
        res = http.get(self.url)
        match = _channelid_re.search(res.text)
        if not match:
            return
        return self._get_channel_video(match.group(1))

    def _get_channel_video(self, channel_id):
        """Return the id of the channel's live video via the search API,
        or None if there is none."""
        query = {
            "channelId": channel_id,
            "type": "video",
            "eventType": "live",
            "part": "id",
            "key": API_KEY
        }
        res = http.get(API_SEARCH_URL, params=query)
        videos = http.json(res, schema=_search_schema)
        # Only the first result is used.
        for video in videos:
            video_id = video["id"]["videoId"]
            return video_id

    def _find_canonical_stream_info(self):
        # /c/<name>/live pages embed the canonical watch URL in og:video:url;
        # recurse into _get_stream_info with it.
        res = http.get(self.url)
        match = _livechannelid_re.search(res.text)
        if not match:
            return
        return self._get_stream_info(match.group(1))

    def _get_stream_info(self, url):
        """Resolve the URL to a video id and fetch/validate its
        get_video_info config dict (or None if unresolvable)."""
        match = _url_re.match(url)
        user = match.group("user")
        live_channel = match.group("liveChannel")
        if user:
            video_id = self._find_channel_video()
        elif live_channel:
            return self._find_canonical_stream_info()
        else:
            video_id = match.group("video_id")
            # Embedded "live_stream" pages carry the channel id as a query
            # parameter instead of a real video id.
            if video_id == "live_stream":
                query_info = dict(parse_qsl(urlparse(url).query))
                if "channel" in query_info:
                    video_id = self._get_channel_video(query_info["channel"])
        if not video_id:
            return
        params = {
            "video_id": video_id,
            "el": "player_embedded"
        }
        res = http.get(API_VIDEO_INFO, params=params, headers=HLS_HEADERS)
        return parse_query(res.text, name="config", schema=_config_schema)

    def _get_streams(self):
        info = self._get_stream_info(self.url)
        if not info:
            return
        formats = info.get("fmt_list")
        streams = {}
        protected = False
        # Progressive (muxed) HTTP streams; entries with an "s" key have a
        # ciphered signature this plugin cannot decode.
        for stream_info in info.get("url_encoded_fmt_stream_map", []):
            if stream_info.get("s"):
                protected = True
                continue
            stream = HTTPStream(self.session, stream_info["url"])
            name = formats.get(stream_info["itag"]) or stream_info["quality"]
            if stream_info.get("stereo3d"):
                name += "_3d"
            streams[name] = stream
        adaptive_streams = {}
        best_audio_itag = None
        # Extract audio streams from the DASH format list
        for stream_info in info.get("adaptive_fmts", []):
            if stream_info.get("s"):
                protected = True
                continue
            stream_params = dict(parse_qsl(stream_info["url"]))
            if "itag" not in stream_params:
                continue
            itag = int(stream_params["itag"])
            # extract any high quality streams only available in adaptive formats
            adaptive_streams[itag] = stream_info["url"]
            stream_type, stream_format = stream_info["type"]
            if stream_type == "audio":
                stream = HTTPStream(self.session, stream_info["url"])
                name = "audio_{0}".format(stream_format)
                streams[name] = stream
                # find the best quality audio stream m4a, opus or vorbis
                if best_audio_itag is None or self.adp_audio[itag] > self.adp_audio[best_audio_itag]:
                    best_audio_itag = itag
        # Mux the best audio track with each known video-only format when
        # ffmpeg muxing is available in this session.
        if best_audio_itag and adaptive_streams and MuxedStream.is_usable(self.session):
            aurl = adaptive_streams[best_audio_itag]
            for itag, name in self.adp_video.items():
                if itag in adaptive_streams:
                    vurl = adaptive_streams[itag]
                    streams[name] = MuxedStream(self.session,
                                                HTTPStream(self.session, vurl),
                                                HTTPStream(self.session, aurl))
        # Live content exposes an HLS variant playlist instead.
        hls_playlist = info.get("hlsvp")
        if hls_playlist:
            try:
                hls_streams = HLSStream.parse_variant_playlist(
                    self.session, hls_playlist, headers=HLS_HEADERS, namekey="pixels"
                )
                streams.update(hls_streams)
            except IOError as err:
                self.logger.warning("Failed to extract HLS streams: {0}", err)
        if not streams and protected:
            raise PluginError("This plugin does not support protected videos, "
                              "try youtube-dl instead")
        return streams
__plugin__ = YouTube
| 30.951557
| 101
| 0.541643
|
import re
from streamlink.compat import urlparse, parse_qsl
from streamlink.plugin import Plugin, PluginError
from streamlink.plugin.api import http, validate
from streamlink.plugin.api.utils import parse_query
from streamlink.stream import HTTPStream, HLSStream
from streamlink.compat import parse_qsl
from streamlink.stream.ffmpegmux import MuxedStream
API_KEY = "AIzaSyBDBi-4roGzWJN4du9TuDMLd_jVTcVkKz4"
API_BASE = "https://www.googleapis.com/youtube/v3"
API_SEARCH_URL = API_BASE + "/search"
API_VIDEO_INFO = "http://youtube.com/get_video_info"
HLS_HEADERS = {
"User-Agent": "Mozilla/5.0"
}
def parse_stream_map(stream_map):
    """Split a comma-separated stream map into a list of parsed query dicts.

    An empty or None input yields an empty list.
    """
    if stream_map:
        return [parse_query(entry) for entry in stream_map.split(",")]
    return []
def parse_fmt_list(formatsmap):
    """Parse a YouTube "fmt_list" string into an itag -> quality-name map.

    The input looks like "ITAG/WIDTHxHEIGHT,...". Only the height is used
    to build names such as "720p". Returns {} for an empty/None input.
    """
    formats = {}
    if not formatsmap:
        return formats
    # Iterate with `entry` instead of the original `format`, which shadowed
    # the builtin; the unused width is discarded explicitly.
    for entry in formatsmap.split(","):
        parts = entry.split("/")
        _width, height = parts[1].split("x")
        formats[int(parts[0])] = "{0}p".format(height)
    return formats
# Schema for the query dict parsed out of a get_video_info response:
# format lists, progressive/adaptive stream maps, optional HLS playlist URL,
# and the mandatory "status" flag. An "s" key marks a ciphered (protected)
# stream signature.
_config_schema = validate.Schema(
    {
        validate.optional("fmt_list"): validate.all(
            validate.text,
            validate.transform(parse_fmt_list)
        ),
        validate.optional("url_encoded_fmt_stream_map"): validate.all(
            validate.text,
            validate.transform(parse_stream_map),
            [{
                "itag": validate.all(
                    validate.text,
                    validate.transform(int)
                ),
                "quality": validate.text,
                "url": validate.url(scheme="http"),
                validate.optional("s"): validate.text,
                validate.optional("stereo3d"): validate.all(
                    validate.text,
                    validate.transform(int),
                    validate.transform(bool)
                ),
            }]
        ),
        validate.optional("adaptive_fmts"): validate.all(
            validate.text,
            validate.transform(parse_stream_map),
            [{
                validate.optional("s"): validate.text,
                # "type" is a MIME string like "video/mp4; codecs=..."; keep
                # only ["video", "mp4"].
                "type": validate.all(
                    validate.text,
                    validate.transform(lambda t: t.split(";")[0].split("/")),
                    [validate.text, validate.text]
                ),
                "url": validate.all(
                    validate.url(scheme="http")
                )
            }]
        ),
        validate.optional("hlsvp"): validate.text,
        validate.optional("live_playback"): validate.transform(bool),
        "status": validate.text
    }
)
# Schema for the YouTube Data API v3 search response; reduces it to the
# list under "items", each carrying an id.videoId.
_search_schema = validate.Schema(
    {
        "items": [{
            "id": {
                "videoId": validate.text
            }
        }]
    },
    validate.get("items")
)
_channelid_re = re.compile(r'meta itemprop="channelId" content="([^"]+)"')
_livechannelid_re = re.compile(r'meta property="og:video:url" content="([^"]+)')
_url_re = re.compile(r"""
http(s)?://(\w+\.)?youtube.com
(?:
(?:
/(watch.+v=|embed/|v/)
(?P<video_id>[0-9A-z_-]{11})
)
|
(?:
/(user|channel)/(?P<user>[^/?]+)
)
|
(?:
/c/(?P<liveChannel>[^/?]+)/live
)
)
""", re.VERBOSE)
class YouTube(Plugin):
    """Streamlink plugin resolving YouTube URLs to HTTP, HLS and
    DASH-muxed (separate audio/video) streams."""

    # itag -> stream name for high-quality video-only DASH formats.
    adp_video = {
        137: "1080p",
        303: "1080p60",  # HFR
        299: "1080p60",  # HFR
        264: "1440p",
        308: "1440p60",  # HFR
        266: "2160p",
        315: "2160p60",  # HFR
        138: "2160p",
        302: "720p60",  # HFR
    }
    # itag -> bitrate for audio-only DASH formats; used to pick the best
    # audio track to mux with video-only streams.
    adp_audio = {
        140: 128,
        141: 256,
        171: 128,
        249: 48,
        250: 64,
        251: 160,
    }

    @classmethod
    def can_handle_url(self, url):
        # NOTE(review): first parameter of this classmethod is named `self`
        # rather than the conventional `cls`; left unchanged.
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, stream):
        """Rank streams: 3D variants slightly below their 2D quality,
        high-frame-rate variants slightly above."""
        match_3d = re.match(r"(\w+)_3d", stream)
        match_hfr = re.match(r"(\d+p)(\d+)", stream)
        if match_3d:
            weight, group = Plugin.stream_weight(match_3d.group(1))
            weight -= 1
            group = "youtube_3d"
        elif match_hfr:
            weight, group = Plugin.stream_weight(match_hfr.group(1))
            weight += 1
            group = "high_frame_rate"
        else:
            weight, group = Plugin.stream_weight(stream)
        return weight, group

    def _find_channel_video(self):
        # Scrape the channel id from the page, then ask the API for its
        # currently-live video.
        res = http.get(self.url)
        match = _channelid_re.search(res.text)
        if not match:
            return
        return self._get_channel_video(match.group(1))

    def _get_channel_video(self, channel_id):
        """Return the id of the channel's live video via the search API,
        or None if there is none."""
        query = {
            "channelId": channel_id,
            "type": "video",
            "eventType": "live",
            "part": "id",
            "key": API_KEY
        }
        res = http.get(API_SEARCH_URL, params=query)
        videos = http.json(res, schema=_search_schema)
        # Only the first result is used.
        for video in videos:
            video_id = video["id"]["videoId"]
            return video_id

    def _find_canonical_stream_info(self):
        # /c/<name>/live pages embed the canonical watch URL in og:video:url;
        # recurse into _get_stream_info with it.
        res = http.get(self.url)
        match = _livechannelid_re.search(res.text)
        if not match:
            return
        return self._get_stream_info(match.group(1))

    def _get_stream_info(self, url):
        """Resolve the URL to a video id and fetch/validate its
        get_video_info config dict (or None if unresolvable)."""
        match = _url_re.match(url)
        user = match.group("user")
        live_channel = match.group("liveChannel")
        if user:
            video_id = self._find_channel_video()
        elif live_channel:
            return self._find_canonical_stream_info()
        else:
            video_id = match.group("video_id")
            # Embedded "live_stream" pages carry the channel id as a query
            # parameter instead of a real video id.
            if video_id == "live_stream":
                query_info = dict(parse_qsl(urlparse(url).query))
                if "channel" in query_info:
                    video_id = self._get_channel_video(query_info["channel"])
        if not video_id:
            return
        params = {
            "video_id": video_id,
            "el": "player_embedded"
        }
        res = http.get(API_VIDEO_INFO, params=params, headers=HLS_HEADERS)
        return parse_query(res.text, name="config", schema=_config_schema)

    def _get_streams(self):
        info = self._get_stream_info(self.url)
        if not info:
            return
        formats = info.get("fmt_list")
        streams = {}
        protected = False
        # Progressive (muxed) HTTP streams; entries with an "s" key have a
        # ciphered signature this plugin cannot decode.
        for stream_info in info.get("url_encoded_fmt_stream_map", []):
            if stream_info.get("s"):
                protected = True
                continue
            stream = HTTPStream(self.session, stream_info["url"])
            name = formats.get(stream_info["itag"]) or stream_info["quality"]
            if stream_info.get("stereo3d"):
                name += "_3d"
            streams[name] = stream
        adaptive_streams = {}
        best_audio_itag = None
        # Extract audio streams from the DASH format list
        for stream_info in info.get("adaptive_fmts", []):
            if stream_info.get("s"):
                protected = True
                continue
            stream_params = dict(parse_qsl(stream_info["url"]))
            if "itag" not in stream_params:
                continue
            itag = int(stream_params["itag"])
            # extract any high quality streams only available in adaptive formats
            adaptive_streams[itag] = stream_info["url"]
            stream_type, stream_format = stream_info["type"]
            if stream_type == "audio":
                stream = HTTPStream(self.session, stream_info["url"])
                name = "audio_{0}".format(stream_format)
                streams[name] = stream
                # find the best quality audio stream m4a, opus or vorbis
                if best_audio_itag is None or self.adp_audio[itag] > self.adp_audio[best_audio_itag]:
                    best_audio_itag = itag
        # Mux the best audio track with each known video-only format when
        # ffmpeg muxing is available in this session.
        if best_audio_itag and adaptive_streams and MuxedStream.is_usable(self.session):
            aurl = adaptive_streams[best_audio_itag]
            for itag, name in self.adp_video.items():
                if itag in adaptive_streams:
                    vurl = adaptive_streams[itag]
                    streams[name] = MuxedStream(self.session,
                                                HTTPStream(self.session, vurl),
                                                HTTPStream(self.session, aurl))
        # Live content exposes an HLS variant playlist instead.
        hls_playlist = info.get("hlsvp")
        if hls_playlist:
            try:
                hls_streams = HLSStream.parse_variant_playlist(
                    self.session, hls_playlist, headers=HLS_HEADERS, namekey="pixels"
                )
                streams.update(hls_streams)
            except IOError as err:
                self.logger.warning("Failed to extract HLS streams: {0}", err)
        if not streams and protected:
            raise PluginError("This plugin does not support protected videos, "
                              "try youtube-dl instead")
        return streams
__plugin__ = YouTube
| true
| true
|
7907acc3377c89435895b694e856fe95c9df31b3
| 15,787
|
py
|
Python
|
spark_cluster/04_2_HV_basic/HV_v1_NYT_sim1_and_sim3_to_sim2/6200_ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation.py
|
poltextlab/nyt_hybrid_classification_workflow
|
3f676938b08f4373be3a83e975ee51dfa5ce6bf5
|
[
"MIT"
] | null | null | null |
spark_cluster/04_2_HV_basic/HV_v1_NYT_sim1_and_sim3_to_sim2/6200_ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation.py
|
poltextlab/nyt_hybrid_classification_workflow
|
3f676938b08f4373be3a83e975ee51dfa5ce6bf5
|
[
"MIT"
] | null | null | null |
spark_cluster/04_2_HV_basic/HV_v1_NYT_sim1_and_sim3_to_sim2/6200_ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation.py
|
poltextlab/nyt_hybrid_classification_workflow
|
3f676938b08f4373be3a83e975ee51dfa5ce6bf5
|
[
"MIT"
] | null | null | null |
# import libraries
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.types import *
from pyspark.sql.functions import col, count, lit, rand, when
import pandas as pd
from math import ceil
#################################################
# spark config
#################################################
# Standalone-cluster master plus executor/driver sizing and Kryo
# serialization; event logging goes to HDFS for the history server.
mtaMaster = "spark://192.168.0.182:7077"
conf = SparkConf()
conf.setMaster(mtaMaster)
conf.set("spark.executor.memory", "24g")
conf.set("spark.driver.memory", "26g")
conf.set("spark.cores.max", 96)
conf.set("spark.driver.cores", 8)
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
conf.set("spark.kryoserializer.buffer", "256m")
conf.set("spark.kryoserializer.buffer.max", "256m")
conf.set("spark.default.parallelism", 24)
conf.set("spark.eventLog.enabled", "true")
conf.set("spark.eventLog.dir", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.history.fs.logDirectory", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.driver.maxResultSize", "4g")
conf.getAll()
#################################################
# create spark session
#################################################
spark = SparkSession.builder.appName('ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation').config(conf=conf).getOrCreate()
sc = spark.sparkContext
# check things are working
print(sc)
print(sc.defaultParallelism)
print("SPARK CONTEXT IS RUNNING")
#################################################
# define major topic codes
#################################################
# major topic codes for loop (NO 23 IN THE NYT CORPUS)
majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]
#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]
#################################################
# read result data from round 3
#################################################
df_results = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_classified.parquet").repartition(50)
# verdict to integer for the comparison with majortopic later
df_results = df_results.withColumn('verdict', df_results.verdict.cast(IntegerType()))
#################################################
# create table to store sample and validation numbers
#################################################
# Pandas bookkeeping table: one row per major topic code.
columns = ["num_classified", "num_sample", "num_non_sample", "num_correct", "num_incorrect", "precision_in_sample", "num_added_to_training"]
df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)
df_numbers = df_numbers.fillna(0)
#################################################
# create table of samples from results
#################################################
# constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval:
# z_delta = z^2 * p * (1 - p) / delta^2 with p = 0.5 (the worst case), i.e.
# the numerator of Cochran's sample size formula; equals 384.16 here.
z = 1.96
delta = 0.05
z_delta = z*z*0.5*0.5/(delta*delta)
print("z_delta :", z_delta)
# For each topic: draw a validation sample of the documents classified as
# that topic (finite-population-corrected size, floor of 100), and split the
# rest into the non-sample pool.
for i in majortopic_codes:
    df_classified = df_results.where(col('verdict') == i)
    num_classified = df_classified.count()
    df_numbers["num_classified"].loc[i] = num_classified
    print("MTC:", i, "num_classified: ", num_classified)
    if num_classified > 100:
        # Cochran's formula with finite population correction.
        sample_size = ceil(z_delta/(1+1/num_classified*(z_delta-1)))
        print("sample_size: ", sample_size)
        if sample_size < 100:
            sample_size = 100
        # Deterministic pre-sort by doc_id, then random shuffle and take.
        df_sample = df_classified.sort('doc_id').withColumn('random', rand()).sort('random').limit(sample_size).drop('random')
        df_sample_num = df_sample.count()
        print("df_sample: ", df_sample_num)
        # separate non-sample from sample elements
        ids_drop = df_sample.select("doc_id")
        df_non_sample = df_classified.join(ids_drop, "doc_id", "left_anti")
        df_numbers["num_sample"].loc[i] = df_sample_num
        df_numbers["num_non_sample"].loc[i] = df_non_sample.count()
    else:
        # Small topics: validate every classified document.
        df_numbers["num_sample"].loc[i] = num_classified
        df_sample = df_classified
        df_non_sample = None
    # create table of all samples and add new sample to it
    if i == 1:
        df_sample_all = df_sample
    else:
        df_sample_all = df_sample_all.union(df_sample)
    #print("MTC:", i, "df_sample_all: ", df_sample_all.count())
    # create table of all non-samples and add new non-sample to it
    if i == 1:
        df_non_sample_all = None
    # NOTE(review): `!= None` / `== None` identity-style comparisons on
    # DataFrames — `is not None` / `is None` would be the idiomatic form.
    if df_non_sample != None and df_non_sample_all == None:
        df_non_sample_all = df_non_sample
    elif df_non_sample != None and df_non_sample_all != None:
        df_non_sample_all = df_non_sample_all.union(df_non_sample)
    #print("MTC:", i, "df_non_sample_all: ", df_non_sample_all.count())
    print("MTC:", i)
#################################################
# check precision by majortopic codes
#################################################
# count correctly classified and precision for each majortopic code and write to table of numbers
df_correctly_classified = df_sample_all.where(col('majortopic') == col('verdict'))
for i in majortopic_codes:
    num_correct = df_correctly_classified.where(col('verdict') == i).count()
    df_numbers["num_correct"].loc[i] = num_correct
    # Per-topic precision within the drawn sample.
    df_numbers["precision_in_sample"].loc[i] = num_correct/df_numbers["num_sample"].loc[i]
# count incorrectly classified for debugging and checking
df_incorrectly_classified = df_sample_all.where(col('majortopic') != col('verdict'))
for i in majortopic_codes:
    num_incorrect = df_incorrectly_classified.where(col('verdict') == i).count()
    df_numbers["num_incorrect"].loc[i] = num_incorrect
print(df_numbers)
#################################################
# create tables of elements based on precision
#################################################
# create tables for sorting elements based on precision results
# where precision is equal to or greater than 75%
# NOTE: validated wrongly classified elements will NOT be added to the results with the wrong major
# topic code, instead they will be added to the unclassified elements as in rounds 1&2
df_replace_all = None
# where precision is less than 75%
df_non_sample_replace = None
df_correct_replace = None
df_wrong_replace = None
# Route documents per topic: high-precision topics promote everything except
# validated negatives to training; low-precision topics promote only the
# validated positives.
for i in majortopic_codes:
    print("create tables MTC:", i)
    if df_numbers["precision_in_sample"].loc[i] >= 0.75:
        # in this case add all elements from sample and non-sample to the training set with
        # new major topic code i, EXCEPT for validated negatives, those are added to back into the
        # test set
        # first add wrong sample elements to their table
        df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
        if df_wrong_replace == None:
            df_wrong_replace = df_lemma
        else:
            df_wrong_replace = df_wrong_replace.union(df_lemma)
        # get doc_ids for these elements to remove them from the rest of the elements classified as
        # belonging to major topic i
        ids_drop = df_lemma.select("doc_id")
        # get all elements classified as belonging to major topic code i
        df_lemma = df_results.where(col('verdict') == i)
        # remove wrongly classified from df_lemma
        df_lemma = df_lemma.join(ids_drop, "doc_id", "left_anti")
        # add df_lemma to df_replace_all
        if df_replace_all == None:
            df_replace_all = df_lemma
        else:
            df_replace_all = df_replace_all.union(df_lemma)
        # write numbers to df_numbers
        df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
        #print("MTC:", i, "df_replace_all: ", df_replace_all.count())
    else:
        # in this case add only correct elements from sample to training set, the rest go back in
        # the test set
        # first add non-sample elements to their table, BUT we have to check whether non-sample elements
        # exist
        if df_non_sample_all != None:
            df_lemma = df_non_sample_all.where(col('verdict') == i)
            if df_non_sample_replace == None:
                df_non_sample_replace = df_lemma
            else:
                df_non_sample_replace = df_non_sample_replace.union(df_lemma)
        else:
            df_non_sample_replace = None
        #print("MTC:", i, "df_non_sample_replace: ", df_non_sample_replace.count())
        # second add correct sample elements to their table
        df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') == col('verdict'))
        if df_correct_replace == None:
            df_correct_replace = df_lemma
        else:
            df_correct_replace = df_correct_replace.union(df_lemma)
        df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
        #print("MTC:", i, "df_correct_replace: ", df_correct_replace.count())
        # finally add wrong sample elements to their table
        df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
        if df_wrong_replace == None:
            df_wrong_replace = df_lemma
        else:
            df_wrong_replace = df_wrong_replace.union(df_lemma)
        #print("MTC:", i, "df_wrong_replace: ", df_wrong_replace.count())
# --- Round-5 post-processing: rebuild the training/test split for round 6. ---
# The sample/non-sample tables built in the loop above are written to parquet,
# the cache is cleared, and the pieces are unioned back into df_original with
# updated 'majortopic' labels and a new 'train_r6' membership flag.
# sometimes there will be no major topic code with precision => 75%
# (PEP 8 / E711: compare to None with `is`, not `==`)
if df_replace_all is None:
    df_replace_all = "empty"
# sometimes there will be no non-sample elements
if df_non_sample_replace is None:
    df_non_sample_replace = "empty"
# the reason for creating these "empty" values, is because they will persist after we clear the
# cache, and we can use them later in the workflow control
# write all tables to parquet before clearing memory
df_correct_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet", mode="overwrite")
df_wrong_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet", mode="overwrite")
# sometimes there will be no non-sample elements
if df_non_sample_replace != "empty":
    df_non_sample_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet", mode="overwrite")
# sometimes there will be no major topic code with precision => 75%
if df_replace_all != "empty":
    df_replace_all.write.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet", mode="overwrite")
# write df_numbers to csv
df_numbers.to_csv("ML2_HV_v1_NYT_human_validation_numbers_r5.csv", index=True)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
#################################################
# prepare df_original to add tables to it
#################################################
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_train_and_remaining_NOTclassified.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_original = df_original.withColumnRenamed('majortopic', 'mtc_after_r4')
df_original = df_original.withColumn('majortopic', df_original['mtc_after_r4'])
# finally, create the new train id column
df_original = df_original.withColumn("train_r6", when(df_original["train_r5"] == 1, 1).otherwise(0))
#################################################
# add df_replace_all back to df_original
#################################################
if df_replace_all != "empty":
    print("df_replace_all is NOT empty")
    df_replace_all = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet").repartition(50)
    # we need to create a new majortopic column, because we are now adding back in elements with
    # potentially new labels
    df_replace_all = df_replace_all.withColumnRenamed('majortopic', 'mtc_after_r4')
    df_replace_all = df_replace_all.withColumn('majortopic', df_replace_all['verdict'])
    # create the new train id column
    df_replace_all = df_replace_all.withColumn("train_r6", lit(1))
    # drop the extra columns to be able to add it back to df_original
    df_replace_all = df_replace_all.drop('verdict')
    # add df_replace_all elements to df_original
    df_original = df_original.union(df_replace_all)
else:
    print("df_replace_all is empty")
#################################################
# add df_non_sample_replace back to df_original
#################################################
if df_non_sample_replace != "empty":
    print("df_non_sample_replace is NOT empty")
    df_non_sample_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet").repartition(50)
    # we need to create a new majortopic column, because we are now adding back in elements with
    # potentially new labels
    df_non_sample_replace = df_non_sample_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
    df_non_sample_replace = df_non_sample_replace.withColumn('majortopic', df_non_sample_replace['mtc_after_r4'])
    # create the new train id column
    df_non_sample_replace = df_non_sample_replace.withColumn("train_r6", lit(0))
    # drop the extra columns to be able to add it back to df_original
    df_non_sample_replace = df_non_sample_replace.drop('verdict')
    # add df_non_sample_replace elements to df_original
    df_original = df_original.union(df_non_sample_replace)
else:
    print("df_non_sample_replace is empty")
#################################################
# add df_correct_replace back to df_original
#################################################
df_correct_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_correct_replace = df_correct_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
df_correct_replace = df_correct_replace.withColumn('majortopic', df_correct_replace['verdict'])
# create the new train id column
df_correct_replace = df_correct_replace.withColumn("train_r6", lit(1))
# drop the extra columns to be able to add it back to df_original
df_correct_replace = df_correct_replace.drop('verdict')
# add df_correct_replace elements to df_original
df_original = df_original.union(df_correct_replace)
#################################################
# add df_wrong_replace back to df_original
#################################################
df_wrong_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_wrong_replace = df_wrong_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
df_wrong_replace = df_wrong_replace.withColumn('majortopic', df_wrong_replace['mtc_after_r4'])
# create the new train id column
df_wrong_replace = df_wrong_replace.withColumn("train_r6", lit(0))
# drop the extra columns to be able to add it back to df_original
df_wrong_replace = df_wrong_replace.drop('verdict')
# add df_wrong_replace elements to df_original
df_original = df_original.union(df_wrong_replace)
#################################################
# final write operations
#################################################
df_original.write.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet", mode="overwrite")
df_original.groupBy("train_r6").count().show(n=30)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
# write to pandas and export to csv for debugging
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet").repartition(50)
df_original = df_original.drop('text', 'words', 'features', 'raw_features').toPandas()
df_original.to_csv("ML2_HV_v1_NYT_round6_starting_table.csv", index=False)
sc.stop()
spark.stop()
| 44.221289
| 142
| 0.668905
|
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.types import *
from pyspark.sql.functions import col, count, lit, rand, when
import pandas as pd
from math import ceil
| true
| true
|
7907ad49a8a35139014ecd6e8154cf8ca8ebc04b
| 3,982
|
py
|
Python
|
slicr/resources/links.py
|
travisbyrum/slicr
|
d4d64c102478623022f68632adff070398a8771f
|
[
"MIT"
] | null | null | null |
slicr/resources/links.py
|
travisbyrum/slicr
|
d4d64c102478623022f68632adff070398a8771f
|
[
"MIT"
] | null | null | null |
slicr/resources/links.py
|
travisbyrum/slicr
|
d4d64c102478623022f68632adff070398a8771f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
slicr.resources.links
~~~~~~~~~~~~~~~~~~~~~
Slicr link resource.
:copyright: © 2018
"""
from flask import current_app
from flask_restful import Resource
from webargs import fields
from webargs.flaskparser import use_args
from slicr.models import Link, LinkSchema
from slicr.utils import convert_args
# Request-argument schema for LinkResource (parsed/validated by webargs):
# 'url' is mandatory; 'domain_id' defaults to None when absent.
link_args = {
    'url': fields.Str(required=True),
    'domain_id': fields.Int(missing=None)
}
# pylint: disable=R0201
class LinkResource(Resource):
    """REST resource for fetching and creating shortened links."""

    endpoints = ['/links', '/links/<int:link_id>']
    schema = LinkSchema()

    def get(self, link_id):
        """Get link resource.

        .. :quickref: Link collection.

        **Example request**:

        .. sourcecode:: http

            GET /links/1 HTTP/1.1
            Host: example.com
            Accept: application/json, text/javascript

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: text/javascript

            {
                "data": {
                    "clicks": 0,
                    "created": "2018-08-21T19:13:34.157470+00:00",
                    "short_link": "b",
                    "updated": null,
                    "url": "https://www.google.com"
                },
                "id": 1,
                "type": "links",
                "url": "/links"
            }

        :param int link_id: primary key of the link to fetch.
        :reqheader Accept: The response content type depends on
                           :mailheader:`Accept` header
        :reqheader Authorization: Optional authentication token.
        :resheader Content-Type: this depends on :mailheader:`Accept`
                                 header of request
        :statuscode 200: Link found
        :statuscode 404: Link not found
        """
        link = Link.query.filter_by(id=link_id).first()

        # Unknown ids previously fell through to ``link.id`` below and
        # crashed with AttributeError (HTTP 500); report a proper 404.
        if link is None:
            return {'message': 'link not found', 'id': link_id}, 404

        # marshmallow 2.x dump returns a (data, errors) pair; serialization
        # problems are logged but do not fail the request.
        link_data, errors = self.schema.dump(link)
        if errors:
            current_app.logger.warning(errors)

        response_out = {
            'id': link.id,
            'data': link_data,
            'url': '/links',
            'type': 'link'
        }

        return response_out, 200

    @use_args(link_args)
    def post(self, args):
        """Create shortened link.

        .. :quickref: Link collection.

        **Example request**:

        .. sourcecode:: http

            POST /links HTTP/1.1
            Host: example.com
            Accept: application/json, text/javascript

            {
                "url": "https://www.google.com"
            }

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 201 OK
            Vary: Accept
            Content-Type: text/javascript

            {
                "data": {
                    "clicks": 0,
                    "created": "2018-08-21T19:13:34.157470+00:00",
                    "short_link": "b",
                    "updated": null,
                    "url": "https://www.google.com"
                },
                "id": 1,
                "type": "links",
                "url": "/links"
            }

        :jsonparam string url: url for which to create short link.
        :reqheader Accept: The response content type depends on
                           :mailheader:`Accept` header
        :reqheader Authorization: Optional authentication token.
        :resheader Content-Type: this depends on :mailheader:`Accept`
                                 header of request
        :statuscode 201: Link created
        """
        args = convert_args(args)

        # ENCODER_SALT seeds the short-link encoder; presumably always set in
        # app config — int(None) would raise TypeError otherwise (TODO confirm).
        link = Link(
            url=args.url,
            domain_id=args.domain_id,
            salt=int(current_app.config.get('ENCODER_SALT'))
        ).save()

        link_data, errors = self.schema.dump(link)
        if errors:
            current_app.logger.warning(errors)

        response_out = {
            'id': link.id,
            'data': link_data,
            'url': '/links',
            'type': 'link'
        }

        return response_out, 201
| 24.732919
| 69
| 0.506781
|
from flask import current_app
from flask_restful import Resource
from webargs import fields
from webargs.flaskparser import use_args
from slicr.models import Link, LinkSchema
from slicr.utils import convert_args
link_args = {
'url': fields.Str(required=True),
'domain_id': fields.Int(missing=None)
}
class LinkResource(Resource):
endpoints = ['/links', '/links/<int:link_id>']
schema = LinkSchema()
def get(self, link_id):
link = Link.query.filter_by(id=link_id).first()
link_data, errors = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {
'id': link.id,
'data': link_data,
'url': '/links',
'type': 'link'
}
return response_out, 200
@use_args(link_args)
def post(self, args):
args = convert_args(args)
link = Link(
url=args.url,
domain_id=args.domain_id,
salt=int(current_app.config.get('ENCODER_SALT'))
).save()
link_data, errors = self.schema.dump(link)
if errors:
current_app.logger.warning(errors)
response_out = {
'id': link.id,
'data': link_data,
'url': '/links',
'type': 'link'
}
return response_out, 201
| true
| true
|
7907ad6b6a3588c246c97f800ac2258c1c835ccb
| 7,922
|
py
|
Python
|
python/fixrgraph/annotator/acdfgClass.py
|
LesleyLai/biggroum
|
e26de363e4bf4645dd5d90121742d3f3533f5a00
|
[
"Apache-2.0"
] | 7
|
2019-02-14T17:28:29.000Z
|
2021-01-11T07:12:34.000Z
|
python/fixrgraph/annotator/acdfgClass.py
|
LesleyLai/biggroum
|
e26de363e4bf4645dd5d90121742d3f3533f5a00
|
[
"Apache-2.0"
] | 23
|
2018-08-19T23:06:54.000Z
|
2020-04-14T08:21:05.000Z
|
python/fixrgraph/annotator/acdfgClass.py
|
LesleyLai/biggroum
|
e26de363e4bf4645dd5d90121742d3f3533f5a00
|
[
"Apache-2.0"
] | 4
|
2018-06-28T18:22:55.000Z
|
2019-03-21T06:36:56.000Z
|
'''
Acdfg class will have the class definitions for loading
and creating acdfg objects
'''
from __future__ import print_function
try:
from enum import Enum
except ImportError:
from enum34 import Enum
#import proto_acdfg
from protobuf.proto_acdfg_pb2 import Acdfg as ProtoAcdfg
import logging
# Kinds of ACDFG vertices; functional Enum form assigns the same 1-based
# values (regular_node=1, data_node=2, method_node=3) as the class form.
NodeType = Enum("NodeType", ["regular_node", "data_node", "method_node"])
# Kinds of ACDFG edges; functional Enum form assigns the same 1-based
# values (control=1 ... exceptional=5) as the class form.
EdgeType = Enum(
    "EdgeType",
    ["control_edge", "def_edge", "use_edge", "transitive_edge", "exceptional_edge"],
)
class Node:
    """Base class for ACDFG vertices: an integer id plus a NodeType tag.

    (Improvement: the commented-out duplicate ``__init__`` was dead code
    and has been removed.)
    """

    def __init__(self, node_type, key):
        """Store the node's type and integer id.

        :param node_type: a NodeType member classifying the node.
        :param key: unique integer id of the node within the graph.
        """
        self.node_type = node_type
        self.id = key
        assert isinstance(key, int)

    def get_type(self):
        """Return the NodeType of this node."""
        return self.node_type

    def get_id(self):
        """Return the integer id of this node."""
        return self.id

    def get_node_type_str(self):
        """Return a human-readable name for this node's type."""
        if (self.node_type == NodeType.regular_node):
            return "regular node"
        elif (self.node_type == NodeType.data_node):
            return "data node"
        elif (self.node_type == NodeType.method_node):
            return "method node"
        else:
            assert False, ' Unhandled node type'
class DataNode(Node):
    """ACDFG data vertex: a program variable (DATA_VAR) or constant (DATA_CONST)."""

    DATA_VAR = 0
    DATA_CONST = 1

    def __init__(self, key, name, data_type, data_type_type):
        """Build a data node from the protobuf fields.

        :param key: unique integer node id.
        :param name: name of the variable/constant.
        :param data_type: declared type of the value.
        :param data_type_type: protobuf DataType enum value (var vs const).
        """
        Node.__init__(self, NodeType.data_node, key)
        self.name = name
        self.data_type = data_type
        # Translate the protobuf enum into the class-level constants.
        proto_kind = ProtoAcdfg.DataNode.DataType.Name(data_type_type)
        if proto_kind == "DATA_VAR":
            self.data_type_type = DataNode.DATA_VAR
        elif proto_kind == "DATA_CONST":
            self.data_type_type = DataNode.DATA_CONST
        else:
            msg = "Cannot determine the type %s for data node" % (str(data_type_type))
            logging.error(msg)
            raise Exception(msg)
        logging.debug('DataNode: (%s,%s,%s,%s)' % (str(key), str(name),
                                                   str(data_type),
                                                   str(data_type_type)))

    def get_name(self):
        """Return the variable/constant name."""
        return self.name

    def get_data_type(self):
        """Return the declared type of the value."""
        return self.data_type

    def get_data_type_type(self):
        """Return DataNode.DATA_VAR or DataNode.DATA_CONST."""
        return self.data_type_type
class MethodNode(Node):
    """ACDFG method-invocation vertex: name, optional receiver, DataNode args."""

    def __init__(self, key, name, receiver, arg_list):
        """Build a method node.

        :param key: unique integer node id.
        :param name: method name.
        :param receiver: DataNode the method is invoked on, or None/falsy.
        :param arg_list: list of DataNode arguments.
        """
        Node.__init__(self, NodeType.method_node, key)
        self.name = name
        self.receiver = receiver
        self.arg_list = arg_list
        # Arguments (and the receiver, when present) must be DataNodes.
        for arg in arg_list:
            assert isinstance(arg, DataNode)
        if receiver:
            assert isinstance(receiver, DataNode)
        logging.debug(type(name))
        # NOTE(review): `unicode` only exists on Python 2 — on Python 3 this
        # assert raises NameError for non-str names; confirm the target
        # interpreter before relying on it.
        assert isinstance(name, str) or isinstance(name, unicode)
        logging.debug('Method Node: %s,%s' % (str(key), str(name)))

    def get_name(self):
        """Return the method name."""
        return self.name

    def get_receiver(self):
        """Return the receiver DataNode (or None)."""
        return self.receiver

    def get_args(self):
        """Return the list of argument DataNodes."""
        return self.arg_list
class Edge:
    """Base class for ACDFG edges: a typed, id'd link between two Nodes."""

    def __init__(self, edge_type, key, src, tgt):
        """Store the edge's type, id and endpoints (both must be Nodes)."""
        self.edge_type = edge_type
        self.id = key
        self.src, self.tgt = src, tgt
        assert isinstance(src, Node)
        assert isinstance(tgt, Node)

    def get_id(self):
        """Return the integer id of this edge."""
        return self.id

    def get_edge_type(self):
        """Return the EdgeType of this edge."""
        return self.edge_type
class DefEdge(Edge):
    """Definition edge: source node defines the target, which must be a DataNode."""
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.def_edge, key, src, tgt)
        assert isinstance(tgt, DataNode)
class UseEdge(Edge):
    """Use edge: the source DataNode is used by the target node."""
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.use_edge, key, src, tgt)
        assert isinstance(src, DataNode)
class ControlEdge(Edge):
    """Control-flow edge between two nodes."""
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.control_edge, key, src, tgt)
class TransitiveEdge(Edge):
    """Transitive control edge (closure over control-flow)."""
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.transitive_edge, key, src, tgt)
class ExceptionEdge(Edge):
    """Exceptional control-flow edge.

    NOTE(review): not constructed by read_acdfg below — confirm whether
    exceptional edges are intentionally unloaded.
    """
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.exceptional_edge, key, src, tgt)
class Acdfg:
    """In-memory ACDFG: indexes nodes and edges parsed from a protobuf object."""

    def __init__(self, acdfg_protobuf_obj):
        """Wrap the raw protobuf object and initialize empty node/edge indexes."""
        self.acdfg_protobuf = acdfg_protobuf_obj
        self.all_nodes = {}      # id -> Node (every node, any kind)
        self.data_nodes = {}     # id -> DataNode
        self.method_nodes = {}   # id -> MethodNode
        self.regular_nodes = {}  # id -> plain Node
        self.all_edges = {}      # id -> Edge

    def add_node(self, node):
        """Register a node, indexing it globally and by concrete type."""
        assert isinstance(node, Node), \
            'Only node objects can be added through add_node'
        key = node.get_id()
        assert key not in self.all_nodes, \
            'key %d for node already present' % key
        self.all_nodes[key] = node
        if isinstance(node, DataNode):
            self.data_nodes[key] = node
        elif isinstance(node, MethodNode):
            self.method_nodes[key] = node
        else:
            self.regular_nodes[key] = node

    def get_data_nodes(self):
        """Return the id -> DataNode index."""
        return self.data_nodes

    def get_method_nodes(self):
        """Return the id -> MethodNode index."""
        return self.method_nodes

    def add_edge(self, edge):
        """Register an edge under its unique id."""
        assert isinstance(edge, Edge)
        key = edge.get_id()
        assert key not in self.all_edges, 'key %d for edge already present' % key
        self.all_edges[key] = edge

    def get_node_from_id(self, id):
        """Return the node with the given id; assert-fail if unknown.

        Improvement: add_node keeps all_nodes in sync with the per-type
        dicts, so one lookup replaces the original three-way dict chain.
        """
        if id in self.all_nodes:
            return self.all_nodes[id]
        else:
            assert False, 'ID: %d not found' % (id)
def get_node_obj_from_ids(acdfg_obj, proto_edge):
    """Resolve a protobuf edge's endpoint ids to (src, tgt) node objects."""
    # 'from' is a Python keyword, so the protobuf field needs getattr().
    src_id = getattr(proto_edge, 'from')
    tgt_id = proto_edge.to
    return acdfg_obj.get_node_from_id(src_id), acdfg_obj.get_node_from_id(tgt_id)
def read_acdfg(filename):
    """Parse a protobuf-serialized ACDFG file into an Acdfg object.

    Nodes are loaded before edges because edge endpoints and method
    arguments are resolved through node ids.

    :param filename: path to the binary protobuf file.
    :return: the populated Acdfg object (assert-fails on IOError).
    """
    try:
        # `with` guarantees the file is closed even if parsing raises
        # (the original left it open on a parse failure).
        with open(filename, 'rb') as f:
            acdfg = ProtoAcdfg()  # create a new acdfg
            acdfg.ParseFromString(f.read())
        acdfg_obj = Acdfg(acdfg)
        for dNode in acdfg.data_node:
            data_node_obj = DataNode(int(getattr(dNode, 'id')),
                                     dNode.name,
                                     getattr(dNode, 'type'),
                                     dNode.data_type)
            acdfg_obj.add_node(data_node_obj)
        for mNode in acdfg.method_node:
            arg_list = [acdfg_obj.get_node_from_id(j) for j in mNode.argument]
            if mNode.invokee:
                rcv = acdfg_obj.get_node_from_id(mNode.invokee)
            else:
                rcv = None
            acdfg_obj.add_node(MethodNode(int(mNode.id), mNode.name, rcv, arg_list))
        for rNode in acdfg.misc_node:
            acdfg_obj.add_node(Node(NodeType.regular_node, int(rNode.id)))
        for ctrl_edge in acdfg.control_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, ctrl_edge)
            acdfg_obj.add_edge(ControlEdge(ctrl_edge.id, src, tgt))
        for dedge in acdfg.def_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, dedge)
            # Bug fix: def edges were previously constructed as ControlEdge,
            # losing their edge type and the DataNode-target check.
            acdfg_obj.add_edge(DefEdge(dedge.id, src, tgt))
        for uedge in acdfg.use_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, uedge)
            acdfg_obj.add_edge(UseEdge(uedge.id, src, tgt))
        for tedge in acdfg.trans_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, tedge)
            acdfg_obj.add_edge(TransitiveEdge(tedge.id, src, tgt))
        # NOTE(review): acdfg.exceptional_edge is never loaded, so
        # ExceptionEdge stays unused here — confirm this is intentional.
        return acdfg_obj
    except IOError:
        print('Could not open: ', filename, 'for reading in binary mode.')
        assert False
| 30.705426
| 97
| 0.611083
|
from __future__ import print_function
try:
from enum import Enum
except ImportError:
from enum34 import Enum
from protobuf.proto_acdfg_pb2 import Acdfg as ProtoAcdfg
import logging
class NodeType(Enum):
regular_node = 1
data_node = 2
method_node = 3
class EdgeType(Enum):
control_edge = 1
def_edge = 2
use_edge = 3
transitive_edge = 4
exceptional_edge = 5
class Node:
def __init__(self, node_type, key):
self.node_type = node_type
self.id = key
assert isinstance(key, int)
def get_type(self):
return self.node_type
def get_id(self):
return self.id
def get_node_type_str(self):
if (self.node_type == NodeType.regular_node):
return "regular node"
elif (self.node_type == NodeType.data_node):
return "data node"
elif (self.node_type == NodeType.method_node):
return "method node"
else:
assert False, ' Unhandled node type'
class DataNode(Node):
DATA_VAR = 0
DATA_CONST = 1
def __init__(self, key, name, data_type, data_type_type):
Node.__init__(self, NodeType.data_node, key)
self.name = name
self.data_type = data_type
if ("DATA_VAR" == ProtoAcdfg.DataNode.DataType.Name(data_type_type)):
self.data_type_type = DataNode.DATA_VAR
elif ("DATA_CONST" == ProtoAcdfg.DataNode.DataType.Name(data_type_type)):
self.data_type_type = DataNode.DATA_CONST
else:
logging.error("Cannot determine the type %s for data node" % (str(data_type_type)))
raise Exception("Cannot determine the type %s for data node" % (str(data_type_type)))
logging.debug('DataNode: (%s,%s,%s,%s)' % (str(key), str(name),
str(data_type),
str(data_type_type)))
def get_name(self):
return self.name
def get_data_type(self):
return self.data_type
def get_data_type_type(self):
return self.data_type_type
class MethodNode(Node):
def __init__(self, key, name, receiver, arg_list):
Node.__init__(self, NodeType.method_node, key)
self.name = name
self.receiver = receiver
self.arg_list = arg_list
for a in arg_list:
assert isinstance(a, DataNode)
if receiver:
assert isinstance(receiver, DataNode)
logging.debug(type(name))
assert isinstance(name, str) or isinstance(name, unicode)
logging.debug('Method Node: %s,%s' % (str(key), str(name)))
def get_name(self):
return self.name
def get_receiver(self):
return self.receiver
def get_args(self):
return self.arg_list
class Edge:
def __init__(self, edge_type, key, src, tgt):
self.edge_type = edge_type
self.id = key
self.src = src
self.tgt = tgt
assert isinstance(src, Node)
assert isinstance(tgt, Node)
def get_id(self):
return self.id
def get_edge_type(self):
return self.edge_type
class DefEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.def_edge, key, src, tgt)
assert isinstance(tgt, DataNode)
class UseEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.use_edge, key, src, tgt)
assert isinstance(src, DataNode)
class ControlEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.control_edge, key, src, tgt)
class TransitiveEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.transitive_edge, key, src, tgt)
class ExceptionEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.exceptional_edge, key, src, tgt)
class Acdfg:
def __init__(self, acdfg_protobuf_obj):
self.acdfg_protobuf = acdfg_protobuf_obj
self.all_nodes = {}
self.data_nodes = {}
self.method_nodes = {}
self.regular_nodes = {}
self.all_edges = {}
def add_node(self, node):
assert isinstance(node, Node), \
'Only node objects can be added through add_node'
key = node.get_id()
assert key not in self.all_nodes, \
'key %d for node already present'%key
self.all_nodes[key] = node
if isinstance(node, DataNode):
self.data_nodes[key] = node
elif isinstance(node, MethodNode):
self.method_nodes[key] = node
else:
self.regular_nodes[key] = node
def get_data_nodes(self):
return self.data_nodes
def get_method_nodes(self):
return self.method_nodes
def add_edge(self, edge):
assert isinstance(edge, Edge)
key = edge.get_id()
assert key not in self.all_edges, 'key %d for edge already present'%key
self.all_edges[key] = edge
def get_node_from_id(self, id):
if id in self.data_nodes:
return self.data_nodes[id]
elif id in self.method_nodes:
return self.method_nodes[id]
elif id in self.regular_nodes:
return self.regular_nodes[id]
else:
assert False, 'ID: %d not found'%(id)
def get_node_obj_from_ids(acdfg_obj, proto_edge):
src = acdfg_obj.get_node_from_id(getattr(proto_edge, 'from'))
tgt = acdfg_obj.get_node_from_id(proto_edge.to)
return src, tgt
def read_acdfg(filename):
try:
f = open(filename, 'rb')
acdfg = ProtoAcdfg()
acdfg.ParseFromString(f.read())
acdfg_obj = Acdfg(acdfg)
for dNode in acdfg.data_node:
data_node_obj = DataNode(int ( getattr(dNode,'id') ),
dNode.name,
getattr(dNode,'type'),
dNode.data_type)
acdfg_obj.add_node(data_node_obj)
for mNode in acdfg.method_node:
arg_ids = mNode.argument
arg_list = [acdfg_obj.get_node_from_id(j) for j in arg_ids]
if mNode.invokee:
rcv = acdfg_obj.get_node_from_id(mNode.invokee)
else:
rcv = None
method_node_obj = MethodNode(int(mNode.id), mNode.name, rcv, arg_list)
acdfg_obj.add_node(method_node_obj)
for rNode in acdfg.misc_node:
misc_node_obj = Node(NodeType.regular_node,int(rNode.id))
acdfg_obj.add_node(misc_node_obj)
for ctrl_edge in acdfg.control_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, ctrl_edge)
cedge_obj = ControlEdge(ctrl_edge.id, src, tgt)
acdfg_obj.add_edge(cedge_obj)
for dedge in acdfg.def_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, dedge)
dedge_obj = ControlEdge(dedge.id, src, tgt)
acdfg_obj.add_edge(dedge_obj)
for uedge in acdfg.use_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, uedge)
uedge_obj = UseEdge(uedge.id, src, tgt)
acdfg_obj.add_edge(uedge_obj)
for tedge in acdfg.trans_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, tedge)
tedge_obj = TransitiveEdge(tedge.id, src, tgt)
acdfg_obj.add_edge(tedge_obj)
f.close()
return acdfg_obj
except IOError:
print('Could not open: ', filename, 'for reading in binary mode.')
assert False
| true
| true
|
7907ad7f651896a977c6f97ab0451c2bb9750752
| 17,894
|
py
|
Python
|
base2designs/utils/np_box_list_ops_test.py
|
sethusaim/Automatic-Number-Plate-Recognition
|
8b26008f8511e52600b150157901079e0fd0ebfe
|
[
"MIT"
] | null | null | null |
base2designs/utils/np_box_list_ops_test.py
|
sethusaim/Automatic-Number-Plate-Recognition
|
8b26008f8511e52600b150157901079e0fd0ebfe
|
[
"MIT"
] | null | null | null |
base2designs/utils/np_box_list_ops_test.py
|
sethusaim/Automatic-Number-Plate-Recognition
|
8b26008f8511e52600b150157901079e0fd0ebfe
|
[
"MIT"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
    """Tests for np_box_list_ops geometry ops (area, IoU, clipping, etc.).

    Boxes are [ymin, xmin, ymax, xmax] per np_box_list convention —
    TODO confirm against np_box_list.BoxList docs.
    """
    def setUp(self):
        # Two fixed box lists shared by the area/intersection/iou tests.
        boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=float)
        boxes2 = np.array(
            [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.boxlist1 = np_box_list.BoxList(boxes1)
        self.boxlist2 = np_box_list.BoxList(boxes2)
    def test_area(self):
        """Areas are height * width of each box."""
        areas = np_box_list_ops.area(self.boxlist1)
        expected_areas = np.array([6.0, 5.0], dtype=float)
        self.assertAllClose(expected_areas, areas)
    def test_intersection(self):
        """Pairwise intersection areas between boxlist1 and boxlist2."""
        intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
        expected_intersection = np.array(
            [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]], dtype=float
        )
        self.assertAllClose(intersection, expected_intersection)
    def test_iou(self):
        """Pairwise intersection-over-union between boxlist1 and boxlist2."""
        iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
        expected_iou = np.array(
            [[2.0 / 16.0, 0.0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]],
            dtype=float,
        )
        self.assertAllClose(iou, expected_iou)
    def test_ioa(self):
        """Intersection-over-area: intersection normalized by the second list's areas."""
        boxlist1 = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist2 = np_box_list.BoxList(
            np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
        )
        ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
        expected_ioa21 = np.array([[0.5, 0.0], [1.0, 1.0]], dtype=np.float32)
        self.assertAllClose(ioa21, expected_ioa21)
    def test_scale(self):
        """scale() multiplies y coordinates by y_scale and x by x_scale."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
        expected_boxlist_scaled = np_box_list.BoxList(
            np.array([[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32)
        )
        self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
    def test_clip_to_window(self):
        """Boxes are clamped to the window; out-of-window parts are cut off."""
        boxlist = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [-0.2, -0.3, 0.7, 1.5],
                ],
                dtype=np.float32,
            )
        )
        boxlist_clipped = np_box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0])
        expected_boxlist_clipped = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.0, 0.0, 0.7, 1.0]],
                dtype=np.float32,
            )
        )
        self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
    def test_prune_outside_window(self):
        """Boxes not fully inside the window are dropped (not clipped)."""
        boxlist = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [-0.2, -0.3, 0.7, 1.5],
                ],
                dtype=np.float32,
            )
        )
        boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
            boxlist, [0.0, 0.0, 1.0, 1.0]
        )
        expected_boxlist_pruned = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
    def test_concatenate(self):
        """concatenate() stacks box lists in order."""
        boxlist1 = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist2 = np_box_list.BoxList(
            np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
        )
        boxlists = [boxlist1, boxlist2]
        boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
        boxlist_concatenated_expected = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [0.5, 0.25, 1.0, 1.0],
                    [0.0, 0.0, 1.0, 1.0],
                ],
                dtype=np.float32,
            )
        )
        self.assertAllClose(
            boxlist_concatenated_expected.get(), boxlist_concatenated.get()
        )
    def test_change_coordinate_frame(self):
        """Coordinates are re-expressed relative to the given window."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist_coord = np_box_list_ops.change_coordinate_frame(
            boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32)
        )
        expected_boxlist_coord = np_box_list.BoxList(
            np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32)
        )
        self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
    def test_filter_scores_greater_than(self):
        """Only boxes whose 'scores' field exceeds the threshold are kept."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist.add_field("scores", np.array([0.8, 0.2], np.float32))
        boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
        expected_boxlist_greater = np_box_list.BoxList(
            np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32)
        )
        self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
    """Tests for np_box_list_ops.gather: index validation and field selection."""
    def setUp(self):
        # A three-box list with 'scores' and one-hot 'labels' fields.
        boxes = np.array(
            [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.boxlist = np_box_list.BoxList(boxes)
        self.boxlist.add_field("scores", np.array([0.5, 0.7, 0.9], dtype=float))
        self.boxlist.add_field(
            "labels",
            np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
        )
    def test_gather_with_out_of_range_indices(self):
        """Indices beyond the number of boxes must raise ValueError."""
        indices = np.array([3, 1], dtype=int)
        boxlist = self.boxlist
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices)
    def test_gather_with_invalid_multidimensional_indices(self):
        """Indices must be 1-D; 2-D index arrays must raise ValueError."""
        indices = np.array([[0, 1], [1, 2]], dtype=int)
        boxlist = self.boxlist
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices)
    def test_gather_without_fields_specified(self):
        """Without a fields argument, all fields are gathered and reordered."""
        indices = np.array([2, 0, 1], dtype=int)
        boxlist = self.boxlist
        subboxlist = np_box_list_ops.gather(boxlist, indices)
        expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
        self.assertAllClose(expected_scores, subboxlist.get_field("scores"))
        expected_boxes = np.array(
            [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, subboxlist.get())
        expected_labels = np.array(
            [[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
        )
        self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
    def test_gather_with_invalid_field_specified(self):
        """Fields must be passed as a list of existing field names."""
        indices = np.array([2, 0, 1], dtype=int)
        boxlist = self.boxlist
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices, "labels")
        with self.assertRaises(ValueError):
            np_box_list_ops.gather(boxlist, indices, ["objectness"])
    def test_gather_with_fields_specified(self):
        """Only the requested fields are carried over to the gathered list."""
        indices = np.array([2, 0, 1], dtype=int)
        boxlist = self.boxlist
        subboxlist = np_box_list_ops.gather(boxlist, indices, ["labels"])
        self.assertFalse(subboxlist.has_field("scores"))
        expected_boxes = np.array(
            [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, subboxlist.get())
        expected_labels = np.array(
            [[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
        )
        self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
class SortByFieldTest(tf.test.TestCase):
    """Tests for np_box_list_ops.sort_by_field: validation and both sort orders."""
    def setUp(self):
        # "scores" is sortable (1-D scalar per box); "labels" is multi-
        # dimensional and therefore an invalid sort key.
        boxes = np.array(
            [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.boxlist = np_box_list.BoxList(boxes)
        self.boxlist.add_field("scores", np.array([0.5, 0.9, 0.4], dtype=float))
        self.boxlist.add_field(
            "labels",
            np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
        )
    def test_with_invalid_field(self):
        """Unknown field names and non-scalar fields both raise."""
        with self.assertRaises(ValueError):
            np_box_list_ops.sort_by_field(self.boxlist, "objectness")
        with self.assertRaises(ValueError):
            np_box_list_ops.sort_by_field(self.boxlist, "labels")
    def test_with_invalid_sorting_order(self):
        # The order argument must be a SortOrder enum value, not a string.
        with self.assertRaises(ValueError):
            np_box_list_ops.sort_by_field(self.boxlist, "scores", "Descending")
    def test_with_descending_sorting(self):
        """Default order sorts boxes by score, highest first."""
        sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, "scores")
        expected_boxes = np.array(
            [[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, sorted_boxlist.get())
        expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
        self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
    def test_with_ascending_sorting(self):
        """SortOrder.ASCEND sorts boxes by score, lowest first."""
        sorted_boxlist = np_box_list_ops.sort_by_field(
            self.boxlist, "scores", np_box_list_ops.SortOrder.ASCEND
        )
        expected_boxes = np.array(
            [[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],],
            dtype=float,
        )
        self.assertAllClose(expected_boxes, sorted_boxlist.get())
        expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
        self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
class NonMaximumSuppressionTest(tf.test.TestCase):
    """Tests for (multi-class) non-max suppression over numpy box lists."""
    def setUp(self):
        # Six boxes forming three overlap clusters: three around x in [0, 1],
        # two around x in [10, 11], and one isolated at x in [100, 101].
        self._boxes = np.array(
            [
                [0, 0, 1, 1],
                [0, 0.1, 1, 1.1],
                [0, -0.1, 1, 0.9],
                [0, 10, 1, 11],
                [0, 10.1, 1, 11.1],
                [0, 100, 1, 101],
            ],
            dtype=float,
        )
        self._boxlist = np_box_list.BoxList(self._boxes)
    def test_with_no_scores_field(self):
        """NMS requires a "scores" field; without one it must raise."""
        boxlist = np_box_list.BoxList(self._boxes)
        max_output_size = 3
        iou_threshold = 0.5
        with self.assertRaises(ValueError):
            np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
    def test_nms_disabled_max_output_size_equals_three(self):
        """IOU threshold 1.0 suppresses nothing; only max_output_size trims."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
        )
        max_output_size = 3
        iou_threshold = 1.0  # No NMS
        expected_boxes = np.array(
            [[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_from_three_clusters(self):
        """One surviving box per overlap cluster, in descending score order."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
        )
        max_output_size = 3
        iou_threshold = 0.5
        expected_boxes = np.array(
            [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_at_most_two_from_three_clusters(self):
        """max_output_size=2 keeps only the two highest-scoring survivors."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
        )
        max_output_size = 2
        iou_threshold = 0.5
        expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_at_most_thirty_from_three_clusters(self):
        """A generous max_output_size still returns only the three survivors."""
        boxlist = np_box_list.BoxList(self._boxes)
        boxlist.add_field(
            "scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
        )
        max_output_size = 30
        iou_threshold = 0.5
        expected_boxes = np.array(
            [[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_select_from_ten_indentical_boxes(self):
        # NOTE(review): "indentical" typo in the test name (left as-is so the
        # test keeps its recorded identity).
        boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
        boxlist = np_box_list.BoxList(boxes)
        boxlist.add_field("scores", np.array(10 * [0.8]))
        iou_threshold = 0.5
        max_output_size = 3
        expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_different_iou_threshold(self):
        """Raising the IOU threshold progressively suppresses less."""
        boxes = np.array(
            [
                [0, 0, 20, 100],
                [0, 0, 20, 80],
                [200, 200, 210, 300],
                [200, 200, 210, 250],
            ],
            dtype=float,
        )
        boxlist = np_box_list.BoxList(boxes)
        boxlist.add_field("scores", np.array([0.9, 0.8, 0.7, 0.6]))
        max_output_size = 4
        iou_threshold = 0.4
        expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],], dtype=float)
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
        iou_threshold = 0.5
        expected_boxes = np.array(
            [[0, 0, 20, 100], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
        iou_threshold = 0.8
        expected_boxes = np.array(
            [
                [0, 0, 20, 100],
                [0, 0, 20, 80],
                [200, 200, 210, 300],
                [200, 200, 210, 250],
            ],
            dtype=float,
        )
        nms_boxlist = np_box_list_ops.non_max_suppression(
            boxlist, max_output_size, iou_threshold
        )
        self.assertAllClose(nms_boxlist.get(), expected_boxes)
    def test_multiclass_nms(self):
        """Per-class NMS: thresholds scores, then suppresses within each class."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
                dtype=np.float32,
            )
        )
        # One row of per-class scores per box (3 boxes x 5 classes).
        scores = np.array(
            [
                [-0.2, 0.1, 0.5, -0.4, 0.3],
                [0.7, -0.7, 0.6, 0.2, -0.9],
                [0.4, 0.34, -0.9, 0.2, 0.31],
            ],
            dtype=np.float32,
        )
        boxlist.add_field("scores", scores)
        boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
            boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3
        )
        scores_clean = boxlist_clean.get_field("scores")
        classes_clean = boxlist_clean.get_field("classes")
        boxes = boxlist_clean.get()
        expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
        expected_classes = np.array([0, 2, 1, 4])
        expected_boxes = np.array(
            [
                [0.4, 0.2, 0.8, 0.8],
                [0.4, 0.2, 0.8, 0.8],
                [0.6, 0.0, 1.0, 1.0],
                [0.6, 0.0, 1.0, 1.0],
            ],
            dtype=np.float32,
        )
        self.assertAllClose(scores_clean, expected_scores)
        self.assertAllClose(classes_clean, expected_classes)
        self.assertAllClose(boxes, expected_boxes)
# Running this module directly executes all the test cases above.
if __name__ == "__main__":
    tf.test.main()
| 36.743326
| 88
| 0.555941
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
    """Tests for geometric box-list ops: area/overlap metrics and box transforms."""
    def setUp(self):
        # Two small corner-form box sets used by the area / intersection /
        # IOU tests below.
        boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]], dtype=float)
        boxes2 = np.array(
            [[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
            dtype=float,
        )
        self.boxlist1 = np_box_list.BoxList(boxes1)
        self.boxlist2 = np_box_list.BoxList(boxes2)
    def test_area(self):
        """Per-box area of boxlist1: 3x2=6 and 5x1=5."""
        areas = np_box_list_ops.area(self.boxlist1)
        expected_areas = np.array([6.0, 5.0], dtype=float)
        self.assertAllClose(expected_areas, areas)
    def test_intersection(self):
        """Pairwise intersection areas form a 2x3 matrix (boxlist1 x boxlist2)."""
        intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
        expected_intersection = np.array(
            [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]], dtype=float
        )
        self.assertAllClose(intersection, expected_intersection)
    def test_iou(self):
        """Pairwise intersection-over-union for the same 2x3 pairing."""
        iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
        expected_iou = np.array(
            [[2.0 / 16.0, 0.0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]],
            dtype=float,
        )
        self.assertAllClose(iou, expected_iou)
    def test_ioa(self):
        """Intersection-over-area is asymmetric: normalized by the second set."""
        boxlist1 = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist2 = np_box_list.BoxList(
            np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
        )
        ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
        expected_ioa21 = np.array([[0.5, 0.0], [1.0, 1.0]], dtype=np.float32)
        self.assertAllClose(ioa21, expected_ioa21)
    def test_scale(self):
        """Scaling multiplies the two coordinate axes independently (2x and 3x)."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
        expected_boxlist_scaled = np_box_list.BoxList(
            np.array([[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32)
        )
        self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
    def test_clip_to_window(self):
        """Coordinates outside the window are clamped to its edges."""
        boxlist = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [-0.2, -0.3, 0.7, 1.5],
                ],
                dtype=np.float32,
            )
        )
        boxlist_clipped = np_box_list_ops.clip_to_window(boxlist, [0.0, 0.0, 1.0, 1.0])
        expected_boxlist_clipped = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.0, 0.0, 0.7, 1.0]],
                dtype=np.float32,
            )
        )
        self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
    def test_prune_outside_window(self):
        """Boxes extending beyond the window are dropped, not clipped."""
        boxlist = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [-0.2, -0.3, 0.7, 1.5],
                ],
                dtype=np.float32,
            )
        )
        boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
            boxlist, [0.0, 0.0, 1.0, 1.0]
        )
        expected_boxlist_pruned = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
    def test_concatenate(self):
        """Concatenation stacks the boxes of multiple lists in order."""
        boxlist1 = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist2 = np_box_list.BoxList(
            np.array([[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
        )
        boxlists = [boxlist1, boxlist2]
        boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
        boxlist_concatenated_expected = np_box_list.BoxList(
            np.array(
                [
                    [0.25, 0.25, 0.75, 0.75],
                    [0.0, 0.0, 0.5, 0.75],
                    [0.5, 0.25, 1.0, 1.0],
                    [0.0, 0.0, 1.0, 1.0],
                ],
                dtype=np.float32,
            )
        )
        self.assertAllClose(
            boxlist_concatenated_expected.get(), boxlist_concatenated.get()
        )
    def test_change_coordinate_frame(self):
        """Boxes are re-expressed relative to the given window [0, 0, 0.5, 0.5]."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist_coord = np_box_list_ops.change_coordinate_frame(
            boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32)
        )
        expected_boxlist_coord = np_box_list.BoxList(
            np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32)
        )
        self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
    def test_filter_scores_greater_than(self):
        """Only boxes whose score strictly exceeds the threshold survive."""
        boxlist = np_box_list.BoxList(
            np.array(
                [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32
            )
        )
        boxlist.add_field("scores", np.array([0.8, 0.2], np.float32))
        boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
        expected_boxlist_greater = np_box_list.BoxList(
            np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32)
        )
        self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
def setUp(self):
boxes = np.array(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
dtype=float,
)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field("scores", np.array([0.5, 0.7, 0.9], dtype=float))
self.boxlist.add_field(
"labels",
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
)
def test_gather_with_out_of_range_indices(self):
indices = np.array([3, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_with_invalid_multidimensional_indices(self):
indices = np.array([[0, 1], [1, 2]], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_without_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices)
expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
self.assertAllClose(expected_scores, subboxlist.get_field("scores"))
expected_boxes = np.array(
[[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
dtype=float,
)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array(
[[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
)
self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
def test_gather_with_invalid_field_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, "labels")
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, ["objectness"])
def test_gather_with_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices, ["labels"])
self.assertFalse(subboxlist.has_field("scores"))
expected_boxes = np.array(
[[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]],
dtype=float,
)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array(
[[0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0]], dtype=int
)
self.assertAllClose(expected_labels, subboxlist.get_field("labels"))
class SortByFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array(
[[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]],
dtype=float,
)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field("scores", np.array([0.5, 0.9, 0.4], dtype=float))
self.boxlist.add_field(
"labels",
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int),
)
def test_with_invalid_field(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, "objectness")
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, "labels")
def test_with_invalid_sorting_order(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, "scores", "Descending")
def test_with_descending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, "scores")
expected_boxes = np.array(
[[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0], [0.0, 0.0, 20.0, 20.0]],
dtype=float,
)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
def test_with_ascending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(
self.boxlist, "scores", np_box_list_ops.SortOrder.ASCEND
)
expected_boxes = np.array(
[[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],],
dtype=float,
)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field("scores"))
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array(
[
[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101],
],
dtype=float,
)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(self):
boxlist = np_box_list.BoxList(self._boxes)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_list_ops.non_max_suppression(boxlist, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_three(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
)
max_output_size = 3
iou_threshold = 1.0
expected_boxes = np.array(
[[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.2, 0.3], dtype=float)
)
max_output_size = 3
iou_threshold = 0.5
expected_boxes = np.array(
[[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_two_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
)
max_output_size = 2
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_thirty_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field(
"scores", np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3], dtype=float)
)
max_output_size = 30
iou_threshold = 0.5
expected_boxes = np.array(
[[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_ten_indentical_boxes(self):
boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field("scores", np.array(10 * [0.8]))
iou_threshold = 0.5
max_output_size = 3
expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_different_iou_threshold(self):
boxes = np.array(
[
[0, 0, 20, 100],
[0, 0, 20, 80],
[200, 200, 210, 300],
[200, 200, 210, 250],
],
dtype=float,
)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field("scores", np.array([0.9, 0.8, 0.7, 0.6]))
max_output_size = 4
iou_threshold = 0.4
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = 0.5
expected_boxes = np.array(
[[0, 0, 20, 100], [200, 200, 210, 300], [200, 200, 210, 250]], dtype=float
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = 0.8
expected_boxes = np.array(
[
[0, 0, 20, 100],
[0, 0, 20, 80],
[200, 200, 210, 300],
[200, 200, 210, 250],
],
dtype=float,
)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold
)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_multiclass_nms(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32,
)
)
scores = np.array(
[
[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31],
],
dtype=np.float32,
)
boxlist.add_field("scores", scores)
boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3
)
scores_clean = boxlist_clean.get_field("scores")
classes_clean = boxlist_clean.get_field("classes")
boxes = boxlist_clean.get()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array(
[
[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0],
],
dtype=np.float32,
)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == "__main__":
tf.test.main()
| true
| true
|
7907adb19c92d9d0bd748e6b1c7ac20f5b14e6a9
| 4,658
|
py
|
Python
|
scripts/neural_net_workshop.py
|
Henrynaut/ML
|
47ca3a67948ed8190a31a57d1e9a803ca532938b
|
[
"MIT"
] | null | null | null |
scripts/neural_net_workshop.py
|
Henrynaut/ML
|
47ca3a67948ed8190a31a57d1e9a803ca532938b
|
[
"MIT"
] | null | null | null |
scripts/neural_net_workshop.py
|
Henrynaut/ML
|
47ca3a67948ed8190a31a57d1e9a803ca532938b
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
random.seed(200)
# Create Sigmoid Function
def sig(inp):
    """Logistic sigmoid activation: maps any real input into (0, 1)."""
    return 1 / (1 + np.exp(-inp))
# For Back Propagation, make Desigmoid function
def dsig(inp):
    """Sigmoid derivative, expressed in terms of the sigmoid's OUTPUT value."""
    return inp * (1.0 - inp)
# Define class for neuron
class Neuron:
    """A single unit: weight vector (bias stored last) plus activation funcs."""
    def __init__(self, weights, func, dfunc):
        # func is the activation function; dfunc its derivative, used later
        # during back-propagation.
        self.weights = weights
        self.func = func
        self.dfunc = dfunc
        self.output = None  # last activation value (set by activation())
        self.delta = None   # back-propagated error term (set by back_prop)
    def agr(self, x):
        """Aggregate: weighted sum of inputs `x` plus the trailing bias."""
        w = self.weights
        return np.inner(w.copy()[:-1], x) + w[-1]
    def activation(self, inp):
        """Apply the activation function to `inp` and cache the result."""
        self.output = self.func(inp)
        return self.output
# Definition for weights
def gen_weights(dim):
    """Draw uniform weights in [-0.1, 0.1]: one per input plus a trailing bias."""
    return np.random.uniform(-0.1, 0.1, dim + 1)
# Definition of the actual network
# Activations correspond to activation funcitons used
def gen_net(structure, activations):
    """Build a fully-connected network as a list of Neuron layers.

    structure: neuron counts per layer; index 0 is the input size.
    activations: pair (funcs, dfuncs), one entry per non-input layer.
    """
    layers = []
    for layer_no in range(1, len(structure)):
        func = activations[0][layer_no - 1]
        dfunc = activations[1][layer_no - 1]
        # Each neuron gets one weight per neuron of the previous layer
        # (gen_weights appends the bias slot itself).
        layers.append([
            Neuron(gen_weights(structure[layer_no - 1]), func, dfunc)
            for _ in range(structure[layer_no])
        ])
    return layers
# Define feed forward
def feed_fwd(net, inp):
    """Propagate `inp` through every layer; return the final layer's outputs."""
    signal = inp
    for layer in net:
        # Each neuron aggregates the previous layer's signal, then activates;
        # activation() also caches the value on the neuron for back-prop.
        signal = [neuron.activation(neuron.agr(signal)) for neuron in layer]
    return signal
# Define back propagation
def back_prop(net, target):
    """Store each neuron's delta, walking layers from output back to input.

    Output layer: error is the plain difference from `target`.
    Hidden layers: error is the delta-weighted sum over the next layer.
    Each delta is error * dfunc(cached output). Returns `net`.
    """
    last = len(net) - 1
    for ind in reversed(range(len(net))):
        layer = net[ind]
        if ind == last:
            errors = [target[j] - neuron.output for j, neuron in enumerate(layer)]
        else:
            errors = [
                sum(nxt.weights[j] * nxt.delta for nxt in net[ind + 1])
                for j in range(len(layer))
            ]
        for j, neuron in enumerate(layer):
            neuron.delta = errors[j] * neuron.dfunc(neuron.output)
    return net
# Define how much to update the weights by everytime
# Alpha is the learning rate, but if too high it may overshoot
def update_weights(net, inp, alpha):
    """Apply one gradient step using each neuron's stored delta.

    Weight j changes by alpha * delta * input_j; the trailing bias weight
    changes by alpha * delta. `inp` feeds the first layer; deeper layers
    use the previous layer's cached outputs.
    """
    layer_inputs = inp
    for layer_no, layer in enumerate(net):
        if layer_no > 0:
            layer_inputs = [prev.output for prev in net[layer_no - 1]]
        for neuron in layer:
            for j, value in enumerate(layer_inputs):
                neuron.weights[j] += alpha * neuron.delta * value
            neuron.weights[-1] += alpha * neuron.delta  # bias term
#Define training approach
def train(net, train_data, alpha, epoch):
    """Train `net` with plain per-sample gradient descent.

    train_data: list of [inputs, one_hot_target] pairs.
    alpha: learning rate.
    epoch: number of full passes over train_data.
    Prints per-epoch squared-error loss and the count of correct samples.
    """
    for curr_epoch_no in range(epoch):
        sums = 0
        # Accuracy count: number of samples whose argmax matches the target's.
        acc_cnt = 0
        for sample in train_data:
            outputs = feed_fwd(net, sample[0])
            expected = sample[1]
            sums += sum((expected[i] - outputs[i]) ** 2 for i in range(len(expected)))
            # BUG FIX: the original wrote
            #   expected.index(max(expected) == outputs.index(max(outputs)))
            # which indexes by a boolean (always looks up True/False in the
            # target list) instead of comparing the two argmax positions.
            if expected.index(max(expected)) == outputs.index(max(outputs)):
                acc_cnt += 1
            back_prop(net, expected)
            update_weights(net, sample[0], alpha)
        # Metadata on how well it's doing
        print('epoch_no:', curr_epoch_no, 'loss:', sums, 'accuracy:', acc_cnt)
# Demo run at import time: a 2-100-100-2 sigmoid network trained on a
# four-sample 2-bit truth table with one-hot targets ([0,1] / [1,0]),
# learning rate 2, 100 epochs.
# NOTE(review): random.seed(200) above seeds the stdlib RNG, but the
# weights come from np.random.uniform — runs are not reproducible as
# written; confirm whether np.random.seed was intended.
net = gen_net([2,100,100,2],[(sig,sig,sig),[dsig,dsig,dsig]])
train(net,[[[0,0],[0,1]],
        [[0,1],[1,0]],
        [[1,0],[1,0]],
        [[1,1],[0,1]]],
        2, 100)
# Code to test out neural network output
# net = gen_net([2,2,2],[(sig,sig),[dsig,dsig]])
# print(feed_fwd(net,[0.2,0.3]))
# for i in range(len(net)):
# for j in range(len(net[i])):
# print(net[i][j].weights)
# print("--------------------------")
# net = back_prop(net,[1,0])
# net = update_weights(net,[0.2,0.3],0.2)
# for i in range(len(net)):
# for j in range(len(net[i])):
# print(net[i][j].weights)
| 31.90411
| 83
| 0.572349
|
import numpy as np
import random
random.seed(200)
def sig(inp):
return (1/(1+np.exp(-1*inp)))
def dsig(inp):
return (1.0-inp)*inp
class Neuron:
def __init__(self,weights,func,dfunc):
self.weights = weights
self.output = None
self.func = func
self.dfunc = dfunc
self.delta = None
def agr(self,x):
bias = self.weights[-1]
out = np.inner(self.weights.copy()[:-1],x) + bias
return out
def activation(self,inp):
self.output = self.func(inp)
return self.output
# Definition for weights
def gen_weights(dim):
# Add 1 to the dimension for the bias
return np.random.uniform(-0.1,0.1,dim+1)
# Definition of the actual network
# Activations correspond to activation funcitons used
def gen_net(structure, activations):
# Create empty list
net = []
for i in range(1,len(structure)):
layer = []
for j in range(structure[i]):
# feed in neuron weights from last layer
weights = gen_weights(structure[i-1])
layer.append(Neuron(weights, activations[0][i-1], activations[1][i-1]))
net.append(layer)
return net
# Define feed forward
def feed_fwd(net, inp):
# It stores the current input associated with the given layer
inp_store = inp
for layer in net:
out_of_curr_layer = []
for neuron in layer:
# Calculate accumulated output value
accum = neuron.agr(inp_store)
output = neuron.activation(accum)
# Store output for later use
out_of_curr_layer.append(output)
inp_store = out_of_curr_layer
return inp_store
# Define back propagation
def back_prop(net, target):
back_len = len(net)
for i in range(back_len):
ind = back_len-i-1
layer = net[ind]
errors = []
if ind == back_len-1:
j=0
for neuron in layer:
errors.append(target[j]-neuron.output)
j+=1
else:
for j in range(len(layer)):
error = 0.0
# For neuron in front of current neuron, check deltas
for neuron in net[ind+1]:
error+=(neuron.weights[j]*neuron.delta)
errors.append(error)
j=0
for neuron in layer:
neuron.delta = errors[j]*neuron.dfunc(neuron.output)
j+=1
return net
# Define how much to update the weights by everytime
# Alpha is the learning rate, but if too high it may overshoot
def update_weights(net,inp,alpha):
for i in range(len(net)):
if i==0:
inputs = inp
else:
inputs = []
prev_layer = net[i-1]
for neuron in prev_layer:
inputs.append(neuron.output)
curr_layer = net[i]
for neuron in curr_layer:
for j in range(len(inputs)):
neuron.weights[j] += alpha*neuron.delta*inputs[j]
neuron.weights[-1]+=alpha*neuron.delta
#Define training approach
def train(net, train_data, alpha, epoch):
    """Train `net` with plain per-sample gradient descent.

    train_data: list of [inputs, one_hot_target] pairs.
    alpha: learning rate.
    epoch: number of full passes over train_data.
    Prints per-epoch squared-error loss and the count of correct samples.
    """
    for curr_epoch_no in range(epoch):
        sums = 0
        acc_cnt = 0
        for sample in train_data:
            outputs = feed_fwd(net, sample[0])
            expected = sample[1]
            sums += sum((expected[i] - outputs[i]) ** 2 for i in range(len(expected)))
            # BUG FIX: the original wrote
            #   expected.index(max(expected) == outputs.index(max(outputs)))
            # which indexes by a boolean instead of comparing argmax positions.
            if expected.index(max(expected)) == outputs.index(max(outputs)):
                acc_cnt += 1
            back_prop(net, expected)
            update_weights(net, sample[0], alpha)
        print('epoch_no:', curr_epoch_no, 'loss:', sums, 'accuracy:', acc_cnt)
net = gen_net([2,100,100,2],[(sig,sig,sig),[dsig,dsig,dsig]])
train(net,[[[0,0],[0,1]],
[[0,1],[1,0]],
[[1,0],[1,0]],
[[1,1],[0,1]]],
2, 100)
| true
| true
|
7907adcf781ada4dc3139aa328d65512d51ec61c
| 1,772
|
py
|
Python
|
resources/email.py
|
donovan-PNW/dwellinglybackend
|
448df61f6ea81f00dde7dab751f8b2106f0eb7b1
|
[
"MIT"
] | null | null | null |
resources/email.py
|
donovan-PNW/dwellinglybackend
|
448df61f6ea81f00dde7dab751f8b2106f0eb7b1
|
[
"MIT"
] | null | null | null |
resources/email.py
|
donovan-PNW/dwellinglybackend
|
448df61f6ea81f00dde7dab751f8b2106f0eb7b1
|
[
"MIT"
] | null | null | null |
from flask import current_app, render_template
from flask_restful import Resource, reqparse
from flask_mail import Message
from utils.authorizations import admin_required
from models.user import UserModel
class Email(Resource):
    """Admin mail endpoint plus helpers for sending Dwellingly emails."""

    # Sender address for all outgoing mail.
    # NOTE(review): should this be a dwellingly address?
    NO_REPLY = "noreply@codeforpdx.org"

    parser = reqparse.RequestParser()
    parser.add_argument("user_id", required=True)
    parser.add_argument("subject", required=True)
    parser.add_argument("body", required=True)

    @admin_required
    def post(self):
        """Send an ad-hoc email to a single user (admin only)."""
        args = Email.parser.parse_args()
        recipient = UserModel.find_by_id(args.user_id)
        msg = Message(args.subject, sender=Email.NO_REPLY, body=args.body)
        msg.recipients = [recipient.email]
        current_app.mail.send(msg)
        return {"message": "Message sent"}

    @staticmethod
    def send_reset_password_msg(user):
        """Email `user` a password-reset message carrying a fresh token."""
        token = user.reset_password_token()
        msg = Message(
            "Reset password for Dwellingly",
            sender=Email.NO_REPLY,
            recipients=[user.email],
        )
        msg.body = render_template("emails/reset_msg.txt", user=user, token=token)
        msg.html = render_template("emails/reset_msg.html", user=user, token=token)
        current_app.mail.send(msg)

    @staticmethod
    def send_user_invite_msg(user):
        """Email `user` an account-creation invitation carrying a fresh token."""
        token = user.reset_password_token()
        msg = Message(
            "Create Your Dwellingly Account",
            sender=Email.NO_REPLY,
            recipients=[user.email],
        )
        msg.body = render_template("emails/invite_user_msg.txt", user=user, token=token)
        msg.html = render_template(
            "emails/invite_user_msg.html", user=user, token=token
        )
        current_app.mail.send(msg)
| 33.433962
| 88
| 0.667607
|
from flask import current_app, render_template
from flask_restful import Resource, reqparse
from flask_mail import Message
from utils.authorizations import admin_required
from models.user import UserModel
class Email(Resource):
NO_REPLY = "noreply@codeforpdx.org"
parser = reqparse.RequestParser()
parser.add_argument("user_id", required=True)
parser.add_argument("subject", required=True)
parser.add_argument("body", required=True)
@admin_required
def post(self):
data = Email.parser.parse_args()
user = UserModel.find_by_id(data.user_id)
message = Message(data.subject, sender=Email.NO_REPLY, body=data.body)
message.recipients = [user.email]
current_app.mail.send(message)
return {"message": "Message sent"}
@staticmethod
def send_reset_password_msg(user):
token = user.reset_password_token()
msg = Message(
"Reset password for Dwellingly",
sender=Email.NO_REPLY,
recipients=[user.email],
)
msg.body = render_template("emails/reset_msg.txt", user=user, token=token)
msg.html = render_template("emails/reset_msg.html", user=user, token=token)
current_app.mail.send(msg)
@staticmethod
def send_user_invite_msg(user):
token = user.reset_password_token()
msg = Message(
"Create Your Dwellingly Account",
sender=Email.NO_REPLY,
recipients=[user.email],
)
msg.body = render_template("emails/invite_user_msg.txt", user=user, token=token)
msg.html = render_template(
"emails/invite_user_msg.html", user=user, token=token
)
current_app.mail.send(msg)
| true
| true
|
7907ae98067bf7e03d8138b9e05d8239fe876567
| 6,882
|
py
|
Python
|
src/oci/data_integration/models/update_connection_from_amazon_s3.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_integration/models/update_connection_from_amazon_s3.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_integration/models/update_connection_from_amazon_s3.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_connection_details import UpdateConnectionDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateConnectionFromAmazonS3(UpdateConnectionDetails):
    """
    The details to update an Amazon s3 connection.

    The ``model_type`` discriminator defaults to ``AMAZON_S3_CONNECTION``
    and should not be changed.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new UpdateConnectionFromAmazonS3 object with values
        taken from keyword arguments, one per property below.

        Supported keyword arguments and their types:

        * model_type (str) -- one of "ORACLE_ADWC_CONNECTION",
          "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION",
          "ORACLEDB_CONNECTION", "MYSQL_CONNECTION",
          "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION",
          "AMAZON_S3_CONNECTION", "BIP_CONNECTION"
        * key (str)
        * model_version (str)
        * parent_ref (oci.data_integration.models.ParentReference)
        * name (str)
        * description (str)
        * object_status (int)
        * object_version (int)
        * identifier (str)
        * connection_properties (list[oci.data_integration.models.ConnectionProperty])
        * registry_metadata (oci.data_integration.models.RegistryMetadata)
        * access_key (oci.data_integration.models.SensitiveAttribute)
        * secret_key (oci.data_integration.models.SensitiveAttribute)
        """
        # Python attribute name -> declared type, used for (de)serialization.
        self.swagger_types = {
            'model_type': 'str',
            'key': 'str',
            'model_version': 'str',
            'parent_ref': 'ParentReference',
            'name': 'str',
            'description': 'str',
            'object_status': 'int',
            'object_version': 'int',
            'identifier': 'str',
            'connection_properties': 'list[ConnectionProperty]',
            'registry_metadata': 'RegistryMetadata',
            'access_key': 'SensitiveAttribute',
            'secret_key': 'SensitiveAttribute'
        }

        # Python attribute name -> wire/JSON field name.
        self.attribute_map = {
            'model_type': 'modelType',
            'key': 'key',
            'model_version': 'modelVersion',
            'parent_ref': 'parentRef',
            'name': 'name',
            'description': 'description',
            'object_status': 'objectStatus',
            'object_version': 'objectVersion',
            'identifier': 'identifier',
            'connection_properties': 'connectionProperties',
            'registry_metadata': 'registryMetadata',
            'access_key': 'accessKey',
            'secret_key': 'secretKey'
        }

        # Create the private backing attribute for every declared property,
        # then pin the discriminator to this subtype's fixed value.
        for attr_name in self.swagger_types:
            setattr(self, '_' + attr_name, None)
        self._model_type = 'AMAZON_S3_CONNECTION'

    @property
    def access_key(self):
        """
        Gets the access_key of this UpdateConnectionFromAmazonS3.

        :return: The access_key of this UpdateConnectionFromAmazonS3.
        :rtype: oci.data_integration.models.SensitiveAttribute
        """
        return self._access_key

    @access_key.setter
    def access_key(self, access_key):
        """
        Sets the access_key of this UpdateConnectionFromAmazonS3.

        :param access_key: The access_key of this UpdateConnectionFromAmazonS3.
        :type: oci.data_integration.models.SensitiveAttribute
        """
        self._access_key = access_key

    @property
    def secret_key(self):
        """
        Gets the secret_key of this UpdateConnectionFromAmazonS3.

        :return: The secret_key of this UpdateConnectionFromAmazonS3.
        :rtype: oci.data_integration.models.SensitiveAttribute
        """
        return self._secret_key

    @secret_key.setter
    def secret_key(self, secret_key):
        """
        Sets the secret_key of this UpdateConnectionFromAmazonS3.

        :param secret_key: The secret_key of this UpdateConnectionFromAmazonS3.
        :type: oci.data_integration.models.SensitiveAttribute
        """
        self._secret_key = secret_key

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Equal when the other object is present and all attributes match.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 39.551724
| 266
| 0.671898
|
from .update_connection_details import UpdateConnectionDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateConnectionFromAmazonS3(UpdateConnectionDetails):
    """Update details for an Amazon S3 connection.

    The ``model_type`` discriminator is fixed to ``AMAZON_S3_CONNECTION``
    in ``__init__`` and should not be changed afterwards.
    """

    def __init__(self, **kwargs):
        """Declare type/wire metadata and backing attributes.

        NOTE(review): the class decorator presumably maps **kwargs onto the
        attributes declared in ``swagger_types`` -- confirm against
        ``oci.decorators.init_model_state_from_kwargs``.
        """
        # Python attribute name -> declared type (used for (de)serialization).
        self.swagger_types = {
            'model_type': 'str',
            'key': 'str',
            'model_version': 'str',
            'parent_ref': 'ParentReference',
            'name': 'str',
            'description': 'str',
            'object_status': 'int',
            'object_version': 'int',
            'identifier': 'str',
            'connection_properties': 'list[ConnectionProperty]',
            'registry_metadata': 'RegistryMetadata',
            'access_key': 'SensitiveAttribute',
            'secret_key': 'SensitiveAttribute'
        }

        # Python attribute name -> wire/JSON field name.
        self.attribute_map = {
            'model_type': 'modelType',
            'key': 'key',
            'model_version': 'modelVersion',
            'parent_ref': 'parentRef',
            'name': 'name',
            'description': 'description',
            'object_status': 'objectStatus',
            'object_version': 'objectVersion',
            'identifier': 'identifier',
            'connection_properties': 'connectionProperties',
            'registry_metadata': 'registryMetadata',
            'access_key': 'accessKey',
            'secret_key': 'secretKey'
        }

        # Private backing fields for the properties, initially unset.
        self._model_type = None
        self._key = None
        self._model_version = None
        self._parent_ref = None
        self._name = None
        self._description = None
        self._object_status = None
        self._object_version = None
        self._identifier = None
        self._connection_properties = None
        self._registry_metadata = None
        self._access_key = None
        self._secret_key = None
        # Fixed discriminator for this subtype.
        self._model_type = 'AMAZON_S3_CONNECTION'

    @property
    def access_key(self):
        """Return the access key (SensitiveAttribute) of this connection."""
        return self._access_key

    @access_key.setter
    def access_key(self, access_key):
        """Set the access key (SensitiveAttribute) of this connection."""
        self._access_key = access_key

    @property
    def secret_key(self):
        """Return the secret key (SensitiveAttribute) of this connection."""
        return self._secret_key

    @secret_key.setter
    def secret_key(self, secret_key):
        """Set the secret key (SensitiveAttribute) of this connection."""
        self._secret_key = secret_key

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Attribute-wise equality; None compares unequal to any instance.
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| true
| true
|
7907afe8f92afd92f1e31b31284c5a83d00fd630
| 189
|
py
|
Python
|
zhighlighter.py
|
herimonster/zoid
|
0ee0e5dcc8416c82e82801ba42abf979eacf2db5
|
[
"MIT"
] | null | null | null |
zhighlighter.py
|
herimonster/zoid
|
0ee0e5dcc8416c82e82801ba42abf979eacf2db5
|
[
"MIT"
] | null | null | null |
zhighlighter.py
|
herimonster/zoid
|
0ee0e5dcc8416c82e82801ba42abf979eacf2db5
|
[
"MIT"
] | null | null | null |
import zutils
class zhighlighter:
    """Highlighter producing one (fg, bg, attribute) tuple per character.

    Every character gets the default foreground/background colours; the
    attribute alternates between blink (even positions) and normal (odd
    positions), per the original author's "#LOL!" note.
    """

    def highlight(self, text):
        attrs = []
        for pos in range(len(text)):
            mode = zutils.AT_BLINK if pos % 2 == 0 else zutils.AT_NORMAL
            attrs.append((zutils.CL_FG, zutils.CL_BG, mode))
        return attrs
| 31.5
| 124
| 0.719577
|
import zutils
class zhighlighter:
    """Highlighter producing one (fg, bg, attribute) tuple per character,
    blinking on even positions and normal on odd ones."""

    def highlight(self, text):
        # Default colours for every char; attribute alternates by index parity.
        return [(zutils.CL_FG, zutils.CL_BG, zutils.AT_BLINK if i % 2 == 0 else zutils.AT_NORMAL) for i in range(len(text))]
| true
| true
|
7907b0900f6d0a61e1cec5291cbe8aa3cc11e186
| 2,585
|
py
|
Python
|
python/tests/assert.py
|
mizuki-nana/coreVM
|
1ff863b890329265a86ff46b0fdf7bac8e362f0e
|
[
"MIT"
] | 2
|
2017-02-12T21:59:54.000Z
|
2017-02-13T14:57:48.000Z
|
python/tests/assert.py
|
mizuki-nana/coreVM
|
1ff863b890329265a86ff46b0fdf7bac8e362f0e
|
[
"MIT"
] | null | null | null |
python/tests/assert.py
|
mizuki-nana/coreVM
|
1ff863b890329265a86ff46b0fdf7bac8e362f0e
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright (c) 2015 Yanzheng Li
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## -----------------------------------------------------------------------------
def test_assert_true():
    # Truthy asserts (bare and with a message) must not raise; the except
    # branch firing would indicate a broken `assert` implementation.
    try:
        assert True
        assert True, 'I want to believe.'
    except AssertionError:
        print 'This should not happen'
## -----------------------------------------------------------------------------
def test_assert_false():
    # `assert False` must raise AssertionError, so the print is expected.
    try:
        assert False
    except AssertionError:
        print 'I cannot believe'
## -----------------------------------------------------------------------------
def test_assert_on_truthy_exprs():
    # Nonzero ints, nonzero floats and `not False` are all truthy, so
    # none of these asserts should raise.
    try:
        assert 1
        assert 1 + 1
        assert 3.14 - 3.12
        assert not False
    except AssertionError:
        print 'This should not happen'
## -----------------------------------------------------------------------------
def test_assert_on_falsy_exprs():
    # Each case asserts an expression expected to be falsy so the except
    # branch prints.
    # NOTE(review): `0 - 1` evaluates to -1 and `3.12 - 3.14` to a nonzero
    # float -- both are truthy, so those two asserts pass silently and only
    # the `0` and `not True` cases actually print. Verify against the
    # fixture's expected-output file before "fixing".
    try:
        assert 0
    except AssertionError:
        print 'I cannot believe'
    try:
        assert 0 - 1
    except AssertionError:
        print 'I cannot believe'
    try:
        assert not True
    except AssertionError:
        print 'I cannot believe'
    try:
        assert 3.12 - 3.14
    except AssertionError:
        print 'I cannot believe'
## -----------------------------------------------------------------------------
# Exercise every scenario when the script is run.
test_assert_true()
test_assert_false()
test_assert_on_truthy_exprs()
test_assert_on_falsy_exprs()
## -----------------------------------------------------------------------------
| 31.52439
| 80
| 0.572534
|
want to believe.'
except AssertionError:
print 'This should not happen'
ror:
print 'I cannot believe'
+ 1
assert 3.14 - 3.12
assert not False
except AssertionError:
print 'This should not happen'
ionError:
print 'I cannot believe'
try:
assert 0 - 1
except AssertionError:
print 'I cannot believe'
try:
assert not True
except AssertionError:
print 'I cannot believe'
try:
assert 3.12 - 3.14
except AssertionError:
print 'I cannot believe'
ert_on_falsy_exprs()
| false
| true
|
7907b1571ae62a26340f878d02efc5e767d62286
| 736
|
py
|
Python
|
chap8/get_numbers.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
chap8/get_numbers.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
chap8/get_numbers.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
#coding:utf-8
'''
filename:get_numbers.py
chap:8
subject:2
conditions:file [data],contains: numbers,annotations,empty line
solution:function get_numbers
'''
import sys
def get_numbers(file):
    """Read integers from *file*, one per line.

    Lines that are not pure integers (comments, blank lines, prose) are
    skipped with a notice on stdout; a failure to open/read the file is
    reported and yields whatever was collected so far (an empty list).

    :param file: path of the file to read
    :return: list of ints in file order
    """
    numbers = []
    try:
        with open(file, 'rt') as f:
            for line in f:
                # int() tolerates surrounding whitespace, so the trailing
                # newline needs no stripping.
                try:
                    numbers.append(int(line))
                except ValueError as e:
                    print('PASS:this line is not pure number:', e)
    except OSError as e:
        print('Opening file error:', e)
    # Exception (not BaseException) so KeyboardInterrupt/SystemExit still
    # propagate; the original over-broad handler swallowed them.
    except Exception as e:
        print('Something is wrong :', e)
    return numbers
if __name__ == '__main__':
    # CLI entry: first argument is the file to scan for integers.
    print(get_numbers(sys.argv[1]))
| 21.028571
| 67
| 0.569293
|
import sys
def get_numbers(file):
    """Read integers from *file*, one per line, skipping non-numeric lines."""
    f = None  # NOTE(review): unused -- the `with` statement rebinds f below.
    numbers = []
    try:
        with open(file,'rt') as f:
            for line in f:
                # int() tolerates the trailing newline; non-numeric lines
                # are reported and skipped.
                try:
                    numbers.append(int(line))
                except ValueError as e:
                    print('PASS:this line is not pure number:',e)
    except OSError as e:
        print('Opening file error:',e)
    # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
    # Exception would be the conventional catch-all here.
    except BaseException as e:
        print('Something is wrong :',e)
    return numbers
if __name__ == '__main__':
    # CLI entry: first argument is the file to scan for integers.
    numbers = get_numbers(sys.argv[1])
    print(numbers)
| true
| true
|
7907b41746d4436e14fd11921e68dff38fcd1b71
| 888
|
py
|
Python
|
enaml/widgets/calendar.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 11
|
2015-03-14T14:30:51.000Z
|
2022-03-15T13:01:44.000Z
|
enaml/widgets/calendar.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 3
|
2015-01-31T11:12:56.000Z
|
2022-03-14T00:53:25.000Z
|
enaml/widgets/calendar.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 4
|
2015-01-27T01:56:14.000Z
|
2021-02-23T07:21:20.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed, ForwardTyped
from .bounded_date import BoundedDate, ProxyBoundedDate
class ProxyCalendar(ProxyBoundedDate):
    """ The abstract definition of a proxy Calendar object.

    """
    #: A reference to the Calendar declaration.
    declaration = ForwardTyped(lambda: Calendar)
class Calendar(BoundedDate):
    """ A bounded date control which edits a Python datetime.date using
    a widget which resembles a calendar.

    """
    #: A reference to the ProxyCalendar object that renders the widget.
    proxy = Typed(ProxyCalendar)
| 31.714286
| 79
| 0.606982
|
from atom.api import Typed, ForwardTyped
from .bounded_date import BoundedDate, ProxyBoundedDate
class ProxyCalendar(ProxyBoundedDate):
    """The abstract definition of a proxy Calendar object."""
    #: A reference to the Calendar declaration.
    declaration = ForwardTyped(lambda: Calendar)
class Calendar(BoundedDate):
    """A bounded date control edited with a calendar-style widget."""
    #: A reference to the ProxyCalendar object.
    proxy = Typed(ProxyCalendar)
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.