code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import re
from datetime import datetime
from flask import Blueprint
from jwt import PyJWTError
import configs
from app import derive_import_root, add_url_rules_for_blueprint
from application import exception
from application.model.invitation_code import InvitationCode
from application.model.role import Role
from application.model.scholar_payment_account import ScholarPaymentAccount
from application.model.user import User
from application.model.user_role import UserRole
from application.util import authorization, background_task
from application.util.constant import JwtSub
from application.util.database import session_scope
from application.views.base_api import BaseNeedLoginAPI, ApiResult
class UserAPI(BaseNeedLoginAPI):
methods = ['GET', 'POST', 'PUT']
need_login_methods = ['GET']
def get(self):
uuid = self.get_data('uuid')
if self.valid_data(uuid):
return self.get_user_information(uuid)
return self.get_self_information()
def get_self_information(self):
with session_scope() as session:
user = session.query(User).filter(User.uuid == self.user_uuid).first() # type:User
result = ApiResult('获取个人信息成功', payload={
'uuid': user.uuid,
'username': user.username,
'email': user.email,
'register_date': user.created_at.isoformat(),
'created_at': user.created_at.isoformat(),
'status': user.status
})
return result.to_response()
def get_user_information(self, uuid):
with session_scope() as session:
user = session.query(User).filter(User.uuid == uuid).first() # type:User
payload = {
'uuid': user.uuid,
'username': user.username,
'email': user.email,
'register_date': user.created_at.isoformat(),
'created_at': user.created_at.isoformat(),
'status': user.status
}
if self.user_uuid != uuid:
payload['email'] = ''
payload['status'] = -1
result = ApiResult('获取个人信息成功', payload=payload)
return result.to_response()
def post(self):
re_email = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
username = self.get_post_data('username', require=True, error_message='请输入用户名')
email = self.get_post_data('email', require=True, error_message='请输入邮箱')
if not re_email.match(email):
raise exception.api.InvalidRequest('请输入正确的邮箱')
password = self.get_post_data('password', require=True, error_message='请输入密码')
code = self.get_post_data('invitation_code', require=True, error_message='请输入邀请码')
with session_scope() as session:
user = session.query(User).filter(User.username == username, User.status != User.STATUS.DELETED).first()
if user is not None:
raise exception.api.Conflict('用户名已被注册')
user = session.query(User).filter(
User.email == email, ~User.status.in_([User.STATUS.DELETED, User.STATUS.INACTIVATED])).first()
if user is not None:
return exception.api.Conflict('邮箱已被注册')
invitation_code = session.query(InvitationCode) \
.filter(InvitationCode.code == code).first() # type:InvitationCode
if invitation_code is None:
raise exception.api.NotFound('邀请码不存在')
elif invitation_code.status != 1:
raise exception.api.Conflict('邀请码已被使用')
# 将记录写入user表
hashed_password = authorization.toolkit.hash_plaintext(password)
user = User(username=username.strip(), email=email, password=<PASSWORD>) # type: User
session.add(user)
session.flush()
# 进行角色关联
role = session.query(Role).filter(Role.name == Role.BuiltInRole.REGISTRATION_USER.value.name,
Role.status == Role.Status.VALID.value).first() # type: Role
user_role = UserRole(user_uuid=user.uuid, role_uuid=role.uuid)
session.add(user_role)
# 将记录写入invitation_code表
invitation_code.status = 2
invitation_code.invitee_uuid = user.uuid
invitation_code.invited_at = datetime.now()
# 创建学术积分账户
scholar_payment_account = ScholarPaymentAccount(
user_uuid=user.uuid,
balance=0,
)
session.add(scholar_payment_account)
self.send_activation_email(user)
result = ApiResult('注册成功', status=201, payload={
'jwt': authorization.toolkit.derive_jwt_token(
user_id=user.id, user_uuid=user.uuid
)
})
return result.to_response()
def send_activation_email(self, user: User):
expired_in = 48
extra_payload = {
'sub': 'activation'
}
jwt = authorization.toolkit.derive_jwt_token(
user.id, user.uuid, expired_in, extra_payload
)
if configs.DEBUG:
domain = 'http://localhost:8080'
else:
domain = 'http://www.celerysoft.science'
activate_url = '{}/activation?jwt={}'.format(domain, jwt)
background_task.send_activation_email.delay(user_email=user.email,
username=user.username,
activate_url=activate_url)
def put(self):
jwt = self.get_post_data('jwt')
if self.valid_data(jwt):
return self.validate_email(jwt)
def validate_email(self, jwt):
try:
jwt_dict = authorization.toolkit.decode_jwt_token(jwt) # type:dict
except PyJWTError:
raise exception.api.InvalidRequest('激活链接已过期或者激活请求非法')
if 'sub' not in jwt_dict.keys() or jwt_dict['sub'] != JwtSub.Activation.value:
raise exception.api.InvalidRequest('激活请求非法')
uuid = jwt_dict['uuid']
with session_scope() as session:
user = session.query(User).filter(User.uuid == uuid).first()
if user.status == 1:
raise exception.api.Conflict('邮箱已完成验证,无需重复验证')
user.status = 1
scholar_payment_account = session.query(ScholarPaymentAccount) \
.filter(ScholarPaymentAccount.user_uuid == uuid,
ScholarPaymentAccount.status == ScholarPaymentAccount.STATUS.VALID.value) \
.first() # type: ScholarPaymentAccount
if scholar_payment_account is not None:
scholar_payment_account.balance = configs.NEW_USER_SCHOLAR_BALANCE
jwt_token = authorization.toolkit.derive_jwt_token(
user_id=user.id, user_uuid=uuid
)
result = ApiResult('邮箱验证成功', 201, payload={
'jwt': jwt_token
})
return result.to_response()
view = UserAPI
bp = Blueprint(__name__.split('.')[-1], __name__)
root = derive_import_root(__name__)
add_url_rules_for_blueprint(root, bp) | application/views/user/user.py | import re
from datetime import datetime
from flask import Blueprint
from jwt import PyJWTError
import configs
from app import derive_import_root, add_url_rules_for_blueprint
from application import exception
from application.model.invitation_code import InvitationCode
from application.model.role import Role
from application.model.scholar_payment_account import ScholarPaymentAccount
from application.model.user import User
from application.model.user_role import UserRole
from application.util import authorization, background_task
from application.util.constant import JwtSub
from application.util.database import session_scope
from application.views.base_api import BaseNeedLoginAPI, ApiResult
class UserAPI(BaseNeedLoginAPI):
methods = ['GET', 'POST', 'PUT']
need_login_methods = ['GET']
def get(self):
uuid = self.get_data('uuid')
if self.valid_data(uuid):
return self.get_user_information(uuid)
return self.get_self_information()
def get_self_information(self):
with session_scope() as session:
user = session.query(User).filter(User.uuid == self.user_uuid).first() # type:User
result = ApiResult('获取个人信息成功', payload={
'uuid': user.uuid,
'username': user.username,
'email': user.email,
'register_date': user.created_at.isoformat(),
'created_at': user.created_at.isoformat(),
'status': user.status
})
return result.to_response()
def get_user_information(self, uuid):
with session_scope() as session:
user = session.query(User).filter(User.uuid == uuid).first() # type:User
payload = {
'uuid': user.uuid,
'username': user.username,
'email': user.email,
'register_date': user.created_at.isoformat(),
'created_at': user.created_at.isoformat(),
'status': user.status
}
if self.user_uuid != uuid:
payload['email'] = ''
payload['status'] = -1
result = ApiResult('获取个人信息成功', payload=payload)
return result.to_response()
def post(self):
re_email = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
username = self.get_post_data('username', require=True, error_message='请输入用户名')
email = self.get_post_data('email', require=True, error_message='请输入邮箱')
if not re_email.match(email):
raise exception.api.InvalidRequest('请输入正确的邮箱')
password = self.get_post_data('password', require=True, error_message='请输入密码')
code = self.get_post_data('invitation_code', require=True, error_message='请输入邀请码')
with session_scope() as session:
user = session.query(User).filter(User.username == username, User.status != User.STATUS.DELETED).first()
if user is not None:
raise exception.api.Conflict('用户名已被注册')
user = session.query(User).filter(
User.email == email, ~User.status.in_([User.STATUS.DELETED, User.STATUS.INACTIVATED])).first()
if user is not None:
return exception.api.Conflict('邮箱已被注册')
invitation_code = session.query(InvitationCode) \
.filter(InvitationCode.code == code).first() # type:InvitationCode
if invitation_code is None:
raise exception.api.NotFound('邀请码不存在')
elif invitation_code.status != 1:
raise exception.api.Conflict('邀请码已被使用')
# 将记录写入user表
hashed_password = authorization.toolkit.hash_plaintext(password)
user = User(username=username.strip(), email=email, password=<PASSWORD>) # type: User
session.add(user)
session.flush()
# 进行角色关联
role = session.query(Role).filter(Role.name == Role.BuiltInRole.REGISTRATION_USER.value.name,
Role.status == Role.Status.VALID.value).first() # type: Role
user_role = UserRole(user_uuid=user.uuid, role_uuid=role.uuid)
session.add(user_role)
# 将记录写入invitation_code表
invitation_code.status = 2
invitation_code.invitee_uuid = user.uuid
invitation_code.invited_at = datetime.now()
# 创建学术积分账户
scholar_payment_account = ScholarPaymentAccount(
user_uuid=user.uuid,
balance=0,
)
session.add(scholar_payment_account)
self.send_activation_email(user)
result = ApiResult('注册成功', status=201, payload={
'jwt': authorization.toolkit.derive_jwt_token(
user_id=user.id, user_uuid=user.uuid
)
})
return result.to_response()
def send_activation_email(self, user: User):
expired_in = 48
extra_payload = {
'sub': 'activation'
}
jwt = authorization.toolkit.derive_jwt_token(
user.id, user.uuid, expired_in, extra_payload
)
if configs.DEBUG:
domain = 'http://localhost:8080'
else:
domain = 'http://www.celerysoft.science'
activate_url = '{}/activation?jwt={}'.format(domain, jwt)
background_task.send_activation_email.delay(user_email=user.email,
username=user.username,
activate_url=activate_url)
def put(self):
jwt = self.get_post_data('jwt')
if self.valid_data(jwt):
return self.validate_email(jwt)
def validate_email(self, jwt):
try:
jwt_dict = authorization.toolkit.decode_jwt_token(jwt) # type:dict
except PyJWTError:
raise exception.api.InvalidRequest('激活链接已过期或者激活请求非法')
if 'sub' not in jwt_dict.keys() or jwt_dict['sub'] != JwtSub.Activation.value:
raise exception.api.InvalidRequest('激活请求非法')
uuid = jwt_dict['uuid']
with session_scope() as session:
user = session.query(User).filter(User.uuid == uuid).first()
if user.status == 1:
raise exception.api.Conflict('邮箱已完成验证,无需重复验证')
user.status = 1
scholar_payment_account = session.query(ScholarPaymentAccount) \
.filter(ScholarPaymentAccount.user_uuid == uuid,
ScholarPaymentAccount.status == ScholarPaymentAccount.STATUS.VALID.value) \
.first() # type: ScholarPaymentAccount
if scholar_payment_account is not None:
scholar_payment_account.balance = configs.NEW_USER_SCHOLAR_BALANCE
jwt_token = authorization.toolkit.derive_jwt_token(
user_id=user.id, user_uuid=uuid
)
result = ApiResult('邮箱验证成功', 201, payload={
'jwt': jwt_token
})
return result.to_response()
view = UserAPI
bp = Blueprint(__name__.split('.')[-1], __name__)
root = derive_import_root(__name__)
add_url_rules_for_blueprint(root, bp) | 0.284874 | 0.065815 |
import os
from datetime import datetime
import logging
import pandas as pd
import pandas.errors as pandas_errors
import pytz
import settings
logger = logging.getLogger(__name__)
def find_csv(csv_filename_prefix: str, target_dir: str = None) -> str:
"""As we specify only the airodump output file prefix, this helper function finds the whole filename."""
if target_dir is None:
target_dir = os.getcwd()
files_in_directory = os.listdir(target_dir)
files_in_directory.sort(reverse=True)
for file in files_in_directory:
if file.endswith("csv") and file.startswith(csv_filename_prefix):
return os.path.join(target_dir, file)
logger.warning(
"%s WARNING: No CSV file found in %s with prefix %s"
% (settings.TERM_LBL, target_dir, settings.AIRODUMP_FILE_PREFIX)
)
def parse_airomon_datetime(airomon_dt: str) -> datetime:
"""Parse string used by airomon and also make timezone aware."""
aileen_tz = pytz.timezone(settings.TIME_ZONE)
try:
dt: datetime = datetime.strptime(airomon_dt, "%Y-%m-%d %H:%M:%S")
dt = dt.astimezone(aileen_tz)
except ValueError:
print(
"%s Warning: could not parse datetime %s, using 1-1-1970 for this one!"
% (settings.TERM_LBL, airomon_dt)
)
dt = datetime(1970, 1, 1, 1, 1, 1, tzinfo=aileen_tz)
return dt
def get_device_data_from_csv_file(csv_filename: str, min_power: int) -> pd.DataFrame:
"""Read in the data frame and use only the columns which contain device info"""
try:
df = pd.read_csv(csv_filename, header=None, usecols=range(0, 6))
except (pandas_errors.EmptyDataError, ValueError):
print(
"%s WARNING: No data in airomon file %s or file not found"
% (settings.TERM_LBL, csv_filename)
)
return pd.DataFrame(
columns=[
"device_id",
"time_seen",
"total_packets",
"access_point_id",
"device_power",
]
)
# find the row with which starts the device info
header_row = df.loc[df[0] == "Station MAC"]
# get the index of that row
header_row_index = header_row.index[0]
# delete all the information about the device stuff
df = df[header_row_index:]
# rename the columns so the have device headers
df = df.rename(columns=df.iloc[0]).drop(df.index[0])
# remove white spaces from column headers
df.rename(columns=lambda x: x.strip(), inplace=True)
# drop the unnecessary info
df.drop("First time seen", 1, inplace=True)
df.rename(
columns={
"Station MAC": "device_id",
"Last time seen": "time_seen",
"# packets": "total_packets",
"BSSID": "access_point_id",
"Power": "device_power",
},
inplace=True,
)
# remove all blank white space, do custom operations like hashing and parsing dates and floats
df["device_id"] = df["device_id"].map(lambda x: str(x).strip())
df["time_seen"] = df["time_seen"].map(
lambda x: parse_airomon_datetime(str(x).strip())
)
df["total_packets"] = df["total_packets"].map(lambda x: str(x).strip())
df["access_point_id"] = df["access_point_id"].map(lambda x: str(x).strip())
df.device_power = df.device_power.astype(float)
# debug specific devices, if configured
for d_name, d_mac in settings.DEBUG_DEVICES.items():
if d_mac in df.device_id.values:
pd_row = df.loc[df.device_id == d_mac]
last_seen = (
pd_row.time_seen.values[0]
.astype("M8[ms]")
.astype("O")
.replace(tzinfo=pytz.timezone(settings.TIME_ZONE))
)
last_seen_seconds_ago = (
datetime.now(pytz.timezone(settings.TIME_ZONE)) - last_seen
).seconds
logger.info(
"%s %s signal: %s, last seen: %s seconds ago"
% (
settings.TERM_LBL,
d_name,
pd_row.device_power.values[0],
last_seen_seconds_ago,
)
)
# filter out events with too weak signal
df_signal = df[df["device_power"] >= min_power]
logger.info(
"%s %d events (out of %d) had a signal weaker than the minimum power (%d )"
% (
settings.TERM_LBL,
len(df.index) - len(df_signal.index),
len(df.index),
min_power,
)
)
return df_signal
def read_airodump_csv_and_return_df(
airodump_dir: str, csv_filename_prefix: str, min_power: int
):
airodump_csv = find_csv(csv_filename_prefix, target_dir=airodump_dir)
df = get_device_data_from_csv_file(airodump_csv, min_power)
return df
if __name__ == "__main__":
print("main watching airodump (use for testing only)")
print(
read_airodump_csv_and_return_df(
"/tmp/aileen_client_detection_data/", "full_airodump_file", 5
)
) | airo_tasks/watch_airodump_csv.py | import os
from datetime import datetime
import logging
import pandas as pd
import pandas.errors as pandas_errors
import pytz
import settings
logger = logging.getLogger(__name__)
def find_csv(csv_filename_prefix: str, target_dir: str = None) -> str:
"""As we specify only the airodump output file prefix, this helper function finds the whole filename."""
if target_dir is None:
target_dir = os.getcwd()
files_in_directory = os.listdir(target_dir)
files_in_directory.sort(reverse=True)
for file in files_in_directory:
if file.endswith("csv") and file.startswith(csv_filename_prefix):
return os.path.join(target_dir, file)
logger.warning(
"%s WARNING: No CSV file found in %s with prefix %s"
% (settings.TERM_LBL, target_dir, settings.AIRODUMP_FILE_PREFIX)
)
def parse_airomon_datetime(airomon_dt: str) -> datetime:
"""Parse string used by airomon and also make timezone aware."""
aileen_tz = pytz.timezone(settings.TIME_ZONE)
try:
dt: datetime = datetime.strptime(airomon_dt, "%Y-%m-%d %H:%M:%S")
dt = dt.astimezone(aileen_tz)
except ValueError:
print(
"%s Warning: could not parse datetime %s, using 1-1-1970 for this one!"
% (settings.TERM_LBL, airomon_dt)
)
dt = datetime(1970, 1, 1, 1, 1, 1, tzinfo=aileen_tz)
return dt
def get_device_data_from_csv_file(csv_filename: str, min_power: int) -> pd.DataFrame:
"""Read in the data frame and use only the columns which contain device info"""
try:
df = pd.read_csv(csv_filename, header=None, usecols=range(0, 6))
except (pandas_errors.EmptyDataError, ValueError):
print(
"%s WARNING: No data in airomon file %s or file not found"
% (settings.TERM_LBL, csv_filename)
)
return pd.DataFrame(
columns=[
"device_id",
"time_seen",
"total_packets",
"access_point_id",
"device_power",
]
)
# find the row with which starts the device info
header_row = df.loc[df[0] == "Station MAC"]
# get the index of that row
header_row_index = header_row.index[0]
# delete all the information about the device stuff
df = df[header_row_index:]
# rename the columns so the have device headers
df = df.rename(columns=df.iloc[0]).drop(df.index[0])
# remove white spaces from column headers
df.rename(columns=lambda x: x.strip(), inplace=True)
# drop the unnecessary info
df.drop("First time seen", 1, inplace=True)
df.rename(
columns={
"Station MAC": "device_id",
"Last time seen": "time_seen",
"# packets": "total_packets",
"BSSID": "access_point_id",
"Power": "device_power",
},
inplace=True,
)
# remove all blank white space, do custom operations like hashing and parsing dates and floats
df["device_id"] = df["device_id"].map(lambda x: str(x).strip())
df["time_seen"] = df["time_seen"].map(
lambda x: parse_airomon_datetime(str(x).strip())
)
df["total_packets"] = df["total_packets"].map(lambda x: str(x).strip())
df["access_point_id"] = df["access_point_id"].map(lambda x: str(x).strip())
df.device_power = df.device_power.astype(float)
# debug specific devices, if configured
for d_name, d_mac in settings.DEBUG_DEVICES.items():
if d_mac in df.device_id.values:
pd_row = df.loc[df.device_id == d_mac]
last_seen = (
pd_row.time_seen.values[0]
.astype("M8[ms]")
.astype("O")
.replace(tzinfo=pytz.timezone(settings.TIME_ZONE))
)
last_seen_seconds_ago = (
datetime.now(pytz.timezone(settings.TIME_ZONE)) - last_seen
).seconds
logger.info(
"%s %s signal: %s, last seen: %s seconds ago"
% (
settings.TERM_LBL,
d_name,
pd_row.device_power.values[0],
last_seen_seconds_ago,
)
)
# filter out events with too weak signal
df_signal = df[df["device_power"] >= min_power]
logger.info(
"%s %d events (out of %d) had a signal weaker than the minimum power (%d )"
% (
settings.TERM_LBL,
len(df.index) - len(df_signal.index),
len(df.index),
min_power,
)
)
return df_signal
def read_airodump_csv_and_return_df(
airodump_dir: str, csv_filename_prefix: str, min_power: int
):
airodump_csv = find_csv(csv_filename_prefix, target_dir=airodump_dir)
df = get_device_data_from_csv_file(airodump_csv, min_power)
return df
if __name__ == "__main__":
print("main watching airodump (use for testing only)")
print(
read_airodump_csv_and_return_df(
"/tmp/aileen_client_detection_data/", "full_airodump_file", 5
)
) | 0.497803 | 0.252303 |
import argparse
import numpy as np
import numpy.linalg as sla
def op_selectTopR(vct_input, R):
"""
Returns the Rth greatest elements indices
in input vector and store them in idxs_n.
Here, we're using this function instead of
a complete sorting one, where it's more efficient
than complete sorting function in real big data application
parameters
----------
vct_input : array, shape (T)
indicating the input vector which is a
vector we aimed to find the Rth greatest
elements. After finding those elements we
will store the indices of those specific
elements in output vector.
R : integer
indicates Rth greatest elemnts which we
are seeking for.
Returns
-------
idxs_n : array, shape (R)
a vector in which the Rth greatest elements
indices will be stored and returned as major
output of the function.
"""
R = int(R)
temp = np.argpartition(-vct_input, R)
idxs_n = temp[:R]
return idxs_n
def op_getResidual(S, u, v, idxs_n):
"""
Returns the new S matrix by calculating :
S =( S - uv )
Here the product operation between u and v
is an outer product operation.
parameters
----------
S : array, shape (T, P)
The input matrix ( befor we stored the input
file in this matrix at the main module of program)
Here, we need to update this matrix for next iteration.
u : array, shape (T)
indicating 'u_new' vector (new vector
of dictionary elements which will be used
for updating the S matrix)
v : array, shape (P)
indicating 'v' vector ( which would be
finally our output vector but here we are using
this vector for updating S matrix by applying
outer product of specific elements of v
and u_new )
idxs_n : array, shape (R)
which is a vector encompassing Rth
greatest elements indices.
Returns
-------
S : array, shape (T, P)
new S matrix based on above mentioned equation
(updating S matrix for next iteration)
"""
v_sparse = np.zeros(v.shape[0], dtype = np.float)
v_sparse[idxs_n] = v[idxs_n]
S = S - np.outer(u, v_sparse)
return S
def r1dl(S, nonzero, atoms, epsilon, seed = -1):
"""
R1DL dictionary method.
Parameters
----------
S : array, shape (T, P)
Input data: P instances, T features.
nonzero : float
Sparsity of the resulting dictionary (percentage of nonzero elements).
atoms : integer
Number of atoms in the resulting dictionary.
epsilon : float
Convergence epsilon in determining each dictionary atom.
seed : integer
Optional random seed for debugging. Set to -1 to disable (default: -1).
Returns
-------
D : array, shape (M, T)
Dictionary atoms.
Z : array, shape (M, P)
Loading matrix.
"""
T, P = S.shape
max_iteration = P * 10
R = float(nonzero * P)
# Normalize the data.
S -= S.mean(axis = 0)
S /= sla.norm(S, axis = 0)
# Generate the atom vectors.
u_old = np.zeros(T, dtype = np.float)
u_new = np.zeros(T, dtype = np.float)
v = np.zeros(P, dtype = np.float)
Z = np.zeros((atoms, P), dtype = np.float)
D = np.zeros((atoms, T), dtype = np.float)
idxs_n = np.zeros(int(R), dtype = np.int)
# Set a random seed?
if seed > -1:
np.random.seed(seed)
epsilon *= epsilon
for m in range(atoms):
it = 0
u_old = np.random.random(T)
u_old -= u_old.mean()
u_old /= sla.norm(u_old, axis = 0)
while True:
v = np.dot(u_old, S)
# Zero out all elements of v NOT in the top-R. This is how
# sparsity in the final results is explicitly enforced.
idxs_n = op_selectTopR(v, R)
temp_v = np.zeros(v.shape)
temp_v[idxs_n] = v[idxs_n]
v = temp_v
u_new = np.dot(S[:, idxs_n], v[idxs_n])
u_new /= sla.norm(u_new, axis = 0)
diff = sla.norm(u_old - u_new)
if (diff < epsilon):
break
it += 1
if (it > max_iteration):
print('WARNING: Max iteration reached; result may be unstable!\n')
break
# Copying the new vector on old one
u_old = u_new
S = op_getResidual(S, u_new, v, idxs_n)
# totoalResidual = np.sum(S ** 2)
Z[m, :] = v
D[m, :] = u_new
# All done!
return [D, Z]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Python Dictionary Learning',
add_help = 'How to use', prog = 'python R1DL.py <args>')
# Input arguments.
parser.add_argument("-i", "--input", required = True,
help = "Input filename containing matrix S.")
parser.add_argument("-r", "--pnonzero", type = float, required = True,
help = "Percentage of non-zero elements.")
parser.add_argument("-m", "--mDicatom", type = int, required = True,
help = "Number of the dictionary atoms.")
parser.add_argument("-e", "--epsilon", type = float, required = True,
help = "The value of epsilon.")
# Optional, debugging arguments.
parser.add_argument("-s", "--seed", type = float, default = -1,
help = "The random seed used to replicate results. [DEFAULT: -1]")
# Output arguments.
parser.add_argument("-d", "--dictionary", required = True,
help = "Dictionary (D) output file.")
parser.add_argument("-z", "--zmatrix", required = True,
help = "Loading matrix (Z) output file.")
args = vars(parser.parse_args())
# Parse out the command-line arguments.
M = args['mDicatom']
R = args['pnonzero']
epsilon = args['epsilon']
file_s = args['input']
file_D = args['dictionary']
file_Z = args['zmatrix']
# Read the inputs and generate variables to pass to R1DL.
S = np.loadtxt(file_s)
D, Z = r1dl(S, R, M, epsilon, args['seed'])
# Write the output to files.
np.savetxt(file_D, D, fmt = '%.5lf\t')
np.savetxt(file_Z, Z, fmt = '%.5lf\t') | core_numpy.py | import argparse
import numpy as np
import numpy.linalg as sla
def op_selectTopR(vct_input, R):
"""
Returns the Rth greatest elements indices
in input vector and store them in idxs_n.
Here, we're using this function instead of
a complete sorting one, where it's more efficient
than complete sorting function in real big data application
parameters
----------
vct_input : array, shape (T)
indicating the input vector which is a
vector we aimed to find the Rth greatest
elements. After finding those elements we
will store the indices of those specific
elements in output vector.
R : integer
indicates Rth greatest elemnts which we
are seeking for.
Returns
-------
idxs_n : array, shape (R)
a vector in which the Rth greatest elements
indices will be stored and returned as major
output of the function.
"""
R = int(R)
temp = np.argpartition(-vct_input, R)
idxs_n = temp[:R]
return idxs_n
def op_getResidual(S, u, v, idxs_n):
"""
Returns the new S matrix by calculating :
S =( S - uv )
Here the product operation between u and v
is an outer product operation.
parameters
----------
S : array, shape (T, P)
The input matrix ( befor we stored the input
file in this matrix at the main module of program)
Here, we need to update this matrix for next iteration.
u : array, shape (T)
indicating 'u_new' vector (new vector
of dictionary elements which will be used
for updating the S matrix)
v : array, shape (P)
indicating 'v' vector ( which would be
finally our output vector but here we are using
this vector for updating S matrix by applying
outer product of specific elements of v
and u_new )
idxs_n : array, shape (R)
which is a vector encompassing Rth
greatest elements indices.
Returns
-------
S : array, shape (T, P)
new S matrix based on above mentioned equation
(updating S matrix for next iteration)
"""
v_sparse = np.zeros(v.shape[0], dtype = np.float)
v_sparse[idxs_n] = v[idxs_n]
S = S - np.outer(u, v_sparse)
return S
def r1dl(S, nonzero, atoms, epsilon, seed = -1):
"""
R1DL dictionary method.
Parameters
----------
S : array, shape (T, P)
Input data: P instances, T features.
nonzero : float
Sparsity of the resulting dictionary (percentage of nonzero elements).
atoms : integer
Number of atoms in the resulting dictionary.
epsilon : float
Convergence epsilon in determining each dictionary atom.
seed : integer
Optional random seed for debugging. Set to -1 to disable (default: -1).
Returns
-------
D : array, shape (M, T)
Dictionary atoms.
Z : array, shape (M, P)
Loading matrix.
"""
T, P = S.shape
max_iteration = P * 10
R = float(nonzero * P)
# Normalize the data.
S -= S.mean(axis = 0)
S /= sla.norm(S, axis = 0)
# Generate the atom vectors.
u_old = np.zeros(T, dtype = np.float)
u_new = np.zeros(T, dtype = np.float)
v = np.zeros(P, dtype = np.float)
Z = np.zeros((atoms, P), dtype = np.float)
D = np.zeros((atoms, T), dtype = np.float)
idxs_n = np.zeros(int(R), dtype = np.int)
# Set a random seed?
if seed > -1:
np.random.seed(seed)
epsilon *= epsilon
for m in range(atoms):
it = 0
u_old = np.random.random(T)
u_old -= u_old.mean()
u_old /= sla.norm(u_old, axis = 0)
while True:
v = np.dot(u_old, S)
# Zero out all elements of v NOT in the top-R. This is how
# sparsity in the final results is explicitly enforced.
idxs_n = op_selectTopR(v, R)
temp_v = np.zeros(v.shape)
temp_v[idxs_n] = v[idxs_n]
v = temp_v
u_new = np.dot(S[:, idxs_n], v[idxs_n])
u_new /= sla.norm(u_new, axis = 0)
diff = sla.norm(u_old - u_new)
if (diff < epsilon):
break
it += 1
if (it > max_iteration):
print('WARNING: Max iteration reached; result may be unstable!\n')
break
# Copying the new vector on old one
u_old = u_new
S = op_getResidual(S, u_new, v, idxs_n)
# totoalResidual = np.sum(S ** 2)
Z[m, :] = v
D[m, :] = u_new
# All done!
return [D, Z]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Python Dictionary Learning',
add_help = 'How to use', prog = 'python R1DL.py <args>')
# Input arguments.
parser.add_argument("-i", "--input", required = True,
help = "Input filename containing matrix S.")
parser.add_argument("-r", "--pnonzero", type = float, required = True,
help = "Percentage of non-zero elements.")
parser.add_argument("-m", "--mDicatom", type = int, required = True,
help = "Number of the dictionary atoms.")
parser.add_argument("-e", "--epsilon", type = float, required = True,
help = "The value of epsilon.")
# Optional, debugging arguments.
parser.add_argument("-s", "--seed", type = float, default = -1,
help = "The random seed used to replicate results. [DEFAULT: -1]")
# Output arguments.
parser.add_argument("-d", "--dictionary", required = True,
help = "Dictionary (D) output file.")
parser.add_argument("-z", "--zmatrix", required = True,
help = "Loading matrix (Z) output file.")
args = vars(parser.parse_args())
# Parse out the command-line arguments.
M = args['mDicatom']
R = args['pnonzero']
epsilon = args['epsilon']
file_s = args['input']
file_D = args['dictionary']
file_Z = args['zmatrix']
# Read the inputs and generate variables to pass to R1DL.
S = np.loadtxt(file_s)
D, Z = r1dl(S, R, M, epsilon, args['seed'])
# Write the output to files.
np.savetxt(file_D, D, fmt = '%.5lf\t')
np.savetxt(file_Z, Z, fmt = '%.5lf\t') | 0.722527 | 0.649579 |
import re, pywikibot, os
from bs4 import BeautifulSoup, Tag
#os.chdir(r'projects/cee')
site = pywikibot.Site("lv", "wikipedia")
apiq = '''{
"action": "parse",
"format": "json",
"page": "Diāna Hadžijeva",
"prop": "text|langlinks|categories|links|templates|images|externallinks|sections|revid|displaytitle|iwlinks|properties|parsewarnings"
}'''
def get_prose(apiresult):
#http://stackoverflow.com/questions/40052116/how-to-remove-html-tags-in-beautifulsoup-when-i-have-contents
#http://stackoverflow.com/questions/18453176/removing-all-html-tags-along-with-their-content-from-text
soup = BeautifulSoup(apiresult, "html.parser")
for tag in soup.find_all('table'):
tag.replaceWith('')
for tag in soup.find_all('span',{'class':'noexcerpt'}):
tag.replaceWith('')
for tag in soup.find_all('span',{'class':'mw-editsection'}):
tag.replaceWith('')
for tag in soup.find_all('ol',{'class':'references'}):
tag.replaceWith('')
for tag in soup.find_all('h2'):
tag.replaceWith('')
for tag in soup.find_all('h3'):
tag.replaceWith('')
for tag in soup.find_all('h4'):
tag.replaceWith('')
#
thistext = str(soup.get_text()).strip('\s\n')
thistext = thistext.replace('\n','')
#pywikibot.output('---{}-----'.format(thistext))
#print(len(thistext))
return len(thistext)
def do_api(article):
r = pywikibot.data.api.Request(site=site, action='parse', format='json',
page=article, prop='text').submit()
json_data = r['parse']['text']['*']
#itemlist = [blah['title'] for blah in json_data]
return json_data#[0] if len(itemlist)>0 else False
#
def main():
fileop = eval(open('cee2dfgfdfgdfgdfgdgdfgd-2-final.txt', 'r', encoding='utf-8').read())#fileop = eval(open('ceeraksti-prose.txt', 'r', encoding='utf-8').read())
fileop2 = open('ceeraksti-prose2.txt', 'w', encoding='utf-8')
alreadyhave = {}#eval(open('ceeraksti-prose22.txt', 'r', encoding='utf-8').read())
fileop22 = open('ceeraksti-prose22.txt', 'w', encoding='utf-8')
articles = [f[0] for f in fileop if f[0] not in alreadyhave]
print(len(articles))
gdfdf = {}
gdfdf2 = {}
gdfdf2.update(alreadyhave)
for article in articles:
#pywikibot.output(article)
apires = do_api(article)
gdfdf.update({article:apires})
reslen = get_prose(apires)
gdfdf2.update({article:reslen})
#print(reslen)
fileop2.write(str(gdfdf))
fileop22.write(str(gdfdf2))
print('done')
#
main() | prose2.py | import re, pywikibot, os
from bs4 import BeautifulSoup, Tag
#os.chdir(r'projects/cee')
site = pywikibot.Site("lv", "wikipedia")
apiq = '''{
"action": "parse",
"format": "json",
"page": "Diāna Hadžijeva",
"prop": "text|langlinks|categories|links|templates|images|externallinks|sections|revid|displaytitle|iwlinks|properties|parsewarnings"
}'''
def get_prose(apiresult):
#http://stackoverflow.com/questions/40052116/how-to-remove-html-tags-in-beautifulsoup-when-i-have-contents
#http://stackoverflow.com/questions/18453176/removing-all-html-tags-along-with-their-content-from-text
soup = BeautifulSoup(apiresult, "html.parser")
for tag in soup.find_all('table'):
tag.replaceWith('')
for tag in soup.find_all('span',{'class':'noexcerpt'}):
tag.replaceWith('')
for tag in soup.find_all('span',{'class':'mw-editsection'}):
tag.replaceWith('')
for tag in soup.find_all('ol',{'class':'references'}):
tag.replaceWith('')
for tag in soup.find_all('h2'):
tag.replaceWith('')
for tag in soup.find_all('h3'):
tag.replaceWith('')
for tag in soup.find_all('h4'):
tag.replaceWith('')
#
thistext = str(soup.get_text()).strip('\s\n')
thistext = thistext.replace('\n','')
#pywikibot.output('---{}-----'.format(thistext))
#print(len(thistext))
return len(thistext)
def do_api(article):
r = pywikibot.data.api.Request(site=site, action='parse', format='json',
page=article, prop='text').submit()
json_data = r['parse']['text']['*']
#itemlist = [blah['title'] for blah in json_data]
return json_data#[0] if len(itemlist)>0 else False
#
def main():
fileop = eval(open('cee2dfgfdfgdfgdfgdgdfgd-2-final.txt', 'r', encoding='utf-8').read())#fileop = eval(open('ceeraksti-prose.txt', 'r', encoding='utf-8').read())
fileop2 = open('ceeraksti-prose2.txt', 'w', encoding='utf-8')
alreadyhave = {}#eval(open('ceeraksti-prose22.txt', 'r', encoding='utf-8').read())
fileop22 = open('ceeraksti-prose22.txt', 'w', encoding='utf-8')
articles = [f[0] for f in fileop if f[0] not in alreadyhave]
print(len(articles))
gdfdf = {}
gdfdf2 = {}
gdfdf2.update(alreadyhave)
for article in articles:
#pywikibot.output(article)
apires = do_api(article)
gdfdf.update({article:apires})
reslen = get_prose(apires)
gdfdf2.update({article:reslen})
#print(reslen)
fileop2.write(str(gdfdf))
fileop22.write(str(gdfdf2))
print('done')
#
main() | 0.053151 | 0.077938 |
from datetime import datetime
import rdkit
from tckdb.backend.app.models.np_species import NonPhysicalSpecies
timestamp = datetime.timestamp(datetime.utcnow())
formaldehyde_xyz = {'symbols': ('C', 'O', 'H', 'H'),
'isotopes': (12, 16, 1, 1),
'coords': ((-0.0122240982, 0.0001804054, -0.00162116),
(1.2016481968, -0.0177341701, 0.1593624097),
(-0.5971643978, 0.9327281670, 0.0424401022),
(-0.5922597008, -0.9151744023, -0.2001813507))}
formaldehyde_adj = """1 C u0 p0 c0 {2,D} {3,S} {4,S}
2 O u0 p2 c0 {1,D}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}"""
def test_non_physical_species_model():
"""Test creating an instance of NonPhysicalSpecies"""
np_species_1 = NonPhysicalSpecies(label='formaldehyde',
timestamp=timestamp,
reviewed=False,
approved=False,
smiles='C=O',
inchi='InChI=1S/CH2O/c1-2/h1H2',
inchi_key=rdkit.Chem.inchi.InchiToInchiKey('InChI=1S/CH2O/c1-2/h1H2'),
charge=0,
multiplicity=1,
electronic_state='X',
coordinates=formaldehyde_xyz,
graph=formaldehyde_adj,
conformation_method='CCCBDB',
is_well=True,
is_global_min=True,
is_ts=False,
opt_path='path_opt',
freq_path='path_freq',
sp_path='path_sp',
extras={'reason': 'testing extras'},
)
assert np_species_1.label == 'formaldehyde'
assert np_species_1.timestamp == timestamp
assert np_species_1.retracted is None
assert np_species_1.reviewed is False
assert np_species_1.approved is False
assert np_species_1.reviewer_flags is None
assert np_species_1.smiles == 'C=O'
assert np_species_1.inchi == 'InChI=1S/CH2O/c1-2/h1H2'
assert np_species_1.inchi_key == '<KEY>'
assert np_species_1.charge == 0
assert np_species_1.multiplicity == 1
assert np_species_1.electronic_state == 'X'
assert np_species_1.coordinates == formaldehyde_xyz
assert np_species_1.graph == formaldehyde_adj
assert np_species_1.fragments is None
assert np_species_1.fragment_orientation is None
assert np_species_1.conformation_method == 'CCCBDB'
assert np_species_1.is_well is True
assert np_species_1.is_global_min is True
assert np_species_1.global_min_geometry is None
assert np_species_1.is_ts is False
assert np_species_1.irc_trajectories is None
assert np_species_1.opt_path == 'path_opt'
assert np_species_1.freq_path == 'path_freq'
assert np_species_1.scan_paths is None
assert np_species_1.irc_paths is None
assert np_species_1.sp_path == 'path_sp'
assert np_species_1.unconverged_jobs is None
assert np_species_1.extras == {'reason': 'testing extras'}
assert str(np_species_1) == '<NonPhysicalSpecies(id=None, label=formaldehyde, smiles=C=O)>' | tckdb/backend/app/tests/models/test_np_species.py | from datetime import datetime
import rdkit
from tckdb.backend.app.models.np_species import NonPhysicalSpecies
timestamp = datetime.timestamp(datetime.utcnow())
formaldehyde_xyz = {'symbols': ('C', 'O', 'H', 'H'),
'isotopes': (12, 16, 1, 1),
'coords': ((-0.0122240982, 0.0001804054, -0.00162116),
(1.2016481968, -0.0177341701, 0.1593624097),
(-0.5971643978, 0.9327281670, 0.0424401022),
(-0.5922597008, -0.9151744023, -0.2001813507))}
formaldehyde_adj = """1 C u0 p0 c0 {2,D} {3,S} {4,S}
2 O u0 p2 c0 {1,D}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}"""
def test_non_physical_species_model():
"""Test creating an instance of NonPhysicalSpecies"""
np_species_1 = NonPhysicalSpecies(label='formaldehyde',
timestamp=timestamp,
reviewed=False,
approved=False,
smiles='C=O',
inchi='InChI=1S/CH2O/c1-2/h1H2',
inchi_key=rdkit.Chem.inchi.InchiToInchiKey('InChI=1S/CH2O/c1-2/h1H2'),
charge=0,
multiplicity=1,
electronic_state='X',
coordinates=formaldehyde_xyz,
graph=formaldehyde_adj,
conformation_method='CCCBDB',
is_well=True,
is_global_min=True,
is_ts=False,
opt_path='path_opt',
freq_path='path_freq',
sp_path='path_sp',
extras={'reason': 'testing extras'},
)
assert np_species_1.label == 'formaldehyde'
assert np_species_1.timestamp == timestamp
assert np_species_1.retracted is None
assert np_species_1.reviewed is False
assert np_species_1.approved is False
assert np_species_1.reviewer_flags is None
assert np_species_1.smiles == 'C=O'
assert np_species_1.inchi == 'InChI=1S/CH2O/c1-2/h1H2'
assert np_species_1.inchi_key == '<KEY>'
assert np_species_1.charge == 0
assert np_species_1.multiplicity == 1
assert np_species_1.electronic_state == 'X'
assert np_species_1.coordinates == formaldehyde_xyz
assert np_species_1.graph == formaldehyde_adj
assert np_species_1.fragments is None
assert np_species_1.fragment_orientation is None
assert np_species_1.conformation_method == 'CCCBDB'
assert np_species_1.is_well is True
assert np_species_1.is_global_min is True
assert np_species_1.global_min_geometry is None
assert np_species_1.is_ts is False
assert np_species_1.irc_trajectories is None
assert np_species_1.opt_path == 'path_opt'
assert np_species_1.freq_path == 'path_freq'
assert np_species_1.scan_paths is None
assert np_species_1.irc_paths is None
assert np_species_1.sp_path == 'path_sp'
assert np_species_1.unconverged_jobs is None
assert np_species_1.extras == {'reason': 'testing extras'}
assert str(np_species_1) == '<NonPhysicalSpecies(id=None, label=formaldehyde, smiles=C=O)>' | 0.688049 | 0.406509 |
stanCode Breakout Project
Adapted from <NAME>'s Breakout by
<NAME>, <NAME>, <NAME>,
and <NAME>
"""
from campy.graphics.gobjects import GRect
from campy.gui.events.timer import pause
from log_in_page import Log_in_page
from breakoutgraphics import BreakoutGraphics
from campy.gui.events.mouse import onmouseclicked
"""
Global Variables
"""
FRAME_RATE = 1000 / 500 # 120 frames per second.
NUM_LIVES = 3 # this number represent the left chances to challenge the game.
log_in_page = Log_in_page()
def main():
"""
In the beginning, a loading animation will start, once it achieve 100%, two button will show on windows.
If player click these two button, game animation loop will be triggered and game page will show.
"""
onmouseclicked(animation_loop)
log_in_page.loading()
log_in_page.add_play_button()
log_in_page.add_fb_button()
def animation_loop(m):
global NUM_LIVES
enter_game_page(m)
while graphics.total_bricks != 0 and NUM_LIVES != 0:
if graphics.mouse_click:
graphics.ball_move()
graphics.accessory_move()
if graphics.ball.y > graphics.window.height:
graphics.reset_ball_paddle()
NUM_LIVES -= 1
graphics.window.remove(graphics.heart_icon_box[NUM_LIVES])
pause(FRAME_RATE)
if NUM_LIVES == 0:
graphics.is_game_over = True
graphics.show_result_label()
onmouseclicked(play_again)
print('Game Over')
else:
graphics.show_result_label()
print('You Win!')
onmouseclicked(play_again)
def enter_game_page(m):
global graphics # set graphics object to global so animation loop can call it.
obj = log_in_page.window.get_object_at(m.x, m.y)
if obj is log_in_page.play_button or obj is log_in_page.play_label or obj is log_in_page.fb_button \
or obj is log_in_page.fb_label or obj is log_in_page:
graphics = BreakoutGraphics() # new window will show at this moment.
log_in_page.window.close() # close the login window.
graphics.draw_bricks()
graphics.add_score_board()
graphics.add_accessories(10) # give argument to determine the total number of blue/red blocks will show.
graphics.dx_getter()
graphics.dy_getter()
"""
The following part has some bugs I still figuring out, that is, when I use GPolygon to draw a heart
shape and add it to window, it seems to conflict to method: window.get_object_at(x,y). Though it's not perfect,
I use GOval to represent NUM_LIVES on the right-top window.
"""
for i in range(NUM_LIVES):
heart = graphics.draw_heart_icon()
graphics.window.add(heart, graphics.window.width - graphics.heart_shape.width * (i + 1) - 5 * (i + 1),
graphics.score_board_label.y - graphics.heart_shape.height)
def play_again(n):
"""
After finishing the game, if player click the result label, which show Game over or You win, the login page will
show up again and player can start new run of game.
:param n: the mouse positional information after game finished.
"""
global log_in_page, NUM_LIVES
if graphics.window.get_object_at(n.x, n.y) is graphics.result_label:
NUM_LIVES = 3
log_in_page = Log_in_page()
graphics.window.remove(graphics.result_label)
graphics.window.close()
log_in_page.add_play_button()
log_in_page.add_fb_button()
log_in_page.solid_bar = GRect(log_in_page.load_bar.width, log_in_page.load_bar.height)
log_in_page.loading()
log_in_page.window.add(log_in_page.solid_bar, log_in_page.load_bar.x, log_in_page.load_bar.y)
log_in_page.load_label.text = '100%'
log_in_page.window.add(log_in_page.load_label,
log_in_page.load_bar.x + log_in_page.load_bar.width - log_in_page.load_label.width,
log_in_page.load_bar.y + log_in_page.load_bar.height + log_in_page.load_label.height + 5)
onmouseclicked(animation_loop)
if __name__ == '__main__':
main() | breakout.py | stanCode Breakout Project
Adapted from <NAME>'s Breakout by
<NAME>, <NAME>, <NAME>,
and <NAME>
"""
from campy.graphics.gobjects import GRect
from campy.gui.events.timer import pause
from log_in_page import Log_in_page
from breakoutgraphics import BreakoutGraphics
from campy.gui.events.mouse import onmouseclicked
"""
Global Variables
"""
FRAME_RATE = 1000 / 500 # 120 frames per second.
NUM_LIVES = 3 # this number represent the left chances to challenge the game.
log_in_page = Log_in_page()
def main():
"""
In the beginning, a loading animation will start, once it achieve 100%, two button will show on windows.
If player click these two button, game animation loop will be triggered and game page will show.
"""
onmouseclicked(animation_loop)
log_in_page.loading()
log_in_page.add_play_button()
log_in_page.add_fb_button()
def animation_loop(m):
global NUM_LIVES
enter_game_page(m)
while graphics.total_bricks != 0 and NUM_LIVES != 0:
if graphics.mouse_click:
graphics.ball_move()
graphics.accessory_move()
if graphics.ball.y > graphics.window.height:
graphics.reset_ball_paddle()
NUM_LIVES -= 1
graphics.window.remove(graphics.heart_icon_box[NUM_LIVES])
pause(FRAME_RATE)
if NUM_LIVES == 0:
graphics.is_game_over = True
graphics.show_result_label()
onmouseclicked(play_again)
print('Game Over')
else:
graphics.show_result_label()
print('You Win!')
onmouseclicked(play_again)
def enter_game_page(m):
global graphics # set graphics object to global so animation loop can call it.
obj = log_in_page.window.get_object_at(m.x, m.y)
if obj is log_in_page.play_button or obj is log_in_page.play_label or obj is log_in_page.fb_button \
or obj is log_in_page.fb_label or obj is log_in_page:
graphics = BreakoutGraphics() # new window will show at this moment.
log_in_page.window.close() # close the login window.
graphics.draw_bricks()
graphics.add_score_board()
graphics.add_accessories(10) # give argument to determine the total number of blue/red blocks will show.
graphics.dx_getter()
graphics.dy_getter()
"""
The following part has some bugs I still figuring out, that is, when I use GPolygon to draw a heart
shape and add it to window, it seems to conflict to method: window.get_object_at(x,y). Though it's not perfect,
I use GOval to represent NUM_LIVES on the right-top window.
"""
for i in range(NUM_LIVES):
heart = graphics.draw_heart_icon()
graphics.window.add(heart, graphics.window.width - graphics.heart_shape.width * (i + 1) - 5 * (i + 1),
graphics.score_board_label.y - graphics.heart_shape.height)
def play_again(n):
"""
After finishing the game, if player click the result label, which show Game over or You win, the login page will
show up again and player can start new run of game.
:param n: the mouse positional information after game finished.
"""
global log_in_page, NUM_LIVES
if graphics.window.get_object_at(n.x, n.y) is graphics.result_label:
NUM_LIVES = 3
log_in_page = Log_in_page()
graphics.window.remove(graphics.result_label)
graphics.window.close()
log_in_page.add_play_button()
log_in_page.add_fb_button()
log_in_page.solid_bar = GRect(log_in_page.load_bar.width, log_in_page.load_bar.height)
log_in_page.loading()
log_in_page.window.add(log_in_page.solid_bar, log_in_page.load_bar.x, log_in_page.load_bar.y)
log_in_page.load_label.text = '100%'
log_in_page.window.add(log_in_page.load_label,
log_in_page.load_bar.x + log_in_page.load_bar.width - log_in_page.load_label.width,
log_in_page.load_bar.y + log_in_page.load_bar.height + log_in_page.load_label.height + 5)
onmouseclicked(animation_loop)
if __name__ == '__main__':
main() | 0.541409 | 0.206654 |
from abc import ABCMeta, abstractmethod
import glob
import os
import tarfile
from tempfile import TemporaryDirectory
import tensorflow as tf
def affine(input_tensor, output_size, bias=True, bias_start=0.0,
input_size=None, scope="affine", sparse_input=False):
"""Add an affine transformation of `input_tensor` to the current graph.
Note: This op is loosely based on tensorflow.python.ops.rnn_cell.linear.
An affine transformation is a linear transformation with a shift,
`t = tf.matmul(input_tensor, W) + b`.
Parameters
----------
input_tensor : tensorflow Tensor object, rank 2
Input tensor to be transformed.
output_size : int
The output will be size [a, output_size] where `input_tensor` has
shape [a, b].
bias : bool, optional
If True, apply a bias to the transformation. If False, only a linear
transformation is applied (i.e., `t = tf.matmul(W, input_tensor)`).
bias_start : float, optional
The initial value for the bias `b`.
input_size : int, optional
Second dimension of the rank 2 input tensor. Required for sparse input
tensors.
sparse_input : bool, optional
Set to True if `input_tensor` is sparse.
Returns
-------
t : tensorflow tensor object
The affine transformation of `input_tensor`.
"""
# The input size is needed for sparse matrices.
if input_size is None:
input_size = input_tensor.get_shape().as_list()[1]
with tf.variable_scope(scope):
W_0 = tf.get_variable(
"weights0",
[input_size, output_size])
# If the input is sparse, then use a special matmul routine.
matmul = tf.sparse_tensor_dense_matmul if sparse_input else tf.matmul
t = matmul(input_tensor, W_0)
if bias:
b_0 = tf.get_variable(
"bias0",
[output_size],
initializer=tf.constant_initializer(bias_start))
t = tf.add(t, b_0)
return t
class TFPicklingBase(object, metaclass=ABCMeta):
"""Base class for pickling TensorFlow-based scikit-learn estimators.
This base class defines a few standard attributes to enable fairly
transparent pickling of TensorFlow models. Note that TensorFlow has
a custom saving mechanism that makes pickling (and thus using it in
scikit-learn, etc.) not straightforward.
NOTE: This base class must come first in the list of classes any child
class inherits from.
When pickling an object, if the `self._is_fitted` property is True:
1. The session at `self._session` is saved using the saver at
`self._saver` to a temporary file.
2. The saved data is then read into memory and attached to the
object state at '_saved_model'.
3. The fitted state of the model is saved at '_fitted' as True.
When unpickling the object:
1. All variables in the state of the object are set using
`self.__dict__` except the '_saved_model' entry.
2. If the '_fitted' key is in the state of the object and is True
2a. The '_saved_model' entry is written to a temporary file.
2b. A new TF graph is instantiated at `self.graph_`.
2c. `self._build_tf_graph()`` is called. This instantiates a
`tf.Saver` at `self._saver` and a `tf.Session` at
`self._session`.
2d. The `self._saver` is used to restore previous session to the
current one.
To use this base class properly, the child class needs to
1. Implement the abstract method `self._set_up_graph`. This method
should build the required TF graph.
2. Exactly once (e.g., in the `fit` method), instantiate a `tf.Graph`
at `self.graph_` and then call `self._build_tf_graph` inside the
`tf.Graph` context block. `self._build_tf_graph` will call
`self._set_up_graph` and further instantiate the `tf.Saver` and
`tf.Session`.
3. After 2. is done, set `self._is_fitted = True`.
4. Make sure override `__getstate__` to store any extra information
about your estimator to the state of the object. When doing this,
call `state = super().__getstate__()` and then append to the
`state`.
See the example below and also the MLP classes and base class,
MLPBaseEstimator.
Example
-------
```python
# example class for using TFPicklingBase - adds a scalar to input 1d
# arrays
class TFAdder(TFPicklingBase):
def __init__(self, add_val):
# real scikit-learn estimators should do all of this work in the
# fit method
self.add_val = float(add_val)
self.graph_ = tf.Graph()
with self.graph_.as_default():
self._build_tf_graph()
self._session.run(tf.initialize_all_variables())
self._is_fitted = True
def _set_up_graph(self):
self._a = tf.placeholder(tf.float32, shape=[None], name='a')
self._add_val = tf.Variable(self.add_val,
name='add_val',
dtype=tf.float32)
self._sum = tf.add(self._a, self._add_val, name='sum')
def add(self, a):
with self.graph_.as_default():
val = self._session.run(self._sum, feed_dict={self._a: a})
return val
def __getstate__(self):
state = super().__getstate__()
# add add_val to state
state['add_val'] = self.add_val
return state
```
"""
@property
def _is_fitted(self):
"""Return True if the model has been at least partially fitted.
Returns
-------
bool
Notes
-----
This is to indicate whether, e.g., the TensorFlow graph for the model
has been created.
"""
return getattr(self, '_fitted', False)
@_is_fitted.setter
def _is_fitted(self, b):
"""Set whether the model has been at least partially fitted.
Parameters
----------
b : bool
True if the model has been fitted.
"""
self._fitted = b
def __getstate__(self):
# Override __getstate__ so that TF model parameters are pickled
# properly.
if self._is_fitted:
with TemporaryDirectory() as tmpdir:
# Serialize the model.
self._saver.save(
self._session, os.path.join(tmpdir, 'saved_model'))
# TF writes a bunch of files so tar them.
fnames = glob.glob(os.path.join(tmpdir, '*'))
tarname = os.path.join(tmpdir, 'saved_model.tar')
with tarfile.open(tarname, "w") as tar:
for f in fnames:
tar.add(f, arcname=os.path.split(f)[-1])
# Now read the state back into memory.
with open(tarname, 'rb') as f:
saved_model = f.read()
# Note: don't include the graph since it should be recreated.
state = {}
# Add fitted attributes if the model has been fitted.
if self._is_fitted:
state['_fitted'] = True
state['_saved_model'] = saved_model
return state
def __setstate__(self, state):
# Override __setstate__ so that TF model parameters are unpickled
# properly.
for k, v in state.items():
if k != '_saved_model':
self.__dict__[k] = v
if state.get('_fitted', False):
with TemporaryDirectory() as tmpdir:
# Write out the serialized tarfile.
tarname = os.path.join(tmpdir, 'saved_model.tar')
with open(tarname, 'wb') as f:
f.write(state['_saved_model'])
# Untar it.
with tarfile.open(tarname, 'r') as tar:
tar.extractall(path=tmpdir)
# And restore.
self.graph_ = tf.Graph()
with self.graph_.as_default():
self._build_tf_graph()
self._saver.restore(
self._session, os.path.join(tmpdir, 'saved_model'))
def _build_tf_graph(self):
"""Build the TF graph, setup model saving and setup a TF session.
Notes
-----
This method initializes a TF Saver and a TF Session via
```python
self._saver = tf.train.Saver()
self._session = tf.Session()
```
These calls are made after `self._set_up_graph()`` is called.
See the main class docs for how to properly call this method from a
child class.
"""
self._set_up_graph()
self._saver = tf.train.Saver()
self._session = tf.Session()
@abstractmethod
def _set_up_graph(self):
"""Assemble the TF graph for estimator.
Notes
-----
Child classes should add the TF ops to the graph they want to
implement here.
"""
pass | muffnn/core.py | from abc import ABCMeta, abstractmethod
import glob
import os
import tarfile
from tempfile import TemporaryDirectory
import tensorflow as tf
def affine(input_tensor, output_size, bias=True, bias_start=0.0,
input_size=None, scope="affine", sparse_input=False):
"""Add an affine transformation of `input_tensor` to the current graph.
Note: This op is loosely based on tensorflow.python.ops.rnn_cell.linear.
An affine transformation is a linear transformation with a shift,
`t = tf.matmul(input_tensor, W) + b`.
Parameters
----------
input_tensor : tensorflow Tensor object, rank 2
Input tensor to be transformed.
output_size : int
The output will be size [a, output_size] where `input_tensor` has
shape [a, b].
bias : bool, optional
If True, apply a bias to the transformation. If False, only a linear
transformation is applied (i.e., `t = tf.matmul(W, input_tensor)`).
bias_start : float, optional
The initial value for the bias `b`.
input_size : int, optional
Second dimension of the rank 2 input tensor. Required for sparse input
tensors.
sparse_input : bool, optional
Set to True if `input_tensor` is sparse.
Returns
-------
t : tensorflow tensor object
The affine transformation of `input_tensor`.
"""
# The input size is needed for sparse matrices.
if input_size is None:
input_size = input_tensor.get_shape().as_list()[1]
with tf.variable_scope(scope):
W_0 = tf.get_variable(
"weights0",
[input_size, output_size])
# If the input is sparse, then use a special matmul routine.
matmul = tf.sparse_tensor_dense_matmul if sparse_input else tf.matmul
t = matmul(input_tensor, W_0)
if bias:
b_0 = tf.get_variable(
"bias0",
[output_size],
initializer=tf.constant_initializer(bias_start))
t = tf.add(t, b_0)
return t
class TFPicklingBase(object, metaclass=ABCMeta):
"""Base class for pickling TensorFlow-based scikit-learn estimators.
This base class defines a few standard attributes to enable fairly
transparent pickling of TensorFlow models. Note that TensorFlow has
a custom saving mechanism that makes pickling (and thus using it in
scikit-learn, etc.) not straightforward.
NOTE: This base class must come first in the list of classes any child
class inherits from.
When pickling an object, if the `self._is_fitted` property is True:
1. The session at `self._session` is saved using the saver at
`self._saver` to a temporary file.
2. The saved data is then read into memory and attached to the
object state at '_saved_model'.
3. The fitted state of the model is saved at '_fitted' as True.
When unpickling the object:
1. All variables in the state of the object are set using
`self.__dict__` except the '_saved_model' entry.
2. If the '_fitted' key is in the state of the object and is True
2a. The '_saved_model' entry is written to a temporary file.
2b. A new TF graph is instantiated at `self.graph_`.
2c. `self._build_tf_graph()`` is called. This instantiates a
`tf.Saver` at `self._saver` and a `tf.Session` at
`self._session`.
2d. The `self._saver` is used to restore previous session to the
current one.
To use this base class properly, the child class needs to
1. Implement the abstract method `self._set_up_graph`. This method
should build the required TF graph.
2. Exactly once (e.g., in the `fit` method), instantiate a `tf.Graph`
at `self.graph_` and then call `self._build_tf_graph` inside the
`tf.Graph` context block. `self._build_tf_graph` will call
`self._set_up_graph` and further instantiate the `tf.Saver` and
`tf.Session`.
3. After 2. is done, set `self._is_fitted = True`.
4. Make sure override `__getstate__` to store any extra information
about your estimator to the state of the object. When doing this,
call `state = super().__getstate__()` and then append to the
`state`.
See the example below and also the MLP classes and base class,
MLPBaseEstimator.
Example
-------
```python
# example class for using TFPicklingBase - adds a scalar to input 1d
# arrays
class TFAdder(TFPicklingBase):
def __init__(self, add_val):
# real scikit-learn estimators should do all of this work in the
# fit method
self.add_val = float(add_val)
self.graph_ = tf.Graph()
with self.graph_.as_default():
self._build_tf_graph()
self._session.run(tf.initialize_all_variables())
self._is_fitted = True
def _set_up_graph(self):
self._a = tf.placeholder(tf.float32, shape=[None], name='a')
self._add_val = tf.Variable(self.add_val,
name='add_val',
dtype=tf.float32)
self._sum = tf.add(self._a, self._add_val, name='sum')
def add(self, a):
with self.graph_.as_default():
val = self._session.run(self._sum, feed_dict={self._a: a})
return val
def __getstate__(self):
state = super().__getstate__()
# add add_val to state
state['add_val'] = self.add_val
return state
```
"""
@property
def _is_fitted(self):
"""Return True if the model has been at least partially fitted.
Returns
-------
bool
Notes
-----
This is to indicate whether, e.g., the TensorFlow graph for the model
has been created.
"""
return getattr(self, '_fitted', False)
@_is_fitted.setter
def _is_fitted(self, b):
"""Set whether the model has been at least partially fitted.
Parameters
----------
b : bool
True if the model has been fitted.
"""
self._fitted = b
def __getstate__(self):
# Override __getstate__ so that TF model parameters are pickled
# properly.
if self._is_fitted:
with TemporaryDirectory() as tmpdir:
# Serialize the model.
self._saver.save(
self._session, os.path.join(tmpdir, 'saved_model'))
# TF writes a bunch of files so tar them.
fnames = glob.glob(os.path.join(tmpdir, '*'))
tarname = os.path.join(tmpdir, 'saved_model.tar')
with tarfile.open(tarname, "w") as tar:
for f in fnames:
tar.add(f, arcname=os.path.split(f)[-1])
# Now read the state back into memory.
with open(tarname, 'rb') as f:
saved_model = f.read()
# Note: don't include the graph since it should be recreated.
state = {}
# Add fitted attributes if the model has been fitted.
if self._is_fitted:
state['_fitted'] = True
state['_saved_model'] = saved_model
return state
def __setstate__(self, state):
# Override __setstate__ so that TF model parameters are unpickled
# properly.
for k, v in state.items():
if k != '_saved_model':
self.__dict__[k] = v
if state.get('_fitted', False):
with TemporaryDirectory() as tmpdir:
# Write out the serialized tarfile.
tarname = os.path.join(tmpdir, 'saved_model.tar')
with open(tarname, 'wb') as f:
f.write(state['_saved_model'])
# Untar it.
with tarfile.open(tarname, 'r') as tar:
tar.extractall(path=tmpdir)
# And restore.
self.graph_ = tf.Graph()
with self.graph_.as_default():
self._build_tf_graph()
self._saver.restore(
self._session, os.path.join(tmpdir, 'saved_model'))
def _build_tf_graph(self):
"""Build the TF graph, setup model saving and setup a TF session.
Notes
-----
This method initializes a TF Saver and a TF Session via
```python
self._saver = tf.train.Saver()
self._session = tf.Session()
```
These calls are made after `self._set_up_graph()`` is called.
See the main class docs for how to properly call this method from a
child class.
"""
self._set_up_graph()
self._saver = tf.train.Saver()
self._session = tf.Session()
@abstractmethod
def _set_up_graph(self):
"""Assemble the TF graph for estimator.
Notes
-----
Child classes should add the TF ops to the graph they want to
implement here.
"""
pass | 0.935546 | 0.881564 |
import pytest
from pytest import approx
import numpy
from brachiograph import BrachioGraph
import linedraw
class TestBrachioGraph:
bg = BrachioGraph(virtual=True)
def test_defaults_of_default_bg(self):
assert (self.bg.angle_1, self.bg.angle_2) == (-90, 90)
class TestBiDiBrachioGraph:
bg = BrachioGraph(
virtual=True,
servo_1_angle_pws_bidi={
-135: {"cw": 2374, "acw": 2386},
-120: {"cw": 2204, "acw": 2214},
-105: {"cw": 2042, "acw": 2054},
-90: {"cw": 1898, "acw": 1900},
-75: {"cw": 1730, "acw": 1750},
-60: {"cw": 1604, "acw": 1612},
-45: {"cw": 1466, "acw": 1476},
-30: {"cw": 1330, "acw": 1340},
-15: {"cw": 1188, "acw": 1200},
0: {"cw": 1048, "acw": 1060},
15: {"cw": 904, "acw": 910},
30: {"cw": 750, "acw": 766},
},
servo_2_angle_pws_bidi={
15: {"cw": 783, "acw": 761},
30: {"cw": 917, "acw": 901},
45: {"cw": 1053, "acw": 1035},
60: {"cw": 1183, "acw": 1167},
75: {"cw": 1303, "acw": 1287},
90: {"cw": 1427, "acw": 1417},
105: {"cw": 1557, "acw": 1537},
120: {"cw": 1697, "acw": 1681},
135: {"cw": 1843, "acw": 1827},
150: {"cw": 2003, "acw": 1987},
},
pw_up=1400, # pulse-widths for pen up/down
pw_down=1650,
)
def test_defaults_of_bg_with_bidi_pws(self):
assert self.bg.get_pulse_widths() == (
approx(1894 + self.bg.hysteresis_correction_1, abs=1e-0),
approx(1422 + self.bg.hysteresis_correction_2, abs=1e-0),
)
assert (self.bg.angle_1, self.bg.angle_2) == (-90, 90)
# ----------------- drawing methods -----------------
def test_plot_from_file(self):
self.bg.plot_file("test-patterns/accuracy.json")
# ----------------- test pattern methods -----------------
def test_test_pattern(self):
self.bg.test_pattern()
def test_vertical_lines(self):
self.bg.vertical_lines()
def test_horizontal_lines(self):
self.bg.horizontal_lines()
def test_box(self):
self.bg.box()
# ----------------- pen-moving methods -----------------
def test_centre(self):
self.bg.park()
# ----------------- reporting methods -----------------
def test_report(self):
self.bg.report()
class TestErrors:
def test_maths_errors(self):
plotter = BrachioGraph(inner_arm=8.2, outer_arm=8.85, virtual=True)
with pytest.raises(Exception):
plotter.xy_to_angles(-10.2, 13.85) | tests/test_brachiograph.py | import pytest
from pytest import approx
import numpy
from brachiograph import BrachioGraph
import linedraw
class TestBrachioGraph:
    """Smoke test for a default (uncalibrated) virtual plotter."""

    bg = BrachioGraph(virtual=True)

    def test_defaults_of_default_bg(self):
        # Parked position: inner arm at -90 degrees, outer arm at +90 degrees.
        assert (self.bg.angle_1, self.bg.angle_2) == (-90, 90)
class TestBiDiBrachioGraph:
    """Exercises a virtual BrachioGraph built from bi-directional pulse-width
    calibration: for each angle, separate pulse-widths measured when the servo
    approaches clockwise ("cw") and anti-clockwise ("acw")."""

    # Calibration tables: angle -> {"cw": pulse-width, "acw": pulse-width}.
    bg = BrachioGraph(
        virtual=True,
        servo_1_angle_pws_bidi={
            -135: {"cw": 2374, "acw": 2386},
            -120: {"cw": 2204, "acw": 2214},
            -105: {"cw": 2042, "acw": 2054},
            -90: {"cw": 1898, "acw": 1900},
            -75: {"cw": 1730, "acw": 1750},
            -60: {"cw": 1604, "acw": 1612},
            -45: {"cw": 1466, "acw": 1476},
            -30: {"cw": 1330, "acw": 1340},
            -15: {"cw": 1188, "acw": 1200},
            0: {"cw": 1048, "acw": 1060},
            15: {"cw": 904, "acw": 910},
            30: {"cw": 750, "acw": 766},
        },
        servo_2_angle_pws_bidi={
            15: {"cw": 783, "acw": 761},
            30: {"cw": 917, "acw": 901},
            45: {"cw": 1053, "acw": 1035},
            60: {"cw": 1183, "acw": 1167},
            75: {"cw": 1303, "acw": 1287},
            90: {"cw": 1427, "acw": 1417},
            105: {"cw": 1557, "acw": 1537},
            120: {"cw": 1697, "acw": 1681},
            135: {"cw": 1843, "acw": 1827},
            150: {"cw": 2003, "acw": 1987},
        },
        pw_up=1400,  # pulse-widths for pen up/down
        pw_down=1650,
    )

    def test_defaults_of_bg_with_bidi_pws(self):
        # Resting pulse-widths are interpolated from the tables; allow for the
        # hysteresis correction the plotter derives from the cw/acw spread.
        assert self.bg.get_pulse_widths() == (
            approx(1894 + self.bg.hysteresis_correction_1, abs=1e-0),
            approx(1422 + self.bg.hysteresis_correction_2, abs=1e-0),
        )
        assert (self.bg.angle_1, self.bg.angle_2) == (-90, 90)

    # ----------------- drawing methods -----------------
    def test_plot_from_file(self):
        self.bg.plot_file("test-patterns/accuracy.json")

    # ----------------- test pattern methods -----------------
    def test_test_pattern(self):
        self.bg.test_pattern()

    def test_vertical_lines(self):
        self.bg.vertical_lines()

    def test_horizontal_lines(self):
        self.bg.horizontal_lines()

    def test_box(self):
        self.bg.box()

    # ----------------- pen-moving methods -----------------
    def test_centre(self):
        self.bg.park()

    # ----------------- reporting methods -----------------
    def test_report(self):
        self.bg.report()
class TestErrors:
def test_maths_errors(self):
plotter = BrachioGraph(inner_arm=8.2, outer_arm=8.85, virtual=True)
with pytest.raises(Exception):
plotter.xy_to_angles(-10.2, 13.85) | 0.503418 | 0.428413 |
import os
import sys
import glob
import time
import gc
import logging
import argparse
import multiprocessing as mproc
from functools import partial
import cv2 as cv
import numpy as np
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from benchmark.utilities.dataset import find_largest_object, project_object_edge
from benchmark.utilities.dataset import load_large_image, save_large_image
from benchmark.utilities.experiments import wrap_execute_sequence
NB_THREADS = int(mproc.cpu_count() * .5)
SCALE_SIZE = 512
CUT_DIMENSION = 0
TISSUE_CONTENT = 0.01
def arg_parse_params():
    """Parse the command-line parameters of the script.

    :return dict: {str: any} parsed arguments, with ``path_images`` having
        any leading '~' expanded to the user's home directory
    """
    # SEE: https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--path_images', type=str, required=True,
                        help='path (pattern) to the input image')
    parser.add_argument('--padding', type=float, required=False, default=0.1,
                        help='padding around the object in image percents')
    parser.add_argument('--nb_jobs', type=int, required=False,
                        help='number of processes running in parallel',
                        default=NB_THREADS)
    args = vars(parser.parse_args())
    # expand '~' so the glob pattern works regardless of shell expansion
    args['path_images'] = os.path.expanduser(args['path_images'])
    # FIX: pass repr(args) as a lazy logging argument instead of eagerly
    # %-formatting the string before the call (formatted only if INFO is on)
    logging.info('ARGUMENTS: \n%s', repr(args))
    return args
def crop_image(img_path, crop_dims=(0, 1), padding=0.15):
    """Crop an image to its largest tissue object and overwrite it in place.

    :param str img_path: path to the image (loaded/saved via benchmark helpers)
    :param tuple(int) crop_dims: dimensions (0=rows, 1=columns) along which
        the crop is estimated; a dimension not listed is kept full-size
    :param float padding: fraction of the detected object size kept around it
    """
    img = load_large_image(img_path)
    # estimate the crop on a downscaled version of the image for speed
    scale_factor = max(1, np.mean(img.shape[:2]) / float(SCALE_SIZE))
    sc = 1. / scale_factor
    order = cv.INTER_AREA if scale_factor > 1 else cv.INTER_LINEAR
    # invert intensities so the object projects as high edge values
    img_small = 255 - cv.resize(img, None, fx=sc, fy=sc, interpolation=order)
    crops = {}
    for crop_dim in crop_dims:
        assert crop_dim in (0, 1), 'not supported dimension'
        img_edge = project_object_edge(img_small, crop_dim)
        begin, end = find_largest_object(img_edge, threshold=TISSUE_CONTENT)
        pad_px = padding * (end - begin) * scale_factor
        begin_px = max(0, int((begin * scale_factor) - pad_px))
        end_px = min(img.shape[crop_dim], int((end * scale_factor) + pad_px))
        crops[crop_dim] = (begin_px, end_px)
    del img_small
    # FIX: back-fill a full-size range for ANY dimension that was not
    # estimated. The original loop only ever checked/filled dimension 0
    # (`for _ in range(2): if 0 not in crops: crops[0] = ...`), so calling
    # with crop_dims=(0,) raised KeyError on crops[1] below.
    for dim in range(2):
        if dim not in crops:
            crops[dim] = (0, img.shape[dim])
    img = img[crops[0][0]:crops[0][1], crops[1][0]:crops[1][1], ...]
    save_large_image(img_path, img)
    gc.collect()
    time.sleep(1)
def wrap_img_crop(img_path, padding=0.1):
    """Crop one image, logging any failure instead of raising, so a single
    broken image does not abort the whole parallel batch."""
    try:
        crop_image(img_path, crop_dims=(0, 1), padding=padding)
    except Exception:
        # logging.exception records the full traceback at ERROR level
        logging.exception('crop image: %s', img_path)
def main(path_images, padding, nb_jobs):
    """Run tissue cropping over every image matching the glob *path_images*."""
    paths = sorted(glob.glob(path_images))
    if not paths:
        logging.info('No images found on "%s"', path_images)
        return
    _wrap_crop = partial(wrap_img_crop, padding=padding)
    # materialise the sequence so every queued job actually executes
    list(wrap_execute_sequence(
        _wrap_crop, paths, desc='Crop image tissue', nb_jobs=nb_jobs))
# Script entry point: parse CLI arguments and crop all matched images.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    arg_params = arg_parse_params()
    main(arg_params['path_images'], arg_params['padding'],
         arg_params['nb_jobs'])
logging.info('DONE') | bm_dataset/crop_dataset_images.py | import os
import sys
import glob
import time
import gc
import logging
import argparse
import multiprocessing as mproc
from functools import partial
import cv2 as cv
import numpy as np
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from benchmark.utilities.dataset import find_largest_object, project_object_edge
from benchmark.utilities.dataset import load_large_image, save_large_image
from benchmark.utilities.experiments import wrap_execute_sequence
NB_THREADS = int(mproc.cpu_count() * .5)
SCALE_SIZE = 512
CUT_DIMENSION = 0
TISSUE_CONTENT = 0.01
def arg_parse_params():
    """Parse the command-line input parameters.

    :return dict: {str: any} parsed arguments
    """
    # SEE: https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--path_images', type=str, required=True,
                        help='path (pattern) to the input image')
    parser.add_argument('--padding', type=float, required=False, default=0.1,
                        help='padding around the object in image percents')
    parser.add_argument('--nb_jobs', type=int, required=False,
                        help='number of processes running in parallel',
                        default=NB_THREADS)
    args = vars(parser.parse_args())
    # expand '~' in the user-supplied glob pattern
    args['path_images'] = os.path.expanduser(args['path_images'])
    # NOTE(review): prefer lazy logging args — logging.info('...%s', repr(args))
    logging.info('ARGUMENTS: \n%s' % repr(args))
    return args
def crop_image(img_path, crop_dims=(0, 1), padding=0.15):
    """Crop an image to its largest tissue object and overwrite it in place.

    :param str img_path: path to the image (loaded/saved via benchmark helpers)
    :param tuple(int) crop_dims: dimensions (0=rows, 1=columns) along which
        the crop is estimated; a dimension not listed is kept full-size
    :param float padding: fraction of the detected object size kept around it
    """
    img = load_large_image(img_path)
    # estimate the crop on a downscaled version of the image for speed
    scale_factor = max(1, np.mean(img.shape[:2]) / float(SCALE_SIZE))
    sc = 1. / scale_factor
    order = cv.INTER_AREA if scale_factor > 1 else cv.INTER_LINEAR
    # invert intensities so the object projects as high edge values
    img_small = 255 - cv.resize(img, None, fx=sc, fy=sc, interpolation=order)
    crops = {}
    for crop_dim in crop_dims:
        assert crop_dim in (0, 1), 'not supported dimension'
        img_edge = project_object_edge(img_small, crop_dim)
        begin, end = find_largest_object(img_edge, threshold=TISSUE_CONTENT)
        pad_px = padding * (end - begin) * scale_factor
        begin_px = max(0, int((begin * scale_factor) - pad_px))
        end_px = min(img.shape[crop_dim], int((end * scale_factor) + pad_px))
        crops[crop_dim] = (begin_px, end_px)
    del img_small
    # FIX: back-fill a full-size range for ANY dimension that was not
    # estimated. The original loop only ever checked/filled dimension 0
    # (`for _ in range(2): if 0 not in crops: crops[0] = ...`), so calling
    # with crop_dims=(0,) raised KeyError on crops[1] below.
    for dim in range(2):
        if dim not in crops:
            crops[dim] = (0, img.shape[dim])
    img = img[crops[0][0]:crops[0][1], crops[1][0]:crops[1][1], ...]
    save_large_image(img_path, img)
    gc.collect()
    time.sleep(1)
def wrap_img_crop(img_path, padding=0.1):
    """Crop one image, logging any failure instead of raising, so a single
    broken image does not abort the whole parallel batch."""
    try:
        crop_image(img_path, crop_dims=(0, 1), padding=padding)
    except Exception:
        # logging.exception records the full traceback at ERROR level
        logging.exception('crop image: %s', img_path)
def main(path_images, padding, nb_jobs):
    """Run tissue cropping over every image matching the glob *path_images*."""
    image_paths = sorted(glob.glob(path_images))
    if not image_paths:
        logging.info('No images found on "%s"', path_images)
        return
    _wrap_crop = partial(wrap_img_crop, padding=padding)
    # list() forces the (possibly lazy) execution sequence to run to completion
    list(wrap_execute_sequence(_wrap_crop, image_paths,
                               desc='Crop image tissue', nb_jobs=nb_jobs))
# Script entry point: parse CLI arguments and crop all matched images.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    arg_params = arg_parse_params()
    main(arg_params['path_images'], arg_params['padding'],
         arg_params['nb_jobs'])
logging.info('DONE') | 0.41561 | 0.198045 |
from ImageUtilities import imageReadRGB, showImageRGB, createImageF
from ImageRegionsUtilities import densityHistogram, colourFeature, meanShift, backProjection,backProjectionImage,regionSize
# Math and iteration
from math import exp
# FIX: was `from timeit import itertools`, which only works because timeit
# happens to import itertools internally — import the module directly.
import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageNames = Input image names
    initialPos = position of the region [column, row]
    size = Size of the region [column, row]
    sigma = weight control
'''
pathToDir = "../../Images/Chapter9/Input/"
imageNames = ["frame1.bmp", "frame2.bmp", "frame3.bmp", "frame4.bmp", "frame5.bmp", "frame6.bmp"]
histoSize = 64
initialPos = [100, 60]
sizeReg = [12, 18]
sigma = 6.0
# Region position and sizes in each frame
positions = [ ]
positions.append(initialPos)
sizes = [ ]
sizes.append(sizeReg)
# Read image
inputImage, width, height = imageReadRGB(pathToDir + imageNames[0])
#showImageRGB(inputImage)
# Density and back projection of the region to track
q = densityHistogram(inputImage, positions[0], sizeReg, sigma, histoSize)
backProjImage = backProjectionImage(inputImage, q, histoSize)
#plot3DHistogram(q)
# For each frame
numImages = len(imageNames)
for frameNum in range(1, numImages):
    # Read next frame and estimate the position by using meanshift
    currentImage, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    newPos = meanShift(currentImage, q, sizeReg, sigma, histoSize, positions[frameNum-1])
    # Back project and use the projections to determine the new position and size
    newBackProjImage = backProjectionImage(currentImage, q, histoSize)
    pos, newSize = regionSize(backProjImage, newBackProjImage,
                              positions[frameNum-1], newPos, sizeReg)
    positions.append(pos)
    sizes.append(newSize)
    # Update density and image for the next iteration
    inputImage = currentImage
    sizeReg = newSize
    backProjImage = newBackProjImage
#print(positions)
#print(sizes)
# Show results: paint a coloured border around each frame's tracked region
for frameNum in range(0, numImages):
    image, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    p = positions[frameNum]
    s = sizes[frameNum]
    borderDistance = [s[0] -5, s[1] -5]
    for x, y in itertools.product(range(p[0]-s[0], p[0]+s[0]),
                                  range(p[1]-s[1], p[1]+s[1])):
        if abs(x-p[0]) > borderDistance[0] or abs(y-p[1]) > borderDistance[1]:
            image[y,x] = [20, 20, 80]
showImageRGB(image) | ExamplesPython_3.6/Chapter9/CamShift.py | from ImageUtilities import imageReadRGB, showImageRGB, createImageF
from ImageRegionsUtilities import densityHistogram, colourFeature, meanShift, backProjection,backProjectionImage,regionSize
# Math and iteration
from math import exp
# NOTE(review): relies on timeit's internal itertools import — prefer
# a direct `import itertools`.
from timeit import itertools
'''
Parameters:
    pathToDir = Input image directory
    imageNames = Input image names
    initialPos = position of the region [column, row]
    size = Size of the region [column, row]
    sigma = weight control
'''
pathToDir = "../../Images/Chapter9/Input/"
imageNames = ["frame1.bmp", "frame2.bmp", "frame3.bmp", "frame4.bmp", "frame5.bmp", "frame6.bmp"]
histoSize = 64
initialPos = [100, 60]
sizeReg = [12, 18]
sigma = 6.0
# Region position and sizes in each frame
positions = [ ]
positions.append(initialPos)
sizes = [ ]
sizes.append(sizeReg)
# Read image
inputImage, width, height = imageReadRGB(pathToDir + imageNames[0])
#showImageRGB(inputImage)
# Density and back projection of the region to track
q = densityHistogram(inputImage, positions[0], sizeReg, sigma, histoSize)
backProjImage = backProjectionImage(inputImage, q, histoSize)
#plot3DHistogram(q)
# For each frame
numImages = len(imageNames)
for frameNum in range(1, numImages):
    # Read next frame and estimate the position by using meanshift
    currentImage, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    newPos = meanShift(currentImage, q, sizeReg, sigma, histoSize, positions[frameNum-1])
    # Back project and use the projections to determine the new position and size
    newBackProjImage = backProjectionImage(currentImage, q, histoSize)
    pos,newSize = regionSize(backProjImage, newBackProjImage, \
                             positions[frameNum-1], newPos, sizeReg)
    positions.append(pos)
    sizes.append(newSize)
    # Update density and image for the next iteration
    inputImage = currentImage
    sizeReg = newSize
    backProjImage = newBackProjImage
#print(positions)
#print(sizes)
# Show results: paint a coloured border around each frame's tracked region
for frameNum in range(0, numImages):
    image, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    p = positions[frameNum]
    s = sizes[frameNum]
    borderDistance = [s[0] -5, s[1] -5]
    for x, y in itertools.product(range(p[0]-s[0], p[0]+s[0]), \
                                  range(p[1]-s[1], p[1]+s[1])):
        if abs(x-p[0]) > borderDistance[0] or abs(y-p[1]) > borderDistance[1]:
            image[y,x] = [20, 20, 80]
showImageRGB(image) | 0.409221 | 0.523299 |
from shexer.utils.target_elements import determine_original_target_nodes_if_needed
from shexer.model.property import Property
from shexer.utils.uri import remove_corners
from shexer.utils.target_elements import tune_target_classes_if_needed
from shexer.consts import SHAPES_DEFAULT_NAMESPACE
from shexer.utils.log import log_msg
from shexer.core.profiling.strategy.direct_features_strategy import DirectFeaturesStrategy
from shexer.core.profiling.strategy.include_reverse_features_strategy import IncludeReverseFeaturesStrategy
from shexer.core.profiling.consts import RDF_TYPE_STR
class ClassProfiler(object):
    """Builds shape profiles (feature constraints) for a set of target
    instances by scanning the triples produced by *triples_yielder*.

    Feature annotation is delegated to a strategy object: direct features
    only, or direct plus reverse (inverse-path) features.
    """

    def __init__(self, triples_yielder, instances_dict, instantiation_property_str=RDF_TYPE_STR,
                 remove_empty_shapes=True, original_target_classes=None, original_shape_map=None,
                 shapes_namespace=SHAPES_DEFAULT_NAMESPACE, inverse_paths=False):
        self._triples_yielder = triples_yielder
        self._instances_dict = instances_dict  # TODO refactor: change name once working again
        self._shapes_namespace = shapes_namespace
        self._shape_names_dict = {}  # Will be filled during execution
        self._relevant_triples = 0
        self._instantiation_property_str = self._decide_instantiation_property(instantiation_property_str)
        self._remove_empty_shapes = remove_empty_shapes
        self._original_raw_target_classes = original_target_classes
        self._classes_shape_dict = {}  # Will be filled later
        self._class_counts = {}  # Will be filled later
        self._original_target_nodes = determine_original_target_nodes_if_needed(
            remove_empty_shapes=remove_empty_shapes,
            original_target_classes=original_target_classes,
            original_shape_map=original_shape_map,
            shapes_namespace=shapes_namespace)
        # inverse_paths toggles whether reverse features are also annotated
        self._strategy = DirectFeaturesStrategy(class_profiler=self) if not inverse_paths \
            else IncludeReverseFeaturesStrategy(class_profiler=self)

    def profile_classes(self, verbose):
        """Run the full profiling pipeline.

        :return: tuple (shape profiles dict, class counts dict)
        """
        log_msg(verbose=verbose,
                msg="Starting class profiler...")
        self._init_class_counts_and_shape_dict()
        log_msg(verbose=verbose,
                msg="Instance counts completed. Annotating instance features...")
        self._adapt_instances_dict()
        self._build_shape_of_instances()
        log_msg(verbose=verbose,
                msg="Instance features annotated. Number of relevant triples computed: {}. "
                    "Building shape profiles...".format(self._relevant_triples))
        self._build_class_profile()
        log_msg(verbose=verbose,
                msg="Draft shape profiles built. Cleaning shape profiles...")
        self._clean_class_profile()
        log_msg(verbose=verbose,
                msg="Shape profiles done. Working with {} shapes.".format(len(self._classes_shape_dict)))
        return self._classes_shape_dict, self._class_counts

    def get_target_classes_dict(self):
        return self._instances_dict

    @staticmethod
    def _decide_instantiation_property(instantiation_property_str):
        """Normalize the instantiation property to a plain URI string.

        Accepts None (falls back to rdf:type), a Property object, or a
        string with or without angle-bracket corners.
        """
        # FIX: identity check instead of `== None`; isinstance instead of
        # `type(x) == T` (also accepts subclasses)
        if instantiation_property_str is None:
            return RDF_TYPE_STR
        if isinstance(instantiation_property_str, Property):
            return str(instantiation_property_str)
        if isinstance(instantiation_property_str, str):
            return remove_corners(a_uri=instantiation_property_str,
                                  raise_error_if_no_corners=False)
        raise ValueError("Unrecognized param type to define instantiation property")

    def _init_class_counts_and_shape_dict(self):
        """
        IMPORTANT: this method should be called before adapting the instances_dict
        :return:
        """
        self._init_original_targets()
        self._init_annotated_targets()

    def _init_annotated_targets(self):
        self._strategy.init_annotated_targets()

    def _init_original_targets(self):
        self._strategy.init_original_targets()

    def _build_class_profile(self):
        for an_instance in self._instances_dict:
            self._strategy.annotate_instance_features(an_instance)

    def _clean_class_profile(self):
        """Iteratively drop shapes that are neither original targets nor
        carry any annotated features (removal may empty further shapes)."""
        if not self._remove_empty_shapes:
            return
        shapes_to_remove = self._detect_shapes_to_remove()
        while shapes_to_remove:  # FIX: truthiness instead of len(...) != 0
            self._iteration_remove_empty_shapes(shapes_to_remove)
            shapes_to_remove = self._detect_shapes_to_remove()

    def _detect_shapes_to_remove(self):
        shapes_to_remove = set()
        for a_shape_key in self._classes_shape_dict:
            if not self._is_original_target_shape(a_shape_key):
                if not self._has_it_annotated_features(a_shape_key):
                    shapes_to_remove.add(a_shape_key)
        return shapes_to_remove

    def _is_original_target_shape(self, shape_label):
        return shape_label in self._original_target_nodes

    def _has_it_annotated_features(self, shape_label):
        return self._strategy.has_shape_annotated_features(shape_label)

    def _iteration_remove_empty_shapes(self, target_shapes):
        # Drop references to the doomed shapes from every property entry...
        for a_shape_label_key in self._classes_shape_dict:
            for a_prop_key in self._classes_shape_dict[a_shape_label_key]:
                for a_shape_to_remove in target_shapes:
                    if a_shape_to_remove in self._classes_shape_dict[a_shape_label_key][a_prop_key]:
                        del self._classes_shape_dict[a_shape_label_key][a_prop_key][a_shape_to_remove]
        # ...then drop the shapes themselves.
        for a_shape_to_remove in target_shapes:
            if a_shape_to_remove in self._classes_shape_dict:
                del self._classes_shape_dict[a_shape_to_remove]

    def _build_shape_of_instances(self):
        for a_triple in self._yield_relevant_triples():
            self._relevant_triples += 1
            self._annotate_feature_of_target_instance(a_triple)

    def _annotate_feature_of_target_instance(self, a_triple):
        self._strategy.annotate_triple_features(a_triple)

    def _adapt_instances_dict(self):
        self._strategy.adapt_instances_dict()

    def _adapt_entry_dict_if_needed(self, str_subj):
        # FIX: isinstance instead of type(...) == list
        if isinstance(self._instances_dict[str_subj], list):
            self._instances_dict[str_subj] = (self._instances_dict[str_subj], {})
def _yield_relevant_triples(self):
for a_triple in self._triples_yielder.yield_triples():
if self._strategy.is_a_relevant_triple(a_triple):
yield a_triple | shexer/core/profiling/class_profiler.py | from shexer.utils.target_elements import determine_original_target_nodes_if_needed
from shexer.model.property import Property
from shexer.utils.uri import remove_corners
from shexer.utils.target_elements import tune_target_classes_if_needed
from shexer.consts import SHAPES_DEFAULT_NAMESPACE
from shexer.utils.log import log_msg
from shexer.core.profiling.strategy.direct_features_strategy import DirectFeaturesStrategy
from shexer.core.profiling.strategy.include_reverse_features_strategy import IncludeReverseFeaturesStrategy
from shexer.core.profiling.consts import RDF_TYPE_STR
class ClassProfiler(object):
    """Builds shape profiles (feature constraints) for a set of target
    instances by scanning the triples produced by *triples_yielder*,
    delegating feature annotation to a direct or direct+reverse strategy."""

    def __init__(self, triples_yielder, instances_dict, instantiation_property_str=RDF_TYPE_STR,
                 remove_empty_shapes=True, original_target_classes=None, original_shape_map=None,
                 shapes_namespace=SHAPES_DEFAULT_NAMESPACE, inverse_paths=False):
        self._triples_yielder = triples_yielder
        self._instances_dict = instances_dict  # TODO refactor: change name once working again
        # self._instances_shape_dict = {}
        self._shapes_namespace = shapes_namespace
        self._shape_names_dict = {}  # Will be filled during execution
        self._relevant_triples = 0
        self._instantiation_property_str = self._decide_instantiation_property(instantiation_property_str)
        self._remove_empty_shapes = remove_empty_shapes
        self._original_raw_target_classes = original_target_classes
        self._classes_shape_dict = {}  # Will be filled later
        self._class_counts = {}  # Will be filled later
        self._original_target_nodes = determine_original_target_nodes_if_needed(remove_empty_shapes=remove_empty_shapes,
                                                                                original_target_classes=original_target_classes,
                                                                                original_shape_map=original_shape_map,
                                                                                shapes_namespace=shapes_namespace)
        # inverse_paths toggles whether reverse features are also annotated
        self._strategy = DirectFeaturesStrategy(class_profiler=self) if not inverse_paths \
            else IncludeReverseFeaturesStrategy(class_profiler=self)

    def profile_classes(self, verbose):
        """Run the profiling pipeline; returns (shape profiles, class counts)."""
        log_msg(verbose=verbose,
                msg="Starting class profiler...")
        self._init_class_counts_and_shape_dict()
        log_msg(verbose=verbose,
                msg="Instance counts completed. Annotating instance features...")
        self._adapt_instances_dict()
        self._build_shape_of_instances()
        log_msg(verbose=verbose,
                msg="Instance features annotated. Number of relevant triples computed: {}. "
                    "Building shape profiles...".format(self._relevant_triples))
        self._build_class_profile()
        log_msg(verbose=verbose,
                msg="Draft shape profiles built. Cleaning shape profiles...")
        self._clean_class_profile()
        log_msg(verbose=verbose,
                msg="Shape profiles done. Working with {} shapes.".format(len(self._classes_shape_dict)))
        return self._classes_shape_dict, self._class_counts

    def get_target_classes_dict(self):
        return self._instances_dict

    @staticmethod
    def _decide_instantiation_property(instantiation_property_str):
        """Normalize the instantiation property to a plain URI string."""
        # NOTE(review): prefer `is None` and isinstance() over == / type() checks
        if instantiation_property_str == None:
            return RDF_TYPE_STR
        if type(instantiation_property_str) == Property:
            return str(instantiation_property_str)
        if type(instantiation_property_str) == str:
            return remove_corners(a_uri=instantiation_property_str,
                                  raise_error_if_no_corners=False)
        raise ValueError("Unrecognized param type to define instantiation property")

    def _init_class_counts_and_shape_dict(self):
        """
        IMPORTANT: this method should be called before adapting the instances_dict
        :return:
        """
        # self._classes_shape_dict
        self._init_original_targets()
        self._init_annotated_targets()

    def _init_annotated_targets(self):
        self._strategy.init_annotated_targets()

    def _init_original_targets(self):
        self._strategy.init_original_targets()

    def _build_class_profile(self):
        for an_instance in self._instances_dict:
            self._strategy.annotate_instance_features(an_instance)

    def _clean_class_profile(self):
        """Iteratively drop shapes that are neither original targets nor
        carry any annotated features."""
        if not self._remove_empty_shapes:
            return
        shapes_to_remove = self._detect_shapes_to_remove()
        while(len(shapes_to_remove) != 0):
            self._iteration_remove_empty_shapes(shapes_to_remove)
            shapes_to_remove = self._detect_shapes_to_remove()

    def _detect_shapes_to_remove(self):
        shapes_to_remove = set()
        for a_shape_key in self._classes_shape_dict:
            if not self._is_original_target_shape(a_shape_key):
                if not self._has_it_annotated_features(a_shape_key):
                    shapes_to_remove.add(a_shape_key)
        return shapes_to_remove

    def _is_original_target_shape(self, shape_label):
        return shape_label in self._original_target_nodes

    def _has_it_annotated_features(self, shape_label):
        return self._strategy.has_shape_annotated_features(shape_label)

    def _iteration_remove_empty_shapes(self, target_shapes):
        # Drop references to the doomed shapes from every property entry,
        # then drop the shapes themselves.
        for a_shape_label_key in self._classes_shape_dict:
            for a_prop_key in self._classes_shape_dict[a_shape_label_key]:
                # print(self._classes_shape_dict[a_shape_label_key][a_prop_key])
                for a_shape_to_remove in target_shapes:
                    if a_shape_to_remove in self._classes_shape_dict[a_shape_label_key][a_prop_key]:
                        del self._classes_shape_dict[a_shape_label_key][a_prop_key][a_shape_to_remove]
        for a_shape_to_remove in target_shapes:
            if a_shape_to_remove in self._classes_shape_dict:
                del self._classes_shape_dict[a_shape_to_remove]

    def _build_shape_of_instances(self):
        for a_triple in self._yield_relevant_triples():
            self._relevant_triples += 1
            self._annotate_feature_of_target_instance(a_triple)

    def _annotate_feature_of_target_instance(self, a_triple):
        self._strategy.annotate_triple_features(a_triple)

    def _adapt_instances_dict(self):
        self._strategy.adapt_instances_dict()

    def _adapt_entry_dict_if_needed(self, str_subj):
        # NOTE(review): prefer isinstance(...) over type(...) == list
        if type(self._instances_dict[str_subj]) == list:
            self._instances_dict[str_subj] = (self._instances_dict[str_subj], {})
def _yield_relevant_triples(self):
for a_triple in self._triples_yielder.yield_triples():
if self._strategy.is_a_relevant_triple(a_triple):
yield a_triple | 0.301362 | 0.119024 |
import math
from keras import activations, layers, initializers
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
@register_keras_serializable(package='TFVan')
class MLP(layers.Layer):
    """Convolutional MLP block: pointwise expansion, 3x3 depthwise conv,
    GELU, dropout, pointwise projection, dropout.

    Operates on 4D channels-last inputs and preserves the input shape.
    """

    def __init__(self, ratio, dropout, **kwargs):
        super().__init__(**kwargs)
        self.input_spec = layers.InputSpec(ndim=4)
        self.ratio = ratio      # expansion ratio for the hidden channels
        self.dropout = dropout  # dropout rate, applied twice in call()

    @shape_type_conversion
    def build(self, input_shape):
        channels = input_shape[-1]
        if channels is None:
            raise ValueError('Channel dimension of the inputs should be defined. Found `None`.')
        self.input_spec = layers.InputSpec(ndim=4, axes={-1: channels})
        inner_channels = int(channels * self.ratio)
        # He-style normal init: std = sqrt(2 / fan) for each convolution.
        # noinspection PyAttributeOutsideInit
        self.pw1 = layers.Conv2D(
            inner_channels, 1, name='fc1',
            kernel_initializer=initializers.RandomNormal(0., math.sqrt(2. / inner_channels)))
        # noinspection PyAttributeOutsideInit
        self.dw1 = layers.DepthwiseConv2D(
            3, padding='same', name='dwconv.dwconv',
            kernel_initializer=initializers.RandomNormal(0., math.sqrt(2. / 9)))
        # noinspection PyAttributeOutsideInit
        self.pw2 = layers.Conv2D(
            channels, 1, name='fc2',
            kernel_initializer=initializers.RandomNormal(0., math.sqrt(2. / channels)))
        # noinspection PyAttributeOutsideInit
        self.drop = layers.Dropout(self.dropout)
        super().build(input_shape)

    def call(self, inputs, *args, **kwargs):
        outputs = self.pw1(inputs)
        outputs = self.dw1(outputs)
        outputs = activations.gelu(outputs)
        outputs = self.drop(outputs)
        outputs = self.pw2(outputs)
        outputs = self.drop(outputs)
        return outputs

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        # Same spatial size and channel count as the input.
        return input_shape
def get_config(self):
config = super().get_config()
config.update({
'ratio': self.ratio,
'dropout': self.dropout
})
return config | tfvan/mlp.py | import math
from keras import activations, layers, initializers
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
@register_keras_serializable(package='TFVan')
class MLP(layers.Layer):
    """Convolutional MLP block: pointwise expansion, 3x3 depthwise conv,
    GELU, dropout, pointwise projection, dropout.

    Operates on 4D channels-last inputs and preserves the input shape.
    """

    def __init__(self, ratio, dropout, **kwargs):
        super().__init__(**kwargs)
        self.input_spec = layers.InputSpec(ndim=4)
        self.ratio = ratio      # expansion ratio for the hidden channels
        self.dropout = dropout  # dropout rate, applied twice in call()

    @shape_type_conversion
    def build(self, input_shape):
        channels = input_shape[-1]
        if channels is None:
            raise ValueError('Channel dimension of the inputs should be defined. Found `None`.')
        self.input_spec = layers.InputSpec(ndim=4, axes={-1: channels})
        inner_channels = int(channels * self.ratio)
        # He-style normal init: std = sqrt(2 / fan) for each convolution.
        # noinspection PyAttributeOutsideInit
        self.pw1 = layers.Conv2D(
            inner_channels, 1, name='fc1',
            kernel_initializer=initializers.RandomNormal(0., math.sqrt(2. / inner_channels)))
        # noinspection PyAttributeOutsideInit
        self.dw1 = layers.DepthwiseConv2D(
            3, padding='same', name='dwconv.dwconv',
            kernel_initializer=initializers.RandomNormal(0., math.sqrt(2. / 9)))
        # noinspection PyAttributeOutsideInit
        self.pw2 = layers.Conv2D(
            channels, 1, name='fc2',
            kernel_initializer=initializers.RandomNormal(0., math.sqrt(2. / channels)))
        # noinspection PyAttributeOutsideInit
        self.drop = layers.Dropout(self.dropout)
        super().build(input_shape)

    def call(self, inputs, *args, **kwargs):
        outputs = self.pw1(inputs)
        outputs = self.dw1(outputs)
        outputs = activations.gelu(outputs)
        outputs = self.drop(outputs)
        outputs = self.pw2(outputs)
        outputs = self.drop(outputs)
        return outputs

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        # Same spatial size and channel count as the input.
        return input_shape
def get_config(self):
config = super().get_config()
config.update({
'ratio': self.ratio,
'dropout': self.dropout
})
return config | 0.910523 | 0.285901 |
import os
import time
import unittest
from meross_iot.api import MerossHttpClient
EMAIL = os.environ.get('MEROSS_EMAIL')
PASSWORD = os.environ.get('MEROSS_PASSWORD')
class TestHttpMethods(unittest.TestCase):
    """Smoke tests for the Meross HTTP client (needs live credentials)."""

    def setUp(self):
        # Fresh client per test, authenticated from the environment.
        self.client = MerossHttpClient(email=EMAIL, password=PASSWORD)

    def test_device_listing(self):
        listing = self.client.list_devices()
        assert listing is not None
        assert len(listing) > 0

    def test_supported_device_listing(self):
        listing = self.client.list_supported_devices()
        assert listing is not None
        assert len(listing) > 0
class TestMSS210Test(unittest.TestCase):
    """Power-cycle and info tests against a live MSS210 smart plug."""

    def setUp(self):
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # Picks the first mss210 found; if none is present self.device is
        # never set and the tests fail with AttributeError.
        for counter, device in enumerate(devices):
            if (device._type == 'mss210'):
                self.device = device
                break

    def test_power_cycle(self):
        # The sleeps give the device/cloud time to settle between commands.
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())
        self.device.turn_off()
        time.sleep(2)
        self.assertFalse(self.device.get_status())
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())

    def test_get_info(self):
        # Each info endpoint should return a non-None payload.
        state = self.device.get_status()
        assert state is not None
        wifi_list = self.device.get_wifi_list()
        assert wifi_list is not None
        trace = self.device.get_trace()
        assert trace is not None
        debug = self.device.get_debug()
        assert debug is not None
class TestMSS310Test(unittest.TestCase):
    """Power-cycle and info tests against a live MSS310 smart plug
    (MSS310 additionally exposes power-consumption endpoints)."""

    def setUp(self):
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # Picks the first mss310 found; if none is present self.device is
        # never set and the tests fail with AttributeError.
        for counter, device in enumerate(devices):
            if (device._type == 'mss310'):
                self.device = device
                break

    def test_power_cycle(self):
        # The sleeps give the device/cloud time to settle between commands.
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())
        self.device.turn_off()
        time.sleep(2)
        self.assertFalse(self.device.get_status())
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())

    def test_get_info(self):
        # Each info endpoint should return a non-None payload.
        consumption = self.device.get_power_consumption()
        assert consumption is not None
        wifi_list = self.device.get_wifi_list()
        assert wifi_list is not None
        trace = self.device.get_trace()
        assert trace is not None
        debug = self.device.get_debug()
        assert debug is not None
        abilities = self.device.get_abilities()
        assert abilities is not None
        electricity = self.device.get_electricity()
        assert electricity is not None
class TestMSS425ETest(unittest.TestCase):
    """Tests against a live MSS425E power strip (multiple channels + USB)."""

    def setUp(self):
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # Picks the first mss425e found; if none is present self.device is
        # never set and the tests fail with AttributeError.
        for counter, device in enumerate(devices):
            if (device._type == 'mss425e'):
                self.device = device
                break

    def test_power_cycle(self):
        # The sleeps give the device/cloud time to settle between commands.
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())
        self.device.turn_off()
        time.sleep(2)
        self.assertFalse(self.device.get_status())
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())

    def test_usb(self):
        # NOTE(review): enable_usb is invoked twice and asserted True twice;
        # a disable/re-enable cycle was possibly intended — confirm.
        self.device.enable_usb()
        time.sleep(2)
        self.assertTrue(self.device.get_usb_status())
        self.device.enable_usb()
        time.sleep(2)
        self.assertTrue(self.device.get_usb_status())

    def test_channels(self):
        self.device.turn_off()
        time.sleep(2)
        self.assertFalse(self.device.get_status())
        # Test each channel one by one
        for c in self.device.get_channels():
            self.device.turn_on_channel(c)
            time.sleep(2)
            self.assertTrue(self.device.get_channel_status(c))
            time.sleep(2)
            self.device.turn_off_channel(c)
            time.sleep(2)
            self.assertFalse(self.device.get_channel_status(c))

    def test_get_info(self):
        # Each info endpoint should return a non-None payload.
        state = self.device.get_status()
        assert state is not None
        wifi_list = self.device.get_wifi_list()
        assert wifi_list is not None
        trace = self.device.get_trace()
        assert trace is not None
        debug = self.device.get_debug()
        assert debug is not None
class TestMSS530HTest(unittest.TestCase):
    """Power-cycle tests against a live MSS530H device."""

    def setUp(self):
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # Picks the first mss530h found; if none is present self.device is
        # never set and the tests fail with AttributeError.
        for counter, device in enumerate(devices):
            if device._type == 'mss530h':
                self.device = device
                break

    def test_power_cycle(self):
        # Full on/off/on/off cycle; sleeps let the device/cloud settle.
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())
        self.device.turn_off()
        time.sleep(2)
        self.assertFalse(self.device.get_status())
        self.device.turn_on()
        time.sleep(2)
        self.assertTrue(self.device.get_status())
        self.device.turn_off()
        time.sleep(2)
        self.assertFalse(self.device.get_status())
def test_get_info(self):
state = self.device.get_status()
assert state is not None
wifi_list = self.device.get_wifi_list()
assert wifi_list is not None
trace = self.device.get_trace()
assert trace is not None
debug = self.device.get_debug()
assert debug is not None
# ---- dataset row separator: repo_path = tests/test_power_plugs.py ----
import os
import time
import unittest
from meross_iot.api import MerossHttpClient
EMAIL = os.environ.get('MEROSS_EMAIL')
PASSWORD = os.environ.get('MEROSS_PASSWORD')
class TestHttpMethods(unittest.TestCase):
    """Smoke tests for the Meross cloud HTTP device-listing endpoints."""

    def setUp(self):
        self.client = MerossHttpClient(email=EMAIL, password=PASSWORD)

    def _assert_non_empty(self, devices):
        # Shared check: the call succeeded and returned at least one device.
        assert devices is not None
        assert len(devices) > 0

    def test_device_listing(self):
        self._assert_non_empty(self.client.list_devices())

    def test_supported_device_listing(self):
        self._assert_non_empty(self.client.list_supported_devices())
class TestMSS210Test(unittest.TestCase):
    """Hardware-in-the-loop tests for the MSS210 smart plug."""

    def setUp(self):
        """Find the first MSS210 on the account, or skip the test run.

        Previously self.device was silently left unset when no matching
        device existed, making every test die with an AttributeError.
        """
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # NOTE(review): matches on the library-private attribute _type —
        # confirm no public model accessor exists.
        self.device = next((d for d in devices if d._type == 'mss210'), None)
        if self.device is None:
            self.skipTest('no mss210 device registered on this account')

    def test_power_cycle(self):
        """on -> off -> on, verifying the reported switch state each time."""
        for command, check in (
            (self.device.turn_on, self.assertTrue),
            (self.device.turn_off, self.assertFalse),
            (self.device.turn_on, self.assertTrue),
        ):
            command()
            time.sleep(2)  # let the device state settle
            check(self.device.get_status())

    def test_get_info(self):
        """Every diagnostic getter must return a payload."""
        for getter in (
            self.device.get_status,
            self.device.get_wifi_list,
            self.device.get_trace,
            self.device.get_debug,
        ):
            self.assertIsNotNone(getter())
class TestMSS310Test(unittest.TestCase):
    """Hardware-in-the-loop tests for the MSS310 smart plug (power metering)."""

    def setUp(self):
        """Find the first MSS310 on the account, or skip the test run."""
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # NOTE(review): matches on the library-private attribute _type —
        # confirm no public model accessor exists.
        self.device = next((d for d in devices if d._type == 'mss310'), None)
        if self.device is None:
            # Skip explicitly instead of failing later with AttributeError.
            self.skipTest('no mss310 device registered on this account')

    def test_power_cycle(self):
        """on -> off -> on, verifying the reported switch state each time."""
        for command, check in (
            (self.device.turn_on, self.assertTrue),
            (self.device.turn_off, self.assertFalse),
            (self.device.turn_on, self.assertTrue),
        ):
            command()
            time.sleep(2)  # let the device state settle
            check(self.device.get_status())

    def test_get_info(self):
        """Every diagnostic and metering getter must return a payload."""
        for getter in (
            self.device.get_power_consumption,
            self.device.get_wifi_list,
            self.device.get_trace,
            self.device.get_debug,
            self.device.get_abilities,
            self.device.get_electricity,
        ):
            self.assertIsNotNone(getter())
class TestMSS425ETest(unittest.TestCase):
    """Hardware-in-the-loop tests for the MSS425E multi-outlet power strip."""

    def setUp(self):
        """Find the first MSS425E on the account, or skip the test run."""
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # NOTE(review): matches on the library-private attribute _type —
        # confirm no public model accessor exists.
        self.device = next((d for d in devices if d._type == 'mss425e'), None)
        if self.device is None:
            # Skip explicitly instead of failing later with AttributeError.
            self.skipTest('no mss425e device registered on this account')

    def test_power_cycle(self):
        """on -> off -> on, verifying the reported switch state each time."""
        for command, check in (
            (self.device.turn_on, self.assertTrue),
            (self.device.turn_off, self.assertFalse),
            (self.device.turn_on, self.assertTrue),
        ):
            command()
            time.sleep(2)  # let the device state settle
            check(self.device.get_status())

    def test_usb(self):
        """Enable the USB port twice; it must report enabled both times.

        NOTE(review): both iterations call enable_usb(); the second was
        probably meant to exercise disable_usb()/assertFalse — behaviour
        kept as-is, confirm intent before changing.
        """
        for _ in range(2):
            self.device.enable_usb()
            time.sleep(2)
            self.assertTrue(self.device.get_usb_status())

    def test_channels(self):
        """Cycle each outlet channel on and off individually."""
        # Start from a known all-off state.
        self.device.turn_off()
        time.sleep(2)
        self.assertFalse(self.device.get_status())
        for channel in self.device.get_channels():
            self.device.turn_on_channel(channel)
            time.sleep(2)
            self.assertTrue(self.device.get_channel_status(channel))
            time.sleep(2)
            self.device.turn_off_channel(channel)
            time.sleep(2)
            self.assertFalse(self.device.get_channel_status(channel))

    def test_get_info(self):
        """Every diagnostic getter must return a payload."""
        for getter in (
            self.device.get_status,
            self.device.get_wifi_list,
            self.device.get_trace,
            self.device.get_debug,
        ):
            self.assertIsNotNone(getter())
class TestMSS530HTest(unittest.TestCase):
    """Hardware-in-the-loop tests for the MSS530H in-wall switch."""

    def setUp(self):
        """Find the first MSS530H on the account, or skip the test run."""
        httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
        # Retrieves the list of supported devices
        devices = httpHandler.list_supported_devices()
        # NOTE(review): matches on the library-private attribute _type —
        # confirm no public model accessor exists.
        self.device = next((d for d in devices if d._type == 'mss530h'), None)
        if self.device is None:
            # Skip explicitly instead of failing later with AttributeError.
            self.skipTest('no mss530h device registered on this account')

    def test_power_cycle(self):
        """on -> off -> on -> off, verifying the reported state each time."""
        for command, check in (
            (self.device.turn_on, self.assertTrue),
            (self.device.turn_off, self.assertFalse),
            (self.device.turn_on, self.assertTrue),
            (self.device.turn_off, self.assertFalse),
        ):
            command()
            time.sleep(2)  # let the device state settle
            check(self.device.get_status())

    def test_get_info(self):
        """Every diagnostic getter must return a payload."""
        for getter in (
            self.device.get_status,
            self.device.get_wifi_list,
            self.device.get_trace,
            self.device.get_debug,
        ):
            self.assertIsNotNone(getter())
# quality_prob 0.512693 | learning_prob 0.37711 (dataset row metadata, not code)
import os.path
import re
def validfilename(filename, fullpath=False, posixchars=False, iso9660=False,
                  posixlenght=False, msdoslenght=False, lenghterror=False):
    r"""
    Remove all invalid characters from a file or folder name and check its
    validity on Linux, Microsoft Windows, Microsoft MS-DOS and Apple Macintosh.
    Remove:
    - All characters <= 31 on ASCII table (Linux, Windows, Macintosh).\n
    - Following special characters: "\", "/", ":", "*", "?", '"', ">", "<" and
      "|" (Windows).
    - " " on start and end of names.
    - "." on end of names (Windows).
    - "-" on start of names (Linux).
    Check also (case-insensitively) for Windows/MS-DOS reserved names:
    "CON", "PRN", "AUX", "NUL", "COM1".."COM9", "LPT1".."LPT9".

    Parameters
    ----------
    filename : str
        File or folder name or path (see "fullpath" parameter).
    fullpath : bool, optional
        Set to "True" if "filename" contain full path. Set to "False"
        if "filename" contain only file or folder name to check.
    posixchars : bool, optional
        If "True", remove all unauthorized characters with POSIX
        specification. With this, only alphanumeric, ".", "-" and "_"
        are authorized.
    iso9660 : bool, optional
        If "True", remove all "-" that are incompatible with ISO9660
        level 1 optic disk formatting.
    posixlenght : bool, optional
        If "True", check if length is greater than 14.
    msdoslenght : bool, optional
        If "True", check if length is greater than 8 for name and 3 for
        extension.
    lenghterror : bool, optional
        If "True", raise error if length is invalid, else, truncate filename.

    Returns
    -------
    out : str
        Fixed filename.

    Raises
    ------
    ValueError
        If nothing valid remains, a length limit is exceeded while
        "lenghterror" is set, or the result is a reserved device name.
    """
    # Split directory and name so only the final component is sanitized.
    if fullpath:
        directory, filename = os.path.split(filename)
    else:
        directory = ""

    if posixchars:
        # POSIX portable filename character set only.
        validname = re.sub("[^a-zA-Z0-9_.-]", "", filename)
    else:
        # Drop Windows-forbidden punctuation and ASCII control chars (<= 31).
        # The backslash is escaped explicitly: the original "\/" relied on an
        # invalid escape sequence (SyntaxWarning on modern Python).
        forbidden = set('\\/:*?"><|')
        validname = "".join(
            c for c in filename if c not in forbidden and ord(c) > 31)

    if iso9660:
        # ISO9660 level 1 forbids "-".
        validname = validname.replace("-", "")

    def checkendstart(string):
        """Strip ' ' at both ends, '.' at end, '-' at start, until stable."""
        prevlen = -1
        while len(string) != prevlen:
            prevlen = len(string)
            string = string.strip().rstrip('.').lstrip('-')
        return string

    validname = checkendstart(validname)
    if not validname:
        raise ValueError('All characters in filename are invalid')

    # MS-DOS 8.3 length limits.
    if msdoslenght:
        base, ext = os.path.splitext(validname)
        if len(base) > 8:
            if lenghterror:
                raise ValueError('Filename too long for MS-DOS (8 characters)')
            base = base[:8]
        if len(ext) > 4:  # "." plus up to 3 characters
            if lenghterror:
                raise ValueError('Extension too long for MS-DOS '
                                 '(3 characters)')
            ext = ext[:4]
        # BUGFIX: the previous version discarded the extension whenever the
        # base name was truncated, and appended a truncated extension to the
        # *untruncated* name when only the extension was too long.
        validname = checkendstart(base + ext)

    # POSIX length limit.
    if posixlenght and len(validname) > 14:
        if lenghterror:
            raise ValueError('Filename too long for POSIX (14 characters)')
        validname = checkendstart(validname[:14])

    # Windows/MS-DOS reserved device names. Windows treats these
    # case-insensitively, so compare the upper-cased candidate.
    if validname.upper() in (
            'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4',
            'COM5', 'COM6', 'COM7', 'COM8', 'COM9', 'LPT1', 'LPT2', 'LPT3',
            'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'):
        raise ValueError("Filename is a Windows/MS-DOS reserved name")

    # Re-attach the directory part, if any.
    if directory:
        validname = os.path.join(directory, validname)
    return validname
# ---- dataset row separator: repo_path = skio/system.py ----
import os.path
import re
def validfilename(filename, fullpath=False, posixchars=False, iso9660=False,
                  posixlenght=False, msdoslenght=False, lenghterror=False):
    r"""
    Remove all invalid characters from a file or folder name and check its
    validity on Linux, Microsoft Windows, Microsoft MS-DOS and Apple Macintosh.
    Remove:
    - All characters <= 31 on ASCII table (Linux, Windows, Macintosh).\n
    - Following special characters: "\", "/", ":", "*", "?", '"', ">", "<" and
      "|" (Windows).
    - " " on start and end of names.
    - "." on end of names (Windows).
    - "-" on start of names (Linux).
    Check also (case-insensitively) for Windows/MS-DOS reserved names:
    "CON", "PRN", "AUX", "NUL", "COM1".."COM9", "LPT1".."LPT9".

    Parameters
    ----------
    filename : str
        File or folder name or path (see "fullpath" parameter).
    fullpath : bool, optional
        Set to "True" if "filename" contain full path. Set to "False"
        if "filename" contain only file or folder name to check.
    posixchars : bool, optional
        If "True", remove all unauthorized characters with POSIX
        specification. With this, only alphanumeric, ".", "-" and "_"
        are authorized.
    iso9660 : bool, optional
        If "True", remove all "-" that are incompatible with ISO9660
        level 1 optic disk formatting.
    posixlenght : bool, optional
        If "True", check if length is greater than 14.
    msdoslenght : bool, optional
        If "True", check if length is greater than 8 for name and 3 for
        extension.
    lenghterror : bool, optional
        If "True", raise error if length is invalid, else, truncate filename.

    Returns
    -------
    out : str
        Fixed filename.

    Raises
    ------
    ValueError
        If nothing valid remains, a length limit is exceeded while
        "lenghterror" is set, or the result is a reserved device name.
    """
    # Split directory and name so only the final component is sanitized.
    if fullpath:
        directory, filename = os.path.split(filename)
    else:
        directory = ""

    if posixchars:
        # POSIX portable filename character set only.
        validname = re.sub("[^a-zA-Z0-9_.-]", "", filename)
    else:
        # Drop Windows-forbidden punctuation and ASCII control chars (<= 31).
        # The backslash is escaped explicitly: the original "\/" relied on an
        # invalid escape sequence (SyntaxWarning on modern Python).
        forbidden = set('\\/:*?"><|')
        validname = "".join(
            c for c in filename if c not in forbidden and ord(c) > 31)

    if iso9660:
        # ISO9660 level 1 forbids "-".
        validname = validname.replace("-", "")

    def checkendstart(string):
        """Strip ' ' at both ends, '.' at end, '-' at start, until stable."""
        prevlen = -1
        while len(string) != prevlen:
            prevlen = len(string)
            string = string.strip().rstrip('.').lstrip('-')
        return string

    validname = checkendstart(validname)
    if not validname:
        raise ValueError('All characters in filename are invalid')

    # MS-DOS 8.3 length limits.
    if msdoslenght:
        base, ext = os.path.splitext(validname)
        if len(base) > 8:
            if lenghterror:
                raise ValueError('Filename too long for MS-DOS (8 characters)')
            base = base[:8]
        if len(ext) > 4:  # "." plus up to 3 characters
            if lenghterror:
                raise ValueError('Extension too long for MS-DOS '
                                 '(3 characters)')
            ext = ext[:4]
        # BUGFIX: the previous version discarded the extension whenever the
        # base name was truncated, and appended a truncated extension to the
        # *untruncated* name when only the extension was too long.
        validname = checkendstart(base + ext)

    # POSIX length limit.
    if posixlenght and len(validname) > 14:
        if lenghterror:
            raise ValueError('Filename too long for POSIX (14 characters)')
        validname = checkendstart(validname[:14])

    # Windows/MS-DOS reserved device names. Windows treats these
    # case-insensitively, so compare the upper-cased candidate.
    if validname.upper() in (
            'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4',
            'COM5', 'COM6', 'COM7', 'COM8', 'COM9', 'LPT1', 'LPT2', 'LPT3',
            'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'):
        raise ValueError("Filename is a Windows/MS-DOS reserved name")

    # Re-attach the directory part, if any.
    if directory:
        validname = os.path.join(directory, validname)
    return validname
# quality_prob 0.526586 | learning_prob 0.2641 (dataset row metadata, not code)
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from skimage import color
from skimage import img_as_ubyte, img_as_float
from skimage.transform import rescale
from skimage.segmentation import slic
from skimage.future.graph import cut_normalized
from skimage.future.graph import rag_mean_color
from sklearn.feature_extraction.image import img_to_graph
from sklearn.cluster import spectral_clustering
from sklearn.cluster import SpectralClustering
from sklearn.cluster import Birch
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
def normalised_cut_clustering(im):
    """Segment *im* via SLIC superpixels followed by a normalized cut.

    Parameters
    ----------
    im : ndarray
        2-D image with float values in [0, 1] (converted to uint8 below).

    Returns
    -------
    (rag_avg, nc_avg) : tuple of ndarray
        Average-color visualisations of the SLIC over-segmentation and of
        the normalized-cut segmentation, respectively.
    """
    # [0, 1] float -> [0, 255] uint8
    im = img_as_ubyte(im)
    # SLIC over-segmentation used to build the region adjacency graph
    im_labels_rag = slic(im, n_segments=500, compactness=30)
    # RAG weighted by mean-colour similarity, then normalized cut on it
    g = rag_mean_color(im, im_labels_rag, mode='similarity')
    im_labels_nc = cut_normalized(im_labels_rag, g, num_cuts=3)
    # paint each region with its average colour for display
    im_labels_rac = color.label2rgb(im_labels_rag, im, kind='avg')
    im_labels_nc = color.label2rgb(im_labels_nc, im, kind='avg')
    return im_labels_rac, im_labels_nc
def spectral_graph_clustering(im, scale=0.1):
    """Two-cluster spectral clustering on the image-gradient graph.

    Builds a pixel adjacency graph whose edge weights are a decreasing
    function of the local gradient, so cluster boundaries follow strong
    edges.

    Parameters
    ----------
    im : ndarray
        2-D grayscale image.
    scale : float, optional
        Downscaling factor applied before clustering (default 0.1).

    Returns
    -------
    ndarray
        Per-pixel cluster labels with the *downscaled* image shape.
    """
    # resize (default: to 10% of the original size) to speed up the processing
    im_s = rescale(im, scale, anti_aliasing=False)
    # convert the image into a graph with the value of the gradient on the edges
    graph = img_to_graph(im_s)
    # map gradients to affinities: strong edges -> small weights
    beta = 10
    eps = 1e-6
    graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
    # clustering
    im_labels = spectral_clustering(graph, n_clusters=2, assign_labels='discretize', random_state=1)
    # reshape the flat label vector back to the (downscaled) image shape
    im_labels = im_labels.reshape(im_s.shape)
    return im_labels
def spectral_graph_clustering_new(im, scale=0.1):
    """Two-cluster spectral clustering via sklearn's estimator API.

    Same pipeline as spectral_graph_clustering, but using the
    SpectralClustering estimator with a precomputed affinity graph.

    Parameters
    ----------
    im : ndarray
        2-D grayscale image.
    scale : float, optional
        Downscaling factor applied before clustering (default 0.1).

    Returns
    -------
    ndarray
        Per-pixel cluster labels with the *downscaled* image shape.
    """
    # downscale to keep the eigen-decomposition tractable
    im_s = rescale(im, scale, anti_aliasing=False)
    # pixel adjacency graph weighted by the local gradient
    graph = img_to_graph(im_s)
    # map gradients to affinities: strong edges -> small weights
    beta = 10
    eps = 1e-6
    graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
    model = SpectralClustering(n_clusters=2,
                               affinity='precomputed',
                               assign_labels='discretize',
                               random_state=1)
    labels = model.fit_predict(graph)
    # cleanup: the original reshaped the labels twice to the same shape
    return labels.reshape(im_s.shape)
def spectral_nn_clustering(im, scale=0.1):
    """Two-cluster spectral clustering on raw pixel intensities.

    Pixels are treated as independent 1-D samples: the nearest-neighbour
    affinity is built in intensity space, ignoring spatial layout.

    Parameters
    ----------
    im : ndarray
        2-D grayscale image.
    scale : float, optional
        Downscaling factor applied before clustering (default 0.1).

    Returns
    -------
    ndarray
        Per-pixel cluster labels with the *downscaled* image shape.
    """
    # downscale to keep the affinity graph tractable
    im_s = rescale(im, scale, anti_aliasing=False)
    # flatten to an (n_pixels, 1) feature matrix of intensities
    rows, cols = im_s.shape
    features = im_s.reshape(rows * cols, 1)
    model = SpectralClustering(n_clusters=2,
                               eigen_solver='arpack',
                               affinity='nearest_neighbors')
    labels = model.fit_predict(features)
    # cleanup: the original reshaped the labels twice to the same shape
    return labels.reshape(im_s.shape)
def agglomerative_graph_clustering(im, scale=0.1):
    """Two-cluster structured Ward agglomerative clustering on intensities.

    Parameters
    ----------
    im : ndarray
        2-D grayscale image.
    scale : float, optional
        Downscaling factor applied before clustering (default 0.1).

    Returns
    -------
    ndarray
        Per-pixel cluster labels with the *downscaled* image shape.
    """
    # downscale to speed up the processing
    im_s = rescale(im, scale, anti_aliasing=False)
    # flatten to an (n_pixels, 1) feature matrix of intensities
    rows, cols = im_s.shape
    features = im_s.reshape(rows * cols, 1)
    # k-NN connectivity matrix for structured Ward
    conn = kneighbors_graph(features, n_neighbors=10, include_self=False)
    # Ward expects a symmetric connectivity matrix
    conn = 0.5 * (conn + conn.T)
    model = AgglomerativeClustering(n_clusters=2,
                                    linkage='ward',
                                    connectivity=conn)
    labels = model.fit_predict(features)
    # cleanup: the original reshaped the labels twice to the same shape
    return labels.reshape(im_s.shape)
def birch_clustering(im, scale=0.1):
    """Two-cluster BIRCH clustering on raw pixel intensities.

    Parameters
    ----------
    im : ndarray
        2-D grayscale image.
    scale : float, optional
        Downscaling factor applied before clustering (default 0.1).

    Returns
    -------
    ndarray
        Per-pixel cluster labels with the *downscaled* image shape.
    """
    # downscale to keep the CF-tree small
    im_s = rescale(im, scale, anti_aliasing=False)
    # flatten to an (n_pixels, 1) feature matrix of intensities
    rows, cols = im_s.shape
    features = im_s.reshape(rows * cols, 1)
    model = Birch(n_clusters=2, threshold=0.1)
    labels = model.fit_predict(features)
    # cleanup: the original reshaped the labels twice to the same shape
    return labels.reshape(im_s.shape)
def dbscan_clustering(im, scale=0.1):
    """DBSCAN clustering on raw pixel intensities.

    Parameters
    ----------
    im : ndarray
        2-D grayscale image.
    scale : float, optional
        Downscaling factor applied before clustering (default 0.1).

    Returns
    -------
    ndarray
        Per-pixel cluster labels with the *downscaled* image shape.
        Note: DBSCAN labels noise samples as -1.
    """
    # downscale to speed up the processing
    im_s = rescale(im, scale, anti_aliasing=False)
    # flatten to an (n_pixels, 1) feature matrix of intensities
    rows, cols = im_s.shape
    features = im_s.reshape(rows * cols, 1)
    model = DBSCAN(eps=0.05, min_samples=100, metric='euclidean')
    labels = model.fit_predict(features)
    # cleanup: the original reshaped the labels twice to the same shape
    return labels.reshape(im_s.shape)
def display_segm(ims, titles, num_cols=4):
    """Show the original image plus each segmentation side by side.

    Parameters
    ----------
    ims, titles : list
        Parallel lists; ims[0] is the raw image (shown with a gray
        colormap), the rest are label maps (shown with 'jet').
    num_cols : int, optional
        Number of subplot columns (default 4).
    """
    # ceil division without the deprecated np.math alias
    num_rows = -(-len(titles) // num_cols)
    fig, ax = plt.subplots(num_rows, num_cols, sharex=True, sharey=True)
    # robustness: ravel even when subplots returns a single Axes
    ax = np.atleast_1d(ax).ravel()
    for i, title in enumerate(titles):
        # BUGFIX: the original read the *global* variable `im` for i == 0,
        # which broke whenever the function was used outside this script.
        ax[i].imshow(ims[i], cmap='gray' if i == 0 else 'jet')
        ax[i].set_title(title)
        ax[i].axis('off')
        ax[i].autoscale(tight=True)
    # drop the unused trailing axes
    for i in range(len(titles), num_rows * num_cols):
        fig.delaxes(ax[i])
    plt.show()
if __name__ == '__main__':
    # Parallel lists of images and subplot titles consumed by display_segm.
    ims = []
    titles = []
    # load image, convert to float in [0, 1] and downscale once for speed
    filename = './im/cell2d.png'
    im = io.imread(filename)
    im = img_as_float(im)
    im = rescale(im, 0.2, anti_aliasing=False)
    ims.append(im)
    titles.append('image')
    # run every clustering variant on the same image (scale=1 because the
    # image above is already downscaled; the helpers must not shrink it again)
    _, im_labels = normalised_cut_clustering(im)
    ims.append(im_labels)
    titles.append('normalised_cut_clustering')
    im_labels = spectral_graph_clustering(im, scale=1)
    ims.append(im_labels)
    titles.append('spectral_graph_clustering')
    im_labels = spectral_graph_clustering_new(im, scale=1)
    ims.append(im_labels)
    titles.append('spectral_graph_clustering_new')
    im_labels = spectral_nn_clustering(im, scale=1)
    ims.append(im_labels)
    titles.append('spectral_nn_clustering')
    im_labels = agglomerative_graph_clustering(im, scale=1)
    ims.append(im_labels)
    titles.append('agglomerative_graph_clustering')
    im_labels = birch_clustering(im, scale=1)
    ims.append(im_labels)
    titles.append('birch_clustering')
    im_labels = dbscan_clustering(im, scale=1)
    ims.append(im_labels)
    titles.append('dbscan_clustering')
    # plot
    display_segm(ims, titles)
# ---- dataset row separator: repo_path = image_clustering_2d.py ----
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from skimage import color
from skimage import img_as_ubyte, img_as_float
from skimage.transform import rescale
from skimage.segmentation import slic
from skimage.future.graph import cut_normalized
from skimage.future.graph import rag_mean_color
from sklearn.feature_extraction.image import img_to_graph
from sklearn.cluster import spectral_clustering
from sklearn.cluster import SpectralClustering
from sklearn.cluster import Birch
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
def normalised_cut_clustering(im):
# [0-1] -> [0,255]
im = img_as_ubyte(im)
# region adjacency graph
im_labels_rag = slic(im, n_segments=500, compactness=30)
# normalized cut
g = rag_mean_color(im, im_labels_rag, mode='similarity')
im_labels_nc = cut_normalized(im_labels_rag, g, num_cuts=3)
# labeling
im_labels_rac = color.label2rgb(im_labels_rag, im, kind='avg')
im_labels_nc = color.label2rgb(im_labels_nc, im, kind='avg')
return im_labels_rac, im_labels_nc
def spectral_graph_clustering(im, scale=0.1):
# resize it to 10% of the original size to speed up the processing
im_s = rescale(im, scale, anti_aliasing=False)
# convert the image into a graph with the value of the gradient on the edges
graph = img_to_graph(im_s)
# define decreasing function of the gradient
beta = 10
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# clustering
im_labels = spectral_clustering(graph, n_clusters=2, assign_labels='discretize', random_state=1)
# resize back
im_labels = im_labels.reshape(im_s.shape)
return im_labels
def spectral_graph_clustering_new(im, scale=0.1):
# resize it to 10% of the original size to speed up the processing
im_s = rescale(im, scale, anti_aliasing=False)
# convert the image into a graph with the value of the gradient on the edges
graph = img_to_graph(im_s)
# define decreasing function of the gradient
beta = 10
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# model
model = SpectralClustering(n_clusters=2,
affinity='precomputed',
assign_labels='discretize',
random_state=1)
# clustering
labels = model.fit_predict(graph)
im_labels_s = labels.reshape(im_s.shape)
im_labels = im_labels_s.reshape(im_s.shape)
return im_labels
def spectral_nn_clustering(im, scale=0.1):
# resize it to 10% of the original size to speed up the processing
im_s = rescale(im, scale, anti_aliasing=False)
# reshape
x, y = im_s.shape
v = im_s.reshape(x*y, 1)
# model
model = SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity='nearest_neighbors')
# clustering
labels = model.fit_predict(v)
# reshape back
im_labels_s = labels.reshape(im_s.shape)
im_labels = im_labels_s.reshape(im_s.shape)
return im_labels
def agglomerative_graph_clustering(im, scale=0.1):
# resize it to 10% of the original size to speed up the processing
im_s = rescale(im, scale, anti_aliasing=False)
# reshape
x, y = im_s.shape
v = im_s.reshape(x*y, 1)
# connectivity matrix for structured Ward
conn = kneighbors_graph(v, n_neighbors=10, include_self=False)
# make connectivity symmetric
conn = 0.5 * (conn + conn.T)
# model
model = AgglomerativeClustering(n_clusters=2,
linkage='ward',
connectivity=conn)
# clustering
labels = model.fit_predict(v)
# reshape back
im_labels_s = labels.reshape(im_s.shape)
im_labels = im_labels_s.reshape(im_s.shape)
return im_labels
def birch_clustering(im, scale=0.1):
# resize it to 10% of the original size to speed up the processing
im_s = rescale(im, scale, anti_aliasing=False)
# reshape
x, y = im_s.shape
v = im_s.reshape(x*y, 1)
# model
model = Birch(n_clusters=2, threshold=0.1)
# clustering
labels = model.fit_predict(v)
# reshape back
im_labels_s = labels.reshape(im_s.shape)
im_labels = im_labels_s.reshape(im_s.shape)
return im_labels
def dbscan_clustering(im, scale=0.1):
# resize it to 10% of the original size to speed up the processing
im_s = rescale(im, scale, anti_aliasing=False)
# reshape
x, y = im_s.shape
v = im_s.reshape(x*y, 1)
# model
model = DBSCAN(eps=0.05, min_samples=100, metric='euclidean')
# clustering
labels = model.fit_predict(v)
# reshape back
im_labels_s = labels.reshape(im_s.shape)
im_labels = im_labels_s.reshape(im_s.shape)
return im_labels
def display_segm(ims, titles, num_cols=4):
    """Show the original image plus each segmentation side by side.

    Parameters
    ----------
    ims, titles : list
        Parallel lists; ims[0] is the raw image (shown with a gray
        colormap), the rest are label maps (shown with 'jet').
    num_cols : int, optional
        Number of subplot columns (default 4).
    """
    # ceil division without the deprecated np.math alias
    num_rows = -(-len(titles) // num_cols)
    fig, ax = plt.subplots(num_rows, num_cols, sharex=True, sharey=True)
    # robustness: ravel even when subplots returns a single Axes
    ax = np.atleast_1d(ax).ravel()
    for i, title in enumerate(titles):
        # BUGFIX: the original read the *global* variable `im` for i == 0,
        # which broke whenever the function was used outside this script.
        ax[i].imshow(ims[i], cmap='gray' if i == 0 else 'jet')
        ax[i].set_title(title)
        ax[i].axis('off')
        ax[i].autoscale(tight=True)
    # drop the unused trailing axes
    for i in range(len(titles), num_rows * num_cols):
        fig.delaxes(ax[i])
    plt.show()
if __name__ == '__main__':
# lists
ims = []
titles = []
# load image
filename = './im/cell2d.png'
im = io.imread(filename)
im = img_as_float(im)
im = rescale(im, 0.2, anti_aliasing=False)
ims.append(im)
titles.append('image')
# auto semi-supervised image clustering
_, im_labels = normalised_cut_clustering(im)
ims.append(im_labels)
titles.append('normalised_cut_clustering')
im_labels = spectral_graph_clustering(im, scale=1)
ims.append(im_labels)
titles.append('spectral_graph_clustering')
im_labels = spectral_graph_clustering_new(im, scale=1)
ims.append(im_labels)
titles.append('spectral_graph_clustering_new')
im_labels = spectral_nn_clustering(im, scale=1)
ims.append(im_labels)
titles.append('spectral_nn_clustering')
im_labels = agglomerative_graph_clustering(im, scale=1)
ims.append(im_labels)
titles.append('agglomerative_graph_clustering')
im_labels = birch_clustering(im, scale=1)
ims.append(im_labels)
titles.append('birch_clustering')
im_labels = dbscan_clustering(im, scale=1)
ims.append(im_labels)
titles.append('dbscan_clustering')
# plot
    display_segm(ims, titles)
# quality_prob 0.731251 | learning_prob 0.491456 (dataset row metadata, not code)
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "Somecode Twitter Science and Research Platform"
LONG_DESCRIPTION = """\
SOMECODE is a research platform for serious observation and analysis
of Twitter data. SOMECODE brings together 9 years of unbroken continuity
in developing social media research tools. Previous tools and processes
developed by the contributor team are in daily use by many FORTUNE100
companies and major advertising agencies. SOMECODE is the solution we
always wanted to build, but due to the kinds of restraints commercial
entities have, never got to.
"""
DISTNAME = 'somecode'
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
URL = 'http://botlab.io'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/S0MEC0DE/'
VERSION = '0.9.9'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
    """Return the list of pip requirement names that are not importable.

    Mirrors the original behaviour and ordering: each missing package
    appends its requirement string, and a missing IPython additionally
    pulls in 'python-tk'.

    Returns
    -------
    list of str
        Requirement names suitable for setup(install_requires=...).
    """
    import importlib.util

    # (import name, requirement name) pairs, in the original order.
    candidates = [
        ('numpy', 'numpy'),
        ('seaborn', 'seaborn'),
        ('matplotlib', 'matplotlib'),
        ('pandas', 'pandas'),
        ('nltk', 'nltk'),
        ('tweepy', 'tweepy'),
        ('twython', 'twython'),
    ]
    install_requires = []
    for module, requirement in candidates:
        # find_spec detects availability without importing the
        # (potentially heavy) package, unlike the old try/import chain.
        if importlib.util.find_spec(module) is None:
            install_requires.append(requirement)
    if importlib.util.find_spec('IPython') is None:
        install_requires.append('IPython')
        install_requires.append('python-tk')
    return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['somecode'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
          )
# ---- dataset row separator: repo_path = setup.py ----
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "Somecode Twitter Science and Research Platform"
LONG_DESCRIPTION = """\
SOMECODE is a research platform for serious observation and analysis
of Twitter data. SOMECODE brings together 9 years of unbroken continuity
in developing social media research tools. Previous tools and processes
developed by the contributor team are in daily use by many FORTUNE100
companies and major advertising agencies. SOMECODE is the solution we
always wanted to build, but due to the kinds of restraints commercial
entities have, never got to.
"""
DISTNAME = 'somecode'
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
URL = 'http://botlab.io'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/S0MEC0DE/'
VERSION = '0.9.9'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import seaborn
except ImportError:
install_requires.append('seaborn')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import pandas
except ImportError:
install_requires.append('pandas')
try:
import nltk
except ImportError:
install_requires.append('nltk')
try:
import tweepy
except ImportError:
install_requires.append('tweepy')
try:
import twython
except ImportError:
install_requires.append('twython')
try:
import IPython
except ImportError:
install_requires.append('IPython')
install_requires.append('python-tk')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['somecode'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
          )
# quality_prob 0.199152 | learning_prob 0.16455 (dataset row metadata, not code)
import xlwt
import re
from structure import TABLE, COLUMN, DRIVER
class ReTableDriver(DRIVER):
    """Regex-based driver that parses one MySQL CREATE TABLE statement."""

    def parse(self, s):
        """Parse a CREATE TABLE statement.

        Parameters
        ----------
        s : str
            One statement (without the trailing ';'), with runs of
            whitespace already collapsed to single spaces.

        Returns
        -------
        (comment, table_name, columns, engine, charset)
            columns is a list of COLUMN objects; key constraints
            (PRI / FOR / INDEX / CO-UNIQUE) are attached to them.
        """
        comment, table_name, columns, engine, charset = "", "None", [], "Default", "Default"
        primary_keys, foreign_keys, index_keys, unique_keys = [], [], [], []
        # Extract table-level information (leading /*comment*/, table name,
        # ENGINE and DEFAULT CHARSET options).
        res = re.search(r'(/\*.*\*/)?\s*CREATE TABLE(?: IF NOT EXISTS)?\s*(\w+)\s*\(.*\)', s, re.I|re.S)
        if res:
            comment, table_name = res.groups()
            engine, charset = self.pick_value(s, ['ENGINE', 'DEFAULT CHARSET'])
        # Re-match to capture only the column/constraint body between ( ... ).
        res = re.search(r'(?:/\*.*\*/)?\s*CREATE TABLE(?: IF NOT EXISTS)?\s*\w+\s*\((.*)\)', s, re.I|re.S)
        # Build the column list and collect key declarations.
        if res:
            sentences = res.group(1).split('\n')  # Do not use re.split() here!
            for i in sentences:  # every sentence only contains no more than 1 blank as devider
                if len(i)<=0:
                    continue
                res_pri = re.search(r'PRIMARY KEY ?\( *(\w+) *\)', i, re.I)
                res_for = re.search(r'FOREIGN KEY ?\( *(\w+) *\) ?REFERENCES (\w+) ?\( *(\w+) *\)', i, re.I)
                res_index = re.search(r'INDEX \w* *\( *(\w+) *\)', i, re.I)
                res_unique = re.search(r'^ ?UNIQUE[\w ]*\( *([\w+, ]+) *\)', i, re.I)
                if res_pri:
                    primary_keys.append(res_pri.groups())
                elif res_for:
                    foreign_keys.append(res_for.groups())
                elif res_index:
                    index_keys.append(res_index.groups())
                elif res_unique:
                    unique_keys.extend(re.sub(r' *', '', res_unique.group(1)).split(','))
                    unique_keys = list(set(unique_keys))  # deduplicate
                else:
                    # Anything else is treated as a column definition line.
                    columns.append(COLUMN(i, ReCulomnDriver()))
        # Attach the collected key constraints to the matching columns.
        for i in primary_keys:
            for c in columns:
                if(c.column_name == i[0]):
                    c.key_constraint = 'PRI'
        for i in foreign_keys:
            for c in columns:
                if(c.column_name == i[0]):
                    c.key_constraint += '; FOR %s.%s' % (i[1], i[2])
        for i in index_keys:
            for c in columns:
                if(c.column_name == i[0]):
                    c.key_constraint += '; INDEX'
        for i in unique_keys:
            for c in columns:
                if(c.column_name == i):
                    c.key_constraint += '; CO-UNIQUE'
                    c.desc += '; UNIQUE%s' % unique_keys
        return comment, table_name, columns, engine, charset

    def pick_value(self, s, keys):
        """For each key, return the value of a `key = value` pair in *s*
        ("Default" when absent); e.g. for 'a=c' the key 'a' yields 'c'."""
        values = []
        for k in keys:
            value, res = "Default", re.search(r''+k+' *= *(\w+)', s, re.I)
            if res:
                value = res.group(1)
            values.append(value)
        return tuple(values)
class ReCulomnDriver(DRIVER):
    """Regex-based driver that parses a single column definition line.

    NOTE(review): keeps the original 'Culomn' (sic) spelling because the
    class is instantiated by this name elsewhere in the module.
    """

    def parse(self, s):
        """Parse one column definition of the form
        /*title*/ name type ... /*description*/ .

        Returns
        -------
        (title, column_name, key_constraint, key_type, default_value,
         not_null, desc) — all strings; "/" marks fields that could not
        be extracted.
        """
        title, column_name, key_constraint, key_type, default_value, not_null, desc = "/", "/", "", "/", "", "YES", "/"
        # Basic column information: leading comment, name, type, trailing comment.
        res = re.search(r'/\*(.*)\*/\s*(\w+) ([\w\(\)]+).*/\*(.*)\*/', s, re.I)
        if res:
            title, column_name, key_type, desc = res.groups()
        # Default or enum values  r'DEFAULT (\d|(?:\'.*?\'))' || r'ENUM ?\( ?(.*?) ?\)'
        # res = re.search(r'DEFAULT (\d|(?:\'.*?\'))|ENUM ?\( ?(.*?) ?\)', s, re.I)
        res = re.search(r'DEFAULT (\d|(?:\'.*?\'))', s, re.I)
        if res:
            default_value = res.group(1)
        res = re.search(r'ENUM ?(\(.*?\))', s, re.I)
        if res:
            key_type += res.group(1)
        # Nullable? A column that carries a default is treated as NOT NULL here.
        not_null = "YES" if re.search(r'NOT NULL', s, re.I) is not None or default_value != "" else "NO"
        # UNIQUE key?
        key_constraint = "UNI" if re.search(r'UNIQUE', s, re.I) is not None else ""
        # Unsigned numeric type?
        key_type += " UNSIGNED" if re.search(r'UNSIGNED', s, re.I) is not None else ""
        # Auto-increment is recorded in the description column.
        desc += "; AUTO_INC" if re.search(r'AUTO_INCREMENT', s, re.I) is not None else ""
        return title, column_name, key_constraint, key_type, default_value, not_null, desc
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path', default=None, type=str)
parser.add_argument('--charset', default="utf-8", type=str)
parser.add_argument('--output', default="./output.xls", type=str)
parser.add_argument('--debug', default=False, type=bool)
args = parser.parse_args()
print(args)
tables = []
with open(args.path, encoding=args.charset) as file:
sentences = re.sub(r'[ \t\r\f]+',' ', file.read()).split(";") # remove multiple blanks
for i in sentences:
if len(i)>0:
tables.append(TABLE(i, ReTableDriver()))
xls = xlwt.Workbook()
for t in tables:
# 控制台输出测试信息
if args.debug:
print(t)
for c in t.columns:
print(c)
print()
t.output(xls)
xls.save(args.output)
print('Success!') | src/dbvd.py | import xlwt
import re
from structure import TABLE, COLUMN, DRIVER
class ReTableDriver(DRIVER):
def parse(self, s):
comment, table_name, columns, engine, charset = "", "None", [], "Default", "Default"
primary_keys, foreign_keys, index_keys, unique_keys = [], [], [], []
# 提取表信息
res = re.search(r'(/\*.*\*/)?\s*CREATE TABLE(?: IF NOT EXISTS)?\s*(\w+)\s*\(.*\)', s, re.I|re.S)
if res:
comment, table_name = res.groups()
engine, charset = self.pick_value(s, ['ENGINE', 'DEFAULT CHARSET'])
res = re.search(r'(?:/\*.*\*/)?\s*CREATE TABLE(?: IF NOT EXISTS)?\s*\w+\s*\((.*)\)', s, re.I|re.S)
# 构建列信息
if res:
sentences = res.group(1).split('\n') # Do not use re.split() here!
for i in sentences: # every sentence only contains no more than 1 blank as devider
if len(i)<=0:
continue
res_pri = re.search(r'PRIMARY KEY ?\( *(\w+) *\)', i, re.I)
res_for = re.search(r'FOREIGN KEY ?\( *(\w+) *\) ?REFERENCES (\w+) ?\( *(\w+) *\)', i, re.I)
res_index = re.search(r'INDEX \w* *\( *(\w+) *\)', i, re.I)
res_unique = re.search(r'^ ?UNIQUE[\w ]*\( *([\w+, ]+) *\)', i, re.I)
if res_pri:
primary_keys.append(res_pri.groups())
elif res_for:
foreign_keys.append(res_for.groups())
elif res_index:
index_keys.append(res_index.groups())
elif res_unique:
unique_keys.extend(re.sub(r' *', '', res_unique.group(1)).split(','))
unique_keys = list(set(unique_keys)) # 去重
else:
columns.append(COLUMN(i, ReCulomnDriver()))
# 构建键约束
for i in primary_keys:
for c in columns:
if(c.column_name == i[0]):
c.key_constraint = 'PRI'
for i in foreign_keys:
for c in columns:
if(c.column_name == i[0]):
c.key_constraint += '; FOR %s.%s' % (i[1], i[2])
for i in index_keys:
for c in columns:
if(c.column_name == i[0]):
c.key_constraint += '; INDEX'
for i in unique_keys:
for c in columns:
if(c.column_name == i):
c.key_constraint += '; CO-UNIQUE'
c.desc += '; UNIQUE%s' % unique_keys
return comment, table_name, columns, engine, charset
def pick_value(self, s, keys):
# 按键值对获取信息,如 a=c,将返回c
values = []
for k in keys:
value, res = "Default", re.search(r''+k+' *= *(\w+)', s, re.I)
if res:
value = res.group(1)
values.append(value)
return tuple(values)
class ReCulomnDriver(DRIVER):
def parse(self, s):
title, column_name, key_constraint, key_type, default_value, not_null, desc = "/", "/", "", "/", "", "YES", "/"
# 提取列基本信息
res = re.search(r'/\*(.*)\*/\s*(\w+) ([\w\(\)]+).*/\*(.*)\*/', s, re.I)
if res:
title, column_name, key_type, desc = res.groups()
# 查询默认值或枚举值 r'DEFAULT (\d|(?:\'.*?\'))' || r'ENUM ?\( ?(.*?) ?\)'
# res = re.search(r'DEFAULT (\d|(?:\'.*?\'))|ENUM ?\( ?(.*?) ?\)', s, re.I)
res = re.search(r'DEFAULT (\d|(?:\'.*?\'))', s, re.I)
if res:
default_value = res.group(1)
res = re.search(r'ENUM ?(\(.*?\))', s, re.I)
if res:
key_type += res.group(1)
# 是否可空
not_null = "YES" if re.search(r'NOT NULL', s, re.I) is not None or default_value != "" else "NO"
# 查询是否为UNIQUE键值
key_constraint = "UNI" if re.search(r'UNIQUE', s, re.I) is not None else ""
# 查询是否为无符号值
key_type += " UNSIGNED" if re.search(r'UNSIGNED', s, re.I) is not None else ""
# 是否自增,在备注中标出
desc += "; AUTO_INC" if re.search(r'AUTO_INCREMENT', s, re.I) is not None else ""
return title, column_name, key_constraint, key_type, default_value, not_null, desc
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path', default=None, type=str)
parser.add_argument('--charset', default="utf-8", type=str)
parser.add_argument('--output', default="./output.xls", type=str)
parser.add_argument('--debug', default=False, type=bool)
args = parser.parse_args()
print(args)
tables = []
with open(args.path, encoding=args.charset) as file:
sentences = re.sub(r'[ \t\r\f]+',' ', file.read()).split(";") # remove multiple blanks
for i in sentences:
if len(i)>0:
tables.append(TABLE(i, ReTableDriver()))
xls = xlwt.Workbook()
for t in tables:
# 控制台输出测试信息
if args.debug:
print(t)
for c in t.columns:
print(c)
print()
t.output(xls)
xls.save(args.output)
print('Success!') | 0.165189 | 0.095139 |
import os
import sys
sys.path.insert(0, './')
import pickle
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dim', type = int, default = 2,
help = 'the number of dimensions, default = 2')
parser.add_argument('--pts', type = int, default = 10000,
help = 'the number of points, default = 10000')
parser.add_argument('--classes', type = int, default = 10,
help = 'the number of classes, default = 10')
parser.add_argument('--out_file', type = str, default = None,
help = 'the output file')
args = parser.parse_args()
if args.out_file is None:
raise ValueError('the output file need to be specified.')
if not os.path.exists(os.path.dirname(args.out_file)):
os.makedirs(os.path.dirname(args.out_file))
# construct base points
base_pts = [np.random.uniform(low = -1., high = 1., size = [args.dim,]) for _ in range(args.classes)]
base_pts = np.array(base_pts)
print('Base points constructed!')
# construct data points
data_set = []
label_set = []
for idx in range(args.pts):
sys.stdout.write('%d / %d loaded\r' % (idx + 1, args.pts))
data_pt = np.random.uniform(low = -1., high = 1., size = [args.dim,])
distance_list = [(idx, np.linalg.norm(base_pt - data_pt) ** 2) for idx, base_pt in enumerate(base_pts)]
distance_list = sorted(distance_list, key = lambda x: x[1])
label_pt = distance_list[0][0]
data_set.append(data_pt)
label_set.append(label_pt)
data_set = np.array(data_set)
label_set = np.array(label_set, dtype = int)
print('Data points constructed!')
# calculate boundary
boundary_pts = []
for base_idx1 in range(args.classes):
for base_idx2 in range(base_idx1 + 1, args.classes):
pt1 = base_pts[base_idx1]
pt2 = base_pts[base_idx2]
mid = (pt1 + pt2) / 2.
arr = pt2 - pt1
arr = np.array([arr[1], - arr[0]]) / np.linalg.norm(arr)
min_x = (- 1. - mid[0]) / arr[0]
max_x = (1. - mid[0]) / arr[0]
min_y = (- 1. - mid[1]) / arr[1]
max_y = (1. - mid[1]) / arr[1]
_, min_idx, max_idx, _ = list(sorted([min_x, min_y, max_x, max_y]))
for idx in np.arange(min_idx, max_idx, 0.005):
pt = mid + idx * arr
boundary = True
dis = np.linalg.norm(pt - pt1)
for base_idx in range(args.classes):
if base_idx in [base_idx1, base_idx2]:
continue
dis_ = np.linalg.norm(pt - base_pts[base_idx])
if dis_ < dis:
boundary = False
break
if boundary == True:
boundary_pts.append((pt[0], pt[1]))
print('Boundary points constructed!')
pickle.dump({'data': data_set, 'label': label_set, 'base_points': base_pts,
'classes': args.classes, 'boundary': boundary_pts}, open(args.out_file, 'wb'))
print('Information dumpped in file %s' % args.out_file) | gen_syn.py | import os
import sys
sys.path.insert(0, './')
import pickle
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dim', type = int, default = 2,
help = 'the number of dimensions, default = 2')
parser.add_argument('--pts', type = int, default = 10000,
help = 'the number of points, default = 10000')
parser.add_argument('--classes', type = int, default = 10,
help = 'the number of classes, default = 10')
parser.add_argument('--out_file', type = str, default = None,
help = 'the output file')
args = parser.parse_args()
if args.out_file is None:
raise ValueError('the output file need to be specified.')
if not os.path.exists(os.path.dirname(args.out_file)):
os.makedirs(os.path.dirname(args.out_file))
# construct base points
base_pts = [np.random.uniform(low = -1., high = 1., size = [args.dim,]) for _ in range(args.classes)]
base_pts = np.array(base_pts)
print('Base points constructed!')
# construct data points
data_set = []
label_set = []
for idx in range(args.pts):
sys.stdout.write('%d / %d loaded\r' % (idx + 1, args.pts))
data_pt = np.random.uniform(low = -1., high = 1., size = [args.dim,])
distance_list = [(idx, np.linalg.norm(base_pt - data_pt) ** 2) for idx, base_pt in enumerate(base_pts)]
distance_list = sorted(distance_list, key = lambda x: x[1])
label_pt = distance_list[0][0]
data_set.append(data_pt)
label_set.append(label_pt)
data_set = np.array(data_set)
label_set = np.array(label_set, dtype = int)
print('Data points constructed!')
# calculate boundary
boundary_pts = []
for base_idx1 in range(args.classes):
for base_idx2 in range(base_idx1 + 1, args.classes):
pt1 = base_pts[base_idx1]
pt2 = base_pts[base_idx2]
mid = (pt1 + pt2) / 2.
arr = pt2 - pt1
arr = np.array([arr[1], - arr[0]]) / np.linalg.norm(arr)
min_x = (- 1. - mid[0]) / arr[0]
max_x = (1. - mid[0]) / arr[0]
min_y = (- 1. - mid[1]) / arr[1]
max_y = (1. - mid[1]) / arr[1]
_, min_idx, max_idx, _ = list(sorted([min_x, min_y, max_x, max_y]))
for idx in np.arange(min_idx, max_idx, 0.005):
pt = mid + idx * arr
boundary = True
dis = np.linalg.norm(pt - pt1)
for base_idx in range(args.classes):
if base_idx in [base_idx1, base_idx2]:
continue
dis_ = np.linalg.norm(pt - base_pts[base_idx])
if dis_ < dis:
boundary = False
break
if boundary == True:
boundary_pts.append((pt[0], pt[1]))
print('Boundary points constructed!')
pickle.dump({'data': data_set, 'label': label_set, 'base_points': base_pts,
'classes': args.classes, 'boundary': boundary_pts}, open(args.out_file, 'wb'))
print('Information dumpped in file %s' % args.out_file) | 0.221519 | 0.13569 |
# Author: <NAME>
# Description: Show Network and According Measurements
import sys, os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
## Parameters
##
config = {
"acolor": "#0336FF",
"bcolor": "#FF0266",
"awcolor": "#6ebeff",
"canvas_xrange": (+0.0, 9.0),
"canvas_yrange": (-0.5, 12.5),
"separator": "\t",
"offset": 0.1,
}
kwargs_plot = {
"linewidth" : 1.5,
"linestyle" : "solid",
"color" : "#000000",
}
kwargs_figure = {
"figsize" : (15/2.54, 10/2.54),
"dpi" : 300,
}
kwargs_font = {
# 'family' : 'normal',
# 'weight' : 'bold',
'size' : 10,
}
arrow_dict = {
"head_width" : 0.1,
"head_length" : 0.15,
"length_includes_head" : True,
"width" : 0.01,
"picker": 10,
}
matplotlib.rc('font', **kwargs_font)
dict_annotation = {
"horizontalalignment": "right",
"verticalalignment": "center",
"fontdict" : {"size" : 4},
}
states = [
(( 8,0, 8), 1.3, 0.0),
(( 9,0, 9), 1.3, 1.0),
((10,0,10), 1.3, 2.5),
((11,0,11), 1.3, 4.6),
((12,0,12), 1.3, 7.4),
((13,0,13), 1.3,10.4),
(( 8,1, 8), 3.4, 0.4),
(( 9,1, 9), 3.4, 1.4),
((10,1,10), 3.4, 2.9),
((11,1,11), 3.4, 5.0),
((12,1,12), 3.4, 7.8),
((13,1,13), 3.4,10.8),
(( 8,1, 7), 6.2, 1.1),
(( 9,1, 8), 6.2, 2.1),
((10,1, 9), 6.2, 3.6),
((11,1,10), 6.2, 5.7),
((12,1,11), 6.2, 8.5),
((13,1,12), 6.2,11.5),
(( 8,2, 7), 8.3, 1.5),
(( 9,2, 8), 8.3, 2.5),
((10,2, 9), 8.3, 4.0),
((11,2,10), 8.3, 6.1),
((12,2,11), 8.3, 8.9),
((13,2,12), 8.3,11.9),
]
transitions = [
(( 9,0, 9), ( 8,0, 8), "qR0/DR_88057.98_Pump_79430.5+79310.5_18-Jan-21-6.26.49 PM.dat"),
((10,0,10), ( 9,0, 9), "qR0/DR_88057.98_Pump_79430.5+79310.5_18-Jan-21-6.26.49 PM.dat"),
((11,0,11), (10,0,10), "qR0/DR_96635.44_Pump_88057.98+87937.98_18-Jan-21-6.28.36 PM.dat"),
((12,0,12), (11,0,11), "qR0/DR_105166.675_Pump_96635.44+96515.44_18-Jan-21-6.30.27 PM.dat"),
((13,0,13), (12,0,12), "qR0/DR_113657.775_Pump_105166.675+105046.675_18-Jan-21-6.31.21 PM.dat"),
(( 9,1, 9), ( 8,1, 8), "qR1/DR_78000_Pump_92319.2832+92199.2832_09-Apr-21-1.31.51 PM_onlyForward.dat"),
((10,1,10), ( 9,1, 9), "qR1/DR_87000_Pump_78103.9+77983.9_09-Apr-21-3.14.41 PM_onlyForward.dat"),
((11,1,11), (10,1,10), "qR1/DR_95271_Pump_86697.7253+86577.7253_09-Apr-21-3.27.02 PM_onlyForward.dat"),
((12,1,12), (11,1,11), "qR1/DR_104000_Pump_95285.3785+95165.3785_09-Apr-21-3.59.47 PM_onlyForward.dat"),
((13,1,13), (12,1,12), "qR1/DR_112800_Pump_103864.1459+103744.1459_09-Apr-21-4.27.10 PM_onlyForward.dat"),
((10,0,10), ( 9,1, 9), "pR1/DR_L1_75169.2_Pump_86697.7253+86577.7253_09-Apr-21-4.41.38 PM_onlyForward.dat"),
((11,0,11), (10,1,10), "pR1/DR_L2_85106.9147_Pump_95285.3785+95165.3785_09-Apr-21-4.42.23 PM_onlyForward.dat"),
((12,0,12), (11,1,11), "pR1/DR_L3_94988.2112_Pump_103864.1459+103744.1459_09-Apr-21-4.43.07 PM_onlyForward.dat"),
((13,0,13), (12,1,12), "pR1/DR_L4_104781.8403_Pump_112432.4613+112312.4613_09-Apr-21-4.43.52 PM_onlyForward.dat"),
(( 9,1, 9), ( 8,0, 8), "rR0/DR_92500_Pump_79430.5+79310.5_09-Apr-21-10.31.31 AM_onlyForward.dat"),
((10,1,10), ( 9,0, 9), "rR0/DR_L1_99586.5053_Pump_88057.98+87937.98_09-Apr-21-4.35.30 PM_onlyForward.dat"),
((11,1,11), (10,0,10), "rR0/DR_L2_106813.9038_Pump_96635.44+96515.44_09-Apr-21-4.37.15 PM_onlyForward.dat"),
((12,1,12), (11,0,11), "rR0/DR_L3_114042.6097_Pump_105166.675+105046.675_09-Apr-21-4.39.00 PM_onlyForward.dat"),
((13,1,13), (12,0,12), "rR0/DR_L4_121308.396_Pump_113657.775+113537.775_09-Apr-21-4.40.46 PM_onlyForward.dat"),
]
## Plotting figure
##
def main():
fig, ax = plt.subplots(1, 1, **kwargs_figure)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.axis("off")
ax.axis('equal')
ax.set_xlim(*config["canvas_xrange"])
ax.set_ylim(*config["canvas_yrange"])
for state in states:
xs = np.linspace(-0.5, +0.5, 101) + state[1]
ys = xs*0+state[2]
ax.plot(xs, ys, **kwargs_plot)
label = f"${state[0][0]}_{{{state[0][1]}, {state[0][2]}}}$"
ax.text(**dict_annotation, x=state[1]-0.6, y=state[2]+0.05, s=label)
def get_coords(state):
for tmp in states:
if state == tmp[0]:
return(tmp)
def get_color(transition):
delta_J = transition[0][0] - transition[1][0]
delta_Ka = transition[0][1] - transition[1][1]
delta_Kc = transition[0][2] - transition[1][2]
if delta_J == 1 and delta_Ka == 0 and delta_Kc != 0:
return(config["acolor"])
elif delta_J == 0 and delta_Ka == 0 and delta_Kc != 0:
return(config["awcolor"])
else:
return(config["bcolor"])
def get_offset(start, end):
if start > end:
return -config["offset"]
elif end > start:
return +config["offset"]
else:
return 0
arrows = {}
for transition in transitions:
_, x1, y1 = get_coords(transition[0])
_, x2, y2 = get_coords(transition[1])
color = get_color(transition)
xoffset = get_offset(x2, x1)
yoffset = get_offset(y2, y1)
arrow = ax.arrow(x2+xoffset, y2+yoffset, x1-x2-2*xoffset, y1-y2-2*yoffset, color=color, **arrow_dict)
arrows[arrow] = transition
def onclick(event):
transition = arrows[event.artist]
fig, ax = plt.subplots()
data = np.genfromtxt(transition[2], delimiter=config["separator"])
xs = data[:, 0]
ys = data[:, 1]
ax.plot(xs, ys)
fig.show()
cid = fig.canvas.mpl_connect('pick_event', onclick)
fig.tight_layout()
plt.show()
if __name__ == "__main__":
main() | NetworkViewer/network.py |
# Author: <NAME>
# Description: Show Network and According Measurements
import sys, os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
## Parameters
##
config = {
"acolor": "#0336FF",
"bcolor": "#FF0266",
"awcolor": "#6ebeff",
"canvas_xrange": (+0.0, 9.0),
"canvas_yrange": (-0.5, 12.5),
"separator": "\t",
"offset": 0.1,
}
kwargs_plot = {
"linewidth" : 1.5,
"linestyle" : "solid",
"color" : "#000000",
}
kwargs_figure = {
"figsize" : (15/2.54, 10/2.54),
"dpi" : 300,
}
kwargs_font = {
# 'family' : 'normal',
# 'weight' : 'bold',
'size' : 10,
}
arrow_dict = {
"head_width" : 0.1,
"head_length" : 0.15,
"length_includes_head" : True,
"width" : 0.01,
"picker": 10,
}
matplotlib.rc('font', **kwargs_font)
dict_annotation = {
"horizontalalignment": "right",
"verticalalignment": "center",
"fontdict" : {"size" : 4},
}
states = [
(( 8,0, 8), 1.3, 0.0),
(( 9,0, 9), 1.3, 1.0),
((10,0,10), 1.3, 2.5),
((11,0,11), 1.3, 4.6),
((12,0,12), 1.3, 7.4),
((13,0,13), 1.3,10.4),
(( 8,1, 8), 3.4, 0.4),
(( 9,1, 9), 3.4, 1.4),
((10,1,10), 3.4, 2.9),
((11,1,11), 3.4, 5.0),
((12,1,12), 3.4, 7.8),
((13,1,13), 3.4,10.8),
(( 8,1, 7), 6.2, 1.1),
(( 9,1, 8), 6.2, 2.1),
((10,1, 9), 6.2, 3.6),
((11,1,10), 6.2, 5.7),
((12,1,11), 6.2, 8.5),
((13,1,12), 6.2,11.5),
(( 8,2, 7), 8.3, 1.5),
(( 9,2, 8), 8.3, 2.5),
((10,2, 9), 8.3, 4.0),
((11,2,10), 8.3, 6.1),
((12,2,11), 8.3, 8.9),
((13,2,12), 8.3,11.9),
]
transitions = [
(( 9,0, 9), ( 8,0, 8), "qR0/DR_88057.98_Pump_79430.5+79310.5_18-Jan-21-6.26.49 PM.dat"),
((10,0,10), ( 9,0, 9), "qR0/DR_88057.98_Pump_79430.5+79310.5_18-Jan-21-6.26.49 PM.dat"),
((11,0,11), (10,0,10), "qR0/DR_96635.44_Pump_88057.98+87937.98_18-Jan-21-6.28.36 PM.dat"),
((12,0,12), (11,0,11), "qR0/DR_105166.675_Pump_96635.44+96515.44_18-Jan-21-6.30.27 PM.dat"),
((13,0,13), (12,0,12), "qR0/DR_113657.775_Pump_105166.675+105046.675_18-Jan-21-6.31.21 PM.dat"),
(( 9,1, 9), ( 8,1, 8), "qR1/DR_78000_Pump_92319.2832+92199.2832_09-Apr-21-1.31.51 PM_onlyForward.dat"),
((10,1,10), ( 9,1, 9), "qR1/DR_87000_Pump_78103.9+77983.9_09-Apr-21-3.14.41 PM_onlyForward.dat"),
((11,1,11), (10,1,10), "qR1/DR_95271_Pump_86697.7253+86577.7253_09-Apr-21-3.27.02 PM_onlyForward.dat"),
((12,1,12), (11,1,11), "qR1/DR_104000_Pump_95285.3785+95165.3785_09-Apr-21-3.59.47 PM_onlyForward.dat"),
((13,1,13), (12,1,12), "qR1/DR_112800_Pump_103864.1459+103744.1459_09-Apr-21-4.27.10 PM_onlyForward.dat"),
((10,0,10), ( 9,1, 9), "pR1/DR_L1_75169.2_Pump_86697.7253+86577.7253_09-Apr-21-4.41.38 PM_onlyForward.dat"),
((11,0,11), (10,1,10), "pR1/DR_L2_85106.9147_Pump_95285.3785+95165.3785_09-Apr-21-4.42.23 PM_onlyForward.dat"),
((12,0,12), (11,1,11), "pR1/DR_L3_94988.2112_Pump_103864.1459+103744.1459_09-Apr-21-4.43.07 PM_onlyForward.dat"),
((13,0,13), (12,1,12), "pR1/DR_L4_104781.8403_Pump_112432.4613+112312.4613_09-Apr-21-4.43.52 PM_onlyForward.dat"),
(( 9,1, 9), ( 8,0, 8), "rR0/DR_92500_Pump_79430.5+79310.5_09-Apr-21-10.31.31 AM_onlyForward.dat"),
((10,1,10), ( 9,0, 9), "rR0/DR_L1_99586.5053_Pump_88057.98+87937.98_09-Apr-21-4.35.30 PM_onlyForward.dat"),
((11,1,11), (10,0,10), "rR0/DR_L2_106813.9038_Pump_96635.44+96515.44_09-Apr-21-4.37.15 PM_onlyForward.dat"),
((12,1,12), (11,0,11), "rR0/DR_L3_114042.6097_Pump_105166.675+105046.675_09-Apr-21-4.39.00 PM_onlyForward.dat"),
((13,1,13), (12,0,12), "rR0/DR_L4_121308.396_Pump_113657.775+113537.775_09-Apr-21-4.40.46 PM_onlyForward.dat"),
]
## Plotting figure
##
def main():
fig, ax = plt.subplots(1, 1, **kwargs_figure)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.axis("off")
ax.axis('equal')
ax.set_xlim(*config["canvas_xrange"])
ax.set_ylim(*config["canvas_yrange"])
for state in states:
xs = np.linspace(-0.5, +0.5, 101) + state[1]
ys = xs*0+state[2]
ax.plot(xs, ys, **kwargs_plot)
label = f"${state[0][0]}_{{{state[0][1]}, {state[0][2]}}}$"
ax.text(**dict_annotation, x=state[1]-0.6, y=state[2]+0.05, s=label)
def get_coords(state):
for tmp in states:
if state == tmp[0]:
return(tmp)
def get_color(transition):
delta_J = transition[0][0] - transition[1][0]
delta_Ka = transition[0][1] - transition[1][1]
delta_Kc = transition[0][2] - transition[1][2]
if delta_J == 1 and delta_Ka == 0 and delta_Kc != 0:
return(config["acolor"])
elif delta_J == 0 and delta_Ka == 0 and delta_Kc != 0:
return(config["awcolor"])
else:
return(config["bcolor"])
def get_offset(start, end):
if start > end:
return -config["offset"]
elif end > start:
return +config["offset"]
else:
return 0
arrows = {}
for transition in transitions:
_, x1, y1 = get_coords(transition[0])
_, x2, y2 = get_coords(transition[1])
color = get_color(transition)
xoffset = get_offset(x2, x1)
yoffset = get_offset(y2, y1)
arrow = ax.arrow(x2+xoffset, y2+yoffset, x1-x2-2*xoffset, y1-y2-2*yoffset, color=color, **arrow_dict)
arrows[arrow] = transition
def onclick(event):
transition = arrows[event.artist]
fig, ax = plt.subplots()
data = np.genfromtxt(transition[2], delimiter=config["separator"])
xs = data[:, 0]
ys = data[:, 1]
ax.plot(xs, ys)
fig.show()
cid = fig.canvas.mpl_connect('pick_event', onclick)
fig.tight_layout()
plt.show()
if __name__ == "__main__":
main() | 0.354545 | 0.221277 |
from datetime import datetime, timedelta, timezone
import pytest
from assertpy import assert_that
from common.utils import time_is_up
from slurm_plugin.common import TIMESTAMP_FORMAT, get_clustermgtd_heartbeat
@pytest.mark.parametrize(
"initial_time, current_time, grace_time, expected_result",
[
(datetime(2020, 1, 1, 0, 0, 0), datetime(2020, 1, 1, 0, 0, 29), 30, False),
(datetime(2020, 1, 1, 0, 0, 0), datetime(2020, 1, 1, 0, 0, 30), 30, True),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours ahead of UTC, so this time stamp is actually 30 mins before initial_time
datetime(2020, 1, 1, 0, 30, 0, tzinfo=timezone(timedelta(hours=1))),
30 * 60,
False,
),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours ahead of UTC, so this time stamp is actually 30 mins after initial_time
datetime(2020, 1, 1, 1, 30, 0, tzinfo=timezone(timedelta(hours=1))),
30 * 60,
True,
),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours behind of UTC, so this time stamp is actually 1.5 hrs after initial_time
datetime(2020, 1, 1, 0, 30, 0, tzinfo=timezone(-timedelta(hours=1))),
90 * 60,
True,
),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours behind of UTC, so this time stamp is actually 1 hrs after initial_time
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone(-timedelta(hours=1))),
90 * 60,
False,
),
],
)
def test_time_is_up(initial_time, current_time, grace_time, expected_result):
assert_that(time_is_up(initial_time, current_time, grace_time)).is_equal_to(expected_result)
@pytest.mark.parametrize(
"time, expected_parsed_time",
[
(
datetime(2020, 7, 30, 19, 34, 2, 613338, tzinfo=timezone.utc),
datetime(2020, 7, 30, 19, 34, 2, 613338, tzinfo=timezone.utc),
),
(
datetime(2020, 7, 30, 10, 1, 1, tzinfo=timezone(timedelta(hours=1))),
datetime(2020, 7, 30, 10, 1, 1, tzinfo=timezone(timedelta(hours=1))),
),
],
)
def test_get_clustermgtd_heartbeat(time, expected_parsed_time, mocker):
mocker.patch(
"slurm_plugin.common.check_command_output",
return_value=f"some_random_stdout\n{time.strftime(TIMESTAMP_FORMAT)}",
)
assert_that(get_clustermgtd_heartbeat("some file path")).is_equal_to(expected_parsed_time) | tests/slurm_plugin/test_common.py |
from datetime import datetime, timedelta, timezone
import pytest
from assertpy import assert_that
from common.utils import time_is_up
from slurm_plugin.common import TIMESTAMP_FORMAT, get_clustermgtd_heartbeat
@pytest.mark.parametrize(
"initial_time, current_time, grace_time, expected_result",
[
(datetime(2020, 1, 1, 0, 0, 0), datetime(2020, 1, 1, 0, 0, 29), 30, False),
(datetime(2020, 1, 1, 0, 0, 0), datetime(2020, 1, 1, 0, 0, 30), 30, True),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours ahead of UTC, so this time stamp is actually 30 mins before initial_time
datetime(2020, 1, 1, 0, 30, 0, tzinfo=timezone(timedelta(hours=1))),
30 * 60,
False,
),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours ahead of UTC, so this time stamp is actually 30 mins after initial_time
datetime(2020, 1, 1, 1, 30, 0, tzinfo=timezone(timedelta(hours=1))),
30 * 60,
True,
),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours behind of UTC, so this time stamp is actually 1.5 hrs after initial_time
datetime(2020, 1, 1, 0, 30, 0, tzinfo=timezone(-timedelta(hours=1))),
90 * 60,
True,
),
(
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
# local timezone is 1 hours behind of UTC, so this time stamp is actually 1 hrs after initial_time
datetime(2020, 1, 1, 0, 0, 0, tzinfo=timezone(-timedelta(hours=1))),
90 * 60,
False,
),
],
)
def test_time_is_up(initial_time, current_time, grace_time, expected_result):
assert_that(time_is_up(initial_time, current_time, grace_time)).is_equal_to(expected_result)
@pytest.mark.parametrize(
"time, expected_parsed_time",
[
(
datetime(2020, 7, 30, 19, 34, 2, 613338, tzinfo=timezone.utc),
datetime(2020, 7, 30, 19, 34, 2, 613338, tzinfo=timezone.utc),
),
(
datetime(2020, 7, 30, 10, 1, 1, tzinfo=timezone(timedelta(hours=1))),
datetime(2020, 7, 30, 10, 1, 1, tzinfo=timezone(timedelta(hours=1))),
),
],
)
def test_get_clustermgtd_heartbeat(time, expected_parsed_time, mocker):
mocker.patch(
"slurm_plugin.common.check_command_output",
return_value=f"some_random_stdout\n{time.strftime(TIMESTAMP_FORMAT)}",
)
assert_that(get_clustermgtd_heartbeat("some file path")).is_equal_to(expected_parsed_time) | 0.763307 | 0.482795 |
from os import stat
from tkinter import *
import tkinter as tk
import serial
import time
from serial.tools.list_ports import comports
import sys
from PIL import Image, ImageTk
import struct
import os
# GUI Parameters
WINDOW_SIZE = "1600x900"
TEXT_COLOR = "white"
BACKGROUND_COLOR = "gray10"
connected = False
validated = False
# Serial Port Parameters
BAUD = 115200
TIMEOUT = 0.2
uart = 0
# Parameters Buffer
paramBuffer = [None]*20
def loadParams(pos,data):
global paramBuffer
paramBuffer[pos] = int(data)
def serial_ports():
if sys.platform.startswith("win"):
ports = ["COM%s" % (i + 1) for i in range(256)]
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
uart = serial.Serial(str(serial_ports()[0]), baudrate=BAUD, timeout=TIMEOUT)
presets_file = pd.read_excel(r'D:\kamscan presets\Presets.xlsx',index_col= 'Presets')
presets_numbers = presets_file.shape[0]
preset_id = 1
# Root
root = Tk()
root.configure(bg=BACKGROUND_COLOR)
root.geometry(WINDOW_SIZE)
root.resizable(False, False)
root.title("CamScan Tool v2.0")
version_label = Label(
root,
text="CamScan Tool v2.0",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
version_label.place(x=1490, y=880)
mode = IntVar()
duplexMode = IntVar()
d = True
"""----------------------------------------------------------------------------------"""
# Step Mode Frame
"""----------------------------------------------------------------------------------"""
def loadPresetData(id):
yaw_step_angle.insert(0,presets_file.iloc[id]["Rotation Step Angle"])
yaw_rotation_angle.insert(0,presets_file.iloc[id]["Rotation Angle"])
roll_step_angle.insert(0,presets_file.iloc[id]["Tilt Step Angle"])
roll_rotation_angle.insert(0,presets_file.iloc[id]["Tilt Rotation Angle"])
delay_between_steps.insert(0,presets_file.iloc[id]["Delay Between Steps"])
home_yaw.insert(0,presets_file.iloc[id]["Home Rotation"])
home_roll.insert(0,presets_file.iloc[id]["Home Tilt"])
yaw_speed_set.insert(0,presets_file.iloc[id]["Rotation Speed"])
roll_speed_set.insert(0,presets_file.iloc[id]["Tilt Speed"])
yaw_rotation_time.insert(0,presets_file.iloc[id]["Rotation Time"])
yaw_rotation_angle_c.insert(0,presets_file.iloc[id]["Rotation Angle (cont)"])
def clearPresetData():
yaw_step_angle.delete(0,END)
yaw_rotation_angle.delete(0,END)
roll_step_angle.delete(0,END)
roll_rotation_angle.delete(0,END)
delay_between_steps.delete(0,END)
home_yaw.delete(0,END)
home_roll.delete(0,END)
yaw_speed_set.delete(0,END)
roll_speed_set.delete(0,END)
yaw_rotation_time.delete(0,END)
yaw_rotation_angle_c.delete(0,END)
def cycleStepPresets():
clearPresetData()
global presets_numbers,preset_id
if preset_id >= presets_numbers:
preset_id = 0
loadPresetData(preset_id)
current_preset.configure(text=str(preset_id + 1))
preset_id += 1
def stepModeSelected():
yaw_step_angle.configure(state=NORMAL)
yaw_rotation_angle.configure(state=NORMAL)
roll_step_angle.configure(state=NORMAL)
roll_rotation_angle.configure(state=NORMAL)
delay_between_steps.configure(state=NORMAL)
home_roll.configure(state=NORMAL)
home_yaw.configure(state=NORMAL)
step_mode_enable_button.configure(fg="green4")
continuous_mode_enable_button.configure(fg = "red")
yaw_rotation_time.configure(state=DISABLED)
yaw_rotation_angle_c.configure(state=DISABLED)
step_mode_frame.configure(bg = "gray20")
yaw_step_angel_label.configure(bg = "gray20")
yaw_rotation_angel_label.configure(bg = "gray20")
roll_step_angel_label.configure(bg = "gray20")
roll_rotation_angel_label.configure(bg = "gray20")
delay_between_steps_label.configure(bg = "gray20")
home_yaw_label.configure(bg = "gray20")
home_roll_label.configure(bg = "gray20")
continuous_mode_frame.configure(bg="gray10" )
yaw_rotation_angle_c_label.configure(bg = "gray10")
yaw_rotation_time_label.configure(bg = "gray10")
global paramBuffer
mode.set(1)
def duplexModeSelected():
global d
d = not d
duplexMode.set(d)
if d == True:
duplex_mode_enable_button.configure(fg = "green4")
if d == False:
duplex_mode_enable_button.configure(fg = "red")
step_mode_enable_button = Radiobutton(
root,
text="Step Mode",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
command=stepModeSelected,
value=1,
variable=mode
)
step_mode_enable_button.place(x=50, y=30)
duplex_mode_enable_button = Radiobutton(
root,
text="Rotate Step Then Tilt Step",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
command=duplexModeSelected,
value=1,
variable=duplexMode
)
duplex_mode_enable_button.place(x=200, y=30)
current_preset_label = Label(
root,
text = "Current Preset :",
font = 10,
bg = BACKGROUND_COLOR,
fg = TEXT_COLOR
)
current_preset_label.place(x=1250,y=30)
current_preset = Label(
root,
text = str(preset_id),
font = 10,
bg = BACKGROUND_COLOR,
fg = "green2"
)
current_preset.place(x=1365,y = 30)
step_mode_presets_cycle_button = Button(
root,
text="Presets",
font=2,
fg="white",
bg="bisque4",
width=10,
height=1,
borderwidth=5,
command = cycleStepPresets
)
step_mode_presets_cycle_button.place(x=1400, y=20)
step_mode_frame = Frame(
root,
padx=5,
pady=5,
borderwidth=5,
relief="groove",
bg=BACKGROUND_COLOR
)
step_mode_frame.place(x=50, y=70)
yaw_step_angel_label = Label(
step_mode_frame,
text="Rotation Step Angle (Max: 45°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_step_angel_label.grid(row=0, column=0)
yaw_step_angle = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_step_angle.grid(row=0, column=1, padx=5, pady=10)
yaw_rotation_angel_label = Label(
step_mode_frame,
text="Rotation Angle (Max: 360°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_rotation_angel_label.grid(row=1, column=0)
yaw_rotation_angle = Entry(
step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_rotation_angle.grid(row=1, column=1, padx=5, pady=10)
roll_step_angel_label = Label(
step_mode_frame,
text="Tilt Step Angle (Max: 45°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_step_angel_label.grid(row=2, column=0)
roll_step_angle = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
roll_step_angle.grid(row=2, column=1, padx=5, pady=10)
roll_rotation_angel_label = Label(
step_mode_frame,
text="Tilt Rotation Angle (Max: 90°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_rotation_angel_label.grid(row=3, column=0)
roll_rotation_angle = Entry(
step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
roll_rotation_angle.grid(row=3, column=1, padx=5, pady=10)
delay_between_steps_label = Label(
step_mode_frame,
text="Delay Between Steps (min:1s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
delay_between_steps_label.grid(row=4, column=0)
delay_between_steps = Entry(
step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
delay_between_steps.grid(row=4, column=1, padx=5, pady=10)
home_roll_label = Label(
step_mode_frame,
text="Home Tilt (-90°~+90)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
home_roll_label.grid(row=5, column=0)
home_roll = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
home_roll.grid(row=5, column=1, padx=5, pady=10)
home_yaw_label = Label(
step_mode_frame,
text="Home Rotation (0°~360°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
home_yaw_label.grid(row=6, column=0)
home_yaw = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
home_yaw.grid(row=6, column=1, padx=5, pady=10)
"""----------------------------------------------------------------------"""
# Continous Mode Frame
"""----------------------------------------------------------------------"""
def continuousModeSelected():
yaw_step_angle.configure(state=DISABLED)
yaw_rotation_angle.configure(state=DISABLED)
roll_step_angle.configure(state=DISABLED)
roll_rotation_angle.configure(state=DISABLED)
delay_between_steps.configure(state=DISABLED)
home_roll.configure(state=DISABLED)
home_yaw.configure(state=DISABLED)
step_mode_enable_button.configure(fg="red")
continuous_mode_enable_button.configure(fg = "green4")
yaw_rotation_time.configure(state=NORMAL)
yaw_rotation_angle_c.configure(state=NORMAL)
step_mode_frame.configure(bg = "gray10")
continuous_mode_frame.configure(bg="gray20" )
yaw_rotation_angle_c_label.configure(bg = "gray20")
yaw_rotation_time_label.configure(bg = "gray20")
yaw_step_angel_label.configure(bg = "gray10")
yaw_rotation_angel_label.configure(bg = "gray10")
roll_step_angel_label.configure(bg = "gray10")
roll_rotation_angel_label.configure(bg = "gray10")
delay_between_steps_label.configure(bg = "gray10")
home_yaw_label.configure(bg = "gray10")
home_roll_label.configure(bg = "gray10")
global paramBuffer
mode.set(0)
continuous_mode_enable_button = Radiobutton(
root,
text="Continuous Mode",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
command=continuousModeSelected,
value = 0,
variable=mode
)
continuous_mode_enable_button.place(x=50, y=410)
continuous_mode_frame = Frame(
root, padx=5, pady=5, borderwidth=5, relief="groove", bg=BACKGROUND_COLOR
)
continuous_mode_frame.place(x=50, y=450)
yaw_rotation_time_label = Label(
continuous_mode_frame,
text="Rotation Time (min:1s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_rotation_time_label.grid(row=0, column=0)
yaw_rotation_time = Entry(
continuous_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_rotation_time.grid(row=0, column=1, padx=5, pady=10)
yaw_rotation_angle_c_label = Label(
continuous_mode_frame,
text="Rotation Angel (max:360°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_rotation_angle_c_label.grid(row=1, column=0)
yaw_rotation_angle_c = Entry(
continuous_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_rotation_angle_c.grid(row=1, column=1, padx=5, pady=10)
"""----------------------------------------------------------------------------------"""
# General Settings Frame
"""----------------------------------------------------------------------------------"""
camera_placement = IntVar()
roll_direction = IntVar()
def frontSelected():
camera_position_select_front_button.configure(fg='green4')
camera_position_select_top_button.configure(fg = 'red')
def topSelected():
camera_position_select_front_button.configure(fg='red')
camera_position_select_top_button.configure(fg = 'green4')
def fwdSelected():
roll_cw_select_button.configure(fg='green4')
roll_ccw_select_button.configure(fg = 'red')
def bwdSelected():
roll_cw_select_button.configure(fg='red')
roll_ccw_select_button.configure(fg = 'green4')
general_settings_frame = Frame(
root, padx=5, pady=5, borderwidth=0, relief="groove", bg=BACKGROUND_COLOR
)
general_settings_frame.place(x=50, y=570)
camera_position_select_label = Label(
general_settings_frame,
text="Camera Position:",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
camera_position_select_label.grid(row=0, column=0)
camera_position_select_front_button = Radiobutton(
general_settings_frame,
text="Front",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=2,
variable = camera_placement,
command=frontSelected
)
camera_position_select_front_button.grid(row=0, column=1)
camera_position_select_top_button = Radiobutton(
general_settings_frame,
text="TOP",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=3,
variable=camera_placement,
command=topSelected
)
camera_position_select_top_button.grid(row=0, column=2)
roll_direction_select_label = Label(
general_settings_frame,
text="Tilt Direction:",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_direction_select_label.grid(row=1, column=0)
roll_cw_select_button = Radiobutton(
general_settings_frame,
text="Forward",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=1,
variable = roll_direction,
command=fwdSelected
)
roll_cw_select_button.grid(row=1, column=1)
roll_ccw_select_button = Radiobutton(
general_settings_frame,
text="Backward",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=0,
variable = roll_direction,
command=bwdSelected
)
roll_ccw_select_button.grid(row=1, column=2)
yaw_speed_set_label = Label(
general_settings_frame,
text="Rotation Speed (Max: 100°/s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_speed_set_label.grid(row=2, column=0)
yaw_speed_set = Entry(
general_settings_frame,
width=5,
font=3,
disabledbackground="gray50"
)
yaw_speed_set.grid(row=2, column=1, padx=5, pady=10)
roll_speed_set_label = Label(
general_settings_frame,
text="Tilt Speed (Max: 100°/s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_speed_set_label.grid(row=3, column=0)
roll_speed_set = Entry(
general_settings_frame,
width=5,
font=3,
disabledbackground="gray50"
)
roll_speed_set.grid(row=3, column=1, padx=5, pady=10)
"""----------------------------------------------------------------------"""
# Validation
"""----------------------------------------------------------------------"""
validation = {
"max_yaw_step_angle":45,"min_yaw_step_angle":1,
"max_yaw_rotation_angle":360,"min_yaw_rotation_angle":0,
"max_roll_step_angle":45,"min_roll_step_angle":1,
"max_roll_rotation_angle":90,"min_roll_rotation_angle":0,
"max_yaw_home":360,"min_yaw_home":0,
"max_cont_angle":360,"min_cont_angle":0,
"max_motor_speed":100,"min_motor_speed":10,
"max_roll_home":90,
"min_cont_time":1,
"min_delay_between_steps":1,
}
def validate():
if(
(int(yaw_step_angle.get()) <= validation["max_yaw_step_angle"] and int(yaw_step_angle.get()) >= validation["min_yaw_step_angle"]) and
(int(yaw_rotation_angle.get()) <= validation["max_yaw_rotation_angle"] and int(yaw_rotation_angle.get()) >= validation["min_yaw_rotation_angle"]) and
((int(yaw_rotation_angle.get()) % int(yaw_step_angle.get())) == 0) and
(int(roll_step_angle.get()) <= validation["max_roll_step_angle"] and int(roll_step_angle.get()) >= validation["min_roll_step_angle"]) and
(int(roll_rotation_angle.get()) <= validation["max_roll_rotation_angle"] and int(roll_rotation_angle.get()) >= validation["min_roll_rotation_angle"]) and
((int(roll_rotation_angle.get()) % int(roll_step_angle.get())) == 0) and
(int(home_yaw.get()) <= validation["max_yaw_home"] and int(home_yaw.get()) >= validation["min_yaw_home"]) and
(int(yaw_rotation_angle_c.get()) <= validation["max_cont_angle"] and int(yaw_rotation_angle_c.get()) >= validation["min_cont_angle"]) and
(int(yaw_speed_set.get()) <= validation["max_motor_speed"] and int(yaw_speed_set.get()) >= validation["min_motor_speed"]) and
(int(roll_speed_set.get()) <= validation["max_motor_speed"] and int(roll_speed_set.get()) >= validation["min_motor_speed"]) and
(int(home_roll.get()) <= validation["max_roll_home"]) and
(int(yaw_rotation_time.get()) >= validation["min_cont_time"]) and
(int(delay_between_steps.get()) >= validation["min_delay_between_steps"])
):
global validated,connected,paramBuffer
# loadParams(0,mode.get())
# loadParams(1,yaw_step_angle.get())
# loadParams(2,yaw_rotation_angle.get())
# loadParams(3,roll_step_angle.get())
# loadParams(4,roll_rotation_angle.get())
# loadParams(5,delay_between_steps.get())
# loadParams(6,home_roll.get())
# loadParams(7,home_yaw.get())
# loadParams(8,yaw_rotation_time.get())
# loadParams(9,yaw_rotation_angle_c.get())
# loadParams(10,camera_placement.get())
# loadParams(11,roll_direction.get())
# loadParams(12,yaw_speed_set.get())
# loadParams(13,roll_speed_set.get())
# loadParams(14,duplexMode.get())
loadParams(15,int(lr))
loadParams(16,int(ly))
loadParams(17,1)
loadParams(18,1)
loadParams(19,0)
validated = True
validate_button.configure(bg="dodger blue")
print("Validated")
print(paramBuffer)
else:
validated = False
validate_button.configure(bg="red")
print("Error")
if validated and connected:
upload_button.configure(state=NORMAL)
else:
upload_button.configure(state=DISABLED)
"""----------------------------------------------------------------------------------"""
# Buttons Frame
"""----------------------------------------------------------------------------------"""
lr = True
ly = True
def lockRoll():
global lr
lr = not lr
loadParams(15,int(lr))
loadParams(17,int(0))
loadParams(18,int(0))
upload(0)
def lockYaw():
global ly
ly = not ly
loadParams(16,int(ly))
loadParams(17,int(0))
loadParams(18,int(0))
upload(0)
def homeRoll():
loadParams(17,int(1))
loadParams(18,int(0))
upload(0)
def homeYaw():
loadParams(17,int(0))
loadParams(18,int(1))
upload(0)
buttons_frame = Frame(
root, padx=5, pady=5, borderwidth=0, relief="groove", bg=BACKGROUND_COLOR
)
buttons_frame.place(x=50, y=730)
validate_button = Button(
buttons_frame,
text="Validate & save",
font=5,
fg="white",
bg="red",
width=22,
borderwidth=5,
command=validate
)
validate_button.grid(row=0, column=0, columnspan=2)
lock_roll_motor_button = Button(
buttons_frame,
text="Lock Tilt",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command=lockRoll
)
lock_roll_motor_button.grid(row=1, column=0)
lock_yaw_motor_button = Button(
buttons_frame,
text="Lock Rotation",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command = lockYaw
)
lock_yaw_motor_button.grid(row=1, column=1)
home_roll_axis_button = Button(
buttons_frame,
text="Home Tilt",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command=homeRoll
)
home_roll_axis_button.grid(row=2, column=0)
home_yaw_axis_button = Button(
buttons_frame,
text="Home Rot",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command=homeYaw
)
home_yaw_axis_button.grid(row=2, column=1)
def upload(af):
uart = serial.Serial(str(serial_ports()[0]), baudrate=BAUD, timeout=TIMEOUT)
loadParams(19,af)
time.sleep(2)
buf = []
for d in paramBuffer:
buf.append(struct.pack(">H",d))
for b in buf:
uart.write(b)
#time.sleep(0.1)
root.after(1000, updateImage)
print(paramBuffer)
def updateImage():
os.chdir("C:\\Users\\<NAME>\\Pictures\\digiCamControl\\Session1\\")
try:
image = Image.open(os.listdir()[-1])
resize_image = image.resize((1100, 619))
img = ImageTk.PhotoImage(resize_image)
image_frame = Label(image=img, borderwidth=2, relief="groove")
image_frame.image = img
image_frame.place(x=470, y=100)
except:
print("folder is empty")
root.after(1000, updateImage)
upload_button = Button(
buttons_frame,
text="Upload & Start",
font=5,
fg="white",
bg="gray50",
width=22,
borderwidth=5,
state=DISABLED,
command = lambda:upload(255)
)
upload_button.grid(row=3, column=0, columnspan=2)
def connect():
try:
uart = serial.Serial(str(serial_ports()[0]), baudrate=BAUD, timeout=TIMEOUT)
global connected,validated
connected = True
connect_button.configure(bg='green3',text="Connected")
lock_roll_motor_button.configure(bg="dodger blue",state=NORMAL)
lock_yaw_motor_button.configure(bg="dodger blue",state=NORMAL)
home_yaw_axis_button.configure(bg="dodger blue",state=NORMAL)
home_roll_axis_button.configure(bg="dodger blue",state=NORMAL)
if validated and connected:
upload_button.configure(bg="dodger blue",state=NORMAL)
except:
connected = False
connect_button.configure(bg='red',text="Connect")
lock_roll_motor_button.configure(bg="gray50",state=DISABLED)
lock_yaw_motor_button.configure(bg="gray50",state=DISABLED)
home_yaw_axis_button.configure(bg="gray50",state=DISABLED)
home_roll_axis_button.configure(bg="gray50",state=DISABLED)
if not connected or not validated:
upload_button.configure(bg="gray50",state=DISABLED)
print("Error Connecting to Device")
connect_button = Button(
root,
text="Connect",
font=5,
fg="white",
bg="red",
width=20,
borderwidth=5,
command=connect
)
connect_button.place(x = 1250 ,y = 850)
loadPresetData(0)
stepModeSelected()
duplexModeSelected()
topSelected()
bwdSelected()
root.mainloop() | GUI/gui.py | from os import stat
from tkinter import *
import tkinter as tk
import serial
import time
from serial.tools.list_ports import comports
import sys
from PIL import Image, ImageTk
import struct
import os
# GUI Parameters
WINDOW_SIZE = "1600x900"
TEXT_COLOR = "white"
BACKGROUND_COLOR = "gray10"
connected = False
validated = False
# Serial Port Parameters
BAUD = 115200
TIMEOUT = 0.2
uart = 0
# Parameters Buffer
paramBuffer = [None]*20
def loadParams(pos,data):
global paramBuffer
paramBuffer[pos] = int(data)
def serial_ports():
if sys.platform.startswith("win"):
ports = ["COM%s" % (i + 1) for i in range(256)]
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
uart = serial.Serial(str(serial_ports()[0]), baudrate=BAUD, timeout=TIMEOUT)
presets_file = pd.read_excel(r'D:\kamscan presets\Presets.xlsx',index_col= 'Presets')
presets_numbers = presets_file.shape[0]
preset_id = 1
# Root
root = Tk()
root.configure(bg=BACKGROUND_COLOR)
root.geometry(WINDOW_SIZE)
root.resizable(False, False)
root.title("CamScan Tool v2.0")
version_label = Label(
root,
text="CamScan Tool v2.0",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
version_label.place(x=1490, y=880)
mode = IntVar()
duplexMode = IntVar()
d = True
"""----------------------------------------------------------------------------------"""
# Step Mode Frame
"""----------------------------------------------------------------------------------"""
def loadPresetData(id):
yaw_step_angle.insert(0,presets_file.iloc[id]["Rotation Step Angle"])
yaw_rotation_angle.insert(0,presets_file.iloc[id]["Rotation Angle"])
roll_step_angle.insert(0,presets_file.iloc[id]["Tilt Step Angle"])
roll_rotation_angle.insert(0,presets_file.iloc[id]["Tilt Rotation Angle"])
delay_between_steps.insert(0,presets_file.iloc[id]["Delay Between Steps"])
home_yaw.insert(0,presets_file.iloc[id]["Home Rotation"])
home_roll.insert(0,presets_file.iloc[id]["Home Tilt"])
yaw_speed_set.insert(0,presets_file.iloc[id]["Rotation Speed"])
roll_speed_set.insert(0,presets_file.iloc[id]["Tilt Speed"])
yaw_rotation_time.insert(0,presets_file.iloc[id]["Rotation Time"])
yaw_rotation_angle_c.insert(0,presets_file.iloc[id]["Rotation Angle (cont)"])
def clearPresetData():
yaw_step_angle.delete(0,END)
yaw_rotation_angle.delete(0,END)
roll_step_angle.delete(0,END)
roll_rotation_angle.delete(0,END)
delay_between_steps.delete(0,END)
home_yaw.delete(0,END)
home_roll.delete(0,END)
yaw_speed_set.delete(0,END)
roll_speed_set.delete(0,END)
yaw_rotation_time.delete(0,END)
yaw_rotation_angle_c.delete(0,END)
def cycleStepPresets():
clearPresetData()
global presets_numbers,preset_id
if preset_id >= presets_numbers:
preset_id = 0
loadPresetData(preset_id)
current_preset.configure(text=str(preset_id + 1))
preset_id += 1
def stepModeSelected():
yaw_step_angle.configure(state=NORMAL)
yaw_rotation_angle.configure(state=NORMAL)
roll_step_angle.configure(state=NORMAL)
roll_rotation_angle.configure(state=NORMAL)
delay_between_steps.configure(state=NORMAL)
home_roll.configure(state=NORMAL)
home_yaw.configure(state=NORMAL)
step_mode_enable_button.configure(fg="green4")
continuous_mode_enable_button.configure(fg = "red")
yaw_rotation_time.configure(state=DISABLED)
yaw_rotation_angle_c.configure(state=DISABLED)
step_mode_frame.configure(bg = "gray20")
yaw_step_angel_label.configure(bg = "gray20")
yaw_rotation_angel_label.configure(bg = "gray20")
roll_step_angel_label.configure(bg = "gray20")
roll_rotation_angel_label.configure(bg = "gray20")
delay_between_steps_label.configure(bg = "gray20")
home_yaw_label.configure(bg = "gray20")
home_roll_label.configure(bg = "gray20")
continuous_mode_frame.configure(bg="gray10" )
yaw_rotation_angle_c_label.configure(bg = "gray10")
yaw_rotation_time_label.configure(bg = "gray10")
global paramBuffer
mode.set(1)
def duplexModeSelected():
global d
d = not d
duplexMode.set(d)
if d == True:
duplex_mode_enable_button.configure(fg = "green4")
if d == False:
duplex_mode_enable_button.configure(fg = "red")
step_mode_enable_button = Radiobutton(
root,
text="Step Mode",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
command=stepModeSelected,
value=1,
variable=mode
)
step_mode_enable_button.place(x=50, y=30)
duplex_mode_enable_button = Radiobutton(
root,
text="Rotate Step Then Tilt Step",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
command=duplexModeSelected,
value=1,
variable=duplexMode
)
duplex_mode_enable_button.place(x=200, y=30)
current_preset_label = Label(
root,
text = "Current Preset :",
font = 10,
bg = BACKGROUND_COLOR,
fg = TEXT_COLOR
)
current_preset_label.place(x=1250,y=30)
current_preset = Label(
root,
text = str(preset_id),
font = 10,
bg = BACKGROUND_COLOR,
fg = "green2"
)
current_preset.place(x=1365,y = 30)
step_mode_presets_cycle_button = Button(
root,
text="Presets",
font=2,
fg="white",
bg="bisque4",
width=10,
height=1,
borderwidth=5,
command = cycleStepPresets
)
step_mode_presets_cycle_button.place(x=1400, y=20)
step_mode_frame = Frame(
root,
padx=5,
pady=5,
borderwidth=5,
relief="groove",
bg=BACKGROUND_COLOR
)
step_mode_frame.place(x=50, y=70)
yaw_step_angel_label = Label(
step_mode_frame,
text="Rotation Step Angle (Max: 45°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_step_angel_label.grid(row=0, column=0)
yaw_step_angle = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_step_angle.grid(row=0, column=1, padx=5, pady=10)
yaw_rotation_angel_label = Label(
step_mode_frame,
text="Rotation Angle (Max: 360°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_rotation_angel_label.grid(row=1, column=0)
yaw_rotation_angle = Entry(
step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_rotation_angle.grid(row=1, column=1, padx=5, pady=10)
roll_step_angel_label = Label(
step_mode_frame,
text="Tilt Step Angle (Max: 45°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_step_angel_label.grid(row=2, column=0)
roll_step_angle = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
roll_step_angle.grid(row=2, column=1, padx=5, pady=10)
roll_rotation_angel_label = Label(
step_mode_frame,
text="Tilt Rotation Angle (Max: 90°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_rotation_angel_label.grid(row=3, column=0)
roll_rotation_angle = Entry(
step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
roll_rotation_angle.grid(row=3, column=1, padx=5, pady=10)
delay_between_steps_label = Label(
step_mode_frame,
text="Delay Between Steps (min:1s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
delay_between_steps_label.grid(row=4, column=0)
delay_between_steps = Entry(
step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
delay_between_steps.grid(row=4, column=1, padx=5, pady=10)
home_roll_label = Label(
step_mode_frame,
text="Home Tilt (-90°~+90)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
home_roll_label.grid(row=5, column=0)
home_roll = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
home_roll.grid(row=5, column=1, padx=5, pady=10)
home_yaw_label = Label(
step_mode_frame,
text="Home Rotation (0°~360°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
home_yaw_label.grid(row=6, column=0)
home_yaw = Entry(step_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
home_yaw.grid(row=6, column=1, padx=5, pady=10)
"""----------------------------------------------------------------------"""
# Continous Mode Frame
"""----------------------------------------------------------------------"""
def continuousModeSelected():
yaw_step_angle.configure(state=DISABLED)
yaw_rotation_angle.configure(state=DISABLED)
roll_step_angle.configure(state=DISABLED)
roll_rotation_angle.configure(state=DISABLED)
delay_between_steps.configure(state=DISABLED)
home_roll.configure(state=DISABLED)
home_yaw.configure(state=DISABLED)
step_mode_enable_button.configure(fg="red")
continuous_mode_enable_button.configure(fg = "green4")
yaw_rotation_time.configure(state=NORMAL)
yaw_rotation_angle_c.configure(state=NORMAL)
step_mode_frame.configure(bg = "gray10")
continuous_mode_frame.configure(bg="gray20" )
yaw_rotation_angle_c_label.configure(bg = "gray20")
yaw_rotation_time_label.configure(bg = "gray20")
yaw_step_angel_label.configure(bg = "gray10")
yaw_rotation_angel_label.configure(bg = "gray10")
roll_step_angel_label.configure(bg = "gray10")
roll_rotation_angel_label.configure(bg = "gray10")
delay_between_steps_label.configure(bg = "gray10")
home_yaw_label.configure(bg = "gray10")
home_roll_label.configure(bg = "gray10")
global paramBuffer
mode.set(0)
continuous_mode_enable_button = Radiobutton(
root,
text="Continuous Mode",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
command=continuousModeSelected,
value = 0,
variable=mode
)
continuous_mode_enable_button.place(x=50, y=410)
continuous_mode_frame = Frame(
root, padx=5, pady=5, borderwidth=5, relief="groove", bg=BACKGROUND_COLOR
)
continuous_mode_frame.place(x=50, y=450)
yaw_rotation_time_label = Label(
continuous_mode_frame,
text="Rotation Time (min:1s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_rotation_time_label.grid(row=0, column=0)
yaw_rotation_time = Entry(
continuous_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_rotation_time.grid(row=0, column=1, padx=5, pady=10)
yaw_rotation_angle_c_label = Label(
continuous_mode_frame,
text="Rotation Angel (max:360°)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_rotation_angle_c_label.grid(row=1, column=0)
yaw_rotation_angle_c = Entry(
continuous_mode_frame,
width=5,
font=3,
disabledbackground="gray80"
)
yaw_rotation_angle_c.grid(row=1, column=1, padx=5, pady=10)
"""----------------------------------------------------------------------------------"""
# General Settings Frame
"""----------------------------------------------------------------------------------"""
camera_placement = IntVar()
roll_direction = IntVar()
def frontSelected():
camera_position_select_front_button.configure(fg='green4')
camera_position_select_top_button.configure(fg = 'red')
def topSelected():
camera_position_select_front_button.configure(fg='red')
camera_position_select_top_button.configure(fg = 'green4')
def fwdSelected():
roll_cw_select_button.configure(fg='green4')
roll_ccw_select_button.configure(fg = 'red')
def bwdSelected():
roll_cw_select_button.configure(fg='red')
roll_ccw_select_button.configure(fg = 'green4')
general_settings_frame = Frame(
root, padx=5, pady=5, borderwidth=0, relief="groove", bg=BACKGROUND_COLOR
)
general_settings_frame.place(x=50, y=570)
camera_position_select_label = Label(
general_settings_frame,
text="Camera Position:",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
camera_position_select_label.grid(row=0, column=0)
camera_position_select_front_button = Radiobutton(
general_settings_frame,
text="Front",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=2,
variable = camera_placement,
command=frontSelected
)
camera_position_select_front_button.grid(row=0, column=1)
camera_position_select_top_button = Radiobutton(
general_settings_frame,
text="TOP",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=3,
variable=camera_placement,
command=topSelected
)
camera_position_select_top_button.grid(row=0, column=2)
roll_direction_select_label = Label(
general_settings_frame,
text="Tilt Direction:",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_direction_select_label.grid(row=1, column=0)
roll_cw_select_button = Radiobutton(
general_settings_frame,
text="Forward",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=1,
variable = roll_direction,
command=fwdSelected
)
roll_cw_select_button.grid(row=1, column=1)
roll_ccw_select_button = Radiobutton(
general_settings_frame,
text="Backward",
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR,
font=10,
activebackground=BACKGROUND_COLOR,
activeforeground=TEXT_COLOR,
value=0,
variable = roll_direction,
command=bwdSelected
)
roll_ccw_select_button.grid(row=1, column=2)
yaw_speed_set_label = Label(
general_settings_frame,
text="Rotation Speed (Max: 100°/s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
yaw_speed_set_label.grid(row=2, column=0)
yaw_speed_set = Entry(
general_settings_frame,
width=5,
font=3,
disabledbackground="gray50"
)
yaw_speed_set.grid(row=2, column=1, padx=5, pady=10)
roll_speed_set_label = Label(
general_settings_frame,
text="Tilt Speed (Max: 100°/s)",
font=3,
bg=BACKGROUND_COLOR,
fg=TEXT_COLOR
)
roll_speed_set_label.grid(row=3, column=0)
roll_speed_set = Entry(
general_settings_frame,
width=5,
font=3,
disabledbackground="gray50"
)
roll_speed_set.grid(row=3, column=1, padx=5, pady=10)
"""----------------------------------------------------------------------"""
# Validation
"""----------------------------------------------------------------------"""
validation = {
"max_yaw_step_angle":45,"min_yaw_step_angle":1,
"max_yaw_rotation_angle":360,"min_yaw_rotation_angle":0,
"max_roll_step_angle":45,"min_roll_step_angle":1,
"max_roll_rotation_angle":90,"min_roll_rotation_angle":0,
"max_yaw_home":360,"min_yaw_home":0,
"max_cont_angle":360,"min_cont_angle":0,
"max_motor_speed":100,"min_motor_speed":10,
"max_roll_home":90,
"min_cont_time":1,
"min_delay_between_steps":1,
}
def validate():
if(
(int(yaw_step_angle.get()) <= validation["max_yaw_step_angle"] and int(yaw_step_angle.get()) >= validation["min_yaw_step_angle"]) and
(int(yaw_rotation_angle.get()) <= validation["max_yaw_rotation_angle"] and int(yaw_rotation_angle.get()) >= validation["min_yaw_rotation_angle"]) and
((int(yaw_rotation_angle.get()) % int(yaw_step_angle.get())) == 0) and
(int(roll_step_angle.get()) <= validation["max_roll_step_angle"] and int(roll_step_angle.get()) >= validation["min_roll_step_angle"]) and
(int(roll_rotation_angle.get()) <= validation["max_roll_rotation_angle"] and int(roll_rotation_angle.get()) >= validation["min_roll_rotation_angle"]) and
((int(roll_rotation_angle.get()) % int(roll_step_angle.get())) == 0) and
(int(home_yaw.get()) <= validation["max_yaw_home"] and int(home_yaw.get()) >= validation["min_yaw_home"]) and
(int(yaw_rotation_angle_c.get()) <= validation["max_cont_angle"] and int(yaw_rotation_angle_c.get()) >= validation["min_cont_angle"]) and
(int(yaw_speed_set.get()) <= validation["max_motor_speed"] and int(yaw_speed_set.get()) >= validation["min_motor_speed"]) and
(int(roll_speed_set.get()) <= validation["max_motor_speed"] and int(roll_speed_set.get()) >= validation["min_motor_speed"]) and
(int(home_roll.get()) <= validation["max_roll_home"]) and
(int(yaw_rotation_time.get()) >= validation["min_cont_time"]) and
(int(delay_between_steps.get()) >= validation["min_delay_between_steps"])
):
global validated,connected,paramBuffer
# loadParams(0,mode.get())
# loadParams(1,yaw_step_angle.get())
# loadParams(2,yaw_rotation_angle.get())
# loadParams(3,roll_step_angle.get())
# loadParams(4,roll_rotation_angle.get())
# loadParams(5,delay_between_steps.get())
# loadParams(6,home_roll.get())
# loadParams(7,home_yaw.get())
# loadParams(8,yaw_rotation_time.get())
# loadParams(9,yaw_rotation_angle_c.get())
# loadParams(10,camera_placement.get())
# loadParams(11,roll_direction.get())
# loadParams(12,yaw_speed_set.get())
# loadParams(13,roll_speed_set.get())
# loadParams(14,duplexMode.get())
loadParams(15,int(lr))
loadParams(16,int(ly))
loadParams(17,1)
loadParams(18,1)
loadParams(19,0)
validated = True
validate_button.configure(bg="dodger blue")
print("Validated")
print(paramBuffer)
else:
validated = False
validate_button.configure(bg="red")
print("Error")
if validated and connected:
upload_button.configure(state=NORMAL)
else:
upload_button.configure(state=DISABLED)
"""----------------------------------------------------------------------------------"""
# Buttons Frame
"""----------------------------------------------------------------------------------"""
lr = True
ly = True
def lockRoll():
global lr
lr = not lr
loadParams(15,int(lr))
loadParams(17,int(0))
loadParams(18,int(0))
upload(0)
def lockYaw():
global ly
ly = not ly
loadParams(16,int(ly))
loadParams(17,int(0))
loadParams(18,int(0))
upload(0)
def homeRoll():
loadParams(17,int(1))
loadParams(18,int(0))
upload(0)
def homeYaw():
loadParams(17,int(0))
loadParams(18,int(1))
upload(0)
buttons_frame = Frame(
root, padx=5, pady=5, borderwidth=0, relief="groove", bg=BACKGROUND_COLOR
)
buttons_frame.place(x=50, y=730)
validate_button = Button(
buttons_frame,
text="Validate & save",
font=5,
fg="white",
bg="red",
width=22,
borderwidth=5,
command=validate
)
validate_button.grid(row=0, column=0, columnspan=2)
lock_roll_motor_button = Button(
buttons_frame,
text="Lock Tilt",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command=lockRoll
)
lock_roll_motor_button.grid(row=1, column=0)
lock_yaw_motor_button = Button(
buttons_frame,
text="Lock Rotation",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command = lockYaw
)
lock_yaw_motor_button.grid(row=1, column=1)
home_roll_axis_button = Button(
buttons_frame,
text="Home Tilt",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command=homeRoll
)
home_roll_axis_button.grid(row=2, column=0)
home_yaw_axis_button = Button(
buttons_frame,
text="Home Rot",
font=5,
fg="white",
bg="gray50",
width=10,
borderwidth=4,
state=DISABLED,
command=homeYaw
)
home_yaw_axis_button.grid(row=2, column=1)
def upload(af):
uart = serial.Serial(str(serial_ports()[0]), baudrate=BAUD, timeout=TIMEOUT)
loadParams(19,af)
time.sleep(2)
buf = []
for d in paramBuffer:
buf.append(struct.pack(">H",d))
for b in buf:
uart.write(b)
#time.sleep(0.1)
root.after(1000, updateImage)
print(paramBuffer)
def updateImage():
os.chdir("C:\\Users\\<NAME>\\Pictures\\digiCamControl\\Session1\\")
try:
image = Image.open(os.listdir()[-1])
resize_image = image.resize((1100, 619))
img = ImageTk.PhotoImage(resize_image)
image_frame = Label(image=img, borderwidth=2, relief="groove")
image_frame.image = img
image_frame.place(x=470, y=100)
except:
print("folder is empty")
root.after(1000, updateImage)
upload_button = Button(
buttons_frame,
text="Upload & Start",
font=5,
fg="white",
bg="gray50",
width=22,
borderwidth=5,
state=DISABLED,
command = lambda:upload(255)
)
upload_button.grid(row=3, column=0, columnspan=2)
def connect():
try:
uart = serial.Serial(str(serial_ports()[0]), baudrate=BAUD, timeout=TIMEOUT)
global connected,validated
connected = True
connect_button.configure(bg='green3',text="Connected")
lock_roll_motor_button.configure(bg="dodger blue",state=NORMAL)
lock_yaw_motor_button.configure(bg="dodger blue",state=NORMAL)
home_yaw_axis_button.configure(bg="dodger blue",state=NORMAL)
home_roll_axis_button.configure(bg="dodger blue",state=NORMAL)
if validated and connected:
upload_button.configure(bg="dodger blue",state=NORMAL)
except:
connected = False
connect_button.configure(bg='red',text="Connect")
lock_roll_motor_button.configure(bg="gray50",state=DISABLED)
lock_yaw_motor_button.configure(bg="gray50",state=DISABLED)
home_yaw_axis_button.configure(bg="gray50",state=DISABLED)
home_roll_axis_button.configure(bg="gray50",state=DISABLED)
if not connected or not validated:
upload_button.configure(bg="gray50",state=DISABLED)
print("Error Connecting to Device")
connect_button = Button(
root,
text="Connect",
font=5,
fg="white",
bg="red",
width=20,
borderwidth=5,
command=connect
)
connect_button.place(x = 1250 ,y = 850)
loadPresetData(0)
stepModeSelected()
duplexModeSelected()
topSelected()
bwdSelected()
root.mainloop() | 0.246896 | 0.143908 |
import xml.etree.ElementTree as ET
import time
import select
from io import StringIO
from threading import Thread, Event, Lock
from os import read
from .coqapi import Ok, Err
from .xmltype import *
class CoqHandler:
    """ElementTree parser target that translates coqtop's XML stream into
    calls on the state manager (command outcomes) and the printer
    (goals, info messages).

    Parsing is driven by a small state machine held in ``currentProcess``;
    its string values ('value', 'goals_fg', 'fg', 'goal', 'goal_id',
    'goal_hyps', 'goals_bg', 'waitmessage', 'message', ...) mirror the
    nesting of the tags coqtop emits.
    """

    def __init__(self, state_manager, printer):
        self.printer = printer
        self.state_manager = state_manager
        self.currentContent = ""    # text accumulated for the element being read
        self.oldProcess = None      # state saved while a nested <message> is handled
        self.currentProcess = None  # current state-machine state (None = idle)
        self.messageLevel = None    # 'val' attribute of the last <message_level>
        self.val = None             # 'good'/'fail' from the enclosing <value>
        self.state_id = None        # state id carried by a <state_id> inside <value>
        self.nextFlush = True       # flush the info panel when the next <value> closes
        self.goals = None
        self.goals_fg = []          # foreground goals, collected as Goal objects
        self.goals_bg = 0           # background goals are only counted
        self.goals_shelved = 0      # shelved goals are only counted
        self.goals_given_up = 0     # given-up goals are only counted
        self.goal_id = None
        self.goal_hyps = []         # hypotheses of the goal currently being read
        self.goal_ccl = None

    # Called when an element starts.
    def start(self, tag, attributes):
        if tag == 'value':
            self.currentProcess = 'value'
            self.val = attributes['val']
            # loc_s/loc_e delimit the offending input span on errors (optional).
            self.loc_s = None if not 'loc_s' in attributes else attributes['loc_s']
            self.loc_e = None if not 'loc_e' in attributes else attributes['loc_e']
        if tag == 'option' and attributes['val'] == 'none' and self.currentProcess == 'value':
            # <option val="none"> inside a <value>: there are no goals to show.
            self.printer.addGoal(None)
        elif tag == 'goals' and self.currentProcess == 'value':
            self.currentProcess = 'goals_fg'
        elif tag == 'list' and self.currentProcess == 'goals_fg':
            self.currentProcess = 'fg'
        elif tag == 'goal' and self.currentProcess == 'fg':
            self.currentProcess = 'goal'
        elif tag == 'pair' and self.currentProcess == 'goals_bg':
            self.currentProcess = 'goals_bg_in'
        elif tag == 'goal' and self.currentProcess == 'goals_bg_in':
            self.goals_bg += 1
            # TODO
        elif tag == 'goal' and self.currentProcess == 'goals_shelved':
            self.goals_shelved += 1
            # TODO
        elif tag == 'goal' and self.currentProcess == 'goals_given_up':
            self.goals_given_up += 1
        elif tag == 'string' and self.currentProcess == 'goal':
            self.currentProcess = 'goal_id'
        elif tag == 'list' and self.currentProcess == 'goal':
            self.currentProcess = 'goal_hyps'
        elif tag == 'state_id' and self.currentProcess == 'value':
            self.state_id = attributes['val']
        elif tag == 'feedback_content' and attributes['val'] == 'message':
            self.currentProcess = 'waitmessage'
        elif tag == 'feedback_content' and attributes['val'] == 'processingin':
            self.currentProcess = 'waitworker'
        elif self.currentProcess == 'message' and tag == 'message_level':
            self.messageLevel = attributes['val']
        elif tag == 'message':
            # older coq (8.6) use a message tag at top-level, newer ones use a
            # message tag inside a feedback_content one.
            # Since there might be more than one message, we want to track when
            # we came from a 'waitmessage' (newer coq).
            self.oldProcess = self.currentProcess
            self.currentProcess = 'message'

    # Called when an element ends.
    def end(self, tag):
        if tag == "value":
            # A complete command result: report Ok/Err to the state manager.
            if self.nextFlush:
                self.printer.flushInfo()
            self.nextFlush = True
            if self.val == 'good':
                self.state_manager.pull_event(Ok(self.state_id))
            else:
                # Error: forward the (optional) location span, then show the
                # error text and keep it from being flushed by the next value.
                self.state_manager.pull_event(
                    Err(None, False if not hasattr(self, "loc_s") or self.loc_s is None else int(self.loc_s),
                        False if not hasattr(self, "loc_e") or self.loc_e is None else int(self.loc_e)))
                self.printer.addInfo(self.currentContent)
                self.currentContent = ''
                self.nextFlush = False
            self.state_id = None
            self.val = None
            self.currentProcess = None
        elif tag == 'goals':
            # All goals parsed: hand the Goals summary to the printer, reset.
            self.printer.debug("Goals: " + str(self.goals_fg) + "\n;; " + str(self.goals_bg) + "\n;; " + str(self.goals_shelved) + "\n;; " + str(self.goals_given_up) + "\n")
            self.printer.addGoal(Goals(self.goals_fg, self.goals_bg, self.goals_shelved, self.goals_given_up))
            self.goals_fg = []
            self.goals_bg = 0
            self.goals_shelved = 0
            self.goals_given_up = 0
            self.currentProcess = 'value'
        elif tag == 'string' and self.currentProcess == 'goal_id':
            self.goal_id = self.currentContent
            self.currentProcess = 'goal'
            self.currentContent = ''
        elif tag == 'goal' and self.currentProcess == 'goal':
            # End of one foreground goal: accumulated text is its conclusion.
            self.goals_fg.append(Goal(self.goal_id, self.goal_hyps, self.currentContent))
            self.goal_hyps = []
            self.currentContent = ''
            self.currentProcess = 'fg'
        elif tag == 'richpp' and self.currentProcess == 'goal_hyps':
            self.goal_hyps.append(self.currentContent)
            self.currentContent = ''
        elif tag == 'list' and self.currentProcess == 'goal_hyps':
            self.currentContent = ''
            self.currentProcess = 'goal'
        elif tag == 'list' and self.currentProcess == 'fg':
            self.currentContent = ''
            self.currentProcess = 'goals_bg'
        elif tag == 'pair' and self.currentProcess == 'goals_bg_in':
            self.currentContent = ''
            self.currentProcess = 'goals_bg'
        elif tag == 'feedback_content' and self.currentProcess == 'waitmessage':
            self.currentProcess = None
            self.oldProcess = None
            self.messageLevel = None
            self.currentContent = ''
        elif tag == 'feedback_content' and self.currentProcess == 'waitworker':
            self.state_manager.setWorker(self.currentContent)
            self.currentContent = ''
        elif tag == 'message' and self.currentProcess == 'message':
            self.currentProcess = 'waitmessage'
            self.printer.debug(self.messageLevel + ": " + str(self.currentContent) + "\n\n")
            self.printer.addInfo(self.currentContent)
            # Restore whatever state we were in before the <message> opened.
            self.currentProcess = self.oldProcess
            self.messageLevel = None
            self.currentContent = ''

    # Called when character data is read.
    def data(self, content):
        # Accumulate text only for states whose closing handler consumes it.
        if self.currentProcess == 'message' or self.currentProcess == 'value' or \
                self.currentProcess == 'goal_id' or self.currentProcess == 'goal' or \
                self.currentProcess == 'waitworker' or self.currentProcess == 'goal_hyps':
            self.currentContent += content
class CoqParser(Thread):
    """Reader thread for coqtop's stdout.

    select()s on the coqtop process' stdout and feeds whatever bytes arrive
    into an incremental XML parser whose events are handled by a CoqHandler.
    A DOCTYPE preamble is fed first to declare the entities coqtop emits and
    to open a synthetic <Root> element, since coqtop writes a stream of XML
    fragments rather than one well-formed document.
    """

    def __init__(self, process, state_manager, printer):
        Thread.__init__(self)
        self.cont = True  # cleared by stop() to terminate the read loop
        self.process = process
        self.printer = printer
        self.target = CoqHandler(state_manager, printer)
        self.parser = ET.XMLParser(target=self.target)
        self.parser.feed("""
<!DOCTYPE coq [
<!-- we replace non-breakable spaces with normal spaces, because it would
make copy-pasting harder -->
<!ENTITY nbsp \" \">
<!ENTITY gt \">\">
<!ENTITY lt \"<\">
<!ENTITY apos \"'\">
]>
<Root>
""")

    def run(self):
        """Pump stdout into the parser until stop() is called or reading fails."""
        self.printer.debug("Running parser...\n")
        try:
            f = self.process.stdout
            while self.cont:
                # 0.1 s timeout so stop() takes effect promptly even while
                # coqtop is silent.
                r, w, e = select.select([f], [], [], 0.1)
                if f in r:
                    content = read(f.fileno(), 0x400)
                    self.printer.debug("<< " + str(content) + "\n")
                    self.parser.feed(content)
        except Exception as e:
            # BUG FIX: `traceback` was referenced here without ever being
            # imported, so reporting a parse failure raised a NameError
            # instead of logging the original problem.
            import traceback
            self.printer.debug("WHOOPS!\n")
            self.printer.debug("WHOOPS! " + str(e) + "\n")
            self.printer.debug("WHOOPS! " + str(traceback.format_exc()) + "\n")
        try:
            # Close the synthetic root; a failure here is harmless (the
            # stream may already be in an unrecoverable state).
            self.parser.feed("</Root>")
        except Exception:
            pass
        self.printer.debug("END OF PARSING\n")

    def stop(self):
        """Ask the reader loop to exit after its current select() tick."""
        self.cont = False
import time
import select
from io import StringIO
from threading import Thread, Event, Lock
from os import read
from .coqapi import Ok, Err
from .xmltype import *
class CoqHandler:
    """ElementTree parser target for coqtop's XML stream.

    Duplicate of the CoqHandler defined earlier in this file.  A state
    machine in ``currentProcess`` follows the tag nesting; start()/end()
    transitions forward command results to the state manager and goals or
    messages to the printer.
    """

    def __init__(self, state_manager, printer):
        self.printer = printer
        self.state_manager = state_manager
        self.currentContent = ""    # text accumulated for the current element
        self.oldProcess = None      # state saved around a nested <message>
        self.currentProcess = None  # current state-machine state
        self.messageLevel = None
        self.val = None             # 'good'/'fail' of the enclosing <value>
        self.state_id = None
        self.nextFlush = True       # flush info panel when the next <value> ends
        self.goals = None
        self.goals_fg = []          # foreground goals (Goal objects)
        self.goals_bg = 0           # background/shelved/given-up: counts only
        self.goals_shelved = 0
        self.goals_given_up = 0
        self.goal_id = None
        self.goal_hyps = []
        self.goal_ccl = None

    # Called when an element starts.
    def start(self, tag, attributes):
        if tag == 'value':
            self.currentProcess = 'value'
            self.val = attributes['val']
            self.loc_s = None if not 'loc_s' in attributes else attributes['loc_s']
            self.loc_e = None if not 'loc_e' in attributes else attributes['loc_e']
        if tag == 'option' and attributes['val'] == 'none' and self.currentProcess == 'value':
            self.printer.addGoal(None)
        elif tag == 'goals' and self.currentProcess == 'value':
            self.currentProcess = 'goals_fg'
        elif tag == 'list' and self.currentProcess == 'goals_fg':
            self.currentProcess = 'fg'
        elif tag == 'goal' and self.currentProcess == 'fg':
            self.currentProcess = 'goal'
        elif tag == 'pair' and self.currentProcess == 'goals_bg':
            self.currentProcess = 'goals_bg_in'
        elif tag == 'goal' and self.currentProcess == 'goals_bg_in':
            self.goals_bg += 1
            # TODO
        elif tag == 'goal' and self.currentProcess == 'goals_shelved':
            self.goals_shelved += 1
            # TODO
        elif tag == 'goal' and self.currentProcess == 'goals_given_up':
            self.goals_given_up += 1
        elif tag == 'string' and self.currentProcess == 'goal':
            self.currentProcess = 'goal_id'
        elif tag == 'list' and self.currentProcess == 'goal':
            self.currentProcess = 'goal_hyps'
        elif tag == 'state_id' and self.currentProcess == 'value':
            self.state_id = attributes['val']
        elif tag == 'feedback_content' and attributes['val'] == 'message':
            self.currentProcess = 'waitmessage'
        elif tag == 'feedback_content' and attributes['val'] == 'processingin':
            self.currentProcess = 'waitworker'
        elif self.currentProcess == 'message' and tag == 'message_level':
            self.messageLevel = attributes['val']
        elif tag == 'message':
            # older coq (8.6) use a message tag at top-level, newer ones use a
            # message tag inside a feedback_content one.
            # Since there might be more than one message, we want to track when
            # we came from a 'waitmessage' (newer coq).
            self.oldProcess = self.currentProcess
            self.currentProcess = 'message'

    # Called when an element ends.
    def end(self, tag):
        if tag == "value":
            if self.nextFlush:
                self.printer.flushInfo()
            self.nextFlush = True
            if self.val == 'good':
                self.state_manager.pull_event(Ok(self.state_id))
            else:
                self.state_manager.pull_event(
                    Err(None, False if not hasattr(self, "loc_s") or self.loc_s is None else int(self.loc_s),
                        False if not hasattr(self, "loc_e") or self.loc_e is None else int(self.loc_e)))
                self.printer.addInfo(self.currentContent)
                self.currentContent = ''
                self.nextFlush = False
            self.state_id = None
            self.val = None
            self.currentProcess = None
        elif tag == 'goals':
            self.printer.debug("Goals: " + str(self.goals_fg) + "\n;; " + str(self.goals_bg) + "\n;; " + str(self.goals_shelved) + "\n;; " + str(self.goals_given_up) + "\n")
            self.printer.addGoal(Goals(self.goals_fg, self.goals_bg, self.goals_shelved, self.goals_given_up))
            self.goals_fg = []
            self.goals_bg = 0
            self.goals_shelved = 0
            self.goals_given_up = 0
            self.currentProcess = 'value'
        elif tag == 'string' and self.currentProcess == 'goal_id':
            self.goal_id = self.currentContent
            self.currentProcess = 'goal'
            self.currentContent = ''
        elif tag == 'goal' and self.currentProcess == 'goal':
            self.goals_fg.append(Goal(self.goal_id, self.goal_hyps, self.currentContent))
            self.goal_hyps = []
            self.currentContent = ''
            self.currentProcess = 'fg'
        elif tag == 'richpp' and self.currentProcess == 'goal_hyps':
            self.goal_hyps.append(self.currentContent)
            self.currentContent = ''
        elif tag == 'list' and self.currentProcess == 'goal_hyps':
            self.currentContent = ''
            self.currentProcess = 'goal'
        elif tag == 'list' and self.currentProcess == 'fg':
            self.currentContent = ''
            self.currentProcess = 'goals_bg'
        elif tag == 'pair' and self.currentProcess == 'goals_bg_in':
            self.currentContent = ''
            self.currentProcess = 'goals_bg'
        elif tag == 'feedback_content' and self.currentProcess == 'waitmessage':
            self.currentProcess = None
            self.oldProcess = None
            self.messageLevel = None
            self.currentContent = ''
        elif tag == 'feedback_content' and self.currentProcess == 'waitworker':
            self.state_manager.setWorker(self.currentContent)
            self.currentContent = ''
        elif tag == 'message' and self.currentProcess == 'message':
            self.currentProcess = 'waitmessage'
            self.printer.debug(self.messageLevel + ": " + str(self.currentContent) + "\n\n")
            self.printer.addInfo(self.currentContent)
            self.currentProcess = self.oldProcess
            self.messageLevel = None
            self.currentContent = ''

    # Called when character data is read.
    def data(self, content):
        if self.currentProcess == 'message' or self.currentProcess == 'value' or \
                self.currentProcess == 'goal_id' or self.currentProcess == 'goal' or \
                self.currentProcess == 'waitworker' or self.currentProcess == 'goal_hyps':
            self.currentContent += content
class CoqParser(Thread):
    """Reader thread for coqtop's stdout (duplicate of the class above).

    Feeds everything coqtop writes into an incremental XML parser handled
    by a CoqHandler; the DOCTYPE preamble declares coqtop's entities and
    opens a synthetic <Root> element around the fragment stream.
    """

    def __init__(self, process, state_manager, printer):
        Thread.__init__(self)
        self.cont = True  # cleared by stop() to terminate the read loop
        self.process = process
        self.printer = printer
        self.target = CoqHandler(state_manager, printer)
        self.parser = ET.XMLParser(target=self.target)
        self.parser.feed("""
<!DOCTYPE coq [
<!-- we replace non-breakable spaces with normal spaces, because it would
make copy-pasting harder -->
<!ENTITY nbsp \" \">
<!ENTITY gt \">\">
<!ENTITY lt \"<\">
<!ENTITY apos \"'\">
]>
<Root>
""")

    def run(self):
        """Pump stdout into the parser until stop() is called or reading fails."""
        self.printer.debug("Running parser...\n")
        try:
            f = self.process.stdout
            while self.cont:
                # Short timeout so stop() is honoured promptly.
                r, w, e = select.select([f], [], [], 0.1)
                if f in r:
                    content = read(f.fileno(), 0x400)
                    self.printer.debug("<< " + str(content) + "\n")
                    self.parser.feed(content)
        except Exception as e:
            # BUG FIX: `traceback` was never imported, so this handler
            # raised NameError instead of reporting the parse failure.
            import traceback
            self.printer.debug("WHOOPS!\n")
            self.printer.debug("WHOOPS! " + str(e) + "\n")
            self.printer.debug("WHOOPS! " + str(traceback.format_exc()) + "\n")
        try:
            # Best-effort close of the synthetic root element.
            self.parser.feed("</Root>")
        except Exception:
            pass
        self.printer.debug("END OF PARSING\n")

    def stop(self):
        """Ask the reader loop to exit after its current select() tick."""
        self.cont = False
import os
import unittest
import mock
from landscaper import paths
from landscaper.utilities import coordinates
class TestCoordinatesJson(unittest.TestCase):
    """Sanity checks on the coordinates JSON file location."""

    def test_get_coordinates_json_path(self):
        """The paths module must expose a COORDINATES attribute."""
        self.assertTrue(hasattr(paths, "COORDINATES"))

    def test_path_to_json_works(self):
        """The advertised coordinates file must exist on disk."""
        coords_file = paths.COORDINATES
        self.assertTrue(os.path.isfile(coords_file))
class TestCoordinatesRetrieval(unittest.TestCase):
    """Tests for coordinates.component_coordinates() lookups."""

    def setUp(self):
        # Point the lookup at the test fixture file instead of the
        # installed coordinates.
        here = os.path.dirname(os.path.abspath(__file__))
        self.coords_path = os.path.join(here, 'data/coordinates.json')

    @mock.patch("landscaper.utilities.coordinates.paths")
    def test_unknown_name(self, mck_paths):
        """An unknown component name yields None."""
        mck_paths.COORDINATES = self.coords_path
        self.assertIsNone(coordinates.component_coordinates("machine-G"))

    @mock.patch("landscaper.utilities.coordinates.paths")
    def test_none_name(self, mck_paths):
        """A None component name yields None."""
        mck_paths.COORDINATES = self.coords_path
        self.assertIsNone(coordinates.component_coordinates(None))

    @mock.patch("landscaper.utilities.coordinates.paths")
    def test_grab_machine_coordinates(self, mck_paths):
        """Point, LineString and Polygon geometries come back intact."""
        mck_paths.COORDINATES = self.coords_path
        expected = {
            "machine-B": {
                "type": "Point",
                "coordinates": [-78.254, 40.45712],
            },
            "machine-C": {
                "type": "LineString",
                "coordinates": [
                    [102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]
                ],
            },
            "machine-D": {
                "type": "Polygon",
                "coordinates": [
                    [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                     [100.0, 1.0], [100.0, 0.0]]
                ],
            },
        }
        for name, geometry in expected.items():
            self.assertEqual(coordinates.component_coordinates(name), geometry)
import unittest
import mock
from landscaper import paths
from landscaper.utilities import coordinates
class TestCoordinatesJson(unittest.TestCase):
    """
    Unit tests for the coordinates json file.
    (Duplicate of the class defined earlier in this file.)
    """

    def test_get_coordinates_json_path(self):
        """
        Check that the path is availabe to the coordinates.
        """
        self.assertTrue(hasattr(paths, "COORDINATES"))

    def test_path_to_json_works(self):
        """
        Check that the coordinates file is where it is supposed to be.
        """
        self.assertTrue(os.path.isfile(paths.COORDINATES))
class TestCoordinatesRetrieval(unittest.TestCase):
    """
    Unit tests to test the retrieve coordinates functionality.
    (Duplicate of the class defined earlier in this file.)
    """

    def setUp(self):
        # Use the test fixture file rather than the installed coordinates.
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        self.coords_path = os.path.join(tests_dir, 'data/coordinates.json')

    @mock.patch("landscaper.utilities.coordinates.paths")
    def test_unknown_name(self, mck_paths):
        """
        Check that a name that does not exist is ignored.
        """
        mck_paths.COORDINATES = self.coords_path
        coords = coordinates.component_coordinates("machine-G")
        self.assertIsNone(coords)

    @mock.patch("landscaper.utilities.coordinates.paths")
    def test_none_name(self, mck_paths):
        """
        Check that a None value input is ignored.
        """
        mck_paths.COORDINATES = self.coords_path
        coords = coordinates.component_coordinates(None)
        self.assertIsNone(coords)

    @mock.patch("landscaper.utilities.coordinates.paths")
    def test_grab_machine_coordinates(self, mck_paths):
        """
        Retrieve machine coordinates for Point, LineString and Polygon format.
        """
        mck_paths.COORDINATES = self.coords_path
        coords_b = coordinates.component_coordinates("machine-B")
        coords_c = coordinates.component_coordinates("machine-C")
        coords_d = coordinates.component_coordinates("machine-D")
        self.assertEqual(coords_b, {
            "type": "Point",
            "coordinates": [-78.254, 40.45712]
        })
        self.assertEqual(coords_c, {
            "type": "LineString",
            "coordinates": [
                [102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]
            ]
        })
        self.assertEqual(coords_d, {
            "type": "Polygon",
            "coordinates": [
                [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                 [100.0, 1.0], [100.0, 0.0]]
            ]
        })
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import references
import misc
@pytest.mark.v6
@pytest.mark.options
@pytest.mark.user
def test_v6_options_user_defined_option():
    """Server returns a user-defined option in both ADVERTISE and REPLY."""
    # Testing server ability to configure it with user custom option
    # in this case: option code 100, value uint8 123.
    # with client via Advertise and Reply message.
    # Client Server
    # request option SOLICIT -->
    # custom option <-- ADVERTISE
    # request option REQUEST -->
    # custom option <-- REPLY
    # Pass Criteria:
    # REPLY/ADVERTISE MUST include option:
    # custom option with value 123
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.config_srv_custom_opt('foo', 100, 'uint8', 123)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(100)
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(100)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_requests_option(100)
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(100)

    references.references_check('RFC3315')
@pytest.mark.v6
@pytest.mark.options
@pytest.mark.user
def test_v6_options_user_defined_option_code_zero():
    """Server must reject a user-defined option with option code 0."""
    # Configure a custom option with the invalid option code 0.
    # Pass criteria: the server must fail during the configuration phase
    # (start_srv_during_process) rather than start serving.
    # NOTE(review): the original comment block here was copy-pasted from
    # the Advertise/Reply test above and did not describe this test.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.config_srv_custom_opt('foo', 0, 'uint8', 123)
    srv_control.build_and_send_config_files()
    srv_control.start_srv_during_process('DHCP', 'configuration')
@pytest.mark.v6
@pytest.mark.options
@pytest.mark.user
def test_v6_options_user_defined_option_standard_code():
    """Server must reject a user-defined option reusing a standard code."""
    # Configure a custom option with code 12, which is already assigned to
    # a standard DHCPv6 option (unicast).
    # Pass criteria: the server must fail during the configuration phase
    # (start_srv_during_process) rather than start serving.
    # NOTE(review): the original comment block here was copy-pasted from
    # the Advertise/Reply test above and did not describe this test.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.config_srv_custom_opt('foo', 12, 'uint8', 123)
    srv_control.build_and_send_config_files()
    srv_control.start_srv_during_process('DHCP', 'configuration')
# Every option code exercised by test_v6_options_all, in request order.
_ALL_OPTION_CODES = [7, 12, 21, 22, 23, 24, 27, 28, 29, 30, 31, 32, 33, 34,
                     40, 41, 42, 59, 60, 65, 100]


def _request_all_options():
    """Have the client request every option code under test."""
    for code in _ALL_OPTION_CODES:
        srv_msg.client_requests_option(code)


def _check_common_options(domain_search=None):
    """Response checks shared by the INFOREQUEST and SOLICIT exchanges.

    :param domain_search: when given, additionally verify the payload of the
        domain-search option (24); the SOLICIT exchange gets the per-subnet
        value, the INFOREQUEST exchange only checks the option's presence.
    """
    srv_msg.response_check_include_option(7)
    srv_msg.response_check_option_content(7, 'value', 123)
    srv_msg.response_check_include_option(12)
    srv_msg.response_check_option_content(12, 'srvaddr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_msg.response_check_include_option(21)
    srv_msg.response_check_option_content(21, 'addresses', 'srv1.example.com.,srv2.isc.org.')
    srv_msg.response_check_include_option(22)
    srv_msg.response_check_option_content(22, 'addresses', '2001:db8::1,2001:db8::2')
    srv_msg.response_check_include_option(23)
    srv_msg.response_check_option_content(23, 'addresses', '2001:db8::1,2001:db8::2')
    srv_msg.response_check_include_option(24)
    if domain_search is not None:
        srv_msg.response_check_option_content(24, 'domains', domain_search)
    srv_msg.response_check_include_option(27)
    srv_msg.response_check_option_content(27, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_msg.response_check_include_option(28)
    srv_msg.response_check_option_content(28, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_msg.response_check_include_option(29)
    srv_msg.response_check_option_content(29, 'domain', 'ntp.example.com.')
    srv_msg.response_check_include_option(30)
    srv_msg.response_check_option_content(30, 'domain', 'ntp.example.com.')
    srv_msg.response_check_include_option(31)
    srv_msg.response_check_option_content(31, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_msg.response_check_include_option(32)
    srv_msg.response_check_option_content(32, 'value', '12345678')
    srv_msg.response_check_include_option(33)
    srv_msg.response_check_option_content(33, 'bcmcsdomains', 'very.good.domain.name.com.')
    srv_msg.response_check_include_option(34)
    srv_msg.response_check_option_content(34, 'bcmcsservers', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_msg.response_check_include_option(40)
    srv_msg.response_check_option_content(40, 'paaaddr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_msg.response_check_include_option(41)
    srv_msg.response_check_option_content(41, 'optdata', 'EST5EDT4')
    srv_msg.response_check_include_option(42)
    srv_msg.response_check_option_content(42, 'optdata', 'Europe/Zurich')
    srv_msg.response_check_include_option(59)
    srv_msg.response_check_option_content(59, 'optdata', 'http://www.kea.isc.org')
    srv_msg.response_check_include_option(65)
    srv_msg.response_check_option_content(65, 'erpdomain', 'erp-domain.isc.org.')


@pytest.mark.v6
@pytest.mark.options
def test_v6_options_all():
    """Configure every supported v6 option and check the server returns them
    correctly in an INFOREQUEST/REPLY and a vendor-class SOLICIT/ADVERTISE
    exchange.  (The request/check sequences were previously duplicated
    inline; they are factored into the helpers above.)
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.config_srv_opt('preference', '123')
    srv_control.config_srv_opt('sip-server-dns', 'srv1.example.com,srv2.isc.org')
    srv_control.config_srv_opt('dns-servers', '2001:db8::1,2001:db8::2')
    srv_control.config_srv_opt('domain-search', 'domain1.example.com,domain2.isc.org')
    srv_control.config_srv_opt('sip-server-addr', '2001:db8::1,2001:db8::2')
    srv_control.config_srv_opt('nisp-servers', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_control.config_srv_opt('nis-servers', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_control.config_srv_opt('nis-domain-name', 'ntp.example.com')
    srv_control.config_srv_opt('nisp-domain-name', 'ntp.example.com')
    srv_control.config_srv_opt('sntp-servers', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_control.config_srv_opt('information-refresh-time', '12345678')
    srv_control.config_srv_opt('unicast', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_control.config_srv_opt('bcmcs-server-dns', 'very.good.domain.name.com')
    srv_control.config_srv_opt('bcmcs-server-addr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_control.config_srv_opt('pana-agent', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
    srv_control.config_srv_opt('new-posix-timezone', 'EST5EDT4')
    srv_control.config_srv_opt('new-tzdb-timezone', 'Europe/Zurich')
    srv_control.config_srv_opt('bootfile-url', 'http://www.kea.isc.org')
    srv_control.config_srv_opt('bootfile-param', '000B48656C6C6F20776F726C640003666F6F')
    srv_control.config_srv_opt('erp-local-domain-name', 'erp-domain.isc.org')
    srv_control.config_srv('domain-search', 0, 'subnet.example.com')
    srv_control.config_srv_custom_opt('foo', 100, 'uint8', '123')
    srv_control.config_srv_opt_space('vendor-4491',
                                     'tftp-servers',
                                     'fc00:db20:35b:7399::5')
    srv_control.config_srv_opt_space('vendor-4491', 'config-file', 'normal_erouter_v6.cm')
    srv_control.config_srv_opt_space('vendor-4491',
                                     'syslog-servers',
                                     'fdf8:f53e:61e4::18')
    srv_control.config_srv_opt_space('vendor-4491',
                                     'time-servers',
                                     'fc00:db20:35b:7399::5')
    srv_control.config_srv_opt_space('vendor-4491', 'time-offset', '-10000')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    # Exchange 1: INFOREQUEST -> REPLY.
    misc.test_procedure()
    _request_all_options()
    srv_msg.client_send_msg('INFOREQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    _check_common_options()

    # Exchange 2: SOLICIT with CableLabs vendor class -> ADVERTISE,
    # which must additionally carry vendor option 17 with the requested
    # sub-options and the per-subnet domain-search value.
    misc.test_procedure()
    _request_all_options()
    srv_msg.client_sets_value('Client', 'enterprisenum', '4491')
    srv_msg.client_does_include('Client', 'vendor-class')
    srv_msg.add_vendor_suboption('Client', 1, 32)
    srv_msg.add_vendor_suboption('Client', 1, 33)
    srv_msg.add_vendor_suboption('Client', 1, 34)
    srv_msg.add_vendor_suboption('Client', 1, 37)
    srv_msg.add_vendor_suboption('Client', 1, 38)
    srv_msg.client_does_include('Client', 'vendor-specific-info')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(17)
    srv_msg.response_check_option_content(17, 'sub-option', 32)
    srv_msg.response_check_option_content(17, 'sub-option', 33)
    srv_msg.response_check_option_content(17, 'sub-option', 34)
    srv_msg.response_check_option_content(17, 'sub-option', 37)
    srv_msg.response_check_option_content(17, 'sub-option', 38)
    _check_common_options(domain_search='subnet.example.com.')
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import references
import misc
@pytest.mark.v6
@pytest.mark.options
@pytest.mark.user
def test_v6_options_user_defined_option():
    """Server returns a user-defined option in both ADVERTISE and REPLY.
    (Duplicate of the test defined earlier in this file.)
    """
    # Testing server ability to configure it with user custom option
    # in this case: option code 100, value uint8 123.
    # with client via Advertise and Reply message.
    # Client Server
    # request option SOLICIT -->
    # custom option <-- ADVERTISE
    # request option REQUEST -->
    # custom option <-- REPLY
    # Pass Criteria:
    # REPLY/ADVERTISE MUST include option:
    # custom option with value 123
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.config_srv_custom_opt('foo', 100, 'uint8', 123)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(100)
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(100)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_requests_option(100)
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(100)

    references.references_check('RFC3315')
@pytest.mark.v6
@pytest.mark.options
@pytest.mark.user
def test_v6_options_user_defined_option_code_zero():
    """Server must reject a user-defined option with option code 0.
    (Duplicate of the test defined earlier in this file.)
    """
    # Configure a custom option with the invalid option code 0.
    # Pass criteria: the server must fail during the configuration phase
    # (start_srv_during_process) rather than start serving.
    # NOTE(review): the original comment block here was copy-pasted from
    # the Advertise/Reply test above and did not describe this test.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.config_srv_custom_opt('foo', 0, 'uint8', 123)
    srv_control.build_and_send_config_files()
    srv_control.start_srv_during_process('DHCP', 'configuration')
@pytest.mark.v6
@pytest.mark.options
@pytest.mark.user
def test_v6_options_user_defined_option_standard_code():
    """Server must reject a user-defined option reusing a standard code.
    (Duplicate of the test defined earlier in this file.)
    """
    # Configure a custom option with code 12, which is already assigned to
    # a standard DHCPv6 option (unicast).
    # Pass criteria: the server must fail during the configuration phase
    # (start_srv_during_process) rather than start serving.
    # NOTE(review): the original comment block here was copy-pasted from
    # the Advertise/Reply test above and did not describe this test.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.config_srv_custom_opt('foo', 12, 'uint8', 123)
    srv_control.build_and_send_config_files()
    srv_control.start_srv_during_process('DHCP', 'configuration')
@pytest.mark.v6
@pytest.mark.options
def test_v6_options_all():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.config_srv_opt('preference', '123')
srv_control.config_srv_opt('sip-server-dns', 'srv1.example.com,srv2.isc.org')
srv_control.config_srv_opt('dns-servers', '2001:db8::1,2001:db8::2')
srv_control.config_srv_opt('domain-search', 'domain1.example.com,domain2.isc.org')
srv_control.config_srv_opt('sip-server-addr', '2001:db8::1,2001:db8::2')
srv_control.config_srv_opt('nisp-servers', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_control.config_srv_opt('nis-servers', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_control.config_srv_opt('nis-domain-name', 'ntp.example.com')
srv_control.config_srv_opt('nisp-domain-name', 'ntp.example.com')
srv_control.config_srv_opt('sntp-servers', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_control.config_srv_opt('information-refresh-time', '12345678')
srv_control.config_srv_opt('unicast', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_control.config_srv_opt('bcmcs-server-dns', 'very.good.domain.name.com')
srv_control.config_srv_opt('bcmcs-server-addr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_control.config_srv_opt('pana-agent', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_control.config_srv_opt('new-posix-timezone', 'EST5EDT4')
srv_control.config_srv_opt('new-tzdb-timezone', 'Europe/Zurich')
srv_control.config_srv_opt('bootfile-url', 'http://www.kea.isc.org')
srv_control.config_srv_opt('bootfile-param', '000B48656C6C6F20776F726C640003666F6F')
srv_control.config_srv_opt('erp-local-domain-name', 'erp-domain.isc.org')
srv_control.config_srv('domain-search', 0, 'subnet.example.com')
srv_control.config_srv_custom_opt('foo', 100, 'uint8', '123')
srv_control.config_srv_opt_space('vendor-4491',
'tftp-servers',
'fc00:db20:35b:7399::5')
srv_control.config_srv_opt_space('vendor-4491', 'config-file', 'normal_erouter_v6.cm')
srv_control.config_srv_opt_space('vendor-4491',
'syslog-servers',
'fdf8:f53e:61e4::18')
srv_control.config_srv_opt_space('vendor-4491',
'time-servers',
'fc00:db20:35b:7399::5')
srv_control.config_srv_opt_space('vendor-4491', 'time-offset', '-10000')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(7)
srv_msg.client_requests_option(12)
srv_msg.client_requests_option(21)
srv_msg.client_requests_option(22)
srv_msg.client_requests_option(23)
srv_msg.client_requests_option(24)
srv_msg.client_requests_option(27)
srv_msg.client_requests_option(28)
srv_msg.client_requests_option(29)
srv_msg.client_requests_option(30)
srv_msg.client_requests_option(31)
srv_msg.client_requests_option(32)
srv_msg.client_requests_option(33)
srv_msg.client_requests_option(34)
srv_msg.client_requests_option(40)
srv_msg.client_requests_option(41)
srv_msg.client_requests_option(42)
srv_msg.client_requests_option(59)
srv_msg.client_requests_option(60)
srv_msg.client_requests_option(65)
srv_msg.client_requests_option(100)
srv_msg.client_send_msg('INFOREQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(7)
srv_msg.response_check_option_content(7, 'value', 123)
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'srvaddr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(21)
srv_msg.response_check_option_content(21, 'addresses', 'srv1.example.com.,srv2.isc.org.')
srv_msg.response_check_include_option(22)
srv_msg.response_check_option_content(22, 'addresses', '2001:db8::1,2001:db8::2')
srv_msg.response_check_include_option(23)
srv_msg.response_check_option_content(23, 'addresses', '2001:db8::1,2001:db8::2')
srv_msg.response_check_include_option(24)
srv_msg.response_check_include_option(27)
srv_msg.response_check_option_content(27, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(28)
srv_msg.response_check_option_content(28, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(29)
srv_msg.response_check_option_content(29, 'domain', 'ntp.example.com.')
srv_msg.response_check_include_option(30)
srv_msg.response_check_option_content(30, 'domain', 'ntp.example.com.')
srv_msg.response_check_include_option(31)
srv_msg.response_check_option_content(31, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(32)
srv_msg.response_check_option_content(32, 'value', '12345678')
srv_msg.response_check_include_option(33)
srv_msg.response_check_option_content(33, 'bcmcsdomains', 'very.good.domain.name.com.')
srv_msg.response_check_include_option(34)
srv_msg.response_check_option_content(34, 'bcmcsservers', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(40)
srv_msg.response_check_option_content(40, 'paaaddr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(41)
srv_msg.response_check_option_content(41, 'optdata', 'EST5EDT4')
srv_msg.response_check_include_option(42)
srv_msg.response_check_option_content(42, 'optdata', 'Europe/Zurich')
srv_msg.response_check_include_option(59)
srv_msg.response_check_option_content(59, 'optdata', 'http://www.kea.isc.org')
srv_msg.response_check_include_option(65)
srv_msg.response_check_option_content(65, 'erpdomain', 'erp-domain.isc.org.')
misc.test_procedure()
srv_msg.client_requests_option(7)
srv_msg.client_requests_option(12)
srv_msg.client_requests_option(21)
srv_msg.client_requests_option(22)
srv_msg.client_requests_option(23)
srv_msg.client_requests_option(24)
srv_msg.client_requests_option(27)
srv_msg.client_requests_option(28)
srv_msg.client_requests_option(29)
srv_msg.client_requests_option(30)
srv_msg.client_requests_option(31)
srv_msg.client_requests_option(32)
srv_msg.client_requests_option(33)
srv_msg.client_requests_option(34)
srv_msg.client_requests_option(40)
srv_msg.client_requests_option(41)
srv_msg.client_requests_option(42)
srv_msg.client_requests_option(59)
srv_msg.client_requests_option(60)
srv_msg.client_requests_option(65)
srv_msg.client_requests_option(100)
srv_msg.client_sets_value('Client', 'enterprisenum', '4491')
srv_msg.client_does_include('Client', 'vendor-class')
srv_msg.add_vendor_suboption('Client', 1, 32)
srv_msg.add_vendor_suboption('Client', 1, 33)
srv_msg.add_vendor_suboption('Client', 1, 34)
srv_msg.add_vendor_suboption('Client', 1, 37)
srv_msg.add_vendor_suboption('Client', 1, 38)
srv_msg.client_does_include('Client', 'vendor-specific-info')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(17)
srv_msg.response_check_option_content(17, 'sub-option', 32)
srv_msg.response_check_option_content(17, 'sub-option', 33)
srv_msg.response_check_option_content(17, 'sub-option', 34)
srv_msg.response_check_option_content(17, 'sub-option', 37)
srv_msg.response_check_option_content(17, 'sub-option', 38)
srv_msg.response_check_include_option(7)
srv_msg.response_check_option_content(7, 'value', 123)
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'srvaddr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(21)
srv_msg.response_check_option_content(21, 'addresses', 'srv1.example.com.,srv2.isc.org.')
srv_msg.response_check_include_option(22)
srv_msg.response_check_option_content(22, 'addresses', '2001:db8::1,2001:db8::2')
srv_msg.response_check_include_option(23)
srv_msg.response_check_option_content(23, 'addresses', '2001:db8::1,2001:db8::2')
srv_msg.response_check_include_option(24)
srv_msg.response_check_option_content(24, 'domains', 'subnet.example.com.')
srv_msg.response_check_include_option(27)
srv_msg.response_check_option_content(27, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(28)
srv_msg.response_check_option_content(28, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(29)
srv_msg.response_check_option_content(29, 'domain', 'ntp.example.com.')
srv_msg.response_check_include_option(30)
srv_msg.response_check_option_content(30, 'domain', 'ntp.example.com.')
srv_msg.response_check_include_option(31)
srv_msg.response_check_option_content(31, 'addresses', '2001:db8::abc,fdf8:f53e:61e4::18,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(32)
srv_msg.response_check_option_content(32, 'value', '12345678')
srv_msg.response_check_include_option(33)
srv_msg.response_check_option_content(33, 'bcmcsdomains', 'very.good.domain.name.com.')
srv_msg.response_check_include_option(34)
srv_msg.response_check_option_content(34, 'bcmcsservers', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(40)
srv_msg.response_check_option_content(40, 'paaaddr', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b,fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
srv_msg.response_check_include_option(41)
srv_msg.response_check_option_content(41, 'optdata', 'EST5EDT4')
srv_msg.response_check_include_option(42)
srv_msg.response_check_option_content(42, 'optdata', 'Europe/Zurich')
srv_msg.response_check_include_option(59)
srv_msg.response_check_option_content(59, 'optdata', 'http://www.kea.isc.org')
srv_msg.response_check_include_option(65)
srv_msg.response_check_option_content(65, 'erpdomain', 'erp-domain.isc.org.') | 0.334481 | 0.060696 |
import torch
import pandas as pd
from laylm.trainer import metrics
from laylm.config import label as label_cfg
from laylm.config import token as token_cfg
def annoset_inputs(data_dict, device):
    """Build a batch-of-one model input mapping from a data dict.

    Converts the 'token_ids', 'mask' and 'bboxes' entries to long
    tensors, prepends a batch dimension, and moves them to *device*.
    Returns a dict with keys 'input_ids', 'attention_mask' and 'bbox'.
    """
    def as_batched(key):
        # long tensor with a leading batch axis, on the target device
        tensor = torch.tensor(data_dict[key], dtype=torch.long)
        return tensor.unsqueeze(dim=0).to(device)

    return {
        'input_ids': as_batched('token_ids'),
        'attention_mask': as_batched('mask'),
        'bbox': as_batched('bboxes'),
    }
def annoset_transform(objects, tokenizer, max_seq_length=512):
    """Tokenize annotation objects into padded, model-ready parallel lists.

    The tokenized sequence is framed with CLS/SEP entries and padded with
    PAD entries (mask 0) up to *max_seq_length*.  Frame and pad positions
    get the ignore index in 'wseq'/'gseq'.
    NOTE(review): sequences longer than max_seq_length are NOT truncated
    here — confirm callers guarantee the length bound.
    """
    texts, bboxes, tokens, token_ids = [], [], [], []
    wseq, gseq, mask = [], [], []

    def push(text, box, tok, tok_id, w, g, m):
        # append one position to every parallel list
        texts.append(text)
        bboxes.append(box)
        tokens.append(tok)
        token_ids.append(tok_id)
        wseq.append(w)
        gseq.append(g)
        mask.append(m)

    # leading CLS frame token
    push(token_cfg.cls_token, token_cfg.cls_token_box,
         token_cfg.cls_token, token_cfg.cls_token_id,
         token_cfg.ignore_index_token_id, token_cfg.ignore_index_token_id, 1)

    # real token positions
    for item in tokenize_duplicate_dict(objects, tokenizer):
        push(item['text'], item['bbox'], item['token'], item['token_id'],
             item['wseq'], item['gseq'], 1)

    # trailing SEP frame token
    push(token_cfg.sep_token, token_cfg.sep_token_box,
         token_cfg.sep_token, token_cfg.sep_token_id,
         token_cfg.ignore_index_token_id, token_cfg.ignore_index_token_id, 1)

    # pad (mask 0) up to the fixed sequence length
    for _ in range(max_seq_length - len(texts)):
        push(token_cfg.pad_token, token_cfg.pad_token_box,
             token_cfg.pad_token, token_cfg.pad_token_id,
             token_cfg.ignore_index_token_id, token_cfg.ignore_index_token_id, 0)

    return {
        'words': texts,
        'bboxes': bboxes,
        'tokens': tokens,
        'token_ids': token_ids,
        'mask': mask,
        'gseq': gseq,
        'wseq': wseq,
    }
def tokenize_duplicate_dict(objects, tokenizer):
    """Expand word-level annotation dicts into token-level dicts.

    A word that splits into several sub-tokens is duplicated once per
    sub-token ('fraction' True, 'wseq' counting sub-tokens).  A word that
    keeps a single token — or tokenizes to nothing, in which case the
    literal '[UNK]' token is substituted — is annotated in place.
    'gseq' numbers the original words.
    """
    expanded = []
    for group_idx, obj in enumerate(objects):
        pieces = tokenizer.tokenize(obj['text'])
        if len(pieces) > 1:
            # multi-token word: one copy of the annotation per sub-token
            for word_idx, piece in enumerate(pieces):
                frag = obj.copy()
                frag['token'] = piece
                frag['token_id'] = tokenizer.convert_tokens_to_ids(piece)
                frag['fraction'] = True
                frag['wseq'] = word_idx
                frag['gseq'] = group_idx
                expanded.append(frag)
        else:
            # zero or one token: annotate the original dict in place
            tok = pieces[0] if pieces else '[UNK]'
            obj['token'] = tok
            obj['token_id'] = tokenizer.convert_tokens_to_ids(tok)
            obj['fraction'] = False
            obj['wseq'] = 0
            obj['gseq'] = group_idx
            expanded.append(obj)
    return expanded
def normalized_prediction(outputs, tokenizer):
    """Map model outputs to per-example lists of label strings.

    Indices missing from label_cfg.idx_to_label fall back to the outside
    label "O".  The *tokenizer* argument is unused here but kept for
    interface compatibility with callers.
    """
    index_rows = prediction_index(outputs)
    return [
        [label_cfg.idx_to_label.get(idx, "O") for idx in row.tolist()]
        for row in index_rows
    ]
def prediction_index(outputs):
    """Return argmax class indices from a model output tuple.

    When the tuple holds more than one element, outputs[1] is taken to be
    the logits (outputs[0] presumably being a loss — confirm against the
    model); otherwise outputs[0] is used.  Reduces over dim=2.
    """
    logits = outputs[1] if len(outputs) > 1 else outputs[0]
    return torch.argmax(logits, dim=2)
def clean_prediction_data(data_dict, tokenizer):
    """Drop CLS/SEP/PAD framing positions from parallel prediction lists.

    Keeps every position whose word is not one of the tokenizer's special
    framing tokens, preserving alignment across all six lists.
    """
    special = {tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token}
    keys = ('words', 'bboxes', 'tokens', 'labels', 'gseq', 'wseq')
    cleaned = {key: [] for key in keys}
    rows = zip(data_dict['words'], data_dict['bboxes'], data_dict['tokens'],
               data_dict['labels'], data_dict['gseq'], data_dict['wseq'])
    for row in rows:
        # row[0] is the word; framing tokens are filtered out entirely
        if row[0] in special:
            continue
        for key, value in zip(keys, row):
            cleaned[key].append(value)
    return cleaned
def sort_multidim(data):
    """Sort (word, bilou, bbox) triples by bbox y position, then BILOU tag."""
    def position_key(entry):
        bbox = entry[2]
        # primary: vertical position; secondary: BILOU tag string
        return (bbox[1], entry[1])
    return sorted(data, key=position_key)
def word_taken(data):
    """Join the word field of (word, bilou, bbox) triples with single spaces.

    Empty word entries are skipped.  Fixes the original implementation,
    which appended a trailing separator whenever the final entries held
    empty strings, and built the result with quadratic string ``+=``.
    """
    return " ".join(entry[0] for entry in data if entry[0])
def rebuild_prediction_data(data):
    """Regroup token-level predictions back into per-field strings.

    Groups the parallel lists by 'gseq' (one group per original word),
    keeps only words whose label has the ``<BILOU>-VAL_<label>`` shape,
    buckets them by the field name from label_cfg, and flattens each
    bucket into a position-sorted, space-joined string.
    """
    df = pd.DataFrame(data)
    # One row per original word: 'min' picks the (identical) duplicated
    # word text, 'last' keeps the final sub-token's bbox, 'sum'
    # concatenates sub-token strings, 'first' the leading sub-token label.
    dfg = df.groupby('gseq').aggregate({
        'words': 'min',
        'bboxes': 'last',
        'tokens': 'sum',
        'labels': 'first'
    })
    # one (initially empty) bucket per known output field
    base_data = dict((k, []) for k, v in label_cfg.base_label_name.items())
    for idx in range(len(dfg)):
        labels = dfg.iloc[idx]['labels']
        bbox = dfg.iloc[idx]['bboxes']
        if not labels == "O":
            # labels look like '<BILOU>-<TYPE>_<LABEL>', e.g. 'B-VAL_total'
            bil, val = labels.split("-")
            val_type, val_label = val.split("_")
            if val_type == "VAL":
                word = dfg.iloc[idx]['words']
                key = label_cfg.label_to_name[val_label]
                base_data[key].append((word, bil, bbox))
    for k, v in base_data.items():
        # order spatially, then flatten the bucket to a single string
        sorted_data = sort_multidim(v)
        base_data[k] = word_taken(sorted_data)
    return base_data
import pandas as pd
from laylm.trainer import metrics
from laylm.config import label as label_cfg
from laylm.config import token as token_cfg
def annoset_inputs(data_dict, device):
input_ids = torch.tensor(data_dict['token_ids'], dtype=torch.long)
mask = torch.tensor(data_dict['mask'], dtype=torch.long)
bbox = torch.tensor(data_dict['bboxes'], dtype=torch.long)
input_data = {
'input_ids': input_ids.unsqueeze(dim=0).to(device),
'attention_mask': mask.unsqueeze(dim=0).to(device),
'bbox': bbox.unsqueeze(dim=0).to(device)
}
return input_data
def annoset_transform(objects, tokenizer, max_seq_length = 512):
data_anno = tokenize_duplicate_dict(objects, tokenizer)
texts, bboxes, tokens, token_ids, wseq, gseq, mask = [],[],[],[],[],[],[]
texts.append(token_cfg.cls_token)
bboxes.append(token_cfg.cls_token_box)
tokens.append(token_cfg.cls_token)
token_ids.append(token_cfg.cls_token_id)
wseq.append(token_cfg.ignore_index_token_id)
gseq.append(token_cfg.ignore_index_token_id)
mask.append(1)
for obj in data_anno:
texts.append(obj['text'])
bboxes.append(obj['bbox'])
tokens.append(obj['token'])
token_ids.append(obj['token_id'])
wseq.append(obj['wseq'])
gseq.append(obj['gseq'])
mask.append(1)
texts.append(token_cfg.sep_token)
bboxes.append(token_cfg.sep_token_box)
tokens.append(token_cfg.sep_token)
token_ids.append(token_cfg.sep_token_id)
wseq.append(token_cfg.ignore_index_token_id)
gseq.append(token_cfg.ignore_index_token_id)
mask.append(1)
pad_length = max_seq_length - len(texts)
for p in range(pad_length):
texts.append(token_cfg.pad_token)
bboxes.append(token_cfg.pad_token_box)
tokens.append(token_cfg.pad_token)
token_ids.append(token_cfg.pad_token_id)
wseq.append(token_cfg.ignore_index_token_id)
gseq.append(token_cfg.ignore_index_token_id)
mask.append(0)
data_dict = {
'words':texts,
'bboxes': bboxes,
'tokens': tokens,
'token_ids': token_ids,
'mask': mask,
'gseq': gseq,
'wseq': wseq
}
return data_dict
def tokenize_duplicate_dict(objects, tokenizer):
new_objects = []
gseq = 0
for idx, obj in enumerate(objects):
curr_text = objects[idx]['text']
token = tokenizer.tokenize(curr_text)
if len(token) > 1:
wseq = 0
for tok in token:
new_obj = objects[idx].copy()
new_obj['token'] = tok
new_obj['token_id'] = tokenizer.convert_tokens_to_ids(tok)
new_obj['fraction'] = True
new_obj['wseq'] = wseq
new_obj['gseq'] = gseq
new_objects.append(new_obj)
wseq+=1
gseq+=1
else:
if len(token)==0:
obj['token'] = '[UNK]'
obj['token_id'] = tokenizer.convert_tokens_to_ids('[UNK]')
else:
obj['token'] = token[0]
obj['token_id'] = tokenizer.convert_tokens_to_ids(token[0])
obj['fraction'] = False
obj['wseq'] = 0
obj['gseq'] = gseq
new_objects.append(obj)
gseq+=1
return new_objects
def normalized_prediction(outputs, tokenizer):
preds = prediction_index(outputs)
bsize = preds.shape[0]
labels = []
for idx in range(bsize):
label_pred = []
for pds in preds[idx].tolist():
lbl = label_cfg.idx_to_label.get(pds, "O")
label_pred.append(lbl)
labels.append(label_pred)
return labels
def prediction_index(outputs):
if len(outputs)>1:
preds = outputs[1]
else:
preds = outputs[0]
preds = torch.argmax(preds, dim=2)
return preds
def clean_prediction_data(data_dict, tokenizer):
words = data_dict['words']
boxes = data_dict['bboxes']
tokens = data_dict['tokens']
labels = data_dict['labels']
gseq = data_dict['gseq']
wseq = data_dict['wseq']
data = {
'words':[],
'bboxes': [],
'tokens': [],
'labels': [],
'gseq': [],
'wseq': [],
}
for (w,b,t,l,gq,wq) in zip(words, boxes, tokens, labels, gseq, wseq):
if not (w==tokenizer.cls_token or
w==tokenizer.sep_token or
w==tokenizer.pad_token):
data['words'].append(w)
data['bboxes'].append(b)
data['tokens'].append(t)
data['labels'].append(l)
data['gseq'].append(gq)
data['wseq'].append(wq)
return data
def sort_multidim(data):
sorter = lambda x: (x[2][1], x[1])
# x[2][1] sort by y position
# x[1] sort by BILOU
return sorted(data, key=sorter)
def word_taken(data):
str_out = ""
for idx in range(len(data)):
w = data[idx][0]
if w!="" and len(w)!=0:
str_out += w
if idx!=len(data)-1:
str_out += " "
return str_out
def rebuild_prediction_data(data):
df = pd.DataFrame(data)
dfg = df.groupby('gseq').aggregate({
'words': 'min',
'bboxes':'last',
'tokens':'sum',
'labels':'first'
})
base_data = dict((k,[]) for k,v in label_cfg.base_label_name.items())
for idx in range(len(dfg)):
labels = dfg.iloc[idx]['labels']
bbox = dfg.iloc[idx]['bboxes']
if not labels=="O":
bil, val = labels.split("-")
val_type, val_label = val.split("_")
if val_type=="VAL":
word = dfg.iloc[idx]['words']
key = label_cfg.label_to_name[val_label]
base_data[key].append((word, bil, bbox))
for k,v in base_data.items():
sorted_data = sort_multidim(v)
base_data[k] = word_taken(sorted_data)
return base_data | 0.19349 | 0.24844 |
import argparse
import os
import sys
from c64img import __version__ as ver
from c64img.hires import HiresConverter
from c64img.multi import MultiConverter
from c64img.path import get_modified_fname
def convert(arguments, converter_class):
    """
    Convert every input picture with *converter_class*.

    Each converter receives the previous converter's chars (so char data
    chains across files).  Returns the number of files whose save failed
    (0 means complete success).
    """
    previous = None
    failures = 0
    for fname in arguments.filename:
        current = converter_class(fname, arguments.errors)
        if previous is not None:
            current.prev_chars = previous.chars
        if arguments.border is not None:
            current.set_border_color(arguments.border)
        # note, that for hires pictures background doesn't make sense,
        # and will be ignored.
        if arguments.background is not None:
            current.set_bg_color(arguments.background)
        current.log.set_verbose(arguments.verbose, arguments.quiet)
        out_name, out_format = resolve_name(arguments, fname)
        if current.save(out_name, out_format) != 0:
            failures += 1
        previous = current
    return failures
def resolve_name(arguments, fname):
    """
    Work out the output filename and format for one input file.

    With --output and several inputs, the output path is treated as a
    directory (created on demand).  --executable forces a '.prg'
    name/format; --raw strips the extension and switches to raw format.
    """
    if not arguments.output:
        filename = get_modified_fname(fname, "prg")
    elif len(arguments.filename) > 1:
        # several inputs: --output must be (or become) a directory
        if not os.path.exists(arguments.output):
            os.mkdir(arguments.output)
        if not os.path.isdir(arguments.output):
            raise IOError("Path `%s' is not directory" % arguments.output)
        filename = os.path.join(arguments.output,
                                get_modified_fname(fname, "prg"))
    else:
        filename = arguments.output

    format_ = arguments.format
    if getattr(arguments, "executable", False):
        format_ = "prg"
        if os.path.splitext(filename)[1] != ".prg":
            filename = get_modified_fname(filename, "prg")
    if getattr(arguments, "raw", False):
        format_ = "raw"
        filename = os.path.splitext(filename)[0]
    return filename, format_
def image2c64():
    """
    Parse options, run the conversion

    Builds the argparse CLI, maps the chosen --format onto a converter
    class, and returns convert()'s exit code (number of failed files).
    """
    # map each --format choice onto its converter implementation
    class_map = {"art-studio-hires": HiresConverter,
                 "hires": HiresConverter,
                 "koala": MultiConverter,
                 "multi": MultiConverter}
    formatter = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=formatter)
    parser.add_argument("-g", "--border", help="set color number for border, "
                        "default: most frequent color", type=int,
                        choices=range(16))
    parser.add_argument("-b", "--background", help="set color number for "
                        "background", type=int, choices=range(16))
    parser.add_argument("-e", "--errors", help="perform the action in case of "
                        "color clashes: save errormap under the same name "
                        "with '_error' suffix, show it, open in grafx2, fix "
                        "it, or don't do anything (the message appear)",
                        default="none", choices=("show", "save", "grafx2",
                                                 "fix", "none"))
    parser.add_argument("-f", "--format", help="format of output file, this "
                        "option is mandatory", choices=class_map.keys(),
                        required=True)
    # -x and -r are mutually exclusive output modes
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-x", "--executable", help="produce C64 executable as"
                       " 'prg' file", action="store_true")
    group.add_argument("-r", "--raw", help="produce raw files with only the "
                       "data. Useful for include in assemblers",
                       action="store_true")
    parser.add_argument("-o", "--output", help="output filename, default: "
                        "same filename as original with appropriate extension"
                        ". If multiple files provided as the input, output "
                        "will be treated as the directory")
    parser.add_argument('filename', nargs="+")
    # -q and -v are mutually exclusive, each repeatable (counted)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-q", "--quiet", help='please, be quiet. Adding more '
                       '"q" will decrease verbosity', action="count",
                       default=0)
    group.add_argument("-v", "--verbose", help='be verbose. Adding more "v" '
                       'will increase verbosity', action="count", default=0)
    parser.add_argument("-V", "--version", action='version',
                        version="%(prog)s v" + ver)
    arguments = parser.parse_args()
    return convert(arguments, class_map[arguments.format])
if __name__ == "__main__":
    # propagate the number of failed conversions as the process exit code
    sys.exit(image2c64())
import os
import sys
from c64img import __version__ as ver
from c64img.hires import HiresConverter
from c64img.multi import MultiConverter
from c64img.path import get_modified_fname
def convert(arguments, converter_class):
"""
Convert pictures
"""
last = conv = None
exit_code = 0
for fname in arguments.filename:
if conv:
last = conv
conv = converter_class(fname, arguments.errors)
if last:
conv.prev_chars = last.chars
if arguments.border is not None:
conv.set_border_color(arguments.border)
# note, that for hires pictures it doesn't make sense, and will be
# ignored.
if arguments.background is not None:
conv.set_bg_color(arguments.background)
conv.log.set_verbose(arguments.verbose, arguments.quiet)
filename, format_ = resolve_name(arguments, fname)
if conv.save(filename, format_) != 0:
exit_code += 1
return exit_code
def resolve_name(arguments, fname):
"""
Return right name and format for an output file.
"""
if arguments.output:
if len(arguments.filename) > 1:
if not os.path.exists(arguments.output):
os.mkdir(arguments.output)
if not os.path.isdir(arguments.output):
raise IOError("Path `%s' is not directory" % arguments.output)
filename = os.path.join(arguments.output,
get_modified_fname(fname, "prg"))
else:
filename = arguments.output
else:
filename = get_modified_fname(fname, "prg")
format_ = arguments.format
if hasattr(arguments, "executable") and arguments.executable:
format_ = "prg"
_, ext = os.path.splitext(filename)
if ext != ".prg":
filename = get_modified_fname(filename, "prg")
if hasattr(arguments, "raw") and arguments.raw:
format_ = "raw"
filename, ext = os.path.splitext(filename)
return filename, format_
def image2c64():
"""
Parse options, run the conversion
"""
class_map = {"art-studio-hires": HiresConverter,
"hires": HiresConverter,
"koala": MultiConverter,
"multi": MultiConverter}
formatter = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=formatter)
parser.add_argument("-g", "--border", help="set color number for border, "
"default: most frequent color", type=int,
choices=range(16))
parser.add_argument("-b", "--background", help="set color number for "
"background", type=int, choices=range(16))
parser.add_argument("-e", "--errors", help="perform the action in case of "
"color clashes: save errormap under the same name "
"with '_error' suffix, show it, open in grafx2, fix "
"it, or don't do anything (the message appear)",
default="none", choices=("show", "save", "grafx2",
"fix", "none"))
parser.add_argument("-f", "--format", help="format of output file, this "
"option is mandatory", choices=class_map.keys(),
required=True)
group = parser.add_mutually_exclusive_group()
group.add_argument("-x", "--executable", help="produce C64 executable as"
" 'prg' file", action="store_true")
group.add_argument("-r", "--raw", help="produce raw files with only the "
"data. Useful for include in assemblers",
action="store_true")
parser.add_argument("-o", "--output", help="output filename, default: "
"same filename as original with appropriate extension"
". If multiple files provided as the input, output "
"will be treated as the directory")
parser.add_argument('filename', nargs="+")
group = parser.add_mutually_exclusive_group()
group.add_argument("-q", "--quiet", help='please, be quiet. Adding more '
'"q" will decrease verbosity', action="count",
default=0)
group.add_argument("-v", "--verbose", help='be verbose. Adding more "v" '
'will increase verbosity', action="count", default=0)
parser.add_argument("-V", "--version", action='version',
version="%(prog)s v" + ver)
arguments = parser.parse_args()
return convert(arguments, class_map[arguments.format])
if __name__ == "__main__":
sys.exit(image2c64()) | 0.400163 | 0.239928 |
# imports
import argparse
from utils import Data
from ann import ANN, k_fold_train
def parse_args():
    '''Parse the command-line arguments for the artificial neural network.

    Returns the populated argparse.Namespace.

    Fixes over the original: help strings now state the actual defaults
    (learning rate 0.1, momentum 0.0, decay 0.0 — the old text claimed
    0.01/0.9/0.01); --testing is labelled optional (it was never
    required); --hidden-units gets the default 3 its help always
    promised (previously None, which produced [None] downstream); and
    --k-fold defaults to 0 (no validation) instead of None, which main()
    could not handle.
    '''
    parser = argparse.ArgumentParser(
        description='Artificial Neural Network for classification'
    )
    parser.add_argument(
        '-a', '--attributes',
        type=str,
        required=True,
        help='path to the attributes files (required)'
    )
    parser.add_argument(
        '-d', '--training',
        type=str,
        required=True,
        help='path to the training data files (required)'
    )
    parser.add_argument(
        '-t', '--testing',
        type=str,
        required=False,
        help='path to the test data files (optional)'
    )
    parser.add_argument(
        '-w', '--weights',
        type=str,
        required=False,
        help='path to save the weights (optional)'
    )
    parser.add_argument(
        '-k', '--k-fold',
        type=int,
        required=False,
        default=0,  # 0 (or 1) means no cross validation
        help='number of folds for k-fold cross validation, '
             'k=0 or k=1 for no validation (default: 0)'
    )
    parser.add_argument(
        '-u', '--hidden-units',
        type=int,
        required=False,
        default=3,  # the help text always advertised this default
        help='number of hidden units (default: 3)'
    )
    parser.add_argument(
        '-e', '--epochs',
        type=int,
        required=False,
        default=10,
        help='number of epochs (default: 10)'
    )
    parser.add_argument(
        '-l', '--learning-rate',
        type=float,
        required=False,
        default=0.1,
        help='learning rate (default: 0.1)',
    )
    parser.add_argument(
        '-m', '--momentum',
        type=float,
        required=False,
        default=0.0,
        help='momentum (default: 0.0)',
    )
    parser.add_argument(
        '-g', '--decay',
        type=float,
        required=False,
        default=0.0,
        help='weight decay gamma (default: 0.0)',
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help='debug mode, prints statements activated (optional)'
    )
    # parse arguments from sys.argv
    args = parser.parse_args()
    return args
def main():
    '''Entry point: build the data manager and network, train, then test.

    Fix over the original: the CLI help documents "k=0 or k=1 for no
    validation", but the code only special-cased k == 0, so k == 1 (or a
    missing value, None) fell into the k-fold branch and misbehaved.
    Both cases now take the plain train/validation path.
    '''
    args = parse_args()  # parse command-line arguments
    print(' args entered', args)
    # create data manager (loads training/validation/testing splits)
    manager = Data(
        args.training,
        args.testing,
        args.attributes,
        args.debug)
    # hyperparameters handed to the network
    h = {
        'k_fold': args.k_fold,
        'learning_rate': args.learning_rate,
        'momentum': args.momentum,
        'epochs': args.epochs,
        'decay': args.decay,
        'hidden_units': [args.hidden_units]  # list of number of nodes in each layer
    }
    print('\nCreating NN with the parameters provided\n')
    # create the artificial neural network
    net = ANN(
        hyperparams=h,
        input_units=manager.input_units,
        output_units=manager.output_units,
        debug=args.debug
    )
    # printing the neural network
    net.print_network()
    print('\nLearning the NN...\n')
    # train the artificial neural network
    if args.k_fold is None or args.k_fold in (0, 1):
        # no cross validation: train on the fixed train/validation split
        net.train(manager.training, manager.validation)
    else:
        # k-fold cross validation over the combined data
        training_data = manager.training + manager.validation
        net = k_fold_train(
            net,
            training_data,
            args.epochs,
            args.k_fold,
            args.debug)
    print('\nTraining complete\n')
    # print learned weights
    print('\nPrinting learned weights\n')
    net.print_weights()
    w_path = args.weights
    # save the weights if a destination was given
    if w_path:
        net.save(w_path)
        print('weights saved to', w_path)
    # test the artificial neural network
    print('\nTesting the NN...\n')
    accuracy = 100 * net.test(manager.testing)
    print('\nTesting complete\n')
    print(f'\nAccuracy: {accuracy:.2f}%\n')
if __name__ == '__main__':
    # run the CLI entry point when executed as a script
    main()
# imports
import argparse
from utils import Data
from ann import ANN, k_fold_train
def parse_args():
'''parse the arguments for artificial neural network'''
parser = argparse.ArgumentParser(
description='Artificial Neural Network for classification'
)
parser.add_argument(
'-a', '--attributes',
type=str,
required=True,
help='path to the attributes files (required)'
)
parser.add_argument(
'-d', '--training',
type=str,
required=True,
help='path to the training data files (required)'
)
parser.add_argument(
'-t', '--testing',
type=str ,
required=False,
help='path to the test data files (required)'
)
parser.add_argument(
'-w', '--weights',
type=str ,
required=False,
help='path to save the weights (optional)'
)
parser.add_argument(
'-k', '--k-fold',
type=int,
required=False,
help='number of folds for k-fold cross validation, k=0 or k=1 for no validation'
)
parser.add_argument(
'-u', '--hidden-units',
type=int,
required=False,
help='number of hidden units (default: 3)'
)
parser.add_argument(
'-e', '--epochs',
type=int,
required=False,
default=10,
help='number of epochs (default: 10)'
)
parser.add_argument(
'-l', '--learning-rate',
type=float,
required=False,
default=0.1,
help='learning rate (default: 0.01)',
)
parser.add_argument(
'-m', '--momentum',
type=float,
required=False,
default=0.0,
help='momentum (default: 0.9)',
)
parser.add_argument(
'-g','--decay',
type=float,
required=False,
default=0.0,
help='weight decay gamma (default: 0.01)',
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help='debug mode, prints statements activated (optional)'
)
# parse arguments
args = parser.parse_args()
return args
def main():
    """Entry point: build, train, evaluate and optionally persist the network."""
    args = parse_args()
    print(' args entered', args)

    # Data manager loads the training/testing sets and the attribute spec.
    manager = Data(
        args.training,
        args.testing,
        args.attributes,
        args.debug)

    # Hyperparameters collected from the command line.
    hyperparams = {
        'k_fold': args.k_fold,
        'learning_rate': args.learning_rate,
        'momentum': args.momentum,
        'epochs': args.epochs,
        'decay': args.decay,
        'hidden_units': [args.hidden_units],  # node counts, one per hidden layer
    }

    print('\nCreating NN with the parameters provided\n')
    net = ANN(
        hyperparams=hyperparams,
        input_units=manager.input_units,
        output_units=manager.output_units,
        debug=args.debug,
    )
    net.print_network()

    print('\nLearning the NN...\n')
    if args.k_fold == 0:
        # Plain training against the held-out validation split.
        net.train(manager.training, manager.validation)
    else:
        # Cross-validate over the combined training + validation pool.
        pool = manager.training + manager.validation
        net = k_fold_train(
            net,
            pool,
            args.epochs,
            args.k_fold,
            args.debug)
    print('\nTraining complete\n')

    print('\nPrinting learned weights\n')
    net.print_weights()
    if args.weights:
        # Persist the learned weights when a path was supplied.
        net.save(args.weights)
        print('weights saved to', args.weights)

    print('\nTesting the NN...\n')
    accuracy = 100 * net.test(manager.testing)
    print('\nTesting complete\n')
    print(f'\nAccuracy: {accuracy:.2f}%\n')
if __name__ == '__main__':
main() | 0.450118 | 0.274206 |
from functools import partial
from requests_futures.sessions import FuturesSession
from requests.packages.urllib3.util import Retry
from requests.adapters import HTTPAdapter
from django.conf import settings
from wristband.common.utils import extract_version_from_slug
from wristband.providers import providers_config
from wristband.providers.generics import JsonDataProvider
import logging
logger = logging.getLogger('wristband.apps.providers')
CONCURRENT_JOBS_LIMIT = 10
REQUEST_TIMEOUT = 10
REQUEST_RETRIES = 10
class GenericDocktorDataProvider(JsonDataProvider):
    """Fetch application details from every configured docktor instance.

    Requests run concurrently through a FuturesSession; transient 502
    responses are retried with exponential backoff.
    """

    # BUG FIX: Retry must be passed as ``max_retries`` -- HTTPAdapter's first
    # positional parameter is ``pool_connections``, so the original call
    # silently disabled retries altogether.
    __requests_http_adapter = HTTPAdapter(
        max_retries=Retry(total=REQUEST_RETRIES, status_forcelist=[502], backoff_factor=0.5))

    def _get_raw_data(self):
        """Return a list of app-info dicts for every stage/zone in the config.

        Raises whatever the underlying requests raise (HTTP errors, JSON
        decode errors); every failure is logged with its stage/zone first.
        """
        docktor_config = providers_config.providers['docktor']
        apps = []
        session = FuturesSession(max_workers=CONCURRENT_JOBS_LIMIT)
        session.mount('https://', self.__requests_http_adapter)
        session.mount('http://', self.__requests_http_adapter)
        for stage in docktor_config:
            for zone in docktor_config[stage]:
                apps_uri = '{uri}/apps/'.format(uri=docktor_config[stage][zone]['uri'])
                try:
                    r = session.get(apps_uri, timeout=REQUEST_TIMEOUT).result()
                    r.raise_for_status()
                    apps_list = r.json()
                except ValueError:
                    # r.json() failed: body was not JSON.
                    logger.error("Non json response {} from {}-{} docktor".format(r.content, stage, zone))
                    raise
                except Exception:
                    logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                    raise
                # Kick off all detail requests concurrently, then gather results.
                future_apps_details = [
                    session.get('{apps_uri}{app}'.format(apps_uri=apps_uri, app=app),
                                timeout=REQUEST_TIMEOUT)
                    for app in apps_list
                ]
                try:
                    apps_details = [a.result() for a in future_apps_details]
                except Exception:
                    logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                    raise
                partial_get_app_info = partial(self.get_app_info, stage, zone)
                apps.extend(map(partial_get_app_info, apps_details))
        return apps

    @staticmethod
    def get_app_info(stage, zone, response):
        """Convert one docktor app-detail response into a plain dict.

        Propagates HTTP errors from raise_for_status(); raises ValueError
        (logged) when the body is not JSON.
        """
        response.raise_for_status()
        try:
            # BUG FIX: the JSON decode is what can raise ValueError, so it
            # belongs inside the try; raise_for_status() never raises it.
            data = response.json()
        except ValueError:
            logger.error("Non json response {} from {}-{} docktor".format(response.content, stage, zone))
            raise
        log_url = settings.KIBANA_URL.format(stage=stage, security_zone=zone) if zone != 'right' else None
        return {
            'name': data['app'],
            'stage': stage,
            'security_zone': zone,
            'version': extract_version_from_slug(data['slug_uri']),
            'state': data['state'],
            'log_url': log_url
        }
class NestedDocktorAppDataProvider(GenericDocktorDataProvider):
    def _get_list_data(self):
        """Flatten raw app data into per-stage entries, sorted by app name."""
        entries = [
            {
                'name': app['name'],
                'version': app['version'],
                'stage': app['stage'],
                'state': app['state'],
                'log_url': app['log_url'],
            }
            for app in self.raw_data
        ]
        entries.sort(key=lambda entry: entry['name'])
        return entries

    def get_filtered_list_data(self, pk, domain_pk):
        """Return list entries whose ``domain_pk`` field equals ``pk``, sorted by name."""
        matching = [entry for entry in self.list_data if entry[domain_pk] == pk]
        return sorted(matching, key=lambda entry: entry['name'])

    def to_models(self):
        """Project raw data down to the fields needed to build model objects."""
        return [
            {'name': app['name'],
             'stage': app['stage'],
             'security_zone': app['security_zone']}
            for app in self.raw_data
        ]
class DocktorAppDataProvider(GenericDocktorDataProvider):
    def _get_list_data(self):
        """Group flat per-stage app records into one entry per app.

        Docktor output (self.raw_data), one record per app+stage:
            [
                {"name": "a-b-test", "stage": "qa", "version": "1.7.7",
                 "state": "healthy", "log_url": None},
                {"name": "a-b-test", "stage": "staging", "version": "1.7.2",
                 "state": "unhealthy", "log_url": "https://..."}
            ]
        Expected output, one entry per app with its stages nested:
            [
                {
                    "name": "a-b-test",
                    "stages": [
                        {"name": "qa", "version": "1.7.7",
                         "state": "healthy", "log_url": None},
                        {"name": "staging", "version": "1.7.2",
                         "state": "unhealthy", "log_url": "https://..."}
                    ]
                },
                ...
            ]
        sorted by app name.
        """
        data = []
        apps_indexes = {}  # app name -> index of its entry in ``data``
        for app in self.raw_data:
            app_name = app['name']
            # Build the per-stage record once; both branches below use it.
            stage_entry = {
                'name': app['stage'],
                'version': app['version'],
                'state': app['state'],
                'log_url': app['log_url'],
            }
            if app_name in apps_indexes:
                # Already seen: append this stage to the existing entry.
                data[apps_indexes[app_name]]['stages'].append(stage_entry)
            else:
                # First sighting: create the entry and remember its index.
                apps_indexes[app_name] = len(data)
                data.append({'name': app_name, 'stages': [stage_entry]})
        return sorted(data, key=lambda x: x['name'])
from requests_futures.sessions import FuturesSession
from requests.packages.urllib3.util import Retry
from requests.adapters import HTTPAdapter
from django.conf import settings
from wristband.common.utils import extract_version_from_slug
from wristband.providers import providers_config
from wristband.providers.generics import JsonDataProvider
import logging
logger = logging.getLogger('wristband.apps.providers')
CONCURRENT_JOBS_LIMIT = 10
REQUEST_TIMEOUT = 10
REQUEST_RETRIES = 10
class GenericDocktorDataProvider(JsonDataProvider):
    """Fetch application details from every configured docktor instance.

    Requests run concurrently through a FuturesSession; transient 502
    responses are retried with exponential backoff.
    """

    # BUG FIX: Retry must be passed as ``max_retries`` -- HTTPAdapter's first
    # positional parameter is ``pool_connections``, so the original call
    # silently disabled retries altogether.
    __requests_http_adapter = HTTPAdapter(
        max_retries=Retry(total=REQUEST_RETRIES, status_forcelist=[502], backoff_factor=0.5))

    def _get_raw_data(self):
        """Return a list of app-info dicts for every stage/zone in the config.

        Raises whatever the underlying requests raise (HTTP errors, JSON
        decode errors); every failure is logged with its stage/zone first.
        """
        docktor_config = providers_config.providers['docktor']
        apps = []
        session = FuturesSession(max_workers=CONCURRENT_JOBS_LIMIT)
        session.mount('https://', self.__requests_http_adapter)
        session.mount('http://', self.__requests_http_adapter)
        for stage in docktor_config:
            for zone in docktor_config[stage]:
                apps_uri = '{uri}/apps/'.format(uri=docktor_config[stage][zone]['uri'])
                try:
                    r = session.get(apps_uri, timeout=REQUEST_TIMEOUT).result()
                    r.raise_for_status()
                    apps_list = r.json()
                except ValueError:
                    # r.json() failed: body was not JSON.
                    logger.error("Non json response {} from {}-{} docktor".format(r.content, stage, zone))
                    raise
                except Exception:
                    logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                    raise
                # Kick off all detail requests concurrently, then gather results.
                future_apps_details = [
                    session.get('{apps_uri}{app}'.format(apps_uri=apps_uri, app=app),
                                timeout=REQUEST_TIMEOUT)
                    for app in apps_list
                ]
                try:
                    apps_details = [a.result() for a in future_apps_details]
                except Exception:
                    logger.error("Exception raised on {}-{} docktor".format(stage, zone))
                    raise
                partial_get_app_info = partial(self.get_app_info, stage, zone)
                apps.extend(map(partial_get_app_info, apps_details))
        return apps

    @staticmethod
    def get_app_info(stage, zone, response):
        """Convert one docktor app-detail response into a plain dict.

        Propagates HTTP errors from raise_for_status(); raises ValueError
        (logged) when the body is not JSON.
        """
        response.raise_for_status()
        try:
            # BUG FIX: the JSON decode is what can raise ValueError, so it
            # belongs inside the try; raise_for_status() never raises it.
            data = response.json()
        except ValueError:
            logger.error("Non json response {} from {}-{} docktor".format(response.content, stage, zone))
            raise
        log_url = settings.KIBANA_URL.format(stage=stage, security_zone=zone) if zone != 'right' else None
        return {
            'name': data['app'],
            'stage': stage,
            'security_zone': zone,
            'version': extract_version_from_slug(data['slug_uri']),
            'state': data['state'],
            'log_url': log_url
        }
class NestedDocktorAppDataProvider(GenericDocktorDataProvider):
    def _get_list_data(self):
        """Flatten raw app data into per-stage entries, sorted by app name."""
        entries = [
            {
                'name': app['name'],
                'version': app['version'],
                'stage': app['stage'],
                'state': app['state'],
                'log_url': app['log_url'],
            }
            for app in self.raw_data
        ]
        entries.sort(key=lambda entry: entry['name'])
        return entries

    def get_filtered_list_data(self, pk, domain_pk):
        """Return list entries whose ``domain_pk`` field equals ``pk``, sorted by name."""
        matching = [entry for entry in self.list_data if entry[domain_pk] == pk]
        return sorted(matching, key=lambda entry: entry['name'])

    def to_models(self):
        """Project raw data down to the fields needed to build model objects."""
        return [
            {'name': app['name'],
             'stage': app['stage'],
             'security_zone': app['security_zone']}
            for app in self.raw_data
        ]
class DocktorAppDataProvider(GenericDocktorDataProvider):
    def _get_list_data(self):
        """Group flat per-stage app records into one entry per app.

        Docktor output (self.raw_data), one record per app+stage:
            [
                {"name": "a-b-test", "stage": "qa", "version": "1.7.7",
                 "state": "healthy", "log_url": None},
                {"name": "a-b-test", "stage": "staging", "version": "1.7.2",
                 "state": "unhealthy", "log_url": "https://..."}
            ]
        Expected output, one entry per app with its stages nested:
            [
                {
                    "name": "a-b-test",
                    "stages": [
                        {"name": "qa", "version": "1.7.7",
                         "state": "healthy", "log_url": None},
                        {"name": "staging", "version": "1.7.2",
                         "state": "unhealthy", "log_url": "https://..."}
                    ]
                },
                ...
            ]
        sorted by app name.
        """
        data = []
        apps_indexes = {}  # app name -> index of its entry in ``data``
        for app in self.raw_data:
            app_name = app['name']
            # Build the per-stage record once; both branches below use it.
            stage_entry = {
                'name': app['stage'],
                'version': app['version'],
                'state': app['state'],
                'log_url': app['log_url'],
            }
            if app_name in apps_indexes:
                # Already seen: append this stage to the existing entry.
                data[apps_indexes[app_name]]['stages'].append(stage_entry)
            else:
                # First sighting: create the entry and remember its index.
                apps_indexes[app_name] = len(data)
                data.append({'name': app_name, 'stages': [stage_entry]})
        return sorted(data, key=lambda x: x['name'])
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from importlib import import_module
from typing import Any, Iterable, List, Optional, Set
from faust_avro.types import float32, int32
__all__ = [
# Types
"AvroSchemaT",
"VisitedT",
# Classes
"AvroRecord",
"AvroEnum",
"AvroArray",
"AvroMap",
"AvroFixed",
"AvroUnion",
"AvroNested",
"AvroField",
"NamedSchema",
"Primitive",
"Schema",
# Constants
"PRIMITIVES",
]
MISSING = object()
# https://github.com/python/mypy/issues/7069
# AvroSchemaT = Union[str, List["AvroSchemaT"], Dict[str, "AvroSchemaT"]]
AvroSchemaT = Any
VisitedT = Set[str]
@dataclass  # type: ignore
# https://github.com/python/mypy/issues/5374
class Schema(ABC):
    """Abstract base for all intermediate avro schema nodes."""

    def _add_fields(self, *fields, **schema) -> AvroSchemaT:
        """Copy the named attributes of ``self`` into ``schema``.

        Falsy attributes (None, "", empty list) are skipped so optional avro
        keys only appear when set; the MISSING sentinel is filtered from both
        the attributes and any explicitly passed keyword values.
        """
        for f in fields:
            value = getattr(self, f)
            # BUG FIX: MISSING is an identity sentinel -- compare with
            # ``is not`` so a value with a permissive __eq__ can't match it.
            if value and value is not MISSING:
                schema[f] = value
        return {k: v for k, v in schema.items() if v is not MISSING}

    @staticmethod
    def _import_class(path: str) -> type:
        """Extract a single class/object from within a module."""
        try:
            module, name = path.rsplit(".", 1)
            return getattr(import_module(module), name)
        except Exception as e:
            raise ImportError(f"{path} not found.") from e

    @abstractmethod
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        """The implementation of intermediate->avro."""
        # VisitedT is used to prevent infinite recursion. The first time a
        # schema is getting dumped by _to_avro, it should be dumped in full
        # and then visited should be updated to include that schema by name,
        # so that if it is seen again it is dumped as a named type.

    def to_avro(self) -> AvroSchemaT:
        """Return an avro str/list/dict schema for this intermediate schema."""
        visited: VisitedT = set()
        return self._to_avro(visited)
@dataclass
class LogicalType(Schema):
    """A generic LogicalType wrapping a normal avro type."""
    schema: Schema
    logical_type: str
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        """Serialize the wrapped schema and tag it with our logicalType."""
        wrapped = self.schema._to_avro(visited)
        if isinstance(wrapped, str):
            # Primitives come back as bare strings; give the logicalType
            # annotation a dict to live in.
            return {"type": wrapped, "logicalType": self.logical_type}
        wrapped["logicalType"] = self.logical_type
        return wrapped
@dataclass
class DecimalLogicalType(LogicalType):
    """A LogicalType carrying avro decimal ``precision``/``scale`` attributes."""
    precision: int
    scale: Optional[int] = None
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        result = super()._to_avro(visited)
        result["precision"] = self.precision
        # ``scale`` is optional in the avro spec; omit the key when unset.
        if self.scale is None:
            return result
        result["scale"] = self.scale
        return result
@dataclass
class Primitive(Schema):
    """Primitive avro types: https://avro.apache.org/docs/current/spec.html#schema_primitive"""
    # Avro type name, e.g. "int" or "string".
    name: str
    python_type: Optional[
        type
    ] # Optional allows None, which is "weird" in python typing
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        # Primitives serialize as their bare avro name string.
        return self.name
# One shared instance per avro primitive, pairing the avro type name with
# the python type used for it; int32/float32 (from faust_avro.types) mark
# 32-bit widths that plain int/float can't express.
NULL = Primitive("null", type(None))
BOOL = Primitive("boolean", bool)
INT = Primitive("int", int32)
LONG = Primitive("long", int)
FLOAT = Primitive("float", float32)
DOUBLE = Primitive("double", float)
BYTES = Primitive("bytes", bytes)
STRING = Primitive("string", str)
PRIMITIVES: List[Primitive] = [NULL, BOOL, INT, LONG, FLOAT, DOUBLE, BYTES, STRING]
@dataclass
class NamedSchema(Schema):
    """Used for the named avro schema types."""
    # Schema name; __post_init__ also treats it as a dotted import path to
    # locate a backing python class.
    name: str
    namespace: Optional[str] = ""
    aliases: Iterable[str] = field(default_factory=list)
    # compare=False: two schemas with the same avro shape compare equal even
    # if only one resolved to a python class.
    python_type: Optional[type] = field(default=None, compare=False)
    def __post_init__(self) -> None:
        # Best-effort: not every schema name maps to an importable class.
        try:
            self.python_type = self._import_class(self.name)
        except ImportError:
            pass
    def _to_avro(
        self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
    ) -> AvroSchemaT:
        # First encounter: emit the full definition and remember the name.
        # Later encounters: emit just the name (avro named-type reference).
        if self.name in visited:
            return self.name
        else:
            visited.add(self.name)
            return dict(
                **extras, **self._add_fields("name", "namespace", "aliases", *fields)
            )
class Ordering(Enum):
    """How a field within a record impacts sorting multiple records"""
    # Values are the strings avro expects in a field's "order" attribute.
    ASCENDING = "ascending"
    DESCENDING = "descending"
    IGNORE = "ignore"
@dataclass
class AvroField(Schema):
    """A single field within an avro Record schema"""
    name: str
    # The field's schema node; Any because of the recursive-alias mypy issue.
    type: Any
    doc: Optional[str] = None
    aliases: Iterable[str] = field(default_factory=list)
    # Can't use None, because that's a valid default
    default: Optional[Any] = MISSING
    # Must be None so we don't add this to the schema if unspecified
    order: Optional[Ordering] = None
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        # _add_fields drops falsy attributes and the MISSING sentinel, so
        # "default" only appears in the schema when one was actually given.
        return self._add_fields(
            "name",
            "doc",
            "order",
            "aliases",
            type=self.type._to_avro(visited),
            default=self.default,
        )
@dataclass
class AvroRecord(NamedSchema):
    """https://avro.apache.org/docs/current/spec.html#schema_record"""
    doc: Optional[str] = None
    fields: Iterable[AvroField] = field(default_factory=list)
    # NOTE(review): presumably the id assigned by a schema registry -- it is
    # never read in this module; confirm against callers.
    schema_id: Optional[int] = None
    def _to_avro(
        self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
    ) -> AvroSchemaT:
        # Delay trying to flatten the fields, because the super() call here
        # adds self to visited, so that when we later flatten fields, any
        # references to this record itself will come out as a named type.
        result = super()._to_avro(visited, "doc", *fields, type="record", **extras)
        # A bare string means this record was already visited (named reference).
        if not isinstance(result, str):
            result["fields"] = [field._to_avro(visited) for field in self.fields]
        return result
@dataclass
class AvroEnum(NamedSchema):
    """https://avro.apache.org/docs/current/spec.html#Enums"""
    doc: Optional[str] = None
    symbols: Iterable[str] = field(default_factory=list)
    default: Optional[str] = None
    def __post_init__(self) -> None:
        super().__post_init__()
        # If the name didn't resolve to an importable class, synthesize a
        # python Enum from the symbols via the functional API.
        if self.python_type is None:
            self.python_type = Enum(self.name, " ".join(self.symbols))
    def _to_avro(
        self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
    ) -> AvroSchemaT:
        # list(...) because symbols may be any iterable but avro wants a JSON array.
        return super()._to_avro(
            visited,
            "doc",
            "default",
            *fields,
            type="enum",
            symbols=list(self.symbols),
            **extras,
        )
@dataclass
class AvroArray(Schema):
    """https://avro.apache.org/docs/current/spec.html#Arrays"""
    items: Schema
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        # Arrays serialize as {"type": "array", "items": <item schema>}.
        return {"type": "array", "items": self.items._to_avro(visited)}
@dataclass
class AvroMap(Schema):
    """https://avro.apache.org/docs/current/spec.html#Maps"""
    values: Schema
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        # Maps serialize as {"type": "map", "values": <value schema>}.
        return {"type": "map", "values": self.values._to_avro(visited)}
@dataclass
class AvroFixed(NamedSchema):
    """https://avro.apache.org/docs/current/spec.html#Fixed"""
    # Byte width of the fixed type; 0 only because dataclass fields after
    # defaulted base-class fields must themselves have a default.
    size: int = 0
    def _to_avro(
        self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
    ) -> AvroSchemaT:
        return super()._to_avro(
            visited, *fields, type="fixed", size=self.size, **extras
        )
@dataclass
class AvroUnion(Schema):
    """https://avro.apache.org/docs/current/spec.html#Unions"""
    schemas: Iterable[Schema]
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        # Unions serialize as a plain JSON list of the member schemas.
        return [schema._to_avro(visited) for schema in self.schemas]
@dataclass
class AvroNested(Schema):
    """
    An arbitrary nesting, where the schema used the second form of schema declaration from
    https://avro.apache.org/docs/current/spec.html#schemas to "nest" a schema with an extra dict.
    Example:
        {"type": {"type": "str"}}
    As opposed to the simpler:
        {"type": "str"}
    Or even just:
        "str"
    """
    schema: Schema
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        # Reproduce the extra wrapping dict around the inner schema.
        return {"type": self.schema._to_avro(visited)}
from dataclasses import dataclass, field
from enum import Enum
from importlib import import_module
from typing import Any, Iterable, List, Optional, Set
from faust_avro.types import float32, int32
__all__ = [
# Types
"AvroSchemaT",
"VisitedT",
# Classes
"AvroRecord",
"AvroEnum",
"AvroArray",
"AvroMap",
"AvroFixed",
"AvroUnion",
"AvroNested",
"AvroField",
"NamedSchema",
"Primitive",
"Schema",
# Constants
"PRIMITIVES",
]
MISSING = object()
# https://github.com/python/mypy/issues/7069
# AvroSchemaT = Union[str, List["AvroSchemaT"], Dict[str, "AvroSchemaT"]]
AvroSchemaT = Any
VisitedT = Set[str]
@dataclass  # type: ignore
# https://github.com/python/mypy/issues/5374
class Schema(ABC):
    """Abstract base for all intermediate avro schema nodes."""

    def _add_fields(self, *fields, **schema) -> AvroSchemaT:
        """Copy the named attributes of ``self`` into ``schema``.

        Falsy attributes (None, "", empty list) are skipped so optional avro
        keys only appear when set; the MISSING sentinel is filtered from both
        the attributes and any explicitly passed keyword values.
        """
        for f in fields:
            value = getattr(self, f)
            # BUG FIX: MISSING is an identity sentinel -- compare with
            # ``is not`` so a value with a permissive __eq__ can't match it.
            if value and value is not MISSING:
                schema[f] = value
        return {k: v for k, v in schema.items() if v is not MISSING}

    @staticmethod
    def _import_class(path: str) -> type:
        """Extract a single class/object from within a module."""
        try:
            module, name = path.rsplit(".", 1)
            return getattr(import_module(module), name)
        except Exception as e:
            raise ImportError(f"{path} not found.") from e

    @abstractmethod
    def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
        """The implementation of intermediate->avro."""
        # VisitedT is used to prevent infinite recursion. The first time a
        # schema is getting dumped by _to_avro, it should be dumped in full
        # and then visited should be updated to include that schema by name,
        # so that if it is seen again it is dumped as a named type.

    def to_avro(self) -> AvroSchemaT:
        """Return an avro str/list/dict schema for this intermediate schema."""
        visited: VisitedT = set()
        return self._to_avro(visited)
@dataclass
class LogicalType(Schema):
"""A generic LogicalType wrapping a normal avro type."""
schema: Schema
logical_type: str
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
schema = self.schema._to_avro(visited)
if isinstance(schema, str):
# Primitives return bare strings, so turn those into a dict
schema = dict(type=schema)
schema["logicalType"] = self.logical_type
return schema
@dataclass
class DecimalLogicalType(LogicalType):
"""A LogicalType that supports the decimal precision and scale arguments."""
precision: int
scale: Optional[int] = None
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
schema = super()._to_avro(visited)
schema["precision"] = self.precision
if self.scale is not None:
schema["scale"] = self.scale
return schema
@dataclass
class Primitive(Schema):
"""Primitive avro types: https://avro.apache.org/docs/current/spec.html#schema_primitive"""
name: str
python_type: Optional[
type
] # Optional allows None, which is "weird" in python typing
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
return self.name
NULL = Primitive("null", type(None))
BOOL = Primitive("boolean", bool)
INT = Primitive("int", int32)
LONG = Primitive("long", int)
FLOAT = Primitive("float", float32)
DOUBLE = Primitive("double", float)
BYTES = Primitive("bytes", bytes)
STRING = Primitive("string", str)
PRIMITIVES: List[Primitive] = [NULL, BOOL, INT, LONG, FLOAT, DOUBLE, BYTES, STRING]
@dataclass
class NamedSchema(Schema):
"""Used for the named avro schema types."""
name: str
namespace: Optional[str] = ""
aliases: Iterable[str] = field(default_factory=list)
python_type: Optional[type] = field(default=None, compare=False)
def __post_init__(self) -> None:
try:
self.python_type = self._import_class(self.name)
except ImportError:
pass
def _to_avro(
self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
) -> AvroSchemaT:
if self.name in visited:
return self.name
else:
visited.add(self.name)
return dict(
**extras, **self._add_fields("name", "namespace", "aliases", *fields)
)
class Ordering(Enum):
"""How a field within a record impacts sorting multiple records"""
ASCENDING = "ascending"
DESCENDING = "descending"
IGNORE = "ignore"
@dataclass
class AvroField(Schema):
"""A single field within an avro Record schema"""
name: str
type: Any
doc: Optional[str] = None
aliases: Iterable[str] = field(default_factory=list)
# Can't use None, because that's a valid default
default: Optional[Any] = MISSING
# Must be None so we don't add this to the schema if unspecified
order: Optional[Ordering] = None
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
return self._add_fields(
"name",
"doc",
"order",
"aliases",
type=self.type._to_avro(visited),
default=self.default,
)
@dataclass
class AvroRecord(NamedSchema):
"""https://avro.apache.org/docs/current/spec.html#schema_record"""
doc: Optional[str] = None
fields: Iterable[AvroField] = field(default_factory=list)
schema_id: Optional[int] = None
def _to_avro(
self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
) -> AvroSchemaT:
# Delay trying to flatten the fields, because the super() call here
# adds self to visited, so that when we later flatten fields, any
# references to this record itself will come out as a named type.
result = super()._to_avro(visited, "doc", *fields, type="record", **extras)
if not isinstance(result, str):
result["fields"] = [field._to_avro(visited) for field in self.fields]
return result
@dataclass
class AvroEnum(NamedSchema):
"""https://avro.apache.org/docs/current/spec.html#Enums"""
doc: Optional[str] = None
symbols: Iterable[str] = field(default_factory=list)
default: Optional[str] = None
def __post_init__(self) -> None:
super().__post_init__()
if self.python_type is None:
self.python_type = Enum(self.name, " ".join(self.symbols))
def _to_avro(
self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
) -> AvroSchemaT:
return super()._to_avro(
visited,
"doc",
"default",
*fields,
type="enum",
symbols=list(self.symbols),
**extras,
)
@dataclass
class AvroArray(Schema):
"""https://avro.apache.org/docs/current/spec.html#Arrays"""
items: Schema
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
return dict(type="array", items=self.items._to_avro(visited))
@dataclass
class AvroMap(Schema):
"""https://avro.apache.org/docs/current/spec.html#Maps"""
values: Schema
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
return dict(type="map", values=self.values._to_avro(visited))
@dataclass
class AvroFixed(NamedSchema):
"""https://avro.apache.org/docs/current/spec.html#Fixed"""
size: int = 0
def _to_avro(
self, visited: VisitedT, *fields: str, **extras: AvroSchemaT
) -> AvroSchemaT:
return super()._to_avro(
visited, *fields, type="fixed", size=self.size, **extras
)
@dataclass
class AvroUnion(Schema):
"""https://avro.apache.org/docs/current/spec.html#Unions"""
schemas: Iterable[Schema]
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
return [schema._to_avro(visited) for schema in self.schemas]
@dataclass
class AvroNested(Schema):
"""
An arbitrary nesting, where the schema used the second form of schema declaration from
https://avro.apache.org/docs/current/spec.html#schemas to "nest" a schema with an extra dict.
Example:
{"type": {"type": "str"}}
As opposed to the simpler:
{"type": "str"}
Or even just:
"str"
"""
schema: Schema
def _to_avro(self, visited: VisitedT) -> AvroSchemaT:
return dict(type=self.schema._to_avro(visited)) | 0.927112 | 0.28799 |
from __future__ import annotations
import argparse
import os
import re
import subprocess
import sys
from argparse import ArgumentParser
from pathlib import Path
THISDIR = Path(__file__).resolve().parent
def main():
    """Main entry point: convert tabs to spaces, run black, convert back."""
    # Intercept -h/--help ourselves so the bundled doc.txt is shown instead
    # of black's help; everything else is forwarded untouched.
    parser = ArgumentParser(add_help=False)
    parser.add_argument("-h", "--help", action="store_true", default=argparse.SUPPRESS)
    args, unknown = parser.parse_known_args()
    # No arguments at all, or help requested: print the docs and stop.
    if len(args.__dict__) + len(unknown) == 0 or "help" in args.__dict__:
        print((THISDIR / "doc.txt").read_text(encoding="utf-8"))
        sys.exit(0)
    # Collect every python-ish source file under the current directory.
    sourceFiles = []
    for root, _dirs, files in os.walk("."):
        for file in files:
            if file.endswith(".py") or file.endswith(".pyi") or file.endswith(".ipynb"):
                sourceFiles.append(os.path.join(root, file))
    # Convert tabs to spaces
    for file in sourceFiles:
        convertFile(file, "\t", "    ")
    # Run black with forwarded args
    exitCode, out = _doSysExec("black " + " ".join(unknown))
    # Convert spaces to tabs
    for file in sourceFiles:
        convertFile(file, "    ", "\t")
    # NOTE(review): unicode_escape round-trip can mangle non-ASCII output --
    # kept as-is deliberately (see pylint disable).
    print(out.encode("utf-8").decode("unicode_escape")) # pylint: disable=no-member
    # Propagate black's exit code so CI sees failures.
    sys.exit(exitCode)
def convertFile(file: str, find: str, replace: str):
    """Convert leading indentation in-place: spaces to tabs or vice versa.

    Only runs of ``find`` at the start of each line are replaced; the rest
    of the line is untouched.

    Args:
        file (str): file to modify
        find (str): indentation unit to find (e.g. "\\t" or four spaces)
        replace (str): indentation unit to replace each occurrence with
    """
    lines = Path(file).read_text(encoding="utf-8").split("\n")
    outLines = []
    # re.escape generalizes this to arbitrary `find` strings (regex
    # metacharacters); for the tab/space runs used here it changes nothing.
    # Compiled once instead of re-matching the raw pattern per line.
    pattern = re.compile(f"^({re.escape(find)})*")
    for line in lines:
        # The `*` quantifier always matches (possibly zero-width), so
        # pattern.match never returns None.
        span = pattern.match(line).span()
        outLines.append(replace * (span[1] // len(find)) + line[span[1] :])
    Path(file).write_text("\n".join(outLines), encoding="utf-8")
def _doSysExec(command: str, errorAsOut: bool = True) -> tuple[int, str]:
"""Execute a command and check for errors.
Args:
command (str): commands as a string
errorAsOut (bool, optional): redirect errors to stdout
Raises:
RuntimeWarning: throw a warning should there be a non exit code
Returns:
tuple[int, str]: tuple of return code (int) and stdout (str)
"""
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if errorAsOut else subprocess.PIPE,
encoding="utf-8",
errors="ignore",
) as process:
out = process.communicate()[0]
exitCode = process.returncode
return exitCode, out
if __name__ == "__main__":
main() | blackt/__init__.py |
from __future__ import annotations
import argparse
import os
import re
import subprocess
import sys
from argparse import ArgumentParser
from pathlib import Path
THISDIR = Path(__file__).resolve().parent
def main():
    """Main entry point: convert tabs to spaces, run black, convert back."""
    # Intercept -h/--help ourselves so the bundled doc.txt is shown instead
    # of black's help; everything else is forwarded untouched.
    parser = ArgumentParser(add_help=False)
    parser.add_argument("-h", "--help", action="store_true", default=argparse.SUPPRESS)
    args, unknown = parser.parse_known_args()
    # No arguments at all, or help requested: print the docs and stop.
    if len(args.__dict__) + len(unknown) == 0 or "help" in args.__dict__:
        print((THISDIR / "doc.txt").read_text(encoding="utf-8"))
        sys.exit(0)
    # Collect every python-ish source file under the current directory.
    sourceFiles = []
    for root, _dirs, files in os.walk("."):
        for file in files:
            if file.endswith(".py") or file.endswith(".pyi") or file.endswith(".ipynb"):
                sourceFiles.append(os.path.join(root, file))
    # Convert tabs to spaces
    for file in sourceFiles:
        convertFile(file, "\t", "    ")
    # Run black with forwarded args
    exitCode, out = _doSysExec("black " + " ".join(unknown))
    # Convert spaces to tabs
    for file in sourceFiles:
        convertFile(file, "    ", "\t")
    # NOTE(review): unicode_escape round-trip can mangle non-ASCII output --
    # kept as-is deliberately (see pylint disable).
    print(out.encode("utf-8").decode("unicode_escape")) # pylint: disable=no-member
    # Propagate black's exit code so CI sees failures.
    sys.exit(exitCode)
def convertFile(file: str, find: str, replace: str):
    """Convert leading indentation in-place: spaces to tabs or vice versa.

    Only runs of ``find`` at the start of each line are replaced; the rest
    of the line is untouched.

    Args:
        file (str): file to modify
        find (str): indentation unit to find (e.g. "\\t" or four spaces)
        replace (str): indentation unit to replace each occurrence with
    """
    lines = Path(file).read_text(encoding="utf-8").split("\n")
    outLines = []
    # re.escape generalizes this to arbitrary `find` strings (regex
    # metacharacters); for the tab/space runs used here it changes nothing.
    # Compiled once instead of re-matching the raw pattern per line.
    pattern = re.compile(f"^({re.escape(find)})*")
    for line in lines:
        # The `*` quantifier always matches (possibly zero-width), so
        # pattern.match never returns None.
        span = pattern.match(line).span()
        outLines.append(replace * (span[1] // len(find)) + line[span[1] :])
    Path(file).write_text("\n".join(outLines), encoding="utf-8")
def _doSysExec(command: str, errorAsOut: bool = True) -> tuple[int, str]:
"""Execute a command and check for errors.
Args:
command (str): commands as a string
errorAsOut (bool, optional): redirect errors to stdout
Raises:
RuntimeWarning: throw a warning should there be a non exit code
Returns:
tuple[int, str]: tuple of return code (int) and stdout (str)
"""
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if errorAsOut else subprocess.PIPE,
encoding="utf-8",
errors="ignore",
) as process:
out = process.communicate()[0]
exitCode = process.returncode
return exitCode, out
if __name__ == "__main__":
main() | 0.399694 | 0.083778 |
import time
from colorama import Fore, Back, Style, init
import sys
import os
# Reset colorama styling automatically after every print call.
init(autoreset=True)
# Banner: author credit and website (Turkish UI strings, left untouched).
print(Fore.GREEN+"EMYOUNOONE Tarafından Kodlanmıştır ")
print(Fore.GREEN+"www.siberguvenlikblogu.com ")
# "Welcome to the password generator program... press Enter to continue."
input("Şifre Oluşturucu Programına Hoş Geldiniz...\n \nDevam Etmek İçin Enter'e Basınız..\n")
# Clear the terminal on both Windows ('cls') and POSIX ('clear').
os.system('cls' if os.name=='nt' else 'clear')
# "Everything you need to know is in the BENİ OKU.txt (README) file
# where you installed the program."
print(Fore.RED+"BİLMENİZ GEREKEN HER ŞEY PROGRAMI KURDUĞUNUZ YERDE BENİ OKU.txt DOSYASININ İÇİNDE..\n")
while True:
f = open("ŞİFRELER.txt", "a")
import random
şifre_oluşturucu="""abcdfeghijklmnoprstuvyzxwq\
ABCDEFGHIJKLMNOPRSTUVYZXQW\
1234567890\
!'^+%&/()=?}][{#£><.,-"$*:;|_"""
print("Şifrenin maximum uzunluğu 30 minimum uzunluğu 8 olmalı.\n")
uzunluk=int(input("Şifreniz için bir uzunluk belirtin :"))
kayıt=input("Şifrenizi Ne Olarak Kayıt Edelim :")
if uzunluk == (8):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE GÜVENLİKSİZ--- "+" : " + password+"\n" )
elif uzunluk == (9):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKSİZ--- "+kayıt+" =" + password+"\n" )
elif uzunluk == (10):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKSİZ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (11):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (12):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" =" + password+"\n" )
elif uzunluk == (13):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (14):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (15):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (16):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (17):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (18):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (19):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (20):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (21):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (22):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (23):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (24):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (25):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (26):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (27):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (28):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (29):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (30):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\n\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
else:
print("Lütfen Geçerli Bir uzunluk Giriniz..")
soru=int(input("\n\nYeniden Oluşturmak için 1'e, Çıkmak İçin 2'ye Basınız :"))
if soru == (1):
continue
elif soru == (2):
break
quit()
else:
print("Lütfen Geçerli Bir Kod Giriniz")
input("") | sifre_olusturucu.py | import time
from colorama import Fore, Back, Style, init
import sys
import os
init(autoreset=True)
print(Fore.GREEN+"EMYOUNOONE Tarafından Kodlanmıştır ")
print(Fore.GREEN+"www.siberguvenlikblogu.com ")
input("Şifre Oluşturucu Programına Hoş Geldiniz...\n \nDevam Etmek İçin Enter'e Basınız..\n")
os.system('cls' if os.name=='nt' else 'clear')
print(Fore.RED+"BİLMENİZ GEREKEN HER ŞEY PROGRAMI KURDUĞUNUZ YERDE BENİ OKU.txt DOSYASININ İÇİNDE..\n")
while True:
f = open("ŞİFRELER.txt", "a")
import random
şifre_oluşturucu="""abcdfeghijklmnoprstuvyzxwq\
ABCDEFGHIJKLMNOPRSTUVYZXQW\
1234567890\
!'^+%&/()=?}][{#£><.,-"$*:;|_"""
print("Şifrenin maximum uzunluğu 30 minimum uzunluğu 8 olmalı.\n")
uzunluk=int(input("Şifreniz için bir uzunluk belirtin :"))
kayıt=input("Şifrenizi Ne Olarak Kayıt Edelim :")
if uzunluk == (8):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE GÜVENLİKSİZ--- "+" : " + password+"\n" )
elif uzunluk == (9):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKSİZ--- "+kayıt+" =" + password+"\n" )
elif uzunluk == (10):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKSİZ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (11):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (12):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" =" + password+"\n" )
elif uzunluk == (13):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (14):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (15):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE ORTA GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (16):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (17):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (18):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (19):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (20):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (21):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (22):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (23):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (24):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (25):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (26):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (27):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (28):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (29):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
elif uzunluk == (30):
password="".join(random.sample(şifre_oluşturucu,uzunluk))
print("\n\nŞifre Oluşturuludu: "+Fore.RED+password)
f.write("ŞİFRE YÜKSEK GÜVENLİKLİ--- "+kayıt+" : " + password+"\n" )
else:
print("Lütfen Geçerli Bir uzunluk Giriniz..")
soru=int(input("\n\nYeniden Oluşturmak için 1'e, Çıkmak İçin 2'ye Basınız :"))
if soru == (1):
continue
elif soru == (2):
break
quit()
else:
print("Lütfen Geçerli Bir Kod Giriniz")
input("") | 0.030916 | 0.202325 |
import numpy as np
import LMM as lmm
import matplotlib.pyplot as plt
# validates the LMM forward rate simulations using martingale tests and other
# tests
class Validation():
    def __init__(self):
        """Run the full validation suite.

        Builds an LMM for the 5Y CVA dataset and runs its martingale test,
        then rebuilds an LMM for the extended dataset and runs the 10-year
        zero-coupon tests.  Finally caches the simulation attributes of the
        second LMM instance for the expectation helpers below.

        Side effects: each calculate_* call writes a CSV result file.
        """
        swaption_vol_cva_dataset_path = 'Data/SwaptionVolMatrix_5Y.csv'
        swap_curve_cva_dataset_path = 'Data/SpotCurve_5Y.csv'
        self.lmm = lmm.LMM(swaption_vol_cva_dataset_path, swap_curve_cva_dataset_path)
        self.calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry()
        swaption_vol_extended_dataset_path = 'Data/SwaptionVolMatrix.csv'
        swap_curve_extended_dataset_path = 'Data/SpotCurve.csv'
        self.lmm = lmm.LMM(swaption_vol_extended_dataset_path, swap_curve_extended_dataset_path)
        self.calculate_10_year_ZC_martingale_test()
        self.calculate_zero_coupon_bond_projections()
        # uncomment to do diffusion check
        # self.check_diffusion_has_zero_mean()
        # forward_sims axes: [term, time, simulation]
        self.forward_sims = self.lmm.forward_sims
        self.number_of_terms = self.lmm.number_of_terms
        self.time_increment = self.lmm.time_increment
        self.bootstrapping = self.lmm.bootstrapping
        self.number_of_sims = self.lmm.number_of_sims
    def set_martingale_differences_for_zero_coupon_bond(self):
        """Tabulate simulated vs. bootstrapped zero-coupon bond prices.

        Column 0: Monte-Carlo bond PV, column 1: bootstrapped t=0 PV,
        column 2: relative difference.  Writes martingale_test.csv.
        """
        self.martingale_differences = np.ones((self.number_of_terms, 3))
        # loop through zero coupon bonds
        for i in range(1, self.number_of_terms+1):
            bond_pv = self.get_expectation_of_zero_coupon_bond(i)
            t0_bond_pv = self.bootstrapping.zero_coupon_prices[i]
            self.martingale_differences[i - 1,0] = bond_pv
            self.martingale_differences[i - 1, 1] = t0_bond_pv
            self.martingale_differences[i - 1, 2] = bond_pv / t0_bond_pv - 1
        np.savetxt('martingale_test.csv', self.martingale_differences, delimiter=',')
    def calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry(self):
        """Martingale test for the CVA dataset bonds at their expiry.

        For each bond i, the path-average of 1/numeraire at step i scaled
        by the t=0 numeraire price should equal the t=0 bond price; the
        ratio of the two is written to a CSV.  Index 0 stays unused (zero).
        """
        numeraire_index = 10
        self.lmm.run_projection(numeraire_index, numeraire_index)
        bonds = np.zeros(numeraire_index)
        ratio = np.zeros(numeraire_index)
        difference = np.zeros(numeraire_index)
        for i in range(1, numeraire_index):
            # terminal-bond (numeraire) value at time step i, per simulation path
            numeraire_value = self.lmm.DF[numeraire_index, i,:]
            t0_value = self.lmm.DF[numeraire_index,0,0]
            bonds[i] = np.mean(1/numeraire_value)*t0_value
            difference[i] = bonds[i] - self.lmm.DF[i,0,0]
            ratio[i] = bonds[i]/self.lmm.DF[i,0,0]
        np.savetxt('martingale_ratio_at_bond_expiry_CVA_dataset.csv', ratio, delimiter=',')
    def calculate_zero_coupon_bond_projections(self):
        """Project deflated bond prices forward and save the ratio matrix.

        Tests bonds 20..39 at every time step up to their own index under
        the terminal measure (numeraire index 40); each ratio should stay
        close to 1 if the deflated bond price is a martingale.
        """
        numeraire_index = 40
        start_bond = 20
        # fixed, pre-calibrated volatility parameters - TODO confirm provenance
        self.lmm.volatility.mc_adjustment_factor = 1
        self.lmm.volatility.a = 0.01368861
        self.lmm.volatility.b = 0.07921976
        self.lmm.volatility.c = 0.33920146
        self.lmm.volatility.d = 0.08416935
        self.lmm.volatility.instantiate_arrays()
        self.lmm.run_projection(numeraire_index, numeraire_index)
        bonds = np.zeros((numeraire_index - start_bond, numeraire_index))
        ratio = np.zeros((numeraire_index - start_bond, numeraire_index))
        for i in range(start_bond, numeraire_index):
            for j in range(i+1):
                numeraire_value = self.lmm.DF[numeraire_index, j, :]
                t0_numeraire_value = self.lmm.DF[numeraire_index, 0, 0]
                t0_ratio = self.lmm.DF[i, 0, 0]/t0_numeraire_value
                bonds[i-start_bond,j] = np.mean(self.lmm.DF[i, j,:] / numeraire_value) * t0_numeraire_value
                ratio[i-start_bond,j] = (np.mean(self.lmm.DF[i, j,:] / numeraire_value))/t0_ratio
        # NOTE(review): file name spells 'matingale' - typo kept since
        # downstream consumers may depend on the exact name
        np.savetxt('matingale_test_ratio_projections.csv', ratio, delimiter=',')
    def calculate_10_year_ZC_martingale_test(self):
        """Percentile martingale test for the 10-year (index 20) bond.

        Rows of the saved matrix: 5th percentile, mean, median and 95th
        percentile of the deflated bond price relative to its t=0 ratio,
        for each time step up to the bond's own index.
        """
        numeraire_index = 40
        start_bond = 20
        # fixed, pre-calibrated volatility parameters - TODO confirm provenance
        self.lmm.volatility.mc_adjustment_factor = 1
        self.lmm.volatility.a = 0.01368861
        self.lmm.volatility.b = 0.07921976
        self.lmm.volatility.c = 0.33920146
        self.lmm.volatility.d = 0.08416935
        self.lmm.volatility.instantiate_arrays()
        self.lmm.run_projection(numeraire_index, numeraire_index)
        # self.lmm.run_projection_predictor_corrector(numeraire_index, numeraire_index)
        bonds = np.zeros((numeraire_index - start_bond, numeraire_index))
        ratio = np.zeros((4, numeraire_index))
        for j in range(start_bond + 1):
            numeraire_value = self.lmm.DF[numeraire_index, j, :]
            t0_numeraire_value = self.lmm.DF[numeraire_index, 0, 0]
            t0_ratio = self.lmm.DF[start_bond, 0, 0] / t0_numeraire_value
            bonds[start_bond - start_bond, j] = np.mean(self.lmm.DF[start_bond, j, :] / numeraire_value) * t0_numeraire_value
            ratio[0, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 5) / t0_ratio
            ratio[1, j] = (np.mean(self.lmm.DF[start_bond, j, :] / numeraire_value)) / t0_ratio
            ratio[2, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 50) / t0_ratio
            ratio[3, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 95) / t0_ratio
        np.savetxt('10_year_ZC_martingale_test.csv', ratio, delimiter=',')
def get_expectation_of_zero_coupon_bond(self, zero_coupon_index):
forward_rate_index = zero_coupon_index - 1
product = 1/(np.ones(self.number_of_sims) + self.time_increment*self.forward_sims[0,0,:])
for i in range(1, forward_rate_index+1):
product = product/(np.ones(self.number_of_sims) + self.time_increment*self.forward_sims[i,i,:])
output = np.mean(product)
return output
    def check_diffusion_has_zero_mean(self):
        """Sample the LMM diffusion repeatedly and collect per-factor means.

        Each draw's mean over paths should be close to zero.  The 12
        columns presumably match the number of driving factors returned by
        LMM.get_diffusion - TODO confirm against the LMM implementation.
        """
        number_of_tests = 40
        mean = np.zeros((number_of_tests,12))
        for i in range(number_of_tests):
            diffusion = self.lmm.get_diffusion()
            mean[i,:] = np.mean(diffusion, axis=1)
mean_of_mean = np.mean(mean) | Validation.py | import numpy as np
import LMM as lmm
import matplotlib.pyplot as plt
# validates the LMM forward rate simulations using martingale tests and other
# tests
class Validation():
def __init__(self):
swaption_vol_cva_dataset_path = 'Data/SwaptionVolMatrix_5Y.csv'
swap_curve_cva_dataset_path = 'Data/SpotCurve_5Y.csv'
self.lmm = lmm.LMM(swaption_vol_cva_dataset_path, swap_curve_cva_dataset_path)
self.calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry()
swaption_vol_extended_dataset_path = 'Data/SwaptionVolMatrix.csv'
swap_curve_extended_dataset_path = 'Data/SpotCurve.csv'
self.lmm = lmm.LMM(swaption_vol_extended_dataset_path, swap_curve_extended_dataset_path)
self.calculate_10_year_ZC_martingale_test()
self.calculate_zero_coupon_bond_projections()
# uncomment to do diffusion check
# self.check_diffusion_has_zero_mean()
##[terms,time, sim]
self.forward_sims = self.lmm.forward_sims
self.number_of_terms = self.lmm.number_of_terms
self.time_increment = self.lmm.time_increment
self.bootstrapping = self.lmm.bootstrapping
self.number_of_sims = self.lmm.number_of_sims
def set_martingale_differences_for_zero_coupon_bond(self):
self.martingale_differences = np.ones((self.number_of_terms, 3))
# loop through zero coupon bonds
for i in range(1, self.number_of_terms+1):
bond_pv = self.get_expectation_of_zero_coupon_bond(i)
t0_bond_pv = self.bootstrapping.zero_coupon_prices[i]
self.martingale_differences[i - 1,0] = bond_pv
self.martingale_differences[i - 1, 1] = t0_bond_pv
self.martingale_differences[i - 1, 2] = bond_pv / t0_bond_pv - 1
np.savetxt('martingale_test.csv', self.martingale_differences, delimiter=',')
def calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry(self):
numeraire_index = 10
self.lmm.run_projection(numeraire_index, numeraire_index)
bonds = np.zeros(numeraire_index)
ratio = np.zeros(numeraire_index)
difference = np.zeros(numeraire_index)
for i in range(1, numeraire_index):
numeraire_value = self.lmm.DF[numeraire_index, i,:]
t0_value = self.lmm.DF[numeraire_index,0,0]
bonds[i] = np.mean(1/numeraire_value)*t0_value
difference[i] = bonds[i] - self.lmm.DF[i,0,0]
ratio[i] = bonds[i]/self.lmm.DF[i,0,0]
np.savetxt('martingale_ratio_at_bond_expiry_CVA_dataset.csv', ratio, delimiter=',')
def calculate_zero_coupon_bond_projections(self):
numeraire_index = 40
start_bond = 20
self.lmm.volatility.mc_adjustment_factor = 1
self.lmm.volatility.a = 0.01368861
self.lmm.volatility.b = 0.07921976
self.lmm.volatility.c = 0.33920146
self.lmm.volatility.d = 0.08416935
self.lmm.volatility.instantiate_arrays()
self.lmm.run_projection(numeraire_index, numeraire_index)
bonds = np.zeros((numeraire_index - start_bond, numeraire_index))
ratio = np.zeros((numeraire_index - start_bond, numeraire_index))
for i in range(start_bond, numeraire_index):
for j in range(i+1):
numeraire_value = self.lmm.DF[numeraire_index, j, :]
t0_numeraire_value = self.lmm.DF[numeraire_index, 0, 0]
t0_ratio = self.lmm.DF[i, 0, 0]/t0_numeraire_value
bonds[i-start_bond,j] = np.mean(self.lmm.DF[i, j,:] / numeraire_value) * t0_numeraire_value
ratio[i-start_bond,j] = (np.mean(self.lmm.DF[i, j,:] / numeraire_value))/t0_ratio
np.savetxt('matingale_test_ratio_projections.csv', ratio, delimiter=',')
def calculate_10_year_ZC_martingale_test(self):
numeraire_index = 40
start_bond = 20
self.lmm.volatility.mc_adjustment_factor = 1
self.lmm.volatility.a = 0.01368861
self.lmm.volatility.b = 0.07921976
self.lmm.volatility.c = 0.33920146
self.lmm.volatility.d = 0.08416935
self.lmm.volatility.instantiate_arrays()
self.lmm.run_projection(numeraire_index, numeraire_index)
# self.lmm.run_projection_predictor_corrector(numeraire_index, numeraire_index)
bonds = np.zeros((numeraire_index - start_bond, numeraire_index))
ratio = np.zeros((4, numeraire_index))
for j in range(start_bond + 1):
numeraire_value = self.lmm.DF[numeraire_index, j, :]
t0_numeraire_value = self.lmm.DF[numeraire_index, 0, 0]
t0_ratio = self.lmm.DF[start_bond, 0, 0] / t0_numeraire_value
bonds[start_bond - start_bond, j] = np.mean(self.lmm.DF[start_bond, j, :] / numeraire_value) * t0_numeraire_value
ratio[0, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 5) / t0_ratio
ratio[1, j] = (np.mean(self.lmm.DF[start_bond, j, :] / numeraire_value)) / t0_ratio
ratio[2, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 50) / t0_ratio
ratio[3, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 95) / t0_ratio
np.savetxt('10_year_ZC_martingale_test.csv', ratio, delimiter=',')
def get_expectation_of_zero_coupon_bond(self, zero_coupon_index):
forward_rate_index = zero_coupon_index - 1
product = 1/(np.ones(self.number_of_sims) + self.time_increment*self.forward_sims[0,0,:])
for i in range(1, forward_rate_index+1):
product = product/(np.ones(self.number_of_sims) + self.time_increment*self.forward_sims[i,i,:])
output = np.mean(product)
return output
def check_diffusion_has_zero_mean(self):
number_of_tests = 40
mean = np.zeros((number_of_tests,12))
for i in range(number_of_tests):
diffusion = self.lmm.get_diffusion()
mean[i,:] = np.mean(diffusion, axis=1)
mean_of_mean = np.mean(mean) | 0.60964 | 0.493714 |
from enum import Enum
from functools import wraps
from typing import List, Optional
from fb4.login_bp import LoginForm
from fb4.widgets import LodTable, Link
from flask import flash, url_for, Blueprint
from flask_login import LoginManager, logout_user, current_user, login_user, login_required, UserMixin
from flask_wtf import FlaskForm
from lodstorage.entity import EntityManager
from lodstorage.jsonable import JSONAble
from lodstorage.storageconfig import StorageConfig, StoreMode
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import redirect
from wtforms import EmailField, validators, StringField, PasswordField, SubmitField, SelectMultipleField, widgets
from wtforms.validators import InputRequired
class LoginBluePrint(object):
    '''
    a blueprint for logins

    Registers login/logout and user administration routes directly on the
    Flask app; the /users* routes additionally require the ADMIN role.
    '''

    def __init__(self, app, name: str, welcome: str = "index", template_folder: str = None, appWrap=None):
        '''
        construct me

        Args:
            app: the Flask application to register routes on
            name(str): my name
            welcome(str): endpoint name of the welcome page
            template_folder(str): the template folder
            appWrap: wrapper object providing render_template()
        '''
        self.name = name
        self.welcome = welcome
        if template_folder is not None:
            self.template_folder = template_folder
        else:
            self.template_folder = 'templates'
        self.blueprint = Blueprint(name, __name__, template_folder=self.template_folder)
        self.app = app
        self.appWrap = appWrap
        loginManager = LoginManager(app)
        self.loginManager = loginManager
        self.userManager = UserManager()
        # optional extra hint flashed after a failed login
        self.hint = None
        app.register_blueprint(self.blueprint)

        @app.route('/login', methods=['GET', 'POST'])
        def login():
            return self.login()

        @app.route('/logout')
        @login_required
        def logout():
            return self.logOut()

        @app.route('/users')
        @login_required
        @self.roleRequired(role=Roles.ADMIN)
        def getAllUsers():
            return self.getAllUsers()

        @app.route('/users/new', methods=['GET', 'POST'])
        @login_required
        @self.roleRequired(role=Roles.ADMIN)
        def createUser():
            return self.createUser()

        @app.route('/users/<userId>', methods=['GET', 'POST'])
        @login_required
        @self.roleRequired(role=Roles.ADMIN)
        def editUser(userId: str):
            return self.editUser(userId)

        @loginManager.user_loader
        def load_user(userid):
            luser = self.userManager.getUser(userid)
            return luser

    def login(self):
        '''
        show and process the login form
        '''
        form = LoginForm()
        if current_user.is_authenticated:
            # bug fix: was hard-coded url_for('index'), ignoring the
            # configured welcome endpoint
            return redirect(url_for(self.welcome))
        if form.validate_on_submit():
            user = self.userManager.getUser(form.username.data)
            if user is None or not user.checkPassword(form.password.data):
                flash('Invalid username or password')
                if self.hint is not None:
                    flash(self.hint)
                return redirect(url_for('login'))
            login_user(user, remember=form.rememberMe.data)
            return redirect(url_for(self.welcome))
        return self.appWrap.render_template('login.html', "login", "login", form=form)

    def logOut(self):
        '''
        logout the current user and return to the welcome page
        '''
        logout_user()
        return redirect(url_for(self.welcome))

    def getLoggedInUser(self):
        '''
        get the currently logged in user object (unwrapped from the proxy)
        '''
        # https://stackoverflow.com/a/19274791/1497139
        return current_user._get_current_object()

    def getAllUsers(self):
        '''
        render the table of all users (admin only)
        '''
        userRecords = self.userManager.getAll()
        for record in userRecords:
            # make each user row clickable via an edit link
            record["edit"] = Link(url=f"/users/{record.get('id')}", title="edit")
        usersTable = LodTable(lod=userRecords, name="Users")
        return self.appWrap.render_template('users.html', "users", "users", users=usersTable)

    def createUser(self):
        '''
        render and process the user creation form (admin only)
        '''
        form = CreateUserForm()
        if form.validate_on_submit():
            user = form.getUser()
            self.userManager.addUser(user)
            # bug fix: the list route's endpoint is the view-function name
            # "getAllUsers"; url_for("users") raised a BuildError
            return redirect(url_for("getAllUsers"))
        # ToDo: Propose Invitation email in response
        return self.appWrap.render_template('userForm.html', "createUser", "createUser", formTitle="Create new User", form=form)

    def editUser(self, userId):
        '''
        render and process the edit form for the given user (admin only)

        Args:
            userId(str): id (email) of the user to edit
        '''
        user = self.userManager.getUser(userId)
        form = EditUserForm()
        if form.validate_on_submit():
            user = form.getUser()
            self.userManager.updateUser(user)
            flash(f"Successfully updated the user {user.id}")
        else:
            # pre-fill the form with the stored user data on GET
            form = EditUserForm(**user.getFormData())
        return self.appWrap.render_template('userForm.html', "editUser", "editUser", formTitle="Edit User", form=form)

    def roleRequired(self, role):
        """
        decorator factory: reject the request unless the current user has
        the required role
        """
        def decorator(func):
            @wraps(func)
            def decorated_view(*args, **kwargs):
                roles = current_user.roles
                if roles is None or role not in roles:
                    return self.loginManager.unauthorized()
                return func(*args, **kwargs)
            return decorated_view
        return decorator

    def addUser(self, id: str, password: str, username: str):
        """
        create a user with the given credentials and store it

        Returns:
            User: the newly created user
        """
        user = User.getFromArgs(id=id, password=password, username=username)
        self.userManager.addUser(user)
        return user
class Roles(str, Enum):
    """
    roles which assign a user different access rights
    """
    ADMIN="admin"
    USER="user"

    @classmethod
    def choices(cls):
        """Return (member, name) pairs as expected by wtforms select fields."""
        return [(choice, choice.name) for choice in cls]

    @classmethod
    def coerce(cls, name):
        """Coerce a form value back into a Roles member.

        Accepts an existing Roles instance, a bare member name ("ADMIN"),
        or the str() form wtforms produces ("Roles.ADMIN").  The original
        implementation unconditionally sliced off a "Roles." prefix, so a
        bare member name could never be coerced.

        Raises:
            ValueError: if the value names no member
        """
        if isinstance(name, cls):
            # already coerced to instance of this enum
            return name
        prefix = f"{cls.__name__}."
        key = name[len(prefix):] if name.startswith(prefix) else name
        try:
            return cls[key]
        except KeyError:
            raise ValueError(name)
class User(JSONAble, UserMixin):
    """
    a user account with hashed password and ';'-separated role storage
    """

    def __init__(self):
        super(User, self).__init__()
        # new users are active by default
        self.active = True
        # backing store for the roles property; bug fix: previously unset,
        # so reading .roles on a freshly constructed user raised AttributeError
        self._roles = None

    @property
    def roles(self) -> List[str]:
        """the user's roles as a list of Roles members (None when unset)"""
        if self._roles is not None and isinstance(self._roles, str):
            # accept member names ("ADMIN") as written by the setter as
            # well as member values ("admin") as used in getSamples records
            return [
                Roles[name] if name in Roles.__members__ else Roles(name)
                for name in self._roles.split(";")
            ]

    @roles.setter
    def roles(self, roles: List[Roles]):
        self._roles = ';'.join([r.name for r in roles])

    @staticmethod
    def getSamples() -> List[dict]:
        """sample records defining the storage schema"""
        samples = [
            {
                "id": "<EMAIL>",
                "username": "Alice",
                "password_hash": "password".__hash__(),
                "wikidataid": "Q1",
                "_roles": "admin;user",  # accessed via the roles property, separator char: ';'
                "active": False
            }
        ]
        return samples

    def setPassword(self, password: str):
        """
        hash and store the given clear-text password

        Args:
            password(str): the clear-text password
        """
        self.password_hash = generate_password_hash(password)

    def checkPassword(self, password: str):
        """
        check the given clear-text password against the stored hash

        Args:
            password(str): the clear-text password to verify
        """
        return check_password_hash(self.password_hash, password)

    def getWikidataRecords(self) -> dict:
        """
        Query user data from wikidata (not yet implemented)
        """
        pass

    def getFormData(self):
        """
        returns the user data as dict as required by FlaskForm
        e.g. password_hash is omitted and roles are returned as a list

        Bug fix: works on a copy - the original deleted password_hash from
        the live instance __dict__, destroying the stored hash.
        """
        records = dict(self.__dict__)
        records.pop("password_hash", None)
        records["roles"] = self.roles
        return records

    def __repr__(self):
        return '<User {}>'.format(self.username)

    @staticmethod
    def getFromArgs(**kwargs):
        """
        Creates a user from the given keyword arguments

        A non-empty "password" argument is hashed via setPassword; all
        remaining arguments are applied with fromDict.
        """
        u = User()
        if "password" in kwargs:
            if kwargs.get("password"):
                u.setPassword(kwargs["password"])
            del kwargs["password"]
        u.fromDict(kwargs)
        return u
class UserManager(EntityManager):
    """
    Manages the users

    persists User records in the lodstorage SQL cache database
    """
    def __init__(self, storageConfig:StorageConfig=None):
        """
        Args:
            storageConfig(StorageConfig): storage configuration; the SQL
                default configuration is used when None
        """
        if storageConfig is None:
            storageConfig=UserManager.getDefaultStorageConfig()
        super().__init__(name="users",
                         clazz=User,
                         primaryKey="id",
                         tableName=User.__name__,
                         entityName="user",
                         entityPluralName="users",
                         config=storageConfig)
        # create the backing SQL table on first use
        if not self.isCached():
            self.config.getCachePath()
            self.initSQLDB(self.getSQLDB(self.getCacheFile()), withDrop=False, withCreate=True)

    def getUser(self, id:str) -> Optional[User]:
        """
        Retrieves the user record with the given id

        Args:
            id(str): primary key (email address) of the user

        Returns:
            User: the user, or None if no record matches
        """
        db = self.getSQLDB(self.getCacheFile())
        res = db.query(f"SELECT * FROM {self.tableName} WHERE id == ?", params=(id, ))
        user = User()
        if isinstance(res, list) and res:
            user.fromDict(res[0])
        else:
            return None
        return user

    def updateUser(self, user:User):
        """
        update the given user

        Args:
            user(User): new user data
        """
        db = self.getSQLDB(self.getCacheFile())
        # build "col=?" assignments for every attribute of the user record
        qparams = [(f"{k}=?", v) for k,v in user.__dict__.items()]
        vars = ', '.join([p[0] for p in qparams])
        params = [p[1] for p in qparams]
        db.c.execute(f"UPDATE {self.tableName} SET {vars} WHERE id == ?", (*params, user.id))
        # NOTE(review): commit is invoked on db.c - presumably the sqlite3
        # connection exposed by lodstorage; confirm this is not a cursor
        db.c.commit()

    def addUser(self, user:User) -> bool:
        """
        Add the given user to the database

        Args:
            user(User): user to add

        Raises:
            Exception: if a user with the same id already exists
        """
        if self.getUser(user.id) is not None:
            raise Exception("User already exists")
        try:
            self.storeLoD([user.__dict__], cacheFile=self.getCacheFile(), append=True)
            return True
        except Exception as e:
            raise e

    def getAll(self) -> List[dict]:
        """
        Returns all users (without password hash)
        """
        db = self.getSQLDB(self.getCacheFile())
        users = db.query(f'SELECT id, username, wikidataid FROM {self.tableName}')
        return users

    @staticmethod
    def getDefaultStorageConfig() -> StorageConfig:
        """
        Returns the default storageConfig

        Returns:
            StorageConfig
        """
        config = StorageConfig(mode=StoreMode.SQL, cacheDirName="ose")
        return config
class ListWidget(widgets.ListWidget):
    """wtforms ListWidget that strips the CSS class before rendering.

    The form passes render_kw={"class_": ""}, which wtforms turns into a
    "class" keyword argument; it is removed here so it is not emitted on
    the list tag.
    """

    def __call__(self, *args, **kwargs):
        # robustness fix: tolerate callers that pass no "class" at all
        # (the original del kwargs["class"] raised KeyError)
        kwargs.pop("class", None)
        return super().__call__(*args, **kwargs)
class UserForm(FlaskForm):
    """
    Base form holding the fields shared by user creation and editing
    """
    id=EmailField('Email address', [validators.DataRequired()])
    username=StringField("Name", [InputRequired("Please enter a username")])
    # raw string and '$' anchor so partial matches like "Q1abc" are rejected
    wikidataid=StringField("Wikidata Id", [validators.Regexp(r'Q[1-9]\d*$', message="Must be a valid Wikidata Q identifier (Q43649390) ")])
    roles = SelectMultipleField("Role",
                                choices=Roles.choices(),
                                widget=ListWidget(prefix_label=False),
                                option_widget=widgets.CheckboxInput(),
                                coerce=Roles.coerce,
                                render_kw={"class_":""}) #ToDo: Change to ListField and checkboxes
    password=PasswordField("Password")

    def getUser(self)->User:
        """
        Returns the data of the form as user object
        """
        # bug fix: the password argument was a redacted placeholder
        # (<PASSWORD>), which is not even valid syntax
        u = User.getFromArgs(id=self.id.data,
                             username=self.username.data,
                             wikidataid=self.wikidataid.data,
                             roles=self.roles.data,
                             password=self.password.data)
        return u
class CreateUserForm(UserForm):
    """
    Form for creating a new user; the password is mandatory here
    """
    # bug fix: the required-message for the password field said "username"
    password=PasswordField("Password", [InputRequired("Please enter a password")])
    create=SubmitField("Create")
class EditUserForm(UserForm):
    """
    Form for editing an existing user
    """
save=SubmitField("Save") | onlinespreadsheet/loginBlueprint.py | from enum import Enum
from functools import wraps
from typing import List, Optional
from fb4.login_bp import LoginForm
from fb4.widgets import LodTable, Link
from flask import flash, url_for, Blueprint
from flask_login import LoginManager, logout_user, current_user, login_user, login_required, UserMixin
from flask_wtf import FlaskForm
from lodstorage.entity import EntityManager
from lodstorage.jsonable import JSONAble
from lodstorage.storageconfig import StorageConfig, StoreMode
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import redirect
from wtforms import EmailField, validators, StringField, PasswordField, SubmitField, SelectMultipleField, widgets
from wtforms.validators import InputRequired
class LoginBluePrint(object):
    '''
    a blueprint for logins

    Wires login/logout and user-administration routes onto a Flask app and
    backs them with a UserManager for persistence.
    '''

    def __init__(self, app, name: str, welcome: str = "index", template_folder: str = None, appWrap=None):
        '''
        construct me

        Args:
            app: the Flask application the routes are attached to
            name(str): my name
            welcome(str): the welcome page
            template_folder(str): the template folder
            appWrap: wrapper object providing render_template(template, ...)
        '''
        self.name = name
        self.welcome = welcome
        if template_folder is not None:
            self.template_folder = template_folder
        else:
            self.template_folder = 'templates'
        self.blueprint = Blueprint(name, __name__, template_folder=self.template_folder)
        self.app = app
        self.appWrap=appWrap
        loginManager = LoginManager(app)
        self.loginManager = loginManager
        self.userManager=UserManager()
        # optional extra message flashed on failed logins
        self.hint = None
        app.register_blueprint(self.blueprint)

        # NOTE(review): the routes below are registered on the app itself,
        # not on self.blueprint — the blueprint registered above stays
        # empty; confirm this is intentional.
        @app.route('/login', methods=['GET', 'POST'])
        def login():
            return self.login()

        @app.route('/logout')
        @login_required
        def logout():
            return self.logOut()

        @app.route('/users')
        @login_required
        @self.roleRequired(role=Roles.ADMIN)
        def getAllUsers():
            return self.getAllUsers()

        @app.route('/users/new', methods=['GET', 'POST'])
        @login_required
        @self.roleRequired(role=Roles.ADMIN)
        def createUser():
            return self.createUser()

        @app.route('/users/<userId>', methods=['GET', 'POST'])
        @login_required
        @self.roleRequired(role=Roles.ADMIN)
        def editUser(userId:str):
            return self.editUser(userId)

        @loginManager.user_loader
        def load_user(userid):
            # flask-login callback: resolve the session's stored user id
            # back to a User object (None if unknown)
            luser = self.userManager.getUser(userid)
            return luser

    def login(self):
        '''
        show the login form
        '''
        form = LoginForm()
        # NOTE(review): redirects to the hard-coded 'index' endpoint here,
        # but to self.welcome on successful login — confirm both are wanted
        if current_user.is_authenticated:
            return redirect(url_for('index'))
        if form.validate_on_submit():
            user = self.userManager.getUser(form.username.data)
            # unknown user and wrong password are rejected identically
            if user is None or not user.checkPassword(form.password.data):
                flash('Invalid username or password')
                if self.hint is not None:
                    flash(self.hint)
                return redirect(url_for('login'))
            login_user(user, remember=form.rememberMe.data)
            return redirect(url_for(self.welcome))
        return self.appWrap.render_template('login.html',"login", "login", form=form)

    def logOut(self):
        '''
        logout the current user
        '''
        logout_user()
        return redirect(url_for(self.welcome))

    def getLoggedInUser(self):
        '''
        get the currently logged in user details
        '''
        # https://stackoverflow.com/a/19274791/1497139
        return current_user._get_current_object()

    def getAllUsers(self):
        '''
        get all users

        Renders a table of all user records with an edit link per row.
        '''
        userRecords=self.userManager.getAll()
        # Todo: make users clickable
        for record in userRecords:
            record["edit"]=Link(url=f"/users/{record.get('id')}", title="edit")
        usersTable = LodTable(lod=userRecords, name="Users")
        return self.appWrap.render_template('users.html', "users", "users", users=usersTable)

    def createUser(self):
        # show the creation form; on valid submit persist the new user
        form = CreateUserForm()
        if form.validate_on_submit():
            user = form.getUser()
            self.userManager.addUser(user)
            # NOTE(review): the /users route's endpoint is named
            # "getAllUsers" (the view function name); url_for("users")
            # presumably raises BuildError — verify and fix the endpoint
            return redirect(url_for("users"))
        # ToDo: Propose Invitation email in response
        return self.appWrap.render_template('userForm.html', "createUser", "createUser", formTitle="Create new User", form=form)

    def editUser(self, userId):
        # load the user, then either apply the submitted edit or prefill
        # the form with the stored values
        # NOTE(review): getUser may return None for an unknown userId;
        # the prefill branch would then fail — confirm callers guarantee
        # a valid id
        user=self.userManager.getUser(userId)
        form = EditUserForm()
        if form.validate_on_submit():
            user=form.getUser()
            self.userManager.updateUser(user)
            flash(f"Successfully updated the user {user.id}")
        else:
            # GET (or invalid submit): populate the form from the record
            form = EditUserForm(**user.getFormData())
        return self.appWrap.render_template('userForm.html', "editUser", "editUser", formTitle="Edit User", form=form)

    def roleRequired(self, role):
        """
        check if the current user has the required role

        Args:
            role: the Roles member the current user must hold

        Returns:
            a decorator that short-circuits to loginManager.unauthorized()
            when the role is missing
        """
        def decorator(func):
            @wraps(func)
            def decorated_view(*args, **kwargs):
                roles=current_user.roles
                # roles is None when the user has no _roles set
                if roles is None or role not in current_user.roles:
                    return self.loginManager.unauthorized()
                return func(*args, **kwargs)
            return decorated_view
        return decorator

    def addUser(self, id:str,password:str,username:str):
        """
        add User to db

        Args:
            id(str): the user id (email address)
            password(str): the clear-text password to hash and store
            username(str): the display name

        Returns:
            User: the newly created user
        """
        user = User.getFromArgs(id=id, password=password, username=username)
        self.userManager.addUser(user)
        return user
class Roles(str, Enum):
    """
    Access roles that grant a user different rights.

    Mixes in ``str`` so members compare equal to their plain string value.
    """
    ADMIN = "admin"
    USER = "user"

    @classmethod
    def choices(cls):
        """Return (member, member-name) pairs, e.g. for form select fields."""
        pairs = []
        for member in cls:
            pairs.append((member, member.name))
        return pairs

    @classmethod
    def coerce(cls, name):
        """
        Coerce *name* (an existing member or a "Roles.<NAME>" string)
        into a Roles member; raise ValueError for anything unknown.
        """
        if isinstance(name, cls):
            # already a member of this enum — nothing to do
            return name
        # strip the "Roles." prefix that str(member) produces
        prefix_len = len(f"{cls.__name__}.")
        key = name[prefix_len:]
        try:
            return cls[key]
        except KeyError:
            raise ValueError(name)
class User(JSONAble, UserMixin):
    """
    a user of the application

    Combines JSONAble persistence with flask-login's UserMixin. Roles are
    stored internally as a ';'-separated string in ``_roles`` and exposed
    as a list of Roles members through the ``roles`` property.
    """

    def __init__(self):
        super(User, self).__init__()
        # users are active by default (flask-login honors is_active)
        self.active = True

    @property
    def roles(self) -> List[str]:
        # returns None implicitly when _roles is unset-to-None or not a
        # string; callers (e.g. roleRequired) rely on the None case
        if self._roles is not None and isinstance(self._roles, str):
            return [Roles[name] for name in self._roles.split(";")]

    @roles.setter
    def roles(self, roles: List[Roles]):
        # serialize the role list into the ';'-separated storage format
        self._roles = ';'.join([r.name for r in roles])

    @staticmethod
    def getSamples() -> List[dict]:
        """Sample records defining the storage schema."""
        # NOTE(review): "_roles" here uses the lowercase member *values*
        # ("admin;user") while the roles property indexes by member *name*
        # ("ADMIN;USER") — the sample would make the property raise
        # KeyError; confirm which format is canonical.
        samples = [
            {
                "id": "<EMAIL>",
                "username": "Alice",
                "password_hash": "password".__hash__(),
                "wikidataid": "Q1",
                "_roles": "admin;user",  # accessed over property role, separator char: ';'
                "active": False
            }
        ]
        return samples

    def setPassword(self, password: str):
        """
        sets the password of the user

        Args:
            password(str): the clear-text password to hash and store
        """
        self.password_hash = generate_password_hash(password)

    def checkPassword(self, password: str):
        """
        check the password of the user

        Args:
            password(str): the clear-text password to verify

        Returns:
            bool: True if the password matches the stored hash
        """
        return check_password_hash(self.password_hash, password)

    def getWikidataRecords(self) -> dict:
        """
        Query user data from wikidata

        NOTE: not implemented yet.
        """
        pass

    def getFormData(self):
        """
        returns the user data as dict as required by FlaskForm
        e.g. password_hash is omitted and roles are returned as List
        """
        # copy the attribute dict so that deleting password_hash does not
        # mutate the user object itself (previously this removed the
        # attribute from the live instance)
        records = dict(self.__dict__)
        if "password_hash" in records:
            del records["password_hash"]
        records["roles"] = self.roles
        return records

    def __repr__(self):
        return '<User {}>'.format(self.username)

    @staticmethod
    def getFromArgs(**kwargs):
        """
        Creates user from given arguments

        A "password" keyword is hashed via setPassword and never stored in
        clear text; all other keywords are applied via fromDict.
        """
        u = User()
        if "password" in kwargs:
            if kwargs.get("password"):
                u.setPassword(kwargs["password"])
            del kwargs["password"]
        u.fromDict(kwargs)
        return u
class UserManager(EntityManager):
    """
    Manages the users

    Persists User entities in the SQL cache configured by StorageConfig.
    """

    def __init__(self, storageConfig: StorageConfig = None):
        """
        Args:
            storageConfig(StorageConfig): storage configuration; falls back
                to the SQL default from getDefaultStorageConfig()
        """
        if storageConfig is None:
            storageConfig=UserManager.getDefaultStorageConfig()
        super().__init__(name="users",
                         clazz=User,
                         primaryKey="id",
                         tableName=User.__name__,
                         entityName="user",
                         entityPluralName="users",
                         config=storageConfig)
        # create the backing table on first use
        if not self.isCached():
            self.config.getCachePath()
            self.initSQLDB(self.getSQLDB(self.getCacheFile()), withDrop=False, withCreate=True)

    def getUser(self, id: str) -> Optional[User]:
        """
        Retrieves the user records

        Args:
            id(str): the user id to look up

        Returns:
            Optional[User]: the user, or None if no record matches
        """
        db = self.getSQLDB(self.getCacheFile())
        res = db.query(f"SELECT * FROM {self.tableName} WHERE id == ?", params=(id, ))
        user = User()
        if isinstance(res, list) and res:
            user.fromDict(res[0])
        else:
            return None
        return user

    def updateUser(self, user: User):
        """
        update the given user

        Args:
            user(User): new user data
        """
        # build "col=?" pairs from the instance attributes; note this
        # includes internal attributes such as _roles and active
        db = self.getSQLDB(self.getCacheFile())
        qparams = [(f"{k}=?", v) for k,v in user.__dict__.items()]
        vars = ', '.join([p[0] for p in qparams])
        params = [p[1] for p in qparams]
        # NOTE(review): presumably db.c is the underlying sqlite3
        # connection (execute + commit) — confirm against the SQLDB API
        db.c.execute(f"UPDATE {self.tableName} SET {vars} WHERE id == ?", (*params, user.id))
        db.c.commit()

    def addUser(self, user: User) -> bool:
        """
        Add the given user to the database

        Args:
            user(User): user to add

        Raises:
            Exception: if a user with the same id already exists
        """
        if self.getUser(user.id) is not None:
            raise Exception("User already exists")
        # NOTE(review): this try/except only re-raises and is a no-op
        try:
            self.storeLoD([user.__dict__], cacheFile=self.getCacheFile(), append=True)
            return True
        except Exception as e:
            raise e

    def getAll(self) -> List[dict]:
        """
        Returns all users (without password hash)
        """
        db = self.getSQLDB(self.getCacheFile())
        users = db.query(f'SELECT id, username, wikidataid FROM {self.tableName}')
        return users

    @staticmethod
    def getDefaultStorageConfig() -> StorageConfig:
        """
        Returns the default storageConfig

        Returns
            StorageConfig
        """
        config = StorageConfig(mode=StoreMode.SQL, cacheDirName="ose")
        return config
class ListWidget(widgets.ListWidget):
    """
    ListWidget variant that drops the CSS "class" attribute injected by
    form rendering (see the empty ``class_`` render_kw on UserForm.roles).
    """

    def __call__(self, *args, **kwargs):
        # remove "class" only if present; the previous unconditional
        # `del kwargs["class"]` raised KeyError when it was absent
        kwargs.pop("class", None)
        return super().__call__(*args, **kwargs)
class UserForm(FlaskForm):
    """
    Base user form with the fields shared by the create and edit forms.
    """
    id = EmailField('Email address', [validators.DataRequired()])
    username = StringField("Name", [InputRequired("Please enter a username")])
    # raw string so \d is a regex digit class, not a (deprecated) python
    # string escape
    wikidataid = StringField("Wikidata Id", [validators.Regexp(r'Q[1-9]\d*', message="Must be a valid Wikidata Q identifier (Q43649390) ")])
    roles = SelectMultipleField("Role",
                                choices=Roles.choices(),
                                widget=ListWidget(prefix_label=False),
                                option_widget=widgets.CheckboxInput(),
                                coerce=Roles.coerce,
                                render_kw={"class_":""})  # ToDo: Change to ListField and checkboxes
    password = PasswordField("Password")

    def getUser(self) -> User:
        """
        Returns the data of the form as user object

        Returns:
            User: a user object populated from the form field values
        """
        # NOTE(review): the password argument was a redacted "<PASSWORD>"
        # placeholder; restored to the form's password field data
        u = User.getFromArgs(id=self.id.data,
                             username=self.username.data,
                             wikidataid=self.wikidataid.data,
                             roles=self.roles.data,
                             password=self.password.data)
        return u
class CreateUserForm(UserForm):
    """
    User form to create a new user.

    Extends UserForm by making the password mandatory and adding a
    "Create" submit button.
    """
    # Fixed copy-paste defect: the InputRequired message on the password
    # field asked for a username.
    password = PasswordField("Password", [InputRequired("Please enter a password")])
    create = SubmitField("Create")
class EditUserForm(UserForm):
    """
    User form to edit an existing user.

    Extends UserForm by adding a "Save" submit button; the inherited
    password field stays optional so the password can be left unchanged.
    """
    save=SubmitField("Save")
from netforce.model import Model, fields, get_model
from netforce.access import get_active_user, set_active_user
from datetime import *
from dateutil.relativedelta import relativedelta
import time
from netforce.database import get_connection
from netforce.access import get_active_company, check_permission_other
from netforce.utils import get_data_path
class Job(Model):
    # Service order ("job") model: tracks a customer service job from
    # planning through completion, covering worksheet lines, parts,
    # work time, resource allocation, costing and invoicing.
    _name = "job"
    _string = "Service Order"
    _name_field = "number"
    _audit_log = True
    _multi_company = True
    _fields = {
        "project_id": fields.Many2One("project", "Project", search=True),
        "contact_id": fields.Many2One("contact", "Customer", required=True, search=True),
        "template_id": fields.Many2One("job.template", "Template"),
        "service_type_id": fields.Many2One("service.type", "Service Type", search=True),
        "product_id": fields.Many2One("product", "Product"),  # XXX: deprecated
        "name": fields.Char("Order Name", search=True),
        "number": fields.Char("Order Number", required=True, search=True),
        "description": fields.Text("Description"),
        "due_date": fields.Date("Due Date", search=True),
        "close_date": fields.Date("Close Date", search=True),
        "priority": fields.Selection([["low", "Low"], ["medium", "Medium"], ["high", "High"]], "Priority", search=True),
        "state": fields.Selection([["planned", "Planned"], ["allocated", "Allocated"], ["in_progress", "In Progress"], ["done", "Completed"], ["canceled", "Canceled"]], "Status", required=True),
        # fix: "overdue" was declared twice with identical definitions;
        # the duplicate dict entry has been removed
        "overdue": fields.Boolean("Overdue", function="get_overdue", function_search="search_overdue"),
        "comments": fields.One2Many("message", "related_id", "Comments"),
        "documents": fields.One2Many("document", "related_id", "Documents"),
        "tasks": fields.One2Many("task", "job_id", "Tasks"),
        "days_late": fields.Integer("Days Late", function="get_days_late"),
        "user_id": fields.Many2One("base.user", "Assigned To"),  # XXX: deprecated
        "resource_id": fields.Many2One("service.resource", "Assigned Resource", search=True),  # XXX: deprecated
        "skill_level_id": fields.Many2One("skill.level", "Required Skill Level", search=True),
        "request_by_id": fields.Many2One("base.user", "Requested By", search=True),
        "user_board_id": fields.Boolean("User", store=False, function_search="search_user_board_id"),
        "sharing": fields.One2Many("share.record", "related_id", "Sharing"),
        "invoice_no": fields.Char("Invoice No."),  # XXX: not used any more...
        "shared_board": fields.Boolean("Shared", store=False, function_search="search_shared_board"),
        "quotation_id": fields.Many2One("sale.quot", "Quotation"),
        "cancel_reason": fields.Text("Cancel Reason"),
        "cancel_periodic": fields.Boolean("Cancel Periodic"),
        "next_job_id": fields.Many2One("job", "Next Order"),
        "emails": fields.One2Many("email.message", "related_id", "Emails"),
        "company_id": fields.Many2One("company", "Company"),
        "invoices": fields.One2Many("account.invoice", "related_id", "Invoices"),
        "bill_amount": fields.Decimal("Billable Amount"),
        "invoice_id": fields.Many2One("account.invoice", "Invoice"),
        "is_duplicate": fields.Boolean("Duplicate"),
        "work_time": fields.One2Many("work.time", "job_id", "Work Time"),
        "pickings": fields.One2Many("stock.picking", "related_id", "Pickings"),
        "stock_moves": fields.One2Many("stock.move", "related_id", "Stock Movements"),
        "parts": fields.One2Many("job.part", "job_id", "Parts"),
        "other_costs": fields.One2Many("job.cost", "job_id", "Other Costs"),
        "items": fields.One2Many("job.item", "job_id", "Service Items"),
        "allocs": fields.One2Many("service.resource.alloc", "job_id", "Resource Allocations"),
        "time_start": fields.DateTime("Planned Start Time"),
        "time_stop": fields.DateTime("Planned Stop Time"),
        "location_id": fields.Many2One("stock.location", "Job Location"),
        "related_id": fields.Reference([["sale.order", "Sales Order"], ["rental.order","Rental Order"], ["issue", "Issue"]], "Related To"),
        "lines": fields.One2Many("job.line", "job_id", "Worksheet"),
        "complaints": fields.Text("Complaints"),
        "cause": fields.Text("Cause"),
        "correction": fields.Text("Correction"),
        "amount_total": fields.Decimal("Total Selling", function="get_total", function_multi=True),
        "amount_contract": fields.Decimal("Included In Contract", function="get_total", function_multi=True),
        "amount_job": fields.Decimal("Not Included In Contract", function="get_total", function_multi=True),
        "date_open": fields.DateTime("Actual Start"),
        "date_close": fields.DateTime("Actual Stop"),
        "labor_cost": fields.Decimal("Labor Cost", function="get_cost", function_multi=True),
        "part_cost": fields.Decimal("Parts Cost", function="get_cost", function_multi=True),
        "other_cost": fields.Decimal("Other Cost", function="get_cost", function_multi=True),
        "total_cost": fields.Decimal("Total Cost", function="get_cost", function_multi=True),
        "labor_sell": fields.Decimal("Labor Selling", function="get_sell", function_multi=True),
        "part_sell": fields.Decimal("Parts Selling", function="get_sell", function_multi=True),
        "other_sell": fields.Decimal("Other Selling", function="get_sell", function_multi=True),
        "done_approved_by_id": fields.Many2One("base.user", "Approved By", readonly=True),
        "multi_visit_code_id": fields.Many2One("reason.code", "Multi Visit Reason Code", condition=[["type", "=", "service_multi_visit"]]),
        "late_response_code_id": fields.Many2One("reason.code", "Late Response Reason Code", condition=[["type", "=", "service_late_response"]]),
        "year": fields.Char("Year", sql_function=["year", "due_date"]),
        "quarter": fields.Char("Quarter", sql_function=["quarter", "due_date"]),
        "month": fields.Char("Month", sql_function=["month", "due_date"]),
        "week": fields.Char("Week", sql_function=["week", "due_date"]),
        "activities": fields.One2Many("activity","related_id","Activities"),
        "track_id": fields.Many2One("account.track.categ","Tracking Code"),
        "track_entries": fields.One2Many("account.track.entry",None,"Tracking Entries",function="get_track_entries",function_write="write_track_entries"),
        "track_balance": fields.Decimal("Tracking Balance",function="_get_related",function_context={"path":"track_id.balance"}),
    }
    _order = "number"
    _sql_constraints = [
        ("number_uniq", "unique (number)", "The job number must be unique!"),
    ]

    def _get_number(self, context={}):
        """Draw the next unused job number from the "job" sequence.

        Loops because the sequence may lag behind manually assigned
        numbers; keeps incrementing until a free number is found.
        """
        while True:
            num = get_model("sequence").get_number(type="job")
            if not num:
                return None
            # search as the root user so record-level visibility rules
            # cannot hide an existing collision
            user_id = get_active_user()
            set_active_user(1)
            res = self.search([["number", "=", num]])
            set_active_user(user_id)
            if not res:
                return num
            get_model("sequence").increment(type="job")

    def name_get(self, ids, context={}):
        """Display name: the number, plus " - <name>" when a name is set."""
        vals = []
        for obj in self.browse(ids):
            name = obj.number
            if obj.name:
                name += " - " + obj.name
            vals.append((obj.id, name))
        return vals

    _defaults = {
        "state": "planned",
        "number": _get_number,
        "request_by_id": lambda *a: get_active_user(),
        #"company_id": lambda *a: get_active_company(), # XXX: don't use this yet
        # NOTE(review): date_open is a DateTime field but the default is
        # date-only — confirm the intended precision
        "date_open": lambda *a: time.strftime("%Y-%m-%d"),
    }

    def write(self, ids, vals, **kw):
        """Stamp the close date and enforce approval when completing a job."""
        if vals.get("state") == "done":
            vals["date_close"] = time.strftime("%Y-%m-%d")
            # a job may only be completed after explicit approval
            for obj in self.browse(ids):
                if not obj.done_approved_by_id:
                    raise Exception("Service order has to be approved first")
        super().write(ids, vals, **kw)

    def get_total(self, ids, context={}):
        """Worksheet totals: overall, contract-covered and job-billed amounts."""
        vals = {}
        for obj in self.browse(ids):
            amt_total = 0
            amt_contract = 0
            amt_job = 0
            for line in obj.lines:
                amt_total += line.amount
                if line.payment_type == "contract":
                    amt_contract += line.amount
                elif line.payment_type == "job":
                    amt_job += line.amount
            vals[obj.id] = {
                "amount_total": amt_total,
                "amount_contract": amt_contract,
                "amount_job": amt_job,
            }
        return vals

    def onchange_template(self, context={}):
        """Copy type, description, skill level and worksheet lines from the template."""
        data = context["data"]
        template_id = data["template_id"]
        tmpl = get_model("job.template").browse(template_id)
        data["service_type_id"] = tmpl.service_type_id.id
        data["description"] = tmpl.description
        data["skill_level_id"] = tmpl.skill_level_id.id
        data["lines"] = []
        for line in tmpl.lines:
            line_vals = {
                "type": line.type,
                "product_id": line.product_id.id,
                "description": line.description,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "unit_price": line.unit_price,
            }
            data["lines"].append(line_vals)
        return data

    def get_overdue(self, ids, context={}):
        """A job is overdue when past its due date and still open."""
        vals = {}
        for obj in self.browse(ids):
            if obj.due_date:
                vals[obj.id] = obj.due_date < time.strftime(
                    "%Y-%m-%d") and obj.state in ("planned", "allocated", "in_progress")
            else:
                vals[obj.id] = False
        return vals

    def search_overdue(self, clause, context={}):
        """Search condition mirroring get_overdue."""
        return [["due_date", "<", time.strftime("%Y-%m-%d")], ["state", "in", ["planned", "allocated", "in_progress"]]]

    def copy_to_pick_out(self, ids, context={}):
        """Create a goods issue for the stockable products on the worksheet."""
        obj = self.browse(ids)[0]
        vals = {
            "type": "out",
            "contact_id": obj.contact_id.id,
            "related_id": "job,%d" % obj.id,
            "lines": [],
        }
        # fall back to the generic customer / warehouse locations when
        # the product or job has no explicit location
        res = get_model("stock.location").search([["type", "=", "customer"]])
        if not res:
            raise Exception("Customer location not found")
        cust_loc_id = res[0]
        res = get_model("stock.location").search([["type", "=", "internal"]])
        if not res:
            raise Exception("Warehouse location not found")
        wh_loc_id = res[0]
        for line in obj.lines:
            prod = line.product_id
            if prod.type not in ("stock", "consumable"):
                continue
            line_vals = {
                "product_id": prod.id,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "location_from_id": prod.location_id.id or wh_loc_id,
                "location_to_id": obj.location_id.id or cust_loc_id,
            }
            vals["lines"].append(("create", line_vals))
        if not vals["lines"]:
            raise Exception("Nothing to issue")
        new_id = get_model("stock.picking").create(vals, context={"pick_type": "out"})
        pick = get_model("stock.picking").browse(new_id)
        return {
            "flash": "Goods issue %s copied from service order %s" % (pick.number, obj.number),
            "next": {
                "name": "pick_out",
                "mode": "form",
                "active_id": new_id,
            }
        }

    def copy_to_invoice(self, ids, context={}):
        """Create a customer invoice from the job-billed worksheet lines."""
        obj = self.browse(ids)[0]
        inv_vals = {
            "type": "out",
            "inv_type": "invoice",
            "ref": obj.number,
            "related_id": "job,%s" % obj.id,
            "contact_id": obj.contact_id.id,
            "lines": [],
        }
        for line in obj.lines:
            # only lines billed to the job (not covered by contract)
            if line.payment_type != "job":
                continue
            prod = line.product_id
            line_vals = {
                "product_id": prod.id,
                "description": line.description,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "unit_price": line.unit_price,
                "account_id": prod.sale_account_id.id if prod else None,
                "tax_id": prod.sale_tax_id.id if prod else None,
                "amount": line.amount,
            }
            inv_vals["lines"].append(("create", line_vals))
        if not inv_vals["lines"]:
            raise Exception("Nothing to invoice")
        inv_id = get_model("account.invoice").create(inv_vals, {"type": "out", "inv_type": "invoice"})
        inv = get_model("account.invoice").browse(inv_id)
        return {
            "next": {
                "name": "view_invoice",
                "active_id": inv_id,
            },
            "flash": "Invoice %s created from job %s" % (inv.number, obj.number),
        }

    def onchange_product(self, context={}):
        """Prefill uom, price and description of a worksheet line from its product."""
        data = context["data"]
        path = context["path"]
        line = get_data_path(data, path, parent=True)
        prod_id = line["product_id"]
        prod = get_model("product").browse(prod_id)
        line["uom_id"] = prod.uom_id.id
        line["unit_price"] = prod.sale_price
        line["description"] = prod.description
        return data

    def onchange_due_date(self, context={}):
        """Mirror the due date into the planned start time."""
        data = context["data"]
        data['time_start'] = data['due_date']
        return data

    def onchange_close_date(self, context={}):
        """Derive the job state from how today relates to due/close dates."""
        data = context["data"]
        crr_date = time.strftime("%Y-%m-%d")
        close_date = data['close_date']
        due_date = data['due_date']
        # NOTE(review): assumes both dates are set; the comparisons raise
        # TypeError when either is None — confirm the client always sends
        # them before this onchange fires
        if crr_date >= close_date:
            data['state'] = 'done'
        elif crr_date >= due_date and crr_date <= close_date:
            data['state'] = 'in_progress'
        return data

    def get_cost(self, ids, context={}):
        """Compute labor, parts and other costs plus their total.

        Parts cost is derived from stock moves in/out of the job location;
        labor cost from recorded work time; other cost from "other"
        worksheet lines.
        """
        vals = {}
        for obj in self.browse(ids):
            labor_cost = 0
            # renamed from "time" to avoid shadowing the time module
            for wt in obj.work_time:
                labor_cost += wt.amount or 0
            other_cost = 0
            for line in obj.lines:
                if line.type != "other":
                    continue
                prod = line.product_id
                # NOTE(review): cost is per product, not multiplied by
                # line.qty — confirm this is intended
                other_cost += prod.cost_price or 0
            job_loc_id = obj.location_id.id
            if not job_loc_id:
                res = get_model("stock.location").search([["type", "=", "customer"]])
                if res:
                    job_loc_id = res[0]
            part_cost = 0
            for pick in obj.pickings:
                for move in pick.lines:
                    amt = move.qty * (move.unit_price or 0)
                    if move.location_to_id.id == job_loc_id and move.location_from_id.id != job_loc_id:
                        # moved to the job: adds cost
                        part_cost += amt
                    elif move.location_from_id.id == job_loc_id and move.location_to_id.id != job_loc_id:
                        # returned from the job: reduces cost
                        part_cost -= amt
            vals[obj.id] = {
                "labor_cost": labor_cost,
                "part_cost": part_cost,
                "other_cost": other_cost,
                "total_cost": labor_cost + part_cost + other_cost,
            }
        return vals

    def get_sell(self, ids, context={}):
        """Sum worksheet selling amounts per line type (labor/part/other)."""
        vals = {}
        for obj in self.browse(ids):
            labor_sell = 0
            other_sell = 0
            part_sell = 0
            for line in obj.lines:
                if line.type == "labor":
                    labor_sell += line.amount
                elif line.type == "part":
                    part_sell += line.amount
                elif line.type == "other":
                    other_sell += line.amount
            vals[obj.id] = {
                "labor_sell": labor_sell,
                "part_sell": part_sell,
                "other_sell": other_sell,
            }
        return vals

    def approve_done(self, ids, context={}):
        """Record the current user's approval of the job's completion.

        Raises:
            Exception: when the user lacks the "job_approve_done" permission.
        """
        if not check_permission_other("job_approve_done"):
            raise Exception("Permission denied")
        obj = self.browse(ids)[0]
        user_id = get_active_user()
        obj.write({"done_approved_by_id": user_id})
        return {
            "next": {
                "name": "job",
                "mode": "form",
                "active_id": obj.id,
            },
            "flash": "Service order completion approved successfully",
        }

    def get_days_late(self, ids, context={}):
        """Days elapsed since the due date (negative when still in the future)."""
        vals = {}
        d = datetime.now()
        for obj in self.browse(ids):
            if obj.due_date:
                vals[obj.id] = (d - datetime.strptime(obj.due_date, "%Y-%m-%d")).days
            else:
                vals[obj.id] = None
        return vals

    def get_track_entries(self, ids, context={}):
        """Collect tracking entries of the job's track category and its children."""
        vals = {}
        for obj in self.browse(ids):
            if not obj.track_id:
                vals[obj.id] = []
                continue
            res = get_model("account.track.entry").search([["track_id", "child_of", obj.track_id.id]])
            vals[obj.id] = res
        return vals

    def write_track_entries(self, ids, field, val, context={}):
        """Apply one2many-style create/write/delete ops to tracking entries."""
        for op in val:
            if op[0] == "create":
                rel_vals = op[1]
                for obj in self.browse(ids):
                    # creating entries requires a tracking code on the job
                    if not obj.track_id:
                        continue
                    rel_vals["track_id"] = obj.track_id.id
                    get_model("account.track.entry").create(rel_vals, context=context)
            elif op[0] == "write":
                rel_ids = op[1]
                rel_vals = op[2]
                get_model("account.track.entry").write(rel_ids, rel_vals, context=context)
            elif op[0] == "delete":
                rel_ids = op[1]
                get_model("account.track.entry").delete(rel_ids, context=context)

    def create_track(self, ids, context={}):
        """Create (or reuse) a tracking category named after the job number."""
        obj = self.browse(ids[0])
        code = obj.number
        res = get_model("account.track.categ").search([["code", "=", code]])
        if res:
            track_id = res[0]
        else:
            # nest under the project's tracking category when available
            parent_id = obj.project_id.track_id.id if obj.project_id else None
            track_id = get_model("account.track.categ").create({
                "code": code,
                "name": code,
                "type": "1",
                "parent_id": parent_id,
            })
        obj.write({"track_id": track_id})
Job.register() | netforce_service/netforce_service/models/job.py |
from netforce.model import Model, fields, get_model
from netforce.access import get_active_user, set_active_user
from datetime import *
from dateutil.relativedelta import relativedelta
import time
from netforce.database import get_connection
from netforce.access import get_active_company, check_permission_other
from netforce.utils import get_data_path
class Job(Model):
_name = "job"
_string = "Service Order"
_name_field = "number"
_audit_log = True
_multi_company = True
_fields = {
"project_id": fields.Many2One("project", "Project", search=True),
"contact_id": fields.Many2One("contact", "Customer", required=True, search=True),
"template_id": fields.Many2One("job.template", "Template"),
"service_type_id": fields.Many2One("service.type", "Service Type", search=True),
"product_id": fields.Many2One("product", "Product"), # XXX: deprecated
"name": fields.Char("Order Name", search=True),
"number": fields.Char("Order Number", required=True, search=True),
"description": fields.Text("Description"),
"due_date": fields.Date("Due Date", search=True),
"close_date": fields.Date("Close Date", search=True),
"priority": fields.Selection([["low", "Low"], ["medium", "Medium"], ["high", "High"]], "Priority", search=True),
"state": fields.Selection([["planned", "Planned"], ["allocated", "Allocated"], ["in_progress", "In Progress"], ["done", "Completed"], ["canceled", "Canceled"]], "Status", required=True),
"overdue": fields.Boolean("Overdue", function="get_overdue", function_search="search_overdue"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"documents": fields.One2Many("document", "related_id", "Documents"),
"tasks": fields.One2Many("task", "job_id", "Tasks"),
"days_late": fields.Integer("Days Late", function="get_days_late"),
"user_id": fields.Many2One("base.user", "Assigned To"), # XXX: deprecated
"resource_id": fields.Many2One("service.resource", "Assigned Resource", search=True), # XXX: deprecated
"skill_level_id": fields.Many2One("skill.level", "Required Skill Level", search=True),
"request_by_id": fields.Many2One("base.user", "Requested By", search=True),
"user_board_id": fields.Boolean("User", store=False, function_search="search_user_board_id"),
"sharing": fields.One2Many("share.record", "related_id", "Sharing"),
"invoice_no": fields.Char("Invoice No."), # XXX: not used any more...
"shared_board": fields.Boolean("Shared", store=False, function_search="search_shared_board"),
"quotation_id": fields.Many2One("sale.quot", "Quotation"),
"cancel_reason": fields.Text("Cancel Reason"),
"cancel_periodic": fields.Boolean("Cancel Periodic"),
"next_job_id": fields.Many2One("job", "Next Order"),
"emails": fields.One2Many("email.message", "related_id", "Emails"),
"company_id": fields.Many2One("company", "Company"),
"invoices": fields.One2Many("account.invoice", "related_id", "Invoices"),
"bill_amount": fields.Decimal("Billable Amount"),
"invoice_id": fields.Many2One("account.invoice", "Invoice"),
"is_duplicate": fields.Boolean("Duplicate"),
"work_time": fields.One2Many("work.time", "job_id", "Work Time"),
"pickings": fields.One2Many("stock.picking", "related_id", "Pickings"),
"stock_moves": fields.One2Many("stock.move", "related_id", "Stock Movements"),
"parts": fields.One2Many("job.part", "job_id", "Parts"),
"other_costs": fields.One2Many("job.cost", "job_id", "Other Costs"),
"items": fields.One2Many("job.item", "job_id", "Service Items"),
"allocs": fields.One2Many("service.resource.alloc", "job_id", "Resource Allocations"),
"time_start": fields.DateTime("Planned Start Time"),
"time_stop": fields.DateTime("Planned Stop Time"),
"location_id": fields.Many2One("stock.location", "Job Location"),
"related_id": fields.Reference([["sale.order", "Sales Order"], ["rental.order","Rental Order"], ["issue", "Issue"]], "Related To"),
"lines": fields.One2Many("job.line", "job_id", "Worksheet"),
"complaints": fields.Text("Complaints"),
"cause": fields.Text("Cause"),
"correction": fields.Text("Correction"),
"amount_total": fields.Decimal("Total Selling", function="get_total", function_multi=True),
"amount_contract": fields.Decimal("Included In Contract", function="get_total", function_multi=True),
"amount_job": fields.Decimal("Not Included In Contract", function="get_total", function_multi=True),
"overdue": fields.Boolean("Overdue", function="get_overdue", function_search="search_overdue"),
"date_open": fields.DateTime("Actual Start"),
"date_close": fields.DateTime("Actual Stop"),
"labor_cost": fields.Decimal("Labor Cost", function="get_cost", function_multi=True),
"part_cost": fields.Decimal("Parts Cost", function="get_cost", function_multi=True),
"other_cost": fields.Decimal("Other Cost", function="get_cost", function_multi=True),
"total_cost": fields.Decimal("Total Cost", function="get_cost", function_multi=True),
"labor_sell": fields.Decimal("Labor Selling", function="get_sell", function_multi=True),
"part_sell": fields.Decimal("Parts Selling", function="get_sell", function_multi=True),
"other_sell": fields.Decimal("Other Selling", function="get_sell", function_multi=True),
"done_approved_by_id": fields.Many2One("base.user", "Approved By", readonly=True),
"multi_visit_code_id": fields.Many2One("reason.code", "Multi Visit Reason Code", condition=[["type", "=", "service_multi_visit"]]),
"late_response_code_id": fields.Many2One("reason.code", "Late Response Reason Code", condition=[["type", "=", "service_late_response"]]),
"year": fields.Char("Year", sql_function=["year", "due_date"]),
"quarter": fields.Char("Quarter", sql_function=["quarter", "due_date"]),
"month": fields.Char("Month", sql_function=["month", "due_date"]),
"week": fields.Char("Week", sql_function=["week", "due_date"]),
"activities": fields.One2Many("activity","related_id","Activities"),
"track_id": fields.Many2One("account.track.categ","Tracking Code"),
"track_entries": fields.One2Many("account.track.entry",None,"Tracking Entries",function="get_track_entries",function_write="write_track_entries"),
"track_balance": fields.Decimal("Tracking Balance",function="_get_related",function_context={"path":"track_id.balance"}),
}
_order = "number"
_sql_constraints = [
("number_uniq", "unique (number)", "The job number must be unique!"),
]
def _get_number(self, context={}):
while 1:
num = get_model("sequence").get_number(type="job")
if not num:
return None
user_id = get_active_user()
set_active_user(1)
res = self.search([["number", "=", num]])
set_active_user(user_id)
if not res:
return num
get_model("sequence").increment(type="job")
def name_get(self, ids, context={}):
vals = []
for obj in self.browse(ids):
name = obj.number
if obj.name:
name += " - " + obj.name
vals.append((obj.id, name))
return vals
_defaults = {
"state": "planned",
"number": _get_number,
"request_by_id": lambda *a: get_active_user(),
#"company_id": lambda *a: get_active_company(), # XXX: don't use this yet
"date_open": lambda *a: time.strftime("%Y-%m-%d"),
}
def write(self, ids, vals, **kw):
if vals.get("state") == "done":
vals["date_close"] = time.strftime("%Y-%m-%d")
for obj in self.browse(ids):
if not obj.done_approved_by_id:
raise Exception("Service order has to be approved first")
super().write(ids, vals, **kw)
def get_total(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
amt_total = 0
amt_contract = 0
amt_job = 0
for line in obj.lines:
amt_total += line.amount
if line.payment_type == "contract":
amt_contract += line.amount
elif line.payment_type == "job":
amt_job += line.amount
vals[obj.id] = {
"amount_total": amt_total,
"amount_contract": amt_contract,
"amount_job": amt_job,
}
return vals
def onchange_template(self, context={}):
data = context["data"]
template_id = data["template_id"]
tmpl = get_model("job.template").browse(template_id)
data["service_type_id"] = tmpl.service_type_id.id
data["description"] = tmpl.description
data["skill_level_id"] = tmpl.skill_level_id.id
data["lines"] = []
for line in tmpl.lines:
line_vals = {
"type": line.type,
"product_id": line.product_id.id,
"description": line.description,
"qty": line.qty,
"uom_id": line.uom_id.id,
"unit_price": line.unit_price,
}
data["lines"].append(line_vals)
return data
def get_overdue(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
if obj.due_date:
vals[obj.id] = obj.due_date < time.strftime(
"%Y-%m-%d") and obj.state in ("planned", "allocated", "in_progress")
else:
vals[obj.id] = False
return vals
def search_overdue(self, clause, context={}):
return [["due_date", "<", time.strftime("%Y-%m-%d")], ["state", "in", ["planned", "allocated", "in_progress"]]]
def copy_to_pick_out(self, ids, context={}):
obj = self.browse(ids)[0]
vals = {
"type": "out",
"contact_id": obj.contact_id.id,
"related_id": "job,%d" % obj.id,
"lines": [],
}
res = get_model("stock.location").search([["type", "=", "customer"]])
if not res:
raise Exception("Customer location not found")
cust_loc_id = res[0]
res = get_model("stock.location").search([["type", "=", "internal"]])
if not res:
raise Exception("Warehouse location not found")
wh_loc_id = res[0]
for line in obj.lines:
prod = line.product_id
if prod.type not in ("stock", "consumable"):
continue
line_vals = {
"product_id": prod.id,
"qty": line.qty,
"uom_id": line.uom_id.id,
"location_from_id": prod.location_id.id or wh_loc_id,
"location_to_id": obj.location_id.id or cust_loc_id,
}
vals["lines"].append(("create", line_vals))
if not vals["lines"]:
raise Exception("Nothing to issue")
new_id = get_model("stock.picking").create(vals, context={"pick_type": "out"})
pick = get_model("stock.picking").browse(new_id)
return {
"flash": "Goods issue %s copied from service order %s" % (pick.number, obj.number),
"next": {
"name": "pick_out",
"mode": "form",
"active_id": new_id,
}
}
    def copy_to_invoice(self, ids, context={}):
        """Create a customer invoice from the job's billable lines.

        Only lines with payment_type == "job" are invoiced; the income
        account and sales tax default from each line's product.  Raises when
        nothing is billable.  Returns a client action opening the invoice.
        """
        obj = self.browse(ids)[0]
        inv_vals = {
            "type": "out",
            "inv_type": "invoice",
            "ref": obj.number,
            "related_id": "job,%s" % obj.id,
            "contact_id": obj.contact_id.id,
            "lines": [],
        }
        for line in obj.lines:
            # Skip lines that are not billed through the job itself.
            if line.payment_type != "job":
                continue
            prod = line.product_id
            line_vals = {
                "product_id": prod.id,
                "description": line.description,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "unit_price": line.unit_price,
                "account_id": prod.sale_account_id.id if prod else None,
                "tax_id": prod.sale_tax_id.id if prod else None,
                "amount": line.amount,
            }
            inv_vals["lines"].append(("create", line_vals))
        if not inv_vals["lines"]:
            raise Exception("Nothing to invoice")
        inv_id = get_model("account.invoice").create(inv_vals, {"type": "out", "inv_type": "invoice"})
        inv = get_model("account.invoice").browse(inv_id)
        return {
            "next": {
                "name": "view_invoice",
                "active_id": inv_id,
            },
            "flash": "Invoice %s created from job %s" % (inv.number, obj.number),
        }
    def onchange_product(self, context={}):
        """UI onchange: when a line's product is chosen, default the line's
        UoM, unit price and description from that product."""
        data = context["data"]
        path = context["path"]
        # Resolve the line dict the user is editing from its form path.
        line = get_data_path(data, path, parent=True)
        prod_id = line["product_id"]
        prod = get_model("product").browse(prod_id)
        line["uom_id"] = prod.uom_id.id
        line["unit_price"] = prod.sale_price
        line["description"] = prod.description
        return data
def onchange_due_date(self, context={}):
print("onchange_due_date")
data = context["data"]
data['time_start'] = data['due_date']
return data
def onchange_close_date(self, context={}):
print("onchange_close_date")
data = context["data"]
crr_date = time.strftime("%Y-%m-%d")
close_date = data['close_date']
due_date = data['due_date']
if crr_date >= close_date:
data['state'] = 'done'
elif crr_date >= due_date and crr_date <= close_date:
data['state'] = 'in_progress'
return data
def get_cost(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
labor_cost = 0
for time in obj.work_time:
labor_cost += time.amount or 0
other_cost = 0
for line in obj.lines:
if line.type != "other":
continue
prod = line.product_id
other_cost += prod.cost_price or 0
job_loc_id = obj.location_id.id
if not job_loc_id:
res = get_model("stock.location").search([["type", "=", "customer"]])
if res:
job_loc_id = res[0]
part_cost = 0
for pick in obj.pickings:
for move in pick.lines:
amt = move.qty * (move.unit_price or 0)
if move.location_to_id.id == job_loc_id and move.location_from_id.id != job_loc_id:
part_cost += amt
elif move.location_from_id.id == job_loc_id and move.location_to_id.id != job_loc_id:
part_cost -= amt
vals[obj.id] = {
"labor_cost": labor_cost,
"part_cost": part_cost,
"other_cost": other_cost,
"total_cost": labor_cost + part_cost + other_cost,
}
return vals
def get_sell(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
labor_sell = 0
other_sell = 0
part_sell = 0
for line in obj.lines:
if line.type == "labor":
labor_sell += line.amount
elif line.type == "part":
part_sell += line.amount
elif line.type == "other":
other_sell += line.amount
vals[obj.id] = {
"labor_sell": labor_sell,
"part_sell": part_sell,
"other_sell": other_sell,
}
return vals
    def approve_done(self, ids, context={}):
        """Record the current user as approver of the job's completion.

        Requires the "job_approve_done" permission; raises otherwise.
        Returns a client action re-opening the job form with a flash.
        """
        if not check_permission_other("job_approve_done"):
            raise Exception("Permission denied")
        obj = self.browse(ids)[0]
        user_id = get_active_user()
        obj.write({"done_approved_by_id": user_id})
        return {
            "next": {
                "name": "job",
                "mode": "form",
                "active_id": obj.id,
            },
            "flash": "Service order completion approved successfully",
        }
    def get_days_late(self, ids, context={}):
        """Function-field getter: whole days elapsed since the due date.

        Negative while the due date is still in the future; None when the
        job has no due date.
        """
        vals = {}
        d = datetime.now()
        for obj in self.browse(ids):
            if obj.due_date:
                vals[obj.id] = (d - datetime.strptime(obj.due_date, "%Y-%m-%d")).days
            else:
                vals[obj.id] = None
        return vals
def get_track_entries(self,ids,context={}):
vals={}
for obj in self.browse(ids):
if not obj.track_id:
vals[obj.id]=[]
continue
res=get_model("account.track.entry").search([["track_id","child_of",obj.track_id.id]])
vals[obj.id]=res
return vals
    def write_track_entries(self,ids,field,val,context={}):
        """One2many-style writer for the computed track-entries field.

        ``val`` is a list of operations: ("create", vals), ("write", ids, vals)
        or ("delete", ids).  Created entries are attached to each job's track
        category; jobs without a track category are silently skipped.
        """
        for op in val:
            if op[0]=="create":
                rel_vals=op[1]
                # A "create" is applied once per job that has a track category.
                for obj in self.browse(ids):
                    if not obj.track_id:
                        continue
                    rel_vals["track_id"]=obj.track_id.id
                    get_model("account.track.entry").create(rel_vals,context=context)
            elif op[0]=="write":
                rel_ids=op[1]
                rel_vals=op[2]
                get_model("account.track.entry").write(rel_ids,rel_vals,context=context)
            elif op[0]=="delete":
                rel_ids=op[1]
                get_model("account.track.entry").delete(rel_ids,context=context)
    def create_track(self,ids,context={}):
        """Create (or reuse) an accounting track category coded after the job
        number and link it onto the job.

        When the job belongs to a project, the new category is parented under
        the project's track category.
        """
        # NOTE(review): sibling methods use self.browse(ids)[0]; here a bare
        # id is passed to browse() -- confirm the framework accepts a scalar.
        obj=self.browse(ids[0])
        code=obj.number
        res=get_model("account.track.categ").search([["code","=",code]])
        if res:
            track_id=res[0]
        else:
            parent_id=obj.project_id.track_id.id if obj.project_id else None
            track_id=get_model("account.track.categ").create({
                "code": code,
                "name": code,
                "type": "1",
                "parent_id": parent_id,
            })
        obj.write({"track_id": track_id})
Job.register()
from __future__ import absolute_import
from nose.tools import *
from payoneer_escrow_sdk.authenticator import Authenticator
def setup():
    """Nose module-level setup hook; no fixtures are needed."""
    pass
def teardown():
    """Nose module-level teardown hook; nothing to clean up."""
    pass
def test_secure_headers():
    """
    Verify that secure_headers has the right keys and (certain) values.
    We will test the value of the request signature below, but here we are at
    least verifying that it is the length we expect.
    """
    auth = Authenticator(
        'test_key', 'test_secret')
    method = 'POST'
    uri = '/accounts/5818958914?55811'
    secure_headers = auth.secure_headers(method, uri)
    assert type(secure_headers).__name__ == 'dict'
    assert len(secure_headers) == 3
    assert secure_headers['x-armorpayments-apikey'] == 'test_key'
    # 25 chars matches an ISO-8601 timestamp with a numeric UTC offset.
    assert len(secure_headers['x-armorpayments-requesttimestamp']) == 25
    # 128 hex chars -- presumably a SHA-512 digest; see the signature tests.
    assert len(secure_headers['x-armorpayments-signature']) == 128
def test__request_signature():
    """
    Confirm that we have reproducable results with _request_signature.
    """
    auth = Authenticator(
        'test_key', 'test_secret')
    method = 'POST'
    uri = '/accounts/5818958914?55811'
    # Fixed timestamp so the signature is deterministic.
    timestamp = '2017-04-24T02:52:53-00:00'
    actual = auth._request_signature(method, uri, timestamp)
    expected = 'c70a4b43a271cdc40db55c5b2ddfaeabc9fb448fd16b3f261027cb3ed06fd4954799e8e40b1d64781225a4c2ef71ea938ca7cdff8228ade561041a994f6dd299'
    assert actual == expected
def test__request_signature_variations_do_not_have_same_hash():
    """
    Confirm that we get different results when we vary each of the inputs.
    """
    auth = Authenticator(
        'test_key', 'test_secret')
    method = 'POST'
    uri = '/accounts/5818958914?55811'
    timestamp = '2017-04-24T02:52:53-00:00'
    # One variant per input dimension: method, uri, timestamp.
    alt_method = 'get'
    alt_uri = '/shipmentcarriers'
    alt_timestamp = '2017-04-24T02:52:54-00:00'
    actual = auth._request_signature(method, uri, timestamp)
    actual_alt_method = auth._request_signature(alt_method, uri, timestamp)
    actual_alt_uri = auth._request_signature(method, alt_uri, timestamp)
    actual_alt_timestamp = auth._request_signature(method, uri, alt_timestamp)
    actual_as_set = set([
        actual,
        actual_alt_method,
        actual_alt_uri,
        actual_alt_timestamp])
    expected = 'c70a4b43a271cdc40db55c5b2ddfaeabc9fb448fd16b3f261027cb3ed06fd4954799e8e40b1d64781225a4c2ef71ea938ca7cdff8228ade561041a994f6dd299'
    assert actual == expected
    # Set cardinality 4 proves all four signatures are distinct.
    assert len(actual_as_set) == 4
def test__request_signature_method_case_does_not_matter():
    """
    Confirm that the case of the method does not change the result.
    """
    auth = Authenticator(
        'test_key', 'test_secret')
    method = 'POST'
    lowercase_method = 'post'
    uri = '/accounts/5818958914?55811'
    timestamp = '2017-04-24T02:52:53-00:00'
    actual = auth._request_signature(method, uri, timestamp)
    actual_lowercase = auth._request_signature(lowercase_method, uri, timestamp)
    expected = 'c70a4b43a271cdc40db55c5b2ddfaeabc9fb448fd16b3f261027cb3ed06fd4954799e8e40b1d64781225a4c2ef71ea938ca7cdff8228ade561041a994f6dd299'
    assert actual == expected
    assert actual_lowercase == expected
from __future__ import absolute_import
from nose.tools import *
from payoneer_escrow_sdk.authenticator import Authenticator
def setup():
pass
def teardown():
pass
def test_secure_headers():
"""
Verify that secure_headers has the right keys and (certain) values.
We will test the value of the request signature below, but here we are at
least verifying that it is the length we expect.
"""
auth = Authenticator(
'test_key', 'test_secret')
method = 'POST'
uri = '/accounts/5818958914?55811'
secure_headers = auth.secure_headers(method, uri)
assert type(secure_headers).__name__ == 'dict'
assert len(secure_headers) == 3
assert secure_headers['x-armorpayments-apikey'] == 'test_key'
assert len(secure_headers['x-armorpayments-requesttimestamp']) == 25
assert len(secure_headers['x-armorpayments-signature']) == 128
def test__request_signature():
"""
Confirm that we have reproducable results with _request_signature.
"""
auth = Authenticator(
'test_key', 'test_secret')
method = 'POST'
uri = '/accounts/5818958914?55811'
timestamp = '2017-04-24T02:52:53-00:00'
actual = auth._request_signature(method, uri, timestamp)
expected = 'c70a4b43a271cdc40db55c5b2ddfaeabc9fb448fd16b3f261027cb3ed06fd4954799e8e40b1d64781225a4c2ef71ea938ca7cdff8228ade561041a994f6dd299'
assert actual == expected
def test__request_signature_variations_do_not_have_same_hash():
"""
Confirm that we get different results when we vary each of the inputs.
"""
auth = Authenticator(
'test_key', 'test_secret')
method = 'POST'
uri = '/accounts/5818958914?55811'
timestamp = '2017-04-24T02:52:53-00:00'
alt_method = 'get'
alt_uri = '/shipmentcarriers'
alt_timestamp = '2017-04-24T02:52:54-00:00'
actual = auth._request_signature(method, uri, timestamp)
actual_alt_method = auth._request_signature(alt_method, uri, timestamp)
actual_alt_uri = auth._request_signature(method, alt_uri, timestamp)
actual_alt_timestamp = auth._request_signature(method, uri, alt_timestamp)
actual_as_set = set([
actual,
actual_alt_method,
actual_alt_uri,
actual_alt_timestamp])
expected = 'c70a4b43a271cdc40db55c5b2ddfaeabc9fb448fd16b3f261027cb3ed06fd4954799e8e40b1d64781225a4c2ef71ea938ca7cdff8228ade561041a994f6dd299'
assert actual == expected
assert len(actual_as_set) == 4
def test__request_signature_method_case_does_not_matter():
"""
Confirm that the case of the method does not change the result.
"""
auth = Authenticator(
'test_key', 'test_secret')
method = 'POST'
lowercase_method = 'post'
uri = '/accounts/5818958914?55811'
timestamp = '2017-04-24T02:52:53-00:00'
actual = auth._request_signature(method, uri, timestamp)
actual_lowercase = auth._request_signature(lowercase_method, uri, timestamp)
expected = 'c70a4b43a271cdc40db55c5b2ddfaeabc9fb448fd16b3f261027cb3ed06fd4954799e8e40b1d64781225a4c2ef71ea938ca7cdff8228ade561041a994f6dd299'
assert actual == expected
    assert actual_lowercase == expected
import settings
import caffe
import numpy as np
import numpy as np
import math, random
import sys, subprocess
from IPython.display import clear_output, Image, display
from scipy.misc import imresize
from numpy.linalg import norm
from numpy.testing import assert_array_equal
import scipy.misc, scipy.io
import patchShow
caffe.set_mode_cpu()
def save_image(img, name):
    """Save a (N, C, H, W) BGR image batch to ``name`` as a normalized RGB file."""
    img = img[:,::-1, :, :] # Convert from BGR to RGB
    normalized_img = patchShow.patchShow_single(img, in_range=(-120,120))
    scipy.misc.imsave(name, normalized_img)
def get_shape(data_shape):
    """Return the (height, width) pair of a 4-D (N, C, H, W) blob shape.

    Raises Exception for anything that is not 4-D.
    """
    if len(data_shape) != 4:
        raise Exception("Data shape invalid.")
    return (data_shape[2], data_shape[3])
np.random.seed(0)
# Generator network: maps a feature code ("feat") to an image ("deconv0").
generator = caffe.Net(settings.generator_definition, settings.generator_weights, caffe.TEST)
shape = generator.blobs["feat"].data.shape
generator_output_shape = generator.blobs["deconv0"].data.shape
# Per-channel BGR means subtracted by the classifier preprocessing.
mean = np.float32([104.0, 117.0, 123.0])
nsfw_net = caffe.Classifier("nets/open_nsfw/deploy.prototxt",
                            "nets/open_nsfw/resnet_50_1by2_nsfw.caffemodel",
                            mean = mean, # ImageNet mean
                            channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
def grad_classifier(classifier, end_layer, imagein, z):
    """Backprop the diff ``z`` injected at ``end_layer`` down to the input image.

    Returns (gradient w.r.t. the input image, forward activations up to
    ``end_layer``).
    """
    net_dst = classifier.blobs[end_layer]
    acts = classifier.forward(data=imagein, end=end_layer)
    net_dst.diff[:] = z
    g = classifier.backward(start=end_layer, diffs=['data'])['data'][0]
    # Clear the injected diff so subsequent backward passes start clean.
    net_dst.diff.fill(0.)
    return g, acts
def grad(classifier, end_layer, i, code):
    """Gradient of classifier unit ``i`` at ``end_layer`` w.r.t. the generator code.

    Pipeline: code -> generator -> center crop -> classifier forward, then a
    one-hot diff is backpropagated through classifier and generator.
    Returns (code gradient, generated image fed to the classifier).
    """
    generated = generator.forward(feat=code)
    image = crop(classifier, generated["deconv0"])
    z = np.zeros_like(classifier.blobs[end_layer].data)
    z.flat[i] = 1
    g, acts = grad_classifier(classifier, end_layer, image, z)
    # Pad the image gradient back to the generator's output size.
    generator.blobs['deconv0'].diff[...] = pad(classifier, g)
    gx = generator.backward(start='deconv0')
    generator.blobs['deconv0'].diff.fill(0.)
    return gx['feat'], image
def crop(classifier, image):
    """Center-crop the generator output to the classifier's input size.

    Uses floor division so the slice indices stay ints under both Python 2
    and Python 3 (plain ``/`` yields floats on Python 3 and breaks slicing).
    """
    data_shape = classifier.blobs['data'].data.shape
    image_size = get_shape(data_shape)
    output_size = get_shape(generator_output_shape)
    topleft = ((output_size[0] - image_size[0]) // 2, (output_size[1] - image_size[1]) // 2)
    return image.copy()[:, :, topleft[0]:topleft[0] + image_size[0], topleft[1]:topleft[1] + image_size[1]]
def pad(classifier, image):
    """Zero-pad a classifier-sized gradient/image back to the generator's
    output shape, centered (inverse of crop()).

    Floor division keeps the slice indices ints under Python 3.
    """
    data_shape = classifier.blobs['data'].data.shape
    image_size = get_shape(data_shape)
    output_size = get_shape(generator_output_shape)
    topleft = ((output_size[0] - image_size[0]) // 2, (output_size[1] - image_size[1]) // 2)
    o = np.zeros(generator_output_shape)
    o[:, :, topleft[0]:topleft[0] + image_size[0], topleft[1]:topleft[1] + image_size[1]] = image
    return o
def get_code(path, layer):
    """Encode the image at ``path`` into the encoder's ``layer`` activations.

    Preprocessing: resize to 227x227, transpose to (C, H, W), RGB->BGR, then
    subtract the center-cropped ImageNet mean image.  Returns
    (code with batch axis, preprocessed image data).
    """
    batch_size = 1
    image_size = (3, 227, 227)
    images = np.zeros((batch_size,) + image_size, dtype='float32')
    in_image = scipy.misc.imread(path)
    in_image = scipy.misc.imresize(in_image, (image_size[1], image_size[2]))
    for ni in range(images.shape[0]):
        images[ni] = np.transpose(in_image, (2, 0, 1))
    data = images[:, ::-1]  # RGB -> BGR (channel axis reversed)
    matfile = scipy.io.loadmat('ilsvrc_2012_mean.mat')
    image_mean = matfile['image_mean']
    # Center-crop the mean image; floor division keeps the indices ints
    # under Python 3 (plain / produced floats and broke the slicing).
    topleft = ((image_mean.shape[0] - image_size[1]) // 2, (image_mean.shape[1] - image_size[2]) // 2)
    image_mean = image_mean[topleft[0]:topleft[0] + image_size[1], topleft[1]:topleft[1] + image_size[2]]
    del matfile
    data -= np.expand_dims(np.transpose(image_mean, (2, 0, 1)), 0)  # mean is already BGR
    encoder = caffe.Net(settings.encoder_definition, settings.encoder_weights, caffe.TEST)
    encoder.forward(data=data)
    feat = np.copy(encoder.blobs[layer].data)
    del encoder
    zero_feat = feat[0].copy()[np.newaxis]
    return zero_feat, data
# Encoder layer whose activations serve as the optimization code.
opt_layer = 'fc6'
# Number of gradient-ascent iterations and the initial step-size scale.
total_iters = 300
alpha = 1
def main(filename, iters=total_iters):
    """Activation maximization: push the generator code toward unit 1 of the
    NSFW net's 'prob' layer, saving each intermediate image to output/.

    The code stays clamped to [0, 1.5 * per-unit fc6 activation range].
    """
    np.random.seed(0)
    code, start_image = get_code(filename, opt_layer)
    # Per-unit upper bounds on fc6 activations (3x observed range file).
    upper_bound = np.loadtxt("act_range/3x/fc6.txt", delimiter=' ', usecols=np.arange(0, 4096), unpack=True)
    upper_bound = upper_bound.reshape(4096)
    lower_bound = np.zeros(4096)
    for i in range(0,iters):
        # Step size decays linearly toward ~0 over the run.
        step_size = (alpha + (1e-10 - alpha) * i) / iters
        gn, image = grad(nsfw_net, 'prob', 1, code)
        g = 1500 * gn
        # Stop when the gradient has effectively vanished.
        if norm(g) <= 1e-8:
            break
        code = code - step_size*g/np.abs(g).mean()
        code = np.maximum(code, lower_bound)
        # 1*upper bound produces realistic looking images
        # No upper bound produces dramatic high saturation pics
        # 1.5* Upper bound is a decent choice
        code = np.minimum(code, 1.5*upper_bound)
        save_image(image, "output/" + str(i) + ".jpg")
if __name__ == '__main__':
    main('jordan1.jpg')
import settings
import caffe
import numpy as np
import numpy as np
import math, random
import sys, subprocess
from IPython.display import clear_output, Image, display
from scipy.misc import imresize
from numpy.linalg import norm
from numpy.testing import assert_array_equal
import scipy.misc, scipy.io
import patchShow
caffe.set_mode_cpu()
def save_image(img, name):
img = img[:,::-1, :, :] # Convert from BGR to RGB
normalized_img = patchShow.patchShow_single(img, in_range=(-120,120))
scipy.misc.imsave(name, normalized_img)
def get_shape(data_shape):
if len(data_shape) == 4:
return (data_shape[2], data_shape[3])
else:
raise Exception("Data shape invalid.")
np.random.seed(0)
generator = caffe.Net(settings.generator_definition, settings.generator_weights, caffe.TEST)
shape = generator.blobs["feat"].data.shape
generator_output_shape = generator.blobs["deconv0"].data.shape
mean = np.float32([104.0, 117.0, 123.0])
nsfw_net = caffe.Classifier("nets/open_nsfw/deploy.prototxt",
"nets/open_nsfw/resnet_50_1by2_nsfw.caffemodel",
mean = mean, # ImageNet mean
channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
def grad_classifier(classifier, end_layer, imagein, z):
net_dst = classifier.blobs[end_layer]
acts = classifier.forward(data=imagein, end=end_layer)
net_dst.diff[:] = z
g = classifier.backward(start=end_layer, diffs=['data'])['data'][0]
net_dst.diff.fill(0.)
return g, acts
def grad(classifier, end_layer, i, code):
generated = generator.forward(feat=code)
image = crop(classifier, generated["deconv0"])
z = np.zeros_like(classifier.blobs[end_layer].data)
z.flat[i] = 1
g, acts = grad_classifier(classifier, end_layer, image, z)
generator.blobs['deconv0'].diff[...] = pad(classifier, g)
gx = generator.backward(start='deconv0')
generator.blobs['deconv0'].diff.fill(0.)
return gx['feat'], image
def crop(classifier, image):
data_shape = classifier.blobs['data'].data.shape
image_size = get_shape(data_shape)
output_size = get_shape(generator_output_shape)
topleft = ((output_size[0] - image_size[0])/2, (output_size[1] - image_size[1])/2)
return image.copy()[:,:,topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]]
def pad(classifier, image):
data_shape = classifier.blobs['data'].data.shape
image_size = get_shape(data_shape)
output_size = get_shape(generator_output_shape)
topleft = ((output_size[0] - image_size[0])/2, (output_size[1] - image_size[1])/2)
o = np.zeros(generator_output_shape)
o[:,:,topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]] = image
return o
def get_code(path, layer):
batch_size = 1
image_size = (3, 227, 227)
images = np.zeros((batch_size,) + image_size, dtype='float32')
in_image = scipy.misc.imread(path)
in_image = scipy.misc.imresize(in_image, (image_size[1], image_size[2]))
for ni in range(images.shape[0]):
images[ni] = np.transpose(in_image, (2, 0, 1))
data = images[:,::-1]
matfile = scipy.io.loadmat('ilsvrc_2012_mean.mat')
image_mean = matfile['image_mean']
topleft = ((image_mean.shape[0] - image_size[1])/2, (image_mean.shape[1] - image_size[2])/2)
image_mean = image_mean[topleft[0]:topleft[0]+image_size[1], topleft[1]:topleft[1]+image_size[2]]
del matfile
data -= np.expand_dims(np.transpose(image_mean, (2,0,1)), 0) # mean is already BGR
encoder = caffe.Net(settings.encoder_definition, settings.encoder_weights, caffe.TEST)
encoder.forward(data=data)
feat = np.copy(encoder.blobs[layer].data)
del encoder
zero_feat = feat[0].copy()[np.newaxis]
return zero_feat, data
opt_layer = 'fc6'
total_iters = 300
alpha = 1
def main(filename, iters=total_iters):
np.random.seed(0)
code, start_image = get_code(filename, opt_layer)
upper_bound = np.loadtxt("act_range/3x/fc6.txt", delimiter=' ', usecols=np.arange(0, 4096), unpack=True)
upper_bound = upper_bound.reshape(4096)
lower_bound = np.zeros(4096)
for i in range(0,iters):
step_size = (alpha + (1e-10 - alpha) * i) / iters
gn, image = grad(nsfw_net, 'prob', 1, code)
g = 1500 * gn
if norm(g) <= 1e-8:
break
code = code - step_size*g/np.abs(g).mean()
code = np.maximum(code, lower_bound)
# 1*upper bound produces realistic looking images
# No upper bound produces dramatic high saturation pics
# 1.5* Upper bound is a decent choice
code = np.minimum(code, 1.5*upper_bound)
save_image(image, "output/" + str(i) + ".jpg")
if __name__ == '__main__':
    main('jordan1.jpg')
import discord
import asyncio
from arxivpy.arxiv import Arxiv
import json
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
client = discord.Client()
# Previously-announced paper ids; start fresh when the file is absent.
# NOTE(review): the bare except also swallows JSON syntax errors --
# consider catching (IOError, ValueError) explicitly.
try:
    with open(os.path.join(dir_path, "read_papers.json"), "rb") as f:
        papers = json.load(f)
except:
    papers = {}
# Search configuration: {"search": {category: [extra criteria, ...]}}.
try:
    with open(os.path.join(dir_path, "config.json"), "rb") as f:
        config = json.load(f)
except:
    config = {
        "search": {}
    }
async def check_arxiv():
    """Background task: poll arXiv hourly and announce unseen papers.

    For each configured category, fetches the newest submissions plus one
    query per extra search criterion, drops papers already recorded in
    ``papers``, persists state to disk, then posts one embed per new paper
    to the hard-coded channel.
    """
    await client.wait_until_ready()
    # Fixed destination channel for all announcements.
    channel = discord.Object(id='299546992957456386')
    # NOTE(review): ``is_closed`` is an attribute in discord.py 0.x but a
    # method (is_closed()) in 1.x -- confirm against the installed version.
    while not client.is_closed:
        unprocessed = []
        all_new = []
        for category, criterias in config["search"].items():
            unprocessed.extend(Arxiv.query(
                prefix=Arxiv.Prefix.subject,
                q=category,
                sort_order=Arxiv.Sort.Order.descending,
                sort_by=Arxiv.Sort.By.submitted_date,
                start=0,
                max_results=100
            ))
            for criteria in criterias:
                unprocessed.extend(Arxiv.query(
                    prefix=Arxiv.Prefix.all,
                    q=criteria,
                    sort_order=Arxiv.Sort.Order.descending,
                    sort_by=Arxiv.Sort.By.submitted_date,
                    start=0,
                    max_results=100
                ))
        # Check if exists
        for _paper in unprocessed:
            _paper_id = _paper.get_id()
            if _paper_id not in papers:
                all_new.append(_paper)
                papers[_paper_id] = None
        # Persist config and the seen-papers map so restarts do not re-post.
        with open(os.path.join(dir_path, "config.json"), "w") as f:
            json.dump(config, f)
        with open(os.path.join(dir_path, "read_papers.json"), "w") as f:
            json.dump(papers, f)
        for new_paper in all_new:
            # Long summaries are truncated -- presumably to stay under
            # Discord's embed description limit.
            embed = discord.Embed(
                title=new_paper.title,
                description=new_paper.summary if len(new_paper.summary) < 2040 else new_paper.summary[0:2040] + ".....",
                type="rich",
                url=new_paper.page_url,
                color=0x00ff00
            )
            embed.set_author(name=', '.join(new_paper.authors))
            await client.send_message(channel, embed=embed)
        await asyncio.sleep(60*60)
@client.event
async def on_ready():
    """Log the bot's identity once connected and start the hourly poller."""
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
    client.loop.create_task(check_arxiv())
@client.event
async def on_message(message):
    """Command handler for ``!arxiv`` messages.

    Subcommands: ``add <category> [criteria]``; ``frequency <n>`` (stored in
    config but not read by the poller in this file); ``list`` (not
    implemented); anything else prints usage help.
    """
    if not message.content.startswith('!arxiv'):
        return
    tokenized_message = message.content.split(" ")
    if len(tokenized_message) >= 2:
        if tokenized_message[1] == 'add':
            # NOTE(review): raises IndexError when no category is supplied.
            category = tokenized_message[2]
            if category not in config["search"]:
                await client.send_message(message.channel, "Added %s to the search list." % category)
                config["search"][category] = []
            # Optional extra criterion; the bare except hides real errors
            # beyond the expected IndexError.
            try:
                title = tokenized_message[3]
                config["search"][category].append(title)
            except:
                pass
        elif tokenized_message[1] == 'frequency':
            config["frequency"] = tokenized_message[2]
        elif tokenized_message[1] == 'list':
            pass
        else:
            await client.send_message(message.channel, "\n------ Help ------\n!arxiv add <category:required> <title:optional>\n!arxiv frequency <integer>\n!arxiv list\n--------------------")
client.run('')
import discord
import asyncio
from arxivpy.arxiv import Arxiv
import json
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
client = discord.Client()
try:
with open(os.path.join(dir_path, "read_papers.json"), "rb") as f:
papers = json.load(f)
except:
papers = {}
try:
with open(os.path.join(dir_path, "config.json"), "rb") as f:
config = json.load(f)
except:
config = {
"search": {}
}
async def check_arxiv():
await client.wait_until_ready()
channel = discord.Object(id='299546992957456386')
while not client.is_closed:
unprocessed = []
all_new = []
for category, criterias in config["search"].items():
unprocessed.extend(Arxiv.query(
prefix=Arxiv.Prefix.subject,
q=category,
sort_order=Arxiv.Sort.Order.descending,
sort_by=Arxiv.Sort.By.submitted_date,
start=0,
max_results=100
))
for criteria in criterias:
unprocessed.extend(Arxiv.query(
prefix=Arxiv.Prefix.all,
q=criteria,
sort_order=Arxiv.Sort.Order.descending,
sort_by=Arxiv.Sort.By.submitted_date,
start=0,
max_results=100
))
# Check if exists
for _paper in unprocessed:
_paper_id = _paper.get_id()
if _paper_id not in papers:
all_new.append(_paper)
papers[_paper_id] = None
with open(os.path.join(dir_path, "config.json"), "w") as f:
json.dump(config, f)
with open(os.path.join(dir_path, "read_papers.json"), "w") as f:
json.dump(papers, f)
for new_paper in all_new:
embed = discord.Embed(
title=new_paper.title,
description=new_paper.summary if len(new_paper.summary) < 2040 else new_paper.summary[0:2040] + ".....",
type="rich",
url=new_paper.page_url,
color=0x00ff00
)
embed.set_author(name=', '.join(new_paper.authors))
await client.send_message(channel, embed=embed)
await asyncio.sleep(60*60)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
client.loop.create_task(check_arxiv())
@client.event
async def on_message(message):
if not message.content.startswith('!arxiv'):
return
tokenized_message = message.content.split(" ")
if len(tokenized_message) >= 2:
if tokenized_message[1] == 'add':
category = tokenized_message[2]
if category not in config["search"]:
await client.send_message(message.channel, "Added %s to the search list." % category)
config["search"][category] = []
try:
title = tokenized_message[3]
config["search"][category].append(title)
except:
pass
elif tokenized_message[1] == 'frequency':
config["frequency"] = tokenized_message[2]
elif tokenized_message[1] == 'list':
pass
else:
await client.send_message(message.channel, "\n------ Help ------\n!arxiv add <category:required> <title:optional>\n!arxiv frequency <integer>\n!arxiv list\n--------------------")
client.run('')
import sys
import os
import importlib
import ConfigParser
import cea.config
import cea.datamanagement.copy_default_databases
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def main(config=None):
    """
    Dispatch the CEA script named in ``sys.argv`` with its command-line options.

    Restricts the configuration to the script's declared options, applies and
    persists the command-line overrides, then imports the script's module and
    calls its ``main(config)``.  Exits with the error's return code on known
    CEA errors.

    :param cea.config.Configuration config: the configuration file to use (instead of creating a new one)
    :return:
    """
    if not config:
        config = cea.config.Configuration()
    cli_config = get_cli_config()
    # handle arguments
    args = sys.argv[1:]  # drop the script name from the arguments
    if not len(args) or args[0].lower() == '--help':
        print_help(config, cli_config, args[1:])
        sys.exit(1)
    script_name = args.pop(0)
    option_list = cli_config.get('config', script_name).split()
    config.restrict_to(option_list)
    config.apply_command_line_args(args, option_list)
    # save the updates to the configuration file (re-running the same tool will result in the
    # same parameters being set)
    config.save(cea.config.CEA_CONFIG)
    print_script_configuration(config, script_name, option_list)
    # FIXME: remove this after Executive Course
    # <--
    config.restrict_to(['general:scenario', 'general:region'] + option_list)
    cea.datamanagement.copy_default_databases.copy_default_databases(
        locator=cea.inputlocator.InputLocator(config.scenario), region=config.region)
    config.restrict_to(option_list)
    # -->
    module_path = cli_config.get('scripts', script_name)
    script_module = importlib.import_module(module_path)
    try:
        script_module.main(config)
    except cea.ConfigError as config_error:
        print('ERROR: %s' % config_error)
        sys.exit(config_error.rc)
    except cea.CustomDatabaseNotFound as error:
        print('ERROR: %s' % error)
        sys.exit(error.rc)
    except:
        # Anything unexpected is re-raised with its full traceback.
        raise
def print_script_configuration(config, script_name, option_list):
    """
    Print a list of script parameters being used for this run of the tool. Historically, each tool
    was responsible for printing their own parameters, but that requires manually keeping track of these
    parameters.
    """
    print('City Energy Analyst version %s' % cea.__version__)
    print("Running `cea %(script_name)s` with the following parameters:" % locals())
    for section, parameter in config.matching_parameters(option_list):
        # Bound to locals so the "%(name)s" % locals() formatting below works.
        section_name = section.name
        parameter_name = parameter.name
        parameter_value = parameter.get()
        print("- %(section_name)s:%(parameter_name)s = %(parameter_value)s" % locals())
def get_cli_config():
    """Return a ConfigParser object for the ``cli.config`` file used to configure the scripts known to the
    ``cea`` command line interface and the parameters accepted by each script"""
    # cli.config lives next to this module: [scripts] maps names to module
    # paths, [config] maps names to the option list each script accepts.
    cli_config = ConfigParser.SafeConfigParser()
    cli_config.read(os.path.join(os.path.dirname(__file__), 'cli.config'))
    return cli_config
def print_help(config, cli_config, remaining_args):
    """Print out the help message for the ``cea`` command line interface.

    With a script name in ``remaining_args``, shows that script's module
    docstring and each of its options with the currently configured value;
    otherwise shows general usage plus the list of valid script names.
    """
    if remaining_args:
        script_name = remaining_args[0]
        try:
            module_path = cli_config.get('scripts', script_name)
            option_list = cli_config.get('config', script_name).split()
        except:
            # Unknown script name (or missing cli.config entry).
            print("Invalid value for SCRIPT.")
            print_valid_script_names(cli_config)
            return
        script_module = importlib.import_module(module_path)
        print(script_module.__doc__)
        print("")
        print("OPTIONS for %s:" % script_name)
        for _, parameter in config.matching_parameters(option_list):
            print("--%s: %s" % (parameter.name, parameter.get()))
            print("    %s" % parameter.help)
    else:
        print("usage: cea SCRIPT [OPTIONS]")
        print("       to run a specific script")
        print("usage: cea --help SCRIPT")
        print("       to get additional help specific to a script")
        print_valid_script_names(cli_config)
def print_valid_script_names(cli_config):
    """Print the sorted list of known script names, wrapped for the terminal."""
    import textwrap
    names = ', '.join(sorted(cli_config.options('scripts')))
    print("")
    print(textwrap.fill("SCRIPT can be one of: %s" % names,
                        subsequent_indent='    ', break_on_hyphens=False))
if __name__ == '__main__':
    main(cea.config.Configuration())
import sys
import os
import importlib
import ConfigParser
import cea.config
import cea.datamanagement.copy_default_databases
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def main(config=None):
"""
:param cea.config.Configuration config: the configuration file to use (instead of creating a new one)
:return:
"""
if not config:
config = cea.config.Configuration()
cli_config = get_cli_config()
# handle arguments
args = sys.argv[1:] # drop the script name from the arguments
if not len(args) or args[0].lower() == '--help':
print_help(config, cli_config, args[1:])
sys.exit(1)
script_name = args.pop(0)
option_list = cli_config.get('config', script_name).split()
config.restrict_to(option_list)
config.apply_command_line_args(args, option_list)
# save the updates to the configuration file (re-running the same tool will result in the
# same parameters being set)
config.save(cea.config.CEA_CONFIG)
print_script_configuration(config, script_name, option_list)
# FIXME: remove this after Executive Course
# <--
config.restrict_to(['general:scenario', 'general:region'] + option_list)
cea.datamanagement.copy_default_databases.copy_default_databases(
locator=cea.inputlocator.InputLocator(config.scenario), region=config.region)
config.restrict_to(option_list)
# -->
module_path = cli_config.get('scripts', script_name)
script_module = importlib.import_module(module_path)
try:
script_module.main(config)
except cea.ConfigError as config_error:
print('ERROR: %s' % config_error)
sys.exit(config_error.rc)
except cea.CustomDatabaseNotFound as error:
print('ERROR: %s' % error)
sys.exit(error.rc)
except:
raise
def print_script_configuration(config, script_name, option_list):
"""
Print a list of script parameters being used for this run of the tool. Historically, each tool
was responsible for printing their own parameters, but that requires manually keeping track of these
parameters.
"""
print('City Energy Analyst version %s' % cea.__version__)
print("Running `cea %(script_name)s` with the following parameters:" % locals())
for section, parameter in config.matching_parameters(option_list):
section_name = section.name
parameter_name = parameter.name
parameter_value = parameter.get()
print("- %(section_name)s:%(parameter_name)s = %(parameter_value)s" % locals())
def get_cli_config():
"""Return a ConfigParser object for the ``cli.config`` file used to configure the scripts known to the
``cea`` command line interface and the parameters accepted by each script"""
cli_config = ConfigParser.SafeConfigParser()
cli_config.read(os.path.join(os.path.dirname(__file__), 'cli.config'))
return cli_config
def print_help(config, cli_config, remaining_args):
"""Print out the help message for the ``cea`` command line interface"""
if remaining_args:
script_name = remaining_args[0]
try:
module_path = cli_config.get('scripts', script_name)
option_list = cli_config.get('config', script_name).split()
except:
print("Invalid value for SCRIPT.")
print_valid_script_names(cli_config)
return
script_module = importlib.import_module(module_path)
print(script_module.__doc__)
print("")
print("OPTIONS for %s:" % script_name)
for _, parameter in config.matching_parameters(option_list):
print("--%s: %s" % (parameter.name, parameter.get()))
print(" %s" % parameter.help)
else:
print("usage: cea SCRIPT [OPTIONS]")
print(" to run a specific script")
print("usage: cea --help SCRIPT")
print(" to get additional help specific to a script")
print_valid_script_names(cli_config)
def print_valid_script_names(cli_config):
import textwrap
print("")
print(textwrap.fill("SCRIPT can be one of: %s" % ', '.join(sorted(cli_config.options('scripts'))),
subsequent_indent=' ', break_on_hyphens=False))
if __name__ == '__main__':
main(cea.config.Configuration()) | 0.216094 | 0.07117 |
import sqlite3
from flask import Flask, render_template, g, redirect, url_for, request, session, flash
DATABASE = 'test.db'
USERNAME = 'admin'
PASSWORD = '<PASSWORD>'
SECRET_KEY = 'this is secret!'
CURRENT_ID = 0
app = Flask(__name__)
app.config.from_object(__name__)
@app.route('/')
def welcome():
return '<h1>Welcome to CMPUT 410 - Jinja Lab! </h1>'
@app.route('/task', methods=['GET', 'POST'])
def task():
if request.method == 'POST':
if not session.get('logged_in'):
abort(401)
description = request.form['description']
category = request.form['category']
priority = request.form['priority']
addTask(category,priority,description,app.config['CURRENT_ID'])
app.config['CURRENT_ID'] += 1
flash("new task added")
return redirect(url_for('task'))
return render_template('show_entries.html', tasks=query_db('select * from tasks'))
@app.route('/login', methods=['GET','POST'])
def login():
error = None
if request.method=='POST':
if request.form['username'] != app.config['USERNAME']:
error = 'invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'invalid password'
else:
session['logged_in'] = True
flash("You are logged in")
return redirect(url_for('task'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in')
flash("you are logged out")
return redirect(url_for('task'))
@app.route('/delete', methods=['POST'])
def delete():
if not session.get('logged_in'):
abort(401)
removetask(request.form['category'],request.form['priority'],request.form['description'],request.form['id'])
flash("task was deleted")
return redirect(url_for('task'))
def addTask(category,priority,description,id):
query_db('insert into tasks values(?,?,?,?)', [category,int(priority),description,int(id)], one=True)
get_db().commit();
def removetask(category,priority,description, id):
query_db('delete from tasks where category = ? and description = ? and priority = ? and id = ?', [category, description, priority, id], one=True)
get_db().commit()
def query_db(query, args=(), one=False):
cur = get_db().cursor()
cur.execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
db.row_factory = sqlite3.Row
return db;
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
db = None
if __name__ == '__main__':
current_id = 0
app.debug = True
app.run() | todolist.py | import sqlite3
from flask import Flask, render_template, g, redirect, url_for, request, session, flash
DATABASE = 'test.db'
USERNAME = 'admin'
PASSWORD = '<PASSWORD>'
SECRET_KEY = 'this is secret!'
CURRENT_ID = 0
app = Flask(__name__)
app.config.from_object(__name__)
@app.route('/')
def welcome():
return '<h1>Welcome to CMPUT 410 - Jinja Lab! </h1>'
@app.route('/task', methods=['GET', 'POST'])
def task():
if request.method == 'POST':
if not session.get('logged_in'):
abort(401)
description = request.form['description']
category = request.form['category']
priority = request.form['priority']
addTask(category,priority,description,app.config['CURRENT_ID'])
app.config['CURRENT_ID'] += 1
flash("new task added")
return redirect(url_for('task'))
return render_template('show_entries.html', tasks=query_db('select * from tasks'))
@app.route('/login', methods=['GET','POST'])
def login():
error = None
if request.method=='POST':
if request.form['username'] != app.config['USERNAME']:
error = 'invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'invalid password'
else:
session['logged_in'] = True
flash("You are logged in")
return redirect(url_for('task'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in')
flash("you are logged out")
return redirect(url_for('task'))
@app.route('/delete', methods=['POST'])
def delete():
if not session.get('logged_in'):
abort(401)
removetask(request.form['category'],request.form['priority'],request.form['description'],request.form['id'])
flash("task was deleted")
return redirect(url_for('task'))
def addTask(category,priority,description,id):
query_db('insert into tasks values(?,?,?,?)', [category,int(priority),description,int(id)], one=True)
get_db().commit();
def removetask(category,priority,description, id):
query_db('delete from tasks where category = ? and description = ? and priority = ? and id = ?', [category, description, priority, id], one=True)
get_db().commit()
def query_db(query, args=(), one=False):
cur = get_db().cursor()
cur.execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
db.row_factory = sqlite3.Row
return db;
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
db = None
if __name__ == '__main__':
current_id = 0
app.debug = True
app.run() | 0.134037 | 0.043224 |
import tempfile
from pathlib import PosixPath
import pyarrow.parquet as pq
import yaml
from cloudpathlib import AnyPath, CloudPath
from cachetools import cached, TTLCache
def get_local_file(file_location):
if isinstance(file_location, PosixPath):
return file_location.as_posix()
elif isinstance(file_location, CloudPath):
if file_location._local.exists():
# Our files are immutable so if the local cache exists
# we can just return that
return file_location._local.as_posix()
else:
# Otherwise this downloads the file and returns the local path
return file_location.fspath
else:
raise Exception("Unsupported path type")
@cached(TTLCache(maxsize=1000, ttl=60))
def get_latest_details(config_location):
with open(config_location / "latest.yaml", "r") as stream:
return yaml.safe_load(stream)
def get_partition_iterator(min_partition, max_partition, partition_sizes):
for partition_size in sorted(partition_sizes, reverse=True):
start_partition_allowed = (min_partition // partition_size) * partition_size
end_partition_allowed = (max_partition // partition_size) * partition_size
last_max_partition = None
for start_partition in range(
start_partition_allowed, end_partition_allowed, partition_size
):
last_max_partition = start_partition + partition_size
yield partition_size, start_partition, start_partition + partition_size
if last_max_partition is not None:
min_partition = last_max_partition
def get_partition_files(config_location, table, min_partition, max_partition):
# Get config
with open(get_local_file(config_location / "config.yaml"), "r") as stream:
config = yaml.safe_load(stream)
latest = get_latest_details(config_location)
latest_block = latest.get("latest_block")
# Get table
table_config = config["tables"][table]
partition_sizes = sorted(table_config["partition_sizes"], reverse=True)
table_dir = config_location.joinpath(
"data", f"subgraph={latest['subgraph_deployment']}", f"table={table}"
)
files = []
for partition_size, start_partition, end_partition in get_partition_iterator(
min_partition, latest_block, partition_sizes):
if start_partition < max_partition:
files.append(table_dir.joinpath(
f"partition_size={partition_size}",
f"start_partition={start_partition}",
f"end_partition={end_partition}",
"data.parquet",
))
return files
def get_files(config_location, table, min_partition, max_partition):
file_list = get_partition_files(AnyPath(config_location), table, min_partition, max_partition)
return list(map(get_local_file, file_list))
def get_parameters(parameters):
"""
TODO: take hex blob as input instead of parameters
"""
core_parameters = parameters.get("core")
user_defined_parameters = parameters.get("user_defined")
return core_parameters, user_defined_parameters
def get_payment_cycle(start_block, end_block, payment_cycle_length):
"""
by default, the payment cycle is the tail of the compute range
"""
return max(end_block, start_block + payment_cycle_length)
def write_parquet_file(file_location, table):
# Pyarrow can't take a file object so we have to write to a temp file
# and upload directly
if isinstance(file_location, CloudPath):
with tempfile.TemporaryDirectory() as temp_dir:
pq_file_location = AnyPath(temp_dir) / "results.parquet"
pq.write_table(table, pq_file_location)
file_location.joinpath("results.parquet").upload_from(pq_file_location)
else:
pq.write_table(table, file_location / "results.parquet") | packages/cardpay-reward-programs/cardpay_reward_programs/utils.py | import tempfile
from pathlib import PosixPath
import pyarrow.parquet as pq
import yaml
from cloudpathlib import AnyPath, CloudPath
from cachetools import cached, TTLCache
def get_local_file(file_location):
if isinstance(file_location, PosixPath):
return file_location.as_posix()
elif isinstance(file_location, CloudPath):
if file_location._local.exists():
# Our files are immutable so if the local cache exists
# we can just return that
return file_location._local.as_posix()
else:
# Otherwise this downloads the file and returns the local path
return file_location.fspath
else:
raise Exception("Unsupported path type")
@cached(TTLCache(maxsize=1000, ttl=60))
def get_latest_details(config_location):
with open(config_location / "latest.yaml", "r") as stream:
return yaml.safe_load(stream)
def get_partition_iterator(min_partition, max_partition, partition_sizes):
for partition_size in sorted(partition_sizes, reverse=True):
start_partition_allowed = (min_partition // partition_size) * partition_size
end_partition_allowed = (max_partition // partition_size) * partition_size
last_max_partition = None
for start_partition in range(
start_partition_allowed, end_partition_allowed, partition_size
):
last_max_partition = start_partition + partition_size
yield partition_size, start_partition, start_partition + partition_size
if last_max_partition is not None:
min_partition = last_max_partition
def get_partition_files(config_location, table, min_partition, max_partition):
# Get config
with open(get_local_file(config_location / "config.yaml"), "r") as stream:
config = yaml.safe_load(stream)
latest = get_latest_details(config_location)
latest_block = latest.get("latest_block")
# Get table
table_config = config["tables"][table]
partition_sizes = sorted(table_config["partition_sizes"], reverse=True)
table_dir = config_location.joinpath(
"data", f"subgraph={latest['subgraph_deployment']}", f"table={table}"
)
files = []
for partition_size, start_partition, end_partition in get_partition_iterator(
min_partition, latest_block, partition_sizes):
if start_partition < max_partition:
files.append(table_dir.joinpath(
f"partition_size={partition_size}",
f"start_partition={start_partition}",
f"end_partition={end_partition}",
"data.parquet",
))
return files
def get_files(config_location, table, min_partition, max_partition):
file_list = get_partition_files(AnyPath(config_location), table, min_partition, max_partition)
return list(map(get_local_file, file_list))
def get_parameters(parameters):
"""
TODO: take hex blob as input instead of parameters
"""
core_parameters = parameters.get("core")
user_defined_parameters = parameters.get("user_defined")
return core_parameters, user_defined_parameters
def get_payment_cycle(start_block, end_block, payment_cycle_length):
"""
by default, the payment cycle is the tail of the compute range
"""
return max(end_block, start_block + payment_cycle_length)
def write_parquet_file(file_location, table):
# Pyarrow can't take a file object so we have to write to a temp file
# and upload directly
if isinstance(file_location, CloudPath):
with tempfile.TemporaryDirectory() as temp_dir:
pq_file_location = AnyPath(temp_dir) / "results.parquet"
pq.write_table(table, pq_file_location)
file_location.joinpath("results.parquet").upload_from(pq_file_location)
else:
pq.write_table(table, file_location / "results.parquet") | 0.316053 | 0.12603 |
import numpy as np
import pandas as pd
s = pd.Series([1, 3, 5, np.nan, 6, 8])
print(s)
# Output:
# 0 1.0
# 1 3.0
# 2 5.0
# 3 NaN
# 4 6.0
# 5 8.0
# dtype: float64
dates = pd.date_range('20210506', periods=6)
print(dates)
# Output:
# DatetimeIndex(['2021-05-06', '2021-05-07', '2021-05-08', '2021-05-09',
# '2021-05-10', '2021-05-11'],
# dtype='datetime64[ns]', freq='D')
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
print(df)
# Output:
# A B C D
# 2021-05-06 1.017064 1.203402 0.321319 -1.842937
# 2021-05-07 0.862317 -0.118506 0.965226 1.135190
# 2021-05-08 0.095270 -0.274542 0.065710 0.848447
# 2021-05-09 0.505555 0.910965 -0.640123 0.139648
# 2021-05-10 -1.610411 2.211535 -0.753992 0.745157
# 2021-05-11 -1.252522 0.560822 -0.741799 0.293456
df2 = pd.DataFrame({'A': 1.,
'B': pd.Timestamp('20210506'),
'C': pd.Series(1, index=list(range(4)), dtype='float32'),
'D': np.array([3] * 4, dtype='int32'),
'E': pd.Categorical(["test", "train", "test", "train"]),
'F': 'foo'
})
print(df2)
# Output
# A B C D E F
# 0 1.0 2021-05-06 1.0 3 test foo
# 1 1.0 2021-05-06 1.0 3 train foo
# 2 1.0 2021-05-06 1.0 3 test foo
# 3 1.0 2021-05-06 1.0 3 train foo
print(df2.dtypes)
# Output
# A float64
# B datetime64[ns]
# C float32
# D int32
# E category
# F object
# dtype: object
print('# 查看数据')
print('头部数据')
print(df.head)
# <bound method NDFrame.head of A B C D
# 2021-05-06 -0.306809 0.422631 -2.093736 0.740021
# 2021-05-07 1.294873 0.576172 0.207939 1.516931
# 2021-05-08 -1.705928 1.726531 0.404730 -0.904943
# 2021-05-09 1.872359 -0.325699 0.355805 -2.472407
# 2021-05-10 2.260158 -1.023984 -0.203169 -1.473089
# 2021-05-11 0.260818 -1.462511 0.449855 -0.308070>
print('最后三条')
print(df.tail(3))
# A B C D
# 2021-05-09 1.872359 -0.325699 0.355805 -2.472407
# 2021-05-10 2.260158 -1.023984 -0.203169 -1.473089
# 2021-05-11 0.260818 -1.462511 0.449855 -0.308070
print('索引')
print(df.index)
# DatetimeIndex(['2021-05-06', '2021-05-07', '2021-05-08', '2021-05-09',
# '2021-05-10', '2021-05-11'],
# dtype='datetime64[ns]', freq='D')
print('列名')
print(df.columns)
# Index(['A', 'B', 'C', 'D'], dtype='object') | Pandas/intro.py | import numpy as np
import pandas as pd
s = pd.Series([1, 3, 5, np.nan, 6, 8])
print(s)
# Output:
# 0 1.0
# 1 3.0
# 2 5.0
# 3 NaN
# 4 6.0
# 5 8.0
# dtype: float64
dates = pd.date_range('20210506', periods=6)
print(dates)
# Output:
# DatetimeIndex(['2021-05-06', '2021-05-07', '2021-05-08', '2021-05-09',
# '2021-05-10', '2021-05-11'],
# dtype='datetime64[ns]', freq='D')
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
print(df)
# Output:
# A B C D
# 2021-05-06 1.017064 1.203402 0.321319 -1.842937
# 2021-05-07 0.862317 -0.118506 0.965226 1.135190
# 2021-05-08 0.095270 -0.274542 0.065710 0.848447
# 2021-05-09 0.505555 0.910965 -0.640123 0.139648
# 2021-05-10 -1.610411 2.211535 -0.753992 0.745157
# 2021-05-11 -1.252522 0.560822 -0.741799 0.293456
df2 = pd.DataFrame({'A': 1.,
'B': pd.Timestamp('20210506'),
'C': pd.Series(1, index=list(range(4)), dtype='float32'),
'D': np.array([3] * 4, dtype='int32'),
'E': pd.Categorical(["test", "train", "test", "train"]),
'F': 'foo'
})
print(df2)
# Output
# A B C D E F
# 0 1.0 2021-05-06 1.0 3 test foo
# 1 1.0 2021-05-06 1.0 3 train foo
# 2 1.0 2021-05-06 1.0 3 test foo
# 3 1.0 2021-05-06 1.0 3 train foo
print(df2.dtypes)
# Output
# A float64
# B datetime64[ns]
# C float32
# D int32
# E category
# F object
# dtype: object
print('# 查看数据')
print('头部数据')
print(df.head)
# <bound method NDFrame.head of A B C D
# 2021-05-06 -0.306809 0.422631 -2.093736 0.740021
# 2021-05-07 1.294873 0.576172 0.207939 1.516931
# 2021-05-08 -1.705928 1.726531 0.404730 -0.904943
# 2021-05-09 1.872359 -0.325699 0.355805 -2.472407
# 2021-05-10 2.260158 -1.023984 -0.203169 -1.473089
# 2021-05-11 0.260818 -1.462511 0.449855 -0.308070>
print('最后三条')
print(df.tail(3))
# A B C D
# 2021-05-09 1.872359 -0.325699 0.355805 -2.472407
# 2021-05-10 2.260158 -1.023984 -0.203169 -1.473089
# 2021-05-11 0.260818 -1.462511 0.449855 -0.308070
print('索引')
print(df.index)
# DatetimeIndex(['2021-05-06', '2021-05-07', '2021-05-08', '2021-05-09',
# '2021-05-10', '2021-05-11'],
# dtype='datetime64[ns]', freq='D')
print('列名')
print(df.columns)
# Index(['A', 'B', 'C', 'D'], dtype='object') | 0.382372 | 0.319652 |
import pandas as pd
import matplotlib.pyplot as plt
"""# Data Exploration
**Challenge**: How many different colours does the LEGO company produce? Read the colors.csv file in the data folder and find the total number of unique colours. Try using the [.nunique() method](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.nunique.html?highlight=nunique#pandas.DataFrame.nunique) to accomplish this.
"""
colors_df = pd.read_csv('data/colors.csv')
colors_df.head()
colors_df['name'].nunique()
"""**Challenge**: Find the number of transparent colours where <code>is_trans == 't'</code> versus the number of opaque colours where <code>is_trans == 'f'</code>. See if you can accomplish this in two different ways."""
colors_df.groupby('is_trans').count()
colors_df.is_trans.value_counts()
"""### Understanding LEGO Themes vs. LEGO Sets
Walk into a LEGO store and you will see their products organised by theme. Their themes include Star Wars, Batman, Harry Potter and many more.
<img src="https://i.imgur.com/aKcwkSx.png">
A lego set is a particular box of LEGO or product. Therefore, a single theme typically has many different sets.
<img src="https://i.imgur.com/whB1olq.png">
The <code>sets.csv</code> data contains a list of sets over the years and the number of parts that each of these sets contained.
**Challenge**: Read the sets.csv data and take a look at the first and last couple of rows.
"""
sets_df = pd.read_csv("data/sets.csv")
sets_df.head()
sets_df.tail()
"""**Challenge**: In which year were the first LEGO sets released and what were these sets called?"""
sets_df.sort_values('year')
"""**Challenge**: How many different sets did LEGO sell in their first year? How many types of LEGO products were on offer in the year the company started?"""
sets_df[sets_df['year'] == 1949]
"""**Challenge**: Find the top 5 LEGO sets with the most number of parts. """
sets_df.sort_values('num_parts', ascending=False).head()
"""**Challenge**: Use <code>.groupby()</code> and <code>.count()</code> to show the number of LEGO sets released year-on-year. How do the number of sets released in 1955 compare to the number of sets released in 2019? """
sets_by_year = sets_df.groupby('year').count()
sets_by_year['set_num']
"""**Challenge**: Show the number of LEGO releases on a line chart using Matplotlib. <br>
<br>
Note that the .csv file is from late 2020, so to plot the full calendar years, you will have to exclude some data from your chart. Can you use the slicing techniques covered in Day 21 to avoid plotting the last two years? The same syntax will work on Pandas DataFrames.
"""
plt.plot(sets_by_year.index, sets_by_year.set_num)
plt.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2])
"""### Aggregate Data with the Python .agg() Function
Let's work out the number of different themes shipped by year. This means we have to count the number of unique theme_ids per calendar year.
"""
themes_by_year = sets_df.groupby('year').agg({'theme_id': pd.Series.nunique})
themes_by_year.rename(columns= {'theme_id': 'nr_themes'}, inplace=True)
themes_by_year
"""**Challenge**: Plot the number of themes released by year on a line chart. Only include the full calendar years (i.e., exclude 2020 and 2021). """
plt.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2])
"""### Line Charts with Two Seperate Axes"""
ax1 = plt.gca()
ax2 = ax1.twinx()
ax1.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2], 'b')
ax2.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2], 'g')
ax1.set_xlabel('Year')
ax1.set_ylabel('Number of themes', color="blue")
ax2.set_ylabel('Number of sets', color='green')
"""**Challenge**: Use the <code>.groupby()</code> and <code>.agg()</code> function together to figure out the average number of parts per set. How many parts did the average LEGO set released in 1954 compared to say, 2017?"""
parts_per_set = sets_df.groupby('year').agg({'num_parts': pd.Series.mean})
parts_per_set
"""### Scatter Plots in Matplotlib
**Challenge**: Has the size and complexity of LEGO sets increased over time based on the number of parts? Plot the average number of parts over time using a Matplotlib scatter plot. See if you can use the [scatter plot documentation](https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.scatter.html) before I show you the solution. Do you spot a trend in the chart?
"""
plt.scatter(parts_per_set.index[:-2], parts_per_set.num_parts[:-2])
"""### Number of Sets per LEGO Theme
LEGO has licensed many hit franchises from Harry Potter to Marvel Super Heros to many others. But which theme has the largest number of individual sets?
"""
set_theme_count = sets_df["theme_id"].value_counts()
set_theme_count[:5]
"""<img src="https://i.imgur.com/Sg4lcjx.png">
### Database Schemas, Foreign Keys and Merging DataFrames
The themes.csv file has the actual theme names. The sets .csv has <code>theme_ids</code> which link to the <code>id</code> column in the themes.csv.
**Challenge**: Explore the themes.csv. How is it structured? Search for the name 'Star Wars'. How many <code>id</code>s correspond to this name in the themes.csv? Now use these <code>id</code>s and find the corresponding the sets in the sets.csv (Hint: you'll need to look for matches in the <code>theme_id</code> column)
"""
themes = pd.read_csv("data/themes.csv")
themes
themes[themes["name"] == "Star Wars"]
sets_df[sets_df.theme_id == 18]
sets_df[sets_df.theme_id == 158]
sets_df[sets_df.theme_id == 209]
sets_df[sets_df.theme_id == 261]
"""### Merging (i.e., Combining) DataFrames based on a Key
"""
set_theme_count = pd.DataFrame({"id": set_theme_count.index,
"set_count": set_theme_count.values})
set_theme_count.head()
merged_df = pd.merge(set_theme_count, themes, on='id')
merged_df[:3]
plt.figure(figsize=(14, 8))
plt.xlabel("Set Name", fontsize=14)
plt.xticks(fontsize=14, rotation=45)
plt.ylabel("Number of Sets", fontsize=14)
plt.yticks(fontsize=14)
plt.bar(merged_df.name[:10], merged_df.set_count[:10]) | Day-073/main.py | import pandas as pd
import matplotlib.pyplot as plt
"""# Data Exploration
**Challenge**: How many different colours does the LEGO company produce? Read the colors.csv file in the data folder and find the total number of unique colours. Try using the [.nunique() method](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.nunique.html?highlight=nunique#pandas.DataFrame.nunique) to accomplish this.
"""
colors_df = pd.read_csv('data/colors.csv')
colors_df.head()
colors_df['name'].nunique()
"""**Challenge**: Find the number of transparent colours where <code>is_trans == 't'</code> versus the number of opaque colours where <code>is_trans == 'f'</code>. See if you can accomplish this in two different ways."""
colors_df.groupby('is_trans').count()
colors_df.is_trans.value_counts()
"""### Understanding LEGO Themes vs. LEGO Sets
Walk into a LEGO store and you will see their products organised by theme. Their themes include Star Wars, Batman, Harry Potter and many more.
<img src="https://i.imgur.com/aKcwkSx.png">
A lego set is a particular box of LEGO or product. Therefore, a single theme typically has many different sets.
<img src="https://i.imgur.com/whB1olq.png">
The <code>sets.csv</code> data contains a list of sets over the years and the number of parts that each of these sets contained.
**Challenge**: Read the sets.csv data and take a look at the first and last couple of rows.
"""
sets_df = pd.read_csv("data/sets.csv")
sets_df.head()
sets_df.tail()
"""**Challenge**: In which year were the first LEGO sets released and what were these sets called?"""
sets_df.sort_values('year')
"""**Challenge**: How many different sets did LEGO sell in their first year? How many types of LEGO products were on offer in the year the company started?"""
sets_df[sets_df['year'] == 1949]
"""**Challenge**: Find the top 5 LEGO sets with the most number of parts. """
sets_df.sort_values('num_parts', ascending=False).head()
"""**Challenge**: Use <code>.groupby()</code> and <code>.count()</code> to show the number of LEGO sets released year-on-year. How do the number of sets released in 1955 compare to the number of sets released in 2019? """
sets_by_year = sets_df.groupby('year').count()
sets_by_year['set_num']
"""**Challenge**: Show the number of LEGO releases on a line chart using Matplotlib. <br>
<br>
Note that the .csv file is from late 2020, so to plot the full calendar years, you will have to exclude some data from your chart. Can you use the slicing techniques covered in Day 21 to avoid plotting the last two years? The same syntax will work on Pandas DataFrames.
"""
plt.plot(sets_by_year.index, sets_by_year.set_num)
plt.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2])
"""### Aggregate Data with the Python .agg() Function
Let's work out the number of different themes shipped by year. This means we have to count the number of unique theme_ids per calendar year.
"""
themes_by_year = sets_df.groupby('year').agg({'theme_id': pd.Series.nunique})
themes_by_year.rename(columns= {'theme_id': 'nr_themes'}, inplace=True)
themes_by_year
"""**Challenge**: Plot the number of themes released by year on a line chart. Only include the full calendar years (i.e., exclude 2020 and 2021). """
plt.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2])
"""### Line Charts with Two Seperate Axes"""
ax1 = plt.gca()
ax2 = ax1.twinx()
ax1.plot(themes_by_year.index[:-2], themes_by_year.nr_themes[:-2], 'b')
ax2.plot(sets_by_year.index[:-2], sets_by_year.set_num[:-2], 'g')
ax1.set_xlabel('Year')
ax1.set_ylabel('Number of themes', color="blue")
ax2.set_ylabel('Number of sets', color='green')
"""**Challenge**: Use the <code>.groupby()</code> and <code>.agg()</code> function together to figure out the average number of parts per set. How many parts did the average LEGO set released in 1954 compared to say, 2017?"""
parts_per_set = sets_df.groupby('year').agg({'num_parts': pd.Series.mean})
parts_per_set
"""### Scatter Plots in Matplotlib
**Challenge**: Has the size and complexity of LEGO sets increased over time based on the number of parts? Plot the average number of parts over time using a Matplotlib scatter plot. See if you can use the [scatter plot documentation](https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.scatter.html) before I show you the solution. Do you spot a trend in the chart?
"""
plt.scatter(parts_per_set.index[:-2], parts_per_set.num_parts[:-2])
"""### Number of Sets per LEGO Theme
LEGO has licensed many hit franchises from Harry Potter to Marvel Super Heros to many others. But which theme has the largest number of individual sets?
"""
set_theme_count = sets_df["theme_id"].value_counts()
set_theme_count[:5]
"""<img src="https://i.imgur.com/Sg4lcjx.png">
### Database Schemas, Foreign Keys and Merging DataFrames
The themes.csv file has the actual theme names. The sets .csv has <code>theme_ids</code> which link to the <code>id</code> column in the themes.csv.
**Challenge**: Explore the themes.csv. How is it structured? Search for the name 'Star Wars'. How many <code>id</code>s correspond to this name in the themes.csv? Now use these <code>id</code>s and find the corresponding the sets in the sets.csv (Hint: you'll need to look for matches in the <code>theme_id</code> column)
"""
themes = pd.read_csv("data/themes.csv")
themes
themes[themes["name"] == "Star Wars"]
sets_df[sets_df.theme_id == 18]
sets_df[sets_df.theme_id == 158]
sets_df[sets_df.theme_id == 209]
sets_df[sets_df.theme_id == 261]
"""### Merging (i.e., Combining) DataFrames based on a Key
"""
set_theme_count = pd.DataFrame({"id": set_theme_count.index,
"set_count": set_theme_count.values})
set_theme_count.head()
merged_df = pd.merge(set_theme_count, themes, on='id')
merged_df[:3]
plt.figure(figsize=(14, 8))
plt.xlabel("Set Name", fontsize=14)
plt.xticks(fontsize=14, rotation=45)
plt.ylabel("Number of Sets", fontsize=14)
plt.yticks(fontsize=14)
plt.bar(merged_df.name[:10], merged_df.set_count[:10]) | 0.752104 | 0.742025 |
import json
import pandas as pd
import logging
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from localsecret import username, password
from spiceup_labels.config_lizard import patch_labeltype, configure_logger
#%%
def create_lizardrastersource(code, uuid):
key = code
value = ["lizard_nxt.blocks.LizardRasterSource", uuid]
return {key: value}
def create_aggregate(code):
key = "{}_aggregate".format(code)
method = (
"max" if (code.startswith("icon") or code.startswith("soil_mois")) else "mean"
) # Icon mag niet middelen dus pakt max
value = [
"geoblocks.geometry.aggregate.AggregateRaster",
"parcels",
code,
method,
"epsg:4326",
0.00001,
None,
"{}_label".format(code),
]
return {key: value}
def create_seriesblock(code):
key = "{}_seriesblock".format(code)
value = [
"geoblocks.geometry.base.GetSeriesBlock",
"{}_aggregate".format(code),
"{}_label".format(code),
]
return {key: value}
def update_result(code, label, result):
result.append(label)
result.append("{}_seriesblock".format(code))
return result
#%%
def main():
labeltype_uuid = "8ef4c780-6995-4935-8bd3-73440a689fc3"
configure_logger(logging.DEBUG)
logger = logging.getLogger("labellogger")
logger.info("Start creation of weather startup labeltype")
logger.info("Reading data from Google spreadsheet")
if not "weather_info" in locals():
scope = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
creds = ServiceAccountCredentials.from_json_keyfile_name(
"client_secret.json", scope
)
client = gspread.authorize(creds)
sh = client.open("Items & Properties on the App Ui/x")
ws = sh.worksheet("Weather")
weather_info = pd.DataFrame(ws.get_all_records())
weather_info = weather_info[weather_info["parameter"] != "Location"]
with open("Weatherconfig\Labels_basis.json") as json_file:
data = json.load(json_file)
source = data["source"]
graph = source["graph"]
result = graph["result"]
logger.info("Data read succefully")
logger.info("Building labeltype")
for index, row in weather_info.iterrows():
code = row["parameter"]
code = code.replace(" ", "_").replace("(", "").replace(")", "").lower()
uuid = row["Raster UUID"]
rastersource = create_lizardrastersource(code, uuid)
graph.update(rastersource)
aggregate = create_aggregate(code)
graph.update(aggregate)
seriesblock = create_seriesblock(code)
graph.update(seriesblock)
result = update_result(code, "{}_t0".format(code), result)
#Config for Soil Moisture traffic light
code = "soil_moisture"
rastersource = create_lizardrastersource(code, "04802788-be81-4d10-a7f3-81fcb66f3a81")
graph.update(rastersource)
aggregate = create_aggregate(code)
graph.update(aggregate)
seriesblock = create_seriesblock(code)
graph.update(seriesblock)
result = update_result(code, "soil_moisture_condition", result)
graph["result"] = result
source["graph"] = graph
data["source"] = source
with open("Label_result_startup.json", "w+") as outfile:
json.dump(data, outfile)
logger.info("Patching Lizard weather labeltype")
r = patch_labeltype(source, username, password, labeltype_uuid)
logger.debug(r.json())
r.raise_for_status()
logger.info("Complete!") | spiceup_labels/patch_weather_startup_labeltype.py | import json
import pandas as pd
import logging
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from localsecret import username, password
from spiceup_labels.config_lizard import patch_labeltype, configure_logger
#%%
def create_lizardrastersource(code, uuid):
key = code
value = ["lizard_nxt.blocks.LizardRasterSource", uuid]
return {key: value}
def create_aggregate(code):
key = "{}_aggregate".format(code)
method = (
"max" if (code.startswith("icon") or code.startswith("soil_mois")) else "mean"
) # Icon mag niet middelen dus pakt max
value = [
"geoblocks.geometry.aggregate.AggregateRaster",
"parcels",
code,
method,
"epsg:4326",
0.00001,
None,
"{}_label".format(code),
]
return {key: value}
def create_seriesblock(code):
key = "{}_seriesblock".format(code)
value = [
"geoblocks.geometry.base.GetSeriesBlock",
"{}_aggregate".format(code),
"{}_label".format(code),
]
return {key: value}
def update_result(code, label, result):
result.append(label)
result.append("{}_seriesblock".format(code))
return result
#%%
def main():
labeltype_uuid = "8ef4c780-6995-4935-8bd3-73440a689fc3"
configure_logger(logging.DEBUG)
logger = logging.getLogger("labellogger")
logger.info("Start creation of weather startup labeltype")
logger.info("Reading data from Google spreadsheet")
if not "weather_info" in locals():
scope = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
creds = ServiceAccountCredentials.from_json_keyfile_name(
"client_secret.json", scope
)
client = gspread.authorize(creds)
sh = client.open("Items & Properties on the App Ui/x")
ws = sh.worksheet("Weather")
weather_info = pd.DataFrame(ws.get_all_records())
weather_info = weather_info[weather_info["parameter"] != "Location"]
with open("Weatherconfig\Labels_basis.json") as json_file:
data = json.load(json_file)
source = data["source"]
graph = source["graph"]
result = graph["result"]
logger.info("Data read succefully")
logger.info("Building labeltype")
for index, row in weather_info.iterrows():
code = row["parameter"]
code = code.replace(" ", "_").replace("(", "").replace(")", "").lower()
uuid = row["Raster UUID"]
rastersource = create_lizardrastersource(code, uuid)
graph.update(rastersource)
aggregate = create_aggregate(code)
graph.update(aggregate)
seriesblock = create_seriesblock(code)
graph.update(seriesblock)
result = update_result(code, "{}_t0".format(code), result)
#Config for Soil Moisture traffic light
code = "soil_moisture"
rastersource = create_lizardrastersource(code, "04802788-be81-4d10-a7f3-81fcb66f3a81")
graph.update(rastersource)
aggregate = create_aggregate(code)
graph.update(aggregate)
seriesblock = create_seriesblock(code)
graph.update(seriesblock)
result = update_result(code, "soil_moisture_condition", result)
graph["result"] = result
source["graph"] = graph
data["source"] = source
with open("Label_result_startup.json", "w+") as outfile:
json.dump(data, outfile)
logger.info("Patching Lizard weather labeltype")
r = patch_labeltype(source, username, password, labeltype_uuid)
logger.debug(r.json())
r.raise_for_status()
logger.info("Complete!") | 0.398992 | 0.157428 |
from AdjacencyMatrixGraphNode import AdjacencyMatrixGraphNode
class AdjacencyMatrixGraph:
def __init__(self) -> None:
self.matrix = []
self.nodes = []
def adjacent(self, nodeA, nodeB):
i = nodeA.index
j = nodeB.index
if self.matrix[i][j] or self.matrix[j][i]:
return True
return False
def neighbors(self, node):
neighbors = []
i = node.index
for j in range(len(self.matrix[i])):
if self.matrix[i][j]:
neighbors.append(self.nodes[j])
return neighbors
def addNode(self, node):
if node not in self.nodes:
for i in range(len(self.nodes)):
self.matrix[i].append(0)
node.index = len(self.matrix)
self.matrix.append([0] * (len(self.matrix) + 1))
self.nodes.append(node)
def removeNode(self, node):
for i in range(len(self.nodes)):
self.matrix[i].pop(node.index)
self.matrix.pop(node.index)
self.nodes.remove(node)
for n in self.nodes[node.index:]:
n.index -= 1
def addEdge(self, nodeA, nodeB, directional=False):
self.matrix[nodeA.index][nodeB.index] = 1
if not directional:
self.matrix[nodeB.index][nodeA.index] = 1
def removeEdge(self, nodeA, nodeB, directional=False):
self.matrix[nodeA.index][nodeB.index] = 0
if not directional:
self.matrix[nodeB.index][nodeA.index] = 0
if __name__ == "__main__":
graph = AdjacencyMatrixGraph()
a = AdjacencyMatrixGraphNode(5)
b = AdjacencyMatrixGraphNode(20)
c = AdjacencyMatrixGraphNode(15)
d = AdjacencyMatrixGraphNode(9)
graph.addNode(a)
graph.addNode(b)
graph.addNode(c)
graph.addNode(d)
print(graph.nodes)
graph.addEdge(a, b)
graph.addEdge(b, d)
graph.addEdge(b, c)
print(graph.adjacent(b, a))
print(graph.adjacent(c, d))
graph.removeNode(a)
print(graph.nodes)
print(graph.neighbors(b)) | DataStructures/Graphs/AdjacencyMatrixGraph/AdjacencyMatrixGraph.py | from AdjacencyMatrixGraphNode import AdjacencyMatrixGraphNode
class AdjacencyMatrixGraph:
def __init__(self) -> None:
self.matrix = []
self.nodes = []
def adjacent(self, nodeA, nodeB):
i = nodeA.index
j = nodeB.index
if self.matrix[i][j] or self.matrix[j][i]:
return True
return False
def neighbors(self, node):
neighbors = []
i = node.index
for j in range(len(self.matrix[i])):
if self.matrix[i][j]:
neighbors.append(self.nodes[j])
return neighbors
def addNode(self, node):
if node not in self.nodes:
for i in range(len(self.nodes)):
self.matrix[i].append(0)
node.index = len(self.matrix)
self.matrix.append([0] * (len(self.matrix) + 1))
self.nodes.append(node)
def removeNode(self, node):
for i in range(len(self.nodes)):
self.matrix[i].pop(node.index)
self.matrix.pop(node.index)
self.nodes.remove(node)
for n in self.nodes[node.index:]:
n.index -= 1
def addEdge(self, nodeA, nodeB, directional=False):
self.matrix[nodeA.index][nodeB.index] = 1
if not directional:
self.matrix[nodeB.index][nodeA.index] = 1
def removeEdge(self, nodeA, nodeB, directional=False):
self.matrix[nodeA.index][nodeB.index] = 0
if not directional:
self.matrix[nodeB.index][nodeA.index] = 0
if __name__ == "__main__":
graph = AdjacencyMatrixGraph()
a = AdjacencyMatrixGraphNode(5)
b = AdjacencyMatrixGraphNode(20)
c = AdjacencyMatrixGraphNode(15)
d = AdjacencyMatrixGraphNode(9)
graph.addNode(a)
graph.addNode(b)
graph.addNode(c)
graph.addNode(d)
print(graph.nodes)
graph.addEdge(a, b)
graph.addEdge(b, d)
graph.addEdge(b, c)
print(graph.adjacent(b, a))
print(graph.adjacent(c, d))
graph.removeNode(a)
print(graph.nodes)
print(graph.neighbors(b)) | 0.586878 | 0.471102 |
if __name__ == "__main__":
from MKVCreator import PrimaryFrame
PrimaryFrame()
from tkinter import Tk, ttk
from Processor import Processor
from Processor.Processor import Props
from Utils.LogUtils import Log
from Utils import UIUtils as UI
class PrimaryFrame:
def __init__ ( self ):
self.root = Tk()
self.props = Props()
self.root.iconbitmap( "MKV.ico" )
self.root.title( "Automated MKV Creator" )
self.root.geometry( "380x180" )
self.buildPrimaryUI()
self.root.columnconfigure( 0, weight = 1 )
self.root.rowconfigure( 0, weight = 1 )
self.root.mainloop()
def buildPrimaryUI( self ):
self.tab = ttk.Notebook( self.root )
self.tab.grid( row = 0, column = 0, sticky = "NSEW" )
for pair in [ ( "Run Info", self.buildRunInfo ), ( "Partial Season", self.buildPartialSeason ), ( "File Paths", self.buildFilePaths ), ( "Program Paths", self.buildProgramPaths )]:
self.tab.add( pair[1]( self.tab ), text = pair[ 0 ] )
ttk.Button( self.root, text = "Run", command = self.runProcess ).grid( row = 1, column = 0, sticky = "EW" )
def buildRunInfo( self, tab ):
f = ttk.Frame( tab )
for i in range( 5 ):
f.columnconfigure( i, weight = 0 if i == 2 else 1 )
#row 0
UI.gridIt( ttk.Label( f, text = "File Prefix:" ), 0, 1, 1, "E" )
UI.gridIt( ttk.Entry( f, textvariable = self.props.FILE_PREFIX, width = 20 ), 0, 2, 2, "W" )
#row 1
UI.buildEntryPair( f, "Starting Season:", self.props.STARTING_SEASON, 1, 0, 0, 5 )
UI.buildEntryPair( f, "Ending Season:", self.props.ENDING_SEASON, 1, 3, 0, 5 )
#row2
UI.buildEntryPair( f, "Min Time:", self.props.MIN_TIME, 2, 0, 0, 5 )
UI.buildEntryPair( f, "Max Time:", self.props.MAX_TIME, 2, 3, 0, 5 )
#row3
UI.gridIt( ttk.Checkbutton( f, text = "Use File Prefix", variable = self.props.USE_FILE ), 3, 1 )
UI.gridIt( ttk.Checkbutton( f, text = "Single Season", variable = self.props.SINGLE_SEASON ), 3, 3 )
return f
def buildPartialSeason( self, tab ):
f = UI.buildFrame( tab, 3 )
UI.buildEntryPair( f, "Starting File:", self.props.PARTIAL_FILE, 0, 0, 1 )
UI.buildEntryPair( f, "Starting Ep #:", self.props.PARTIAL_EPISODE, 1, 0, 1 )
UI.gridIt( ttk.Checkbutton( f, text = "Single File Only", variable = self.props.PARTIAL_SINGLEFILE ), 2, 1 )
return f
def buildFilePaths( self, tab ):
f = UI.buildFrame( tab, 2 )
UI.buildEntryPair( f, "Input Directory:", self.props.INPUT_PATH , 0, 0 )
UI.buildEntryPair( f, "Output Directory:", self.props.OUTPUT_PATH , 1, 0 )
return f
def buildProgramPaths( self, tab ):
return self.makeEntryFrame( tab, 2, [( "MakeMKV:", self.props.MAKEMKV_PATH, 0 ), ( "MKVMerge:", self.props.MKVMERGE_PATH, 0 ), ( "DVDFab8QT:", self.props.DVDFAB_PATH, 0 )] )
def makeEntryFrame( self, tab, columns, pairs ):
f = UI.buildFrame( tab, columns )
for i, pair in enumerate( pairs ):
if len( pair ) == 3:
UI.buildEntryPair( f, pair[ 0 ], pair[ 1 ], i, pair[ 2 ] )
else:
UI.buildEntryPair( f, pair[ 0 ], pair[ 1 ], i, pair[ 2 ], pair[ 3 ] )
return f
def runProcess( self ):
Log( self.props.OUTPUT_PATH.get(), self.props.FILE_PREFIX.get() )
Processor.simpleProcess( self.props ) | MKVRipper/MKVCreator.py | if __name__ == "__main__":
from MKVCreator import PrimaryFrame
PrimaryFrame()
from tkinter import Tk, ttk
from Processor import Processor
from Processor.Processor import Props
from Utils.LogUtils import Log
from Utils import UIUtils as UI
class PrimaryFrame:
def __init__ ( self ):
self.root = Tk()
self.props = Props()
self.root.iconbitmap( "MKV.ico" )
self.root.title( "Automated MKV Creator" )
self.root.geometry( "380x180" )
self.buildPrimaryUI()
self.root.columnconfigure( 0, weight = 1 )
self.root.rowconfigure( 0, weight = 1 )
self.root.mainloop()
def buildPrimaryUI( self ):
self.tab = ttk.Notebook( self.root )
self.tab.grid( row = 0, column = 0, sticky = "NSEW" )
for pair in [ ( "Run Info", self.buildRunInfo ), ( "Partial Season", self.buildPartialSeason ), ( "File Paths", self.buildFilePaths ), ( "Program Paths", self.buildProgramPaths )]:
self.tab.add( pair[1]( self.tab ), text = pair[ 0 ] )
ttk.Button( self.root, text = "Run", command = self.runProcess ).grid( row = 1, column = 0, sticky = "EW" )
def buildRunInfo( self, tab ):
f = ttk.Frame( tab )
for i in range( 5 ):
f.columnconfigure( i, weight = 0 if i == 2 else 1 )
#row 0
UI.gridIt( ttk.Label( f, text = "File Prefix:" ), 0, 1, 1, "E" )
UI.gridIt( ttk.Entry( f, textvariable = self.props.FILE_PREFIX, width = 20 ), 0, 2, 2, "W" )
#row 1
UI.buildEntryPair( f, "Starting Season:", self.props.STARTING_SEASON, 1, 0, 0, 5 )
UI.buildEntryPair( f, "Ending Season:", self.props.ENDING_SEASON, 1, 3, 0, 5 )
#row2
UI.buildEntryPair( f, "Min Time:", self.props.MIN_TIME, 2, 0, 0, 5 )
UI.buildEntryPair( f, "Max Time:", self.props.MAX_TIME, 2, 3, 0, 5 )
#row3
UI.gridIt( ttk.Checkbutton( f, text = "Use File Prefix", variable = self.props.USE_FILE ), 3, 1 )
UI.gridIt( ttk.Checkbutton( f, text = "Single Season", variable = self.props.SINGLE_SEASON ), 3, 3 )
return f
def buildPartialSeason( self, tab ):
f = UI.buildFrame( tab, 3 )
UI.buildEntryPair( f, "Starting File:", self.props.PARTIAL_FILE, 0, 0, 1 )
UI.buildEntryPair( f, "Starting Ep #:", self.props.PARTIAL_EPISODE, 1, 0, 1 )
UI.gridIt( ttk.Checkbutton( f, text = "Single File Only", variable = self.props.PARTIAL_SINGLEFILE ), 2, 1 )
return f
def buildFilePaths( self, tab ):
f = UI.buildFrame( tab, 2 )
UI.buildEntryPair( f, "Input Directory:", self.props.INPUT_PATH , 0, 0 )
UI.buildEntryPair( f, "Output Directory:", self.props.OUTPUT_PATH , 1, 0 )
return f
def buildProgramPaths( self, tab ):
return self.makeEntryFrame( tab, 2, [( "MakeMKV:", self.props.MAKEMKV_PATH, 0 ), ( "MKVMerge:", self.props.MKVMERGE_PATH, 0 ), ( "DVDFab8QT:", self.props.DVDFAB_PATH, 0 )] )
def makeEntryFrame( self, tab, columns, pairs ):
f = UI.buildFrame( tab, columns )
for i, pair in enumerate( pairs ):
if len( pair ) == 3:
UI.buildEntryPair( f, pair[ 0 ], pair[ 1 ], i, pair[ 2 ] )
else:
UI.buildEntryPair( f, pair[ 0 ], pair[ 1 ], i, pair[ 2 ], pair[ 3 ] )
return f
def runProcess( self ):
Log( self.props.OUTPUT_PATH.get(), self.props.FILE_PREFIX.get() )
Processor.simpleProcess( self.props ) | 0.255344 | 0.131118 |
import os
import shutil
import h5py
import numpy as np
import pytest
import yaml
TEST_FILES = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'resources',
)
VOXEL_SIZE = (0.235, 0.15, 0.15)
# common fixtures aimed to reduce the boilerplate in tests
@pytest.fixture
def input_path(tmpdir):
path = os.path.join(tmpdir, 'test.h5')
with h5py.File(path, 'w') as f:
f.create_dataset('raw', data=np.random.rand(32, 128, 128))
f['raw'].attrs['element_size_um'] = VOXEL_SIZE
f.create_dataset('segmentation', data=np.random.randint(low=0, high=256, size=(32, 128, 128)))
f['segmentation'].attrs['element_size_um'] = VOXEL_SIZE
return path
@pytest.fixture
def preprocess_config(input_path):
"""
Create pipeline config with only pre-processing (gaussian fileter) enabled
"""
config_path = os.path.join(TEST_FILES, 'test_config.yaml')
config = yaml.full_load(open(config_path, 'r'))
# add file to process
config['path'] = input_path
# add gaussian smoothing just to do some work
config['preprocessing']['state'] = True
config['preprocessing']['filter']['state'] = True
return config
@pytest.fixture
def prediction_config(tmpdir):
"""
Create pipeline config with Unet predictions enabled.
Predictions will be executed on the `tests/resources/sample_ovules.h5`.
`sample_ovules.h5` is first copied to the tmp dir in order to avoid unnecessary files creation in `tests/resources`.
"""
# load test config
config_path = os.path.join(TEST_FILES, 'test_config.yaml')
config = yaml.full_load(open(config_path, 'r'))
# enable unet predictions
config['cnn_prediction']['state'] = True
# copy sample_ovules.h5 to tmp dir
sample_ovule_path = os.path.join(TEST_FILES, 'sample_ovule.h5')
tmp_path = os.path.join(tmpdir, 'sample_ovule.h5')
shutil.copy2(sample_ovule_path, tmp_path)
# add tmp_path to the config
config['path'] = tmp_path
# enable network predictions
return config | tests/conftest.py | import os
import shutil
import h5py
import numpy as np
import pytest
import yaml
TEST_FILES = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'resources',
)
VOXEL_SIZE = (0.235, 0.15, 0.15)
# common fixtures aimed to reduce the boilerplate in tests
@pytest.fixture
def input_path(tmpdir):
path = os.path.join(tmpdir, 'test.h5')
with h5py.File(path, 'w') as f:
f.create_dataset('raw', data=np.random.rand(32, 128, 128))
f['raw'].attrs['element_size_um'] = VOXEL_SIZE
f.create_dataset('segmentation', data=np.random.randint(low=0, high=256, size=(32, 128, 128)))
f['segmentation'].attrs['element_size_um'] = VOXEL_SIZE
return path
@pytest.fixture
def preprocess_config(input_path):
"""
Create pipeline config with only pre-processing (gaussian fileter) enabled
"""
config_path = os.path.join(TEST_FILES, 'test_config.yaml')
config = yaml.full_load(open(config_path, 'r'))
# add file to process
config['path'] = input_path
# add gaussian smoothing just to do some work
config['preprocessing']['state'] = True
config['preprocessing']['filter']['state'] = True
return config
@pytest.fixture
def prediction_config(tmpdir):
"""
Create pipeline config with Unet predictions enabled.
Predictions will be executed on the `tests/resources/sample_ovules.h5`.
`sample_ovules.h5` is first copied to the tmp dir in order to avoid unnecessary files creation in `tests/resources`.
"""
# load test config
config_path = os.path.join(TEST_FILES, 'test_config.yaml')
config = yaml.full_load(open(config_path, 'r'))
# enable unet predictions
config['cnn_prediction']['state'] = True
# copy sample_ovules.h5 to tmp dir
sample_ovule_path = os.path.join(TEST_FILES, 'sample_ovule.h5')
tmp_path = os.path.join(tmpdir, 'sample_ovule.h5')
shutil.copy2(sample_ovule_path, tmp_path)
# add tmp_path to the config
config['path'] = tmp_path
# enable network predictions
return config | 0.326701 | 0.244679 |
import os
from plenum.common.log import getlogger
from sovrin.agent.agent import createAgent, runAgent
from sovrin.agent.constants import EVENT_NOTIFY_MSG
from sovrin.agent.exception import NonceNotFound
from sovrin.client.client import Client
from sovrin.client.wallet.wallet import Wallet
from sovrin.common.config_util import getConfig
from sovrin.test.agent.helper import buildThriftWallet
from sovrin.test.agent.test_walleted_agent import TestWalletedAgent
from sovrin.test.helper import TestClient
logger = getlogger()
class ThriftAgent(TestWalletedAgent):
def __init__(self,
basedirpath: str,
client: Client = None,
wallet: Wallet = None,
port: int = None,
loop=None):
if not basedirpath:
config = getConfig()
basedirpath = basedirpath or os.path.expanduser(config.baseDir)
portParam, = self.getPassedArgs()
super().__init__('Thrift Bank', basedirpath, client, wallet,
portParam or port, loop=loop)
# maps invitation nonces to internal ids
self._invites = {
"7<PASSWORD>": 1
}
def getInternalIdByInvitedNonce(self, nonce):
if nonce in self._invites:
return self._invites[nonce]
else:
raise NonceNotFound
def isClaimAvailable(self, link, claimName):
return True
def getAvailableClaimList(self):
return []
def _addAtrribute(self, claimDefKey, proverId, link):
pass
async def postClaimVerif(self, claimName, link, frm):
if claimName == "Loan-Application-Basic":
self.notifyToRemoteCaller(EVENT_NOTIFY_MSG,
" Loan eligibility criteria satisfied,"
" please send another claim "
"'Loan-Application-KYC'\n",
self.wallet.defaultId, frm)
async def bootstrap(self):
pass
def createThrift(name=None, wallet=None, basedirpath=None, port=None):
return createAgent(ThriftAgent, name or "Thrift Bank",
wallet or buildThriftWallet(),
basedirpath, port, clientClass=TestClient)
if __name__ == "__main__":
thrift = createThrift(port=7777)
runAgent(thrift) | sovrin/test/agent/thrift.py | import os
from plenum.common.log import getlogger
from sovrin.agent.agent import createAgent, runAgent
from sovrin.agent.constants import EVENT_NOTIFY_MSG
from sovrin.agent.exception import NonceNotFound
from sovrin.client.client import Client
from sovrin.client.wallet.wallet import Wallet
from sovrin.common.config_util import getConfig
from sovrin.test.agent.helper import buildThriftWallet
from sovrin.test.agent.test_walleted_agent import TestWalletedAgent
from sovrin.test.helper import TestClient
logger = getlogger()
class ThriftAgent(TestWalletedAgent):
def __init__(self,
basedirpath: str,
client: Client = None,
wallet: Wallet = None,
port: int = None,
loop=None):
if not basedirpath:
config = getConfig()
basedirpath = basedirpath or os.path.expanduser(config.baseDir)
portParam, = self.getPassedArgs()
super().__init__('Thrift Bank', basedirpath, client, wallet,
portParam or port, loop=loop)
# maps invitation nonces to internal ids
self._invites = {
"7<PASSWORD>": 1
}
def getInternalIdByInvitedNonce(self, nonce):
if nonce in self._invites:
return self._invites[nonce]
else:
raise NonceNotFound
def isClaimAvailable(self, link, claimName):
return True
def getAvailableClaimList(self):
return []
def _addAtrribute(self, claimDefKey, proverId, link):
pass
async def postClaimVerif(self, claimName, link, frm):
if claimName == "Loan-Application-Basic":
self.notifyToRemoteCaller(EVENT_NOTIFY_MSG,
" Loan eligibility criteria satisfied,"
" please send another claim "
"'Loan-Application-KYC'\n",
self.wallet.defaultId, frm)
async def bootstrap(self):
pass
def createThrift(name=None, wallet=None, basedirpath=None, port=None):
return createAgent(ThriftAgent, name or "Thrift Bank",
wallet or buildThriftWallet(),
basedirpath, port, clientClass=TestClient)
if __name__ == "__main__":
thrift = createThrift(port=7777)
runAgent(thrift) | 0.367838 | 0.141193 |
from catalyst import dl
from catalyst.contrib.data.nlp import LanguageModelingDataset
from catalyst.core import MetricAggregationCallback
import pandas as pd
import pytest # noqa: F401
import torch
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoTokenizer,
BertForMaskedLM,
DistilBertForMaskedLM,
)
from transformers.data.data_collator import DataCollatorForLanguageModeling
from .callbacks import (
CosineLossCallback,
KLDivLossCallback,
MaskedLanguageModelCallback,
MSELossCallback,
PerplexityMetricCallbackDistillation,
)
from .data import MLMDataset
from .runners import DistilMLMRunner
def test_dataset():
"""Test number of tokens"""
dataset = MLMDataset(["Hello, world"])
output_dict = dataset[0]
assert output_dict["attention_mask"].sum() == 5
def test_runner():
"""Test that runner executes"""
train_df = pd.read_csv("data/train.csv")
valid_df = pd.read_csv("data/valid.csv")
teacher_config = AutoConfig.from_pretrained(
"bert-base-uncased", output_hidden_states=True, output_logits=True
)
teacher = BertForMaskedLM.from_pretrained(
"bert-base-uncased", config=teacher_config
)
student_config = AutoConfig.from_pretrained(
"distilbert-base-uncased",
output_hidden_states=True,
output_logits=True,
)
student = DistilBertForMaskedLM.from_pretrained(
"distilbert-base-uncased", config=student_config
)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_dataset = LanguageModelingDataset(train_df["text"], tokenizer)
valid_dataset = LanguageModelingDataset(valid_df["text"], tokenizer)
collate_fn = DataCollatorForLanguageModeling(tokenizer).collate_batch
train_dataloader = DataLoader(
train_dataset, collate_fn=collate_fn, batch_size=2
)
valid_dataloader = DataLoader(
valid_dataset, collate_fn=collate_fn, batch_size=2
)
loaders = {"train": train_dataloader, "valid": valid_dataloader}
callbacks = {
"masked_lm_loss": MaskedLanguageModelCallback(),
"mse_loss": MSELossCallback(),
"cosine_loss": CosineLossCallback(),
"kl_div_loss": KLDivLossCallback(),
"loss": MetricAggregationCallback(
prefix="loss",
mode="weighted_sum",
metrics={
"cosine_loss": 1.0,
"masked_lm_loss": 1.0,
"kl_div_loss": 1.0,
"mse_loss": 1.0,
},
),
"optimizer": dl.OptimizerCallback(),
"perplexity": PerplexityMetricCallbackDistillation(),
}
model = torch.nn.ModuleDict({"teacher": teacher, "student": student})
runner = DistilMLMRunner()
optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
runner.train(
model=model,
optimizer=optimizer,
loaders=loaders,
verbose=True,
check=True,
callbacks=callbacks,
)
assert True
if __name__ == "__main__":
print("test") | src/test.py | from catalyst import dl
from catalyst.contrib.data.nlp import LanguageModelingDataset
from catalyst.core import MetricAggregationCallback
import pandas as pd
import pytest # noqa: F401
import torch
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoTokenizer,
BertForMaskedLM,
DistilBertForMaskedLM,
)
from transformers.data.data_collator import DataCollatorForLanguageModeling
from .callbacks import (
CosineLossCallback,
KLDivLossCallback,
MaskedLanguageModelCallback,
MSELossCallback,
PerplexityMetricCallbackDistillation,
)
from .data import MLMDataset
from .runners import DistilMLMRunner
def test_dataset():
"""Test number of tokens"""
dataset = MLMDataset(["Hello, world"])
output_dict = dataset[0]
assert output_dict["attention_mask"].sum() == 5
def test_runner():
"""Test that runner executes"""
train_df = pd.read_csv("data/train.csv")
valid_df = pd.read_csv("data/valid.csv")
teacher_config = AutoConfig.from_pretrained(
"bert-base-uncased", output_hidden_states=True, output_logits=True
)
teacher = BertForMaskedLM.from_pretrained(
"bert-base-uncased", config=teacher_config
)
student_config = AutoConfig.from_pretrained(
"distilbert-base-uncased",
output_hidden_states=True,
output_logits=True,
)
student = DistilBertForMaskedLM.from_pretrained(
"distilbert-base-uncased", config=student_config
)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_dataset = LanguageModelingDataset(train_df["text"], tokenizer)
valid_dataset = LanguageModelingDataset(valid_df["text"], tokenizer)
collate_fn = DataCollatorForLanguageModeling(tokenizer).collate_batch
train_dataloader = DataLoader(
train_dataset, collate_fn=collate_fn, batch_size=2
)
valid_dataloader = DataLoader(
valid_dataset, collate_fn=collate_fn, batch_size=2
)
loaders = {"train": train_dataloader, "valid": valid_dataloader}
callbacks = {
"masked_lm_loss": MaskedLanguageModelCallback(),
"mse_loss": MSELossCallback(),
"cosine_loss": CosineLossCallback(),
"kl_div_loss": KLDivLossCallback(),
"loss": MetricAggregationCallback(
prefix="loss",
mode="weighted_sum",
metrics={
"cosine_loss": 1.0,
"masked_lm_loss": 1.0,
"kl_div_loss": 1.0,
"mse_loss": 1.0,
},
),
"optimizer": dl.OptimizerCallback(),
"perplexity": PerplexityMetricCallbackDistillation(),
}
model = torch.nn.ModuleDict({"teacher": teacher, "student": student})
runner = DistilMLMRunner()
optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
runner.train(
model=model,
optimizer=optimizer,
loaders=loaders,
verbose=True,
check=True,
callbacks=callbacks,
)
assert True
if __name__ == "__main__":
print("test") | 0.821939 | 0.346099 |
from utils.generate import generate_data
import starry
import numpy as np
import matplotlib.pyplot as plt
import os
# Settings
ydeg = 15
smoothing = 0
# Array of inclinations
incs = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90])
veq = 60000.0 # m/s
# Plot the true map
fig, ax = plt.subplots(4, 3, figsize=(15, 10))
ax[0, 0].set_visible(False)
ax[0, 2].set_visible(False)
map = starry.Map(ydeg=ydeg)
map.load("spotdots", smoothing=smoothing)
map.show(ax=ax[0, 1], projection="moll")
ax[0, 1].annotate(
r"true",
xy=(0, 1),
xytext=(7, 7),
clip_on=False,
xycoords="axes fraction",
textcoords="offset points",
ha="left",
va="top",
fontsize=14,
color="k",
zorder=101,
)
ax[0, 1].set_rasterization_zorder(0)
# Solve & plot
ax = ax[1:].flatten()
map = None
for i, inc in enumerate(incs):
# Generate the data
data = generate_data(
inc=inc,
veq=veq,
image="spotdots",
flux_err=1e-4,
ydeg=ydeg,
smoothing=smoothing,
vsini_max=veq,
)
theta = data["data"]["theta"]
flux = data["data"]["flux"]
flux_err = data["data"]["flux_err"]
# Instantiate the map
if map is None:
map = starry.DopplerMap(lazy=False, **data["kwargs"])
map.spectrum = data["truths"]["spectrum"]
for n in range(map.udeg):
map[1 + n] = data["props"]["u"][n]
else:
map.inc = inc
map.veq = veq
# Solve
soln = map.solve(
flux,
theta=theta,
normalized=True,
fix_spectrum=True,
flux_err=flux_err,
spatial_cov=3e-5,
quiet=os.getenv("CI", "false") == "true",
)
# Visualize
map.show(ax=ax[i], projection="moll")
ax[i].annotate(
r"$%2d^\circ$" % inc,
xy=(0, 1),
xytext=(7, 7),
clip_on=False,
xycoords="axes fraction",
textcoords="offset points",
ha="left",
va="top",
fontsize=14,
color="k",
zorder=101,
)
ax[i].set_rasterization_zorder(0)
fig.savefig("inclinations.pdf", bbox_inches="tight", dpi=100) | src/figures/inclinations.py | from utils.generate import generate_data
import starry
import numpy as np
import matplotlib.pyplot as plt
import os
# Settings
ydeg = 15
smoothing = 0
# Array of inclinations
incs = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90])
veq = 60000.0 # m/s
# Plot the true map
fig, ax = plt.subplots(4, 3, figsize=(15, 10))
ax[0, 0].set_visible(False)
ax[0, 2].set_visible(False)
map = starry.Map(ydeg=ydeg)
map.load("spotdots", smoothing=smoothing)
map.show(ax=ax[0, 1], projection="moll")
ax[0, 1].annotate(
r"true",
xy=(0, 1),
xytext=(7, 7),
clip_on=False,
xycoords="axes fraction",
textcoords="offset points",
ha="left",
va="top",
fontsize=14,
color="k",
zorder=101,
)
ax[0, 1].set_rasterization_zorder(0)
# Solve & plot
ax = ax[1:].flatten()
map = None
for i, inc in enumerate(incs):
# Generate the data
data = generate_data(
inc=inc,
veq=veq,
image="spotdots",
flux_err=1e-4,
ydeg=ydeg,
smoothing=smoothing,
vsini_max=veq,
)
theta = data["data"]["theta"]
flux = data["data"]["flux"]
flux_err = data["data"]["flux_err"]
# Instantiate the map
if map is None:
map = starry.DopplerMap(lazy=False, **data["kwargs"])
map.spectrum = data["truths"]["spectrum"]
for n in range(map.udeg):
map[1 + n] = data["props"]["u"][n]
else:
map.inc = inc
map.veq = veq
# Solve
soln = map.solve(
flux,
theta=theta,
normalized=True,
fix_spectrum=True,
flux_err=flux_err,
spatial_cov=3e-5,
quiet=os.getenv("CI", "false") == "true",
)
# Visualize
map.show(ax=ax[i], projection="moll")
ax[i].annotate(
r"$%2d^\circ$" % inc,
xy=(0, 1),
xytext=(7, 7),
clip_on=False,
xycoords="axes fraction",
textcoords="offset points",
ha="left",
va="top",
fontsize=14,
color="k",
zorder=101,
)
ax[i].set_rasterization_zorder(0)
fig.savefig("inclinations.pdf", bbox_inches="tight", dpi=100) | 0.627609 | 0.356951 |
import logging
import time
import datetime
import random
import weaved
# BEGIN Configuration
# Weaved related configuration
PLUG_IP = '192.168.1.201' # Assumes the Smart Plug is configured for SSH and IR blaster
PLUG_USER = 'root' # Assumes password-less (key based) SSH authentication is set up
# IR codes for turning TV on/off; use the POWER code if there aren't separate codes for POWER ON and POWER OFF
TV_ON_CODE = '2203D6F71297971C8C47206E8C743267654B3708D374B492211147000111746D0100116D75110058770065476D006D654774657400000000000000000000000000000000000000000000000000000000'
TV_OFF_CODE = '2203D6F71197971B8C47206E8C743267654B3708D374B492211147000111746D0000116D76110058770065476D006D654774657400000000000000000000000000000000000000000000000000000000'
# NoBurglar configuration
START_TIME = '1930' # Daily start time in military time
END_TIME = '2300' # Daily end time
TV_ON_PERCENTAGE = 50.0 # Probably don't want the TV on the entire duration of the time window
# Quick way to enable/disable
# File in the local directory containing 0 or 1; 1 => enabled
# To enable - $ echo 1 > enabled
# To disable - $ echo 0 > enabled
ENABLED_FILENAME = 'enabled'
POLL_INTERVAL = 60 # seconds
# END Configuration
DEBUG = False
logging.basicConfig(level=logging.INFO, format='%(asctime)s - [%(levelname)s] %(message)s')
def enabled():
'''Checks the file to see if this is enabled'''
with open(ENABLED_FILENAME) as f:
if f.read().strip() == "1":
return True
else:
return False
# Global state
class State:
def __init__(self):
self.reset()
def reset(self):
self.is_light_on = False
self.is_tv_on = False
self.tv_start_time = 0 # When TV should be started today
self.tv_total_time = 0 # How long the TV has been on today
self.tv_done = False # Whether we have completed TV time today
state = State()
plug = weaved.Plug(PLUG_IP, PLUG_USER)
def run_triggers():
'''Run the triggers (TV/light) if applicable'''
logging.debug("Processing triggers")
now = datetime.datetime.today()
t1 = datetime.datetime.combine(now.date(), datetime.datetime.strptime(START_TIME, '%H%M').time())
t2 = datetime.datetime.combine(now.date(), datetime.datetime.strptime(END_TIME, '%H%M').time())
in_range = t1 <= now <= t2
# Check the light state
if not in_range:
if state.is_light_on:
logging.info('Turning light off')
if DEBUG or not plug.power_off():
state.is_light_on = False
elif not state.is_light_on:
logging.info('Turning light on')
if DEBUG or not plug.power_on():
state.is_light_on = True
# Randomly start the TV based on the percentage and the start and end times
if in_range:
if not state.tv_done:
tv_target_duration = TV_ON_PERCENTAGE / 100 * (t2 - t1).total_seconds()
if not state.tv_start_time:
delay = random.random() * ((t2 - t1).total_seconds() - tv_target_duration)
state.tv_start_time = t1 + datetime.timedelta(seconds = delay)
logging.info('TV will turn on at around ' + str(state.tv_start_time.time()) +
' for ' + str(tv_target_duration) + ' seconds')
if now > state.tv_start_time:
state.tv_total_time = (now - state.tv_start_time).total_seconds()
if state.tv_total_time >= tv_target_duration:
# time to turn the TV off
logging.info('Turning TV off')
if DEBUG or not plug.send_ir_code(TV_OFF_CODE):
state.is_tv_on = False
state.tv_start_time = state.tv_total_time = None
state.tv_done = True
elif not state.is_tv_on:
logging.info('Turning TV on')
if DEBUG or not plug.send_ir_code(TV_ON_CODE):
state.is_tv_on = True
else:
if state.is_tv_on:
# Usually shouldn't happen unless the tv end time is close to the END_TIME
# and the thread doesn't get woken up until it's past END_TIME
logging.info('Turning TV off since time window has elapsed')
if DEBUG or not plug.send_ir_code(TV_OFF_CODE):
state.tv_start_time = state.tv_total_time = None
state.is_tv_on = False
state.tv_done = False
if __name__ == '__main__':
# Check for action periodically
while True:
if enabled():
run_triggers()
else:
# If this goes from enabled -> disabled in the middle of time window, leave the
# physical state of the devices as it is; just reset the in-memory state
state.reset()
time.sleep(POLL_INTERVAL) | noburglar.py | import logging
import time
import datetime
import random
import weaved
# BEGIN Configuration
# Weaved related configuration
PLUG_IP = '192.168.1.201' # Assumes the Smart Plug is configured for SSH and IR blaster
PLUG_USER = 'root' # Assumes password-less (key based) SSH authentication is set up
# IR codes for turning TV on/off; use the POWER code if there aren't separate codes for POWER ON and POWER OFF
TV_ON_CODE = '2203D6F71297971C8C47206E8C743267654B3708D374B492211147000111746D0100116D75110058770065476D006D654774657400000000000000000000000000000000000000000000000000000000'
TV_OFF_CODE = '2203D6F71197971B8C47206E8C743267654B3708D374B492211147000111746D0000116D76110058770065476D006D654774657400000000000000000000000000000000000000000000000000000000'
# NoBurglar configuration
START_TIME = '1930' # Daily start time in military time
END_TIME = '2300' # Daily end time
TV_ON_PERCENTAGE = 50.0 # Probably don't want the TV on the entire duration of the time window
# Quick way to enable/disable
# File in the local directory containing 0 or 1; 1 => enabled
# To enable - $ echo 1 > enabled
# To disable - $ echo 0 > enabled
ENABLED_FILENAME = 'enabled'
POLL_INTERVAL = 60 # seconds
# END Configuration
# Dry-run switch: when True, run_triggers() skips the actual device commands
# (plug power / IR sends) but still updates the in-memory state.
DEBUG = False
logging.basicConfig(level=logging.INFO, format='%(asctime)s - [%(levelname)s] %(message)s')
def enabled():
    """Return True if the control file contains "1" (feature enabled).

    Reads ENABLED_FILENAME on every call so the daemon can be toggled at
    runtime by rewriting the file. Raises OSError if the file is missing,
    matching the original behavior.
    """
    with open(ENABLED_FILENAME) as f:
        # Return the comparison directly instead of if/else returning True/False.
        return f.read().strip() == "1"
# Global state
class State:
    """In-memory bookkeeping for today's light/TV triggers (no persistence)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Return all flags and timers to their start-of-day values."""
        self.is_light_on = False   # whether we believe the plug/light is on
        self.is_tv_on = False      # whether we believe the TV is on
        # Consistency fix: run_triggers() resets these to None, but reset()
        # used 0 — unify on None (both are falsy, so `if not tv_start_time`
        # behaves identically, and the type now matches the datetime/float
        # values assigned later).
        self.tv_start_time = None  # When TV should be started today (datetime)
        self.tv_total_time = None  # How long the TV has been on today (seconds)
        self.tv_done = False       # Whether we have completed TV time today
# Module-level singletons shared by run_triggers() and the main poll loop.
state = State()
plug = weaved.Plug(PLUG_IP, PLUG_USER)
def run_triggers():
    '''Run the triggers (TV/light) if applicable.

    Compares "now" against today's [START_TIME, END_TIME] window and drives
    two presence-simulation devices through the module-level ``plug``: the
    light (on for the whole window) and the TV (on via IR for a random
    TV_ON_PERCENTAGE slice of the window). All bookkeeping lives in the
    module-level ``state``; when DEBUG is set, device commands are skipped
    but state is still updated.
    '''
    logging.debug("Processing triggers")
    now = datetime.datetime.today()
    # Today's window boundaries, built from the HHMM config strings.
    t1 = datetime.datetime.combine(now.date(), datetime.datetime.strptime(START_TIME, '%H%M').time())
    t2 = datetime.datetime.combine(now.date(), datetime.datetime.strptime(END_TIME, '%H%M').time())
    in_range = t1 <= now <= t2
    # Check the light state
    if not in_range:
        if state.is_light_on:
            logging.info('Turning light off')
            # power_off() presumably returns falsy on success — TODO confirm
            # against weaved.Plug; state only flips when the command succeeds
            # (or in DEBUG dry-run mode).
            if DEBUG or not plug.power_off():
                state.is_light_on = False
    elif not state.is_light_on:
        logging.info('Turning light on')
        if DEBUG or not plug.power_on():
            state.is_light_on = True
    # Randomly start the TV based on the percentage and the start and end times
    if in_range:
        if not state.tv_done:
            tv_target_duration = TV_ON_PERCENTAGE / 100 * (t2 - t1).total_seconds()
            if not state.tv_start_time:
                # First pass today: pick a random start time such that the
                # full target duration still fits before END_TIME.
                delay = random.random() * ((t2 - t1).total_seconds() - tv_target_duration)
                state.tv_start_time = t1 + datetime.timedelta(seconds = delay)
                logging.info('TV will turn on at around ' + str(state.tv_start_time.time()) +
                             ' for ' + str(tv_target_duration) + ' seconds')
            if now > state.tv_start_time:
                # Elapsed time since the scheduled start, whether or not the
                # TV has actually been switched on yet.
                state.tv_total_time = (now - state.tv_start_time).total_seconds()
                if state.tv_total_time >= tv_target_duration:
                    # time to turn the TV off
                    logging.info('Turning TV off')
                    if DEBUG or not plug.send_ir_code(TV_OFF_CODE):
                        state.is_tv_on = False
                        state.tv_start_time = state.tv_total_time = None
                        state.tv_done = True
                elif not state.is_tv_on:
                    logging.info('Turning TV on')
                    if DEBUG or not plug.send_ir_code(TV_ON_CODE):
                        state.is_tv_on = True
    else:
        if state.is_tv_on:
            # Usually shouldn't happen unless the tv end time is close to the END_TIME
            # and the thread doesn't get woken up until it's past END_TIME
            logging.info('Turning TV off since time window has elapsed')
            if DEBUG or not plug.send_ir_code(TV_OFF_CODE):
                state.tv_start_time = state.tv_total_time = None
                state.is_tv_on = False
        # Outside the window: re-arm the TV logic for the next day.
        state.tv_done = False
if __name__ == '__main__':
    # Poll forever, firing the triggers only while the enable file says "1".
    while True:
        if not enabled():
            # Went from enabled -> disabled mid-window: leave the devices
            # physically as they are and just forget the in-memory state.
            state.reset()
        else:
            run_triggers()
        time.sleep(POLL_INTERVAL)
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from PIL import Image
class create_data():
    """Converts the .wav samples into spectrogram images and metadata CSVs."""

    # Output size (width, height) in pixels for every spectrogram PNG.
    RESOLUTION = (240, 160)

    @staticmethod
    def _render_spectrogram(audio_path, spec_path):
        """Render one .wav file as a borderless mel-spectrogram PNG.

        The figure is saved first, then reopened and downscaled to
        RESOLUTION so every image has the same shape.
        """
        data, sampling_rate = librosa.load(audio_path)
        fig = plt.figure(figsize=(12, 4))
        ax = fig.add_axes([0, 0, 1, 1])
        ax.axis('off')  # no axes/borders: the PNG is data, not a labeled plot
        S = librosa.feature.melspectrogram(y=data, sr=sampling_rate)
        librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
                                 y_axis='mel', fmax=8000, x_axis='time')
        plt.savefig(spec_path)
        img = Image.open(spec_path)
        img = img.resize(create_data.RESOLUTION)
        img.save(spec_path)
        plt.close(fig)

    @staticmethod
    def create_spectrograms():
        """Creates spectrograms from all of the .wav files in /all_sounds."""
        # Identical pipeline for both splits; only the directories differ
        # (the original duplicated ~20 lines per split).
        for split in ("test", "train"):
            sample_dir = "./all_samples/" + split
            for root, dirs, files in os.walk(sample_dir):
                for f in files:
                    spec_file_name = "./specs/" + split + "/" + f[:-4] + ".png"
                    create_data._render_spectrogram(sample_dir + "/" + f, spec_file_name)

    @staticmethod
    def _label_vector(filename):
        """One-hot [Nothing, BP1, BP2] label from the filename prefix."""
        prefix = filename[0:3]
        if prefix == "bp1":
            return [0, 1, 0]
        if prefix == "bp2":
            return [0, 0, 1]
        return [1, 0, 0]

    @staticmethod
    def generate_metadata():
        """Write metadata.csv (audio features + one-hot label) for each split."""
        cols = ['Name', 'Spectral_Center', 'Cross_Rate', 'RMS', 'Nothing', 'BP1', 'BP2']
        for split in ("train", "test"):
            sample_dir = "./all_samples/" + split
            rows = []
            for root, dirs, files in os.walk(sample_dir):
                for f in files:
                    data, sampling_rate = librosa.load(sample_dir + "/" + f)
                    spectral_centroid = np.average(librosa.feature.spectral_centroid(y=data, sr=sampling_rate))
                    # BUG FIX: the original passed sampling_rate as the second
                    # positional argument of zero_crossing_rate, which is
                    # frame_length (not sr) — use the default frame length.
                    zero_crossing_rate = np.average(librosa.feature.zero_crossing_rate(y=data))
                    rms = np.average(librosa.feature.rms(y=data))
                    label = create_data._label_vector(f)
                    rows.append([f[:-4] + ".png", spectral_centroid, zero_crossing_rate, rms] + label)
            # Build the frame in one shot; DataFrame.append was removed in pandas 2.0.
            metadata = pd.DataFrame(rows, columns=cols)
            print(metadata)
            metadata.to_csv('./specs/' + split + '/metadata.csv')
if __name__ == "__main__":
# create_data.create_spectrograms()
create_data.generate_metadata() | create_spectrograms.py | import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from PIL import Image
class create_data():
    """Converts the .wav samples into spectrogram images and metadata CSVs."""

    # Output size (width, height) in pixels for every spectrogram PNG.
    RESOLUTION = (240, 160)

    @staticmethod
    def _render_spectrogram(audio_path, spec_path):
        """Render one .wav file as a borderless mel-spectrogram PNG.

        The figure is saved first, then reopened and downscaled to
        RESOLUTION so every image has the same shape.
        """
        data, sampling_rate = librosa.load(audio_path)
        fig = plt.figure(figsize=(12, 4))
        ax = fig.add_axes([0, 0, 1, 1])
        ax.axis('off')  # no axes/borders: the PNG is data, not a labeled plot
        S = librosa.feature.melspectrogram(y=data, sr=sampling_rate)
        librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
                                 y_axis='mel', fmax=8000, x_axis='time')
        plt.savefig(spec_path)
        img = Image.open(spec_path)
        img = img.resize(create_data.RESOLUTION)
        img.save(spec_path)
        plt.close(fig)

    @staticmethod
    def create_spectrograms():
        """Creates spectrograms from all of the .wav files in /all_sounds."""
        # Identical pipeline for both splits; only the directories differ
        # (the original duplicated ~20 lines per split).
        for split in ("test", "train"):
            sample_dir = "./all_samples/" + split
            for root, dirs, files in os.walk(sample_dir):
                for f in files:
                    spec_file_name = "./specs/" + split + "/" + f[:-4] + ".png"
                    create_data._render_spectrogram(sample_dir + "/" + f, spec_file_name)

    @staticmethod
    def _label_vector(filename):
        """One-hot [Nothing, BP1, BP2] label from the filename prefix."""
        prefix = filename[0:3]
        if prefix == "bp1":
            return [0, 1, 0]
        if prefix == "bp2":
            return [0, 0, 1]
        return [1, 0, 0]

    @staticmethod
    def generate_metadata():
        """Write metadata.csv (audio features + one-hot label) for each split."""
        cols = ['Name', 'Spectral_Center', 'Cross_Rate', 'RMS', 'Nothing', 'BP1', 'BP2']
        for split in ("train", "test"):
            sample_dir = "./all_samples/" + split
            rows = []
            for root, dirs, files in os.walk(sample_dir):
                for f in files:
                    data, sampling_rate = librosa.load(sample_dir + "/" + f)
                    spectral_centroid = np.average(librosa.feature.spectral_centroid(y=data, sr=sampling_rate))
                    # BUG FIX: the original passed sampling_rate as the second
                    # positional argument of zero_crossing_rate, which is
                    # frame_length (not sr) — use the default frame length.
                    zero_crossing_rate = np.average(librosa.feature.zero_crossing_rate(y=data))
                    rms = np.average(librosa.feature.rms(y=data))
                    label = create_data._label_vector(f)
                    rows.append([f[:-4] + ".png", spectral_centroid, zero_crossing_rate, rms] + label)
            # Build the frame in one shot; DataFrame.append was removed in pandas 2.0.
            metadata = pd.DataFrame(rows, columns=cols)
            print(metadata)
            metadata.to_csv('./specs/' + split + '/metadata.csv')
if __name__ == "__main__":
# create_data.create_spectrograms()
create_data.generate_metadata() | 0.431345 | 0.431345 |
from antlr4 import *
if __name__ is not None and "." in __name__:
from .SABParser import SABParser
else:
from SABParser import SABParser
# This class defines a complete listener for a parse tree produced by SABParser.
class SABListener(ParseTreeListener):
    """No-op base listener for SABParser parse trees.

    Subclass this and override only the enter/exit callbacks you need;
    every callback here is an intentional no-op.
    NOTE(review): this looks like ANTLR-generated boilerplate — prefer
    regenerating it from the grammar over hand-editing.
    """

    # Enter a parse tree produced by SABParser#s.
    def enterS(self, ctx:SABParser.SContext):
        pass

    # Exit a parse tree produced by SABParser#s.
    def exitS(self, ctx:SABParser.SContext):
        pass

    # Enter a parse tree produced by SABParser#head.
    def enterHead(self, ctx:SABParser.HeadContext):
        pass

    # Exit a parse tree produced by SABParser#head.
    def exitHead(self, ctx:SABParser.HeadContext):
        pass

    # Enter a parse tree produced by SABParser#source.
    def enterSource(self, ctx:SABParser.SourceContext):
        pass

    # Exit a parse tree produced by SABParser#source.
    def exitSource(self, ctx:SABParser.SourceContext):
        pass

    # Enter a parse tree produced by SABParser#group.
    def enterGroup(self, ctx:SABParser.GroupContext):
        pass

    # Exit a parse tree produced by SABParser#group.
    def exitGroup(self, ctx:SABParser.GroupContext):
        pass

    # Enter a parse tree produced by SABParser#overlay_image.
    def enterOverlay_image(self, ctx:SABParser.Overlay_imageContext):
        pass

    # Exit a parse tree produced by SABParser#overlay_image.
    def exitOverlay_image(self, ctx:SABParser.Overlay_imageContext):
        pass

    # Enter a parse tree produced by SABParser#overlay_text.
    def enterOverlay_text(self, ctx:SABParser.Overlay_textContext):
        pass

    # Exit a parse tree produced by SABParser#overlay_text.
    def exitOverlay_text(self, ctx:SABParser.Overlay_textContext):
        pass

    # Enter a parse tree produced by SABParser#command.
    def enterCommand(self, ctx:SABParser.CommandContext):
        pass

    # Exit a parse tree produced by SABParser#command.
    def exitCommand(self, ctx:SABParser.CommandContext):
        pass

    # Enter a parse tree produced by SABParser#position.
    def enterPosition(self, ctx:SABParser.PositionContext):
        pass

    # Exit a parse tree produced by SABParser#position.
    def exitPosition(self, ctx:SABParser.PositionContext):
        pass

    # Enter a parse tree produced by SABParser#justified_pos.
    def enterJustified_pos(self, ctx:SABParser.Justified_posContext):
        pass

    # Exit a parse tree produced by SABParser#justified_pos.
    def exitJustified_pos(self, ctx:SABParser.Justified_posContext):
        pass

    # Enter a parse tree produced by SABParser#slideshow.
    def enterSlideshow(self, ctx:SABParser.SlideshowContext):
        pass

    # Exit a parse tree produced by SABParser#slideshow.
    def exitSlideshow(self, ctx:SABParser.SlideshowContext):
        pass

    # Enter a parse tree produced by SABParser#slidesource.
    def enterSlidesource(self, ctx:SABParser.SlidesourceContext):
        pass

    # Exit a parse tree produced by SABParser#slidesource.
    def exitSlidesource(self, ctx:SABParser.SlidesourceContext):
        pass

    # Enter a parse tree produced by SABParser#slidetime.
    def enterSlidetime(self, ctx:SABParser.SlidetimeContext):
        pass

    # Exit a parse tree produced by SABParser#slidetime.
    def exitSlidetime(self, ctx:SABParser.SlidetimeContext):
        pass

    # Enter a parse tree produced by SABParser#slideorder.
    def enterSlideorder(self, ctx:SABParser.SlideorderContext):
        pass

    # Exit a parse tree produced by SABParser#slideorder.
    def exitSlideorder(self, ctx:SABParser.SlideorderContext):
        pass

    # Enter a parse tree produced by SABParser#timetype.
    def enterTimetype(self, ctx:SABParser.TimetypeContext):
        pass

    # Exit a parse tree produced by SABParser#timetype.
    def exitTimetype(self, ctx:SABParser.TimetypeContext):
        pass

    # Enter a parse tree produced by SABParser#path.
    def enterPath(self, ctx:SABParser.PathContext):
        pass

    # Exit a parse tree produced by SABParser#path.
    def exitPath(self, ctx:SABParser.PathContext):
        pass

    # Enter a parse tree produced by SABParser#image.
    def enterImage(self, ctx:SABParser.ImageContext):
        pass

    # Exit a parse tree produced by SABParser#image.
    def exitImage(self, ctx:SABParser.ImageContext):
        pass

    # Enter a parse tree produced by SABParser#script.
    def enterScript(self, ctx:SABParser.ScriptContext):
        pass

    # Exit a parse tree produced by SABParser#script.
    def exitScript(self, ctx:SABParser.ScriptContext):
        pass

    # Enter a parse tree produced by SABParser#variable.
    def enterVariable(self, ctx:SABParser.VariableContext):
        pass

    # Exit a parse tree produced by SABParser#variable.
    def exitVariable(self, ctx:SABParser.VariableContext):
        pass
if __name__ is not None and "." in __name__:
from .SABParser import SABParser
else:
from SABParser import SABParser
# This class defines a complete listener for a parse tree produced by SABParser.
class SABListener(ParseTreeListener):
    """No-op base listener for SABParser parse trees.

    Subclass this and override only the enter/exit callbacks you need;
    every callback here is an intentional no-op.
    NOTE(review): this looks like ANTLR-generated boilerplate — prefer
    regenerating it from the grammar over hand-editing.
    """

    # Enter a parse tree produced by SABParser#s.
    def enterS(self, ctx:SABParser.SContext):
        pass

    # Exit a parse tree produced by SABParser#s.
    def exitS(self, ctx:SABParser.SContext):
        pass

    # Enter a parse tree produced by SABParser#head.
    def enterHead(self, ctx:SABParser.HeadContext):
        pass

    # Exit a parse tree produced by SABParser#head.
    def exitHead(self, ctx:SABParser.HeadContext):
        pass

    # Enter a parse tree produced by SABParser#source.
    def enterSource(self, ctx:SABParser.SourceContext):
        pass

    # Exit a parse tree produced by SABParser#source.
    def exitSource(self, ctx:SABParser.SourceContext):
        pass

    # Enter a parse tree produced by SABParser#group.
    def enterGroup(self, ctx:SABParser.GroupContext):
        pass

    # Exit a parse tree produced by SABParser#group.
    def exitGroup(self, ctx:SABParser.GroupContext):
        pass

    # Enter a parse tree produced by SABParser#overlay_image.
    def enterOverlay_image(self, ctx:SABParser.Overlay_imageContext):
        pass

    # Exit a parse tree produced by SABParser#overlay_image.
    def exitOverlay_image(self, ctx:SABParser.Overlay_imageContext):
        pass

    # Enter a parse tree produced by SABParser#overlay_text.
    def enterOverlay_text(self, ctx:SABParser.Overlay_textContext):
        pass

    # Exit a parse tree produced by SABParser#overlay_text.
    def exitOverlay_text(self, ctx:SABParser.Overlay_textContext):
        pass

    # Enter a parse tree produced by SABParser#command.
    def enterCommand(self, ctx:SABParser.CommandContext):
        pass

    # Exit a parse tree produced by SABParser#command.
    def exitCommand(self, ctx:SABParser.CommandContext):
        pass

    # Enter a parse tree produced by SABParser#position.
    def enterPosition(self, ctx:SABParser.PositionContext):
        pass

    # Exit a parse tree produced by SABParser#position.
    def exitPosition(self, ctx:SABParser.PositionContext):
        pass

    # Enter a parse tree produced by SABParser#justified_pos.
    def enterJustified_pos(self, ctx:SABParser.Justified_posContext):
        pass

    # Exit a parse tree produced by SABParser#justified_pos.
    def exitJustified_pos(self, ctx:SABParser.Justified_posContext):
        pass

    # Enter a parse tree produced by SABParser#slideshow.
    def enterSlideshow(self, ctx:SABParser.SlideshowContext):
        pass

    # Exit a parse tree produced by SABParser#slideshow.
    def exitSlideshow(self, ctx:SABParser.SlideshowContext):
        pass

    # Enter a parse tree produced by SABParser#slidesource.
    def enterSlidesource(self, ctx:SABParser.SlidesourceContext):
        pass

    # Exit a parse tree produced by SABParser#slidesource.
    def exitSlidesource(self, ctx:SABParser.SlidesourceContext):
        pass

    # Enter a parse tree produced by SABParser#slidetime.
    def enterSlidetime(self, ctx:SABParser.SlidetimeContext):
        pass

    # Exit a parse tree produced by SABParser#slidetime.
    def exitSlidetime(self, ctx:SABParser.SlidetimeContext):
        pass

    # Enter a parse tree produced by SABParser#slideorder.
    def enterSlideorder(self, ctx:SABParser.SlideorderContext):
        pass

    # Exit a parse tree produced by SABParser#slideorder.
    def exitSlideorder(self, ctx:SABParser.SlideorderContext):
        pass

    # Enter a parse tree produced by SABParser#timetype.
    def enterTimetype(self, ctx:SABParser.TimetypeContext):
        pass

    # Exit a parse tree produced by SABParser#timetype.
    def exitTimetype(self, ctx:SABParser.TimetypeContext):
        pass

    # Enter a parse tree produced by SABParser#path.
    def enterPath(self, ctx:SABParser.PathContext):
        pass

    # Exit a parse tree produced by SABParser#path.
    def exitPath(self, ctx:SABParser.PathContext):
        pass

    # Enter a parse tree produced by SABParser#image.
    def enterImage(self, ctx:SABParser.ImageContext):
        pass

    # Exit a parse tree produced by SABParser#image.
    def exitImage(self, ctx:SABParser.ImageContext):
        pass

    # Enter a parse tree produced by SABParser#script.
    def enterScript(self, ctx:SABParser.ScriptContext):
        pass

    # Exit a parse tree produced by SABParser#script.
    def exitScript(self, ctx:SABParser.ScriptContext):
        pass

    # Enter a parse tree produced by SABParser#variable.
    def enterVariable(self, ctx:SABParser.VariableContext):
        pass

    # Exit a parse tree produced by SABParser#variable.
    def exitVariable(self, ctx:SABParser.VariableContext):
        pass
from flask.app import Flask
from sqlalchemy.sql.schema import ForeignKey
from .db import db, ma
from datetime import datetime as datetime2
from sqlalchemy.orm import relationship,backref
from .default_method_result import DefaultMethodResult
from sqlalchemy.dialects.postgresql import JSON, UUID
from sqlalchemy.sql.expression import distinct
from sqlalchemy import text
import json
class FOIRequestComment(db.Model):
    """ORM model and query helpers for comments on FOI ministry requests."""
    # Name of the table in our database
    __tablename__ = 'FOIRequestComments'
    # Defining the columns
    commentid = db.Column(db.Integer, primary_key=True, autoincrement=True)
    ministryrequestid = db.Column(db.Integer, db.ForeignKey('FOIMinistryRequests.foiministryrequestid'))
    version = db.Column(db.Integer, db.ForeignKey('FOIMinistryRequests.version'))
    comment = db.Column(db.Text, unique=False, nullable=True)
    taggedusers = db.Column(JSON, unique=False, nullable=True)
    # Null for a top-level comment; the parent's commentid for replies.
    parentcommentid = db.Column(db.Integer, nullable=True)
    isactive = db.Column(db.Boolean, unique=False, nullable=False)
    # BUG FIX: pass the callable, not datetime2.now() — calling it here
    # evaluates once at import time, so every row would share that one
    # frozen timestamp as its default.
    created_at = db.Column(db.DateTime, default=datetime2.now)
    createdby = db.Column(db.String(120), unique=False, nullable=True)
    updated_at = db.Column(db.DateTime, nullable=True)
    updatedby = db.Column(db.String(120), unique=False, nullable=True)
    commenttypeid = db.Column(db.Integer, unique=False, nullable=False)

    @classmethod
    def savecomment(cls, commenttypeid, foirequestcomment, version, userid, commentcreatedate=None) -> DefaultMethodResult:
        """Insert a new comment row; returns the new commentid in the result.

        foirequestcomment is a dict with at least 'ministryrequestid' and
        'comment'; 'parentcommentid' and 'taggedusers' are optional.
        """
        parentcommentid = foirequestcomment.get("parentcommentid")
        taggedusers = foirequestcomment.get("taggedusers")
        _createddate = datetime2.now().isoformat() if commentcreatedate is None else commentcreatedate
        newcomment = FOIRequestComment(
            commenttypeid=commenttypeid,
            ministryrequestid=foirequestcomment["ministryrequestid"],
            version=version,
            comment=foirequestcomment["comment"],
            parentcommentid=parentcommentid,
            isactive=True,
            created_at=_createddate,
            createdby=userid,
            taggedusers=taggedusers,
        )
        db.session.add(newcomment)
        db.session.commit()
        return DefaultMethodResult(True, 'Comment added', newcomment.commentid)

    @classmethod
    def disablecomment(cls, commentid, userid):
        """Soft-delete a comment (isactive=False); the row is never removed."""
        comment = db.session.query(FOIRequestComment).filter_by(commentid=commentid)
        if comment.count() > 0:
            comment.update({FOIRequestComment.isactive: False,
                            FOIRequestComment.updatedby: userid,
                            FOIRequestComment.updated_at: datetime2.now()},
                           synchronize_session=False)
            db.session.commit()
            return DefaultMethodResult(True, 'Comment disabled', commentid)
        # NOTE(review): success flag is True even when nothing matched —
        # callers appear to distinguish on the message; confirm before changing.
        return DefaultMethodResult(True, 'No Comment found', commentid)

    @classmethod
    def updatecomment(cls, commentid, foirequestcomment, userid):
        """Replace the comment text/tagged users and re-activate the row."""
        comment = db.session.query(FOIRequestComment).filter_by(commentid=commentid)
        taggedusers = foirequestcomment.get("taggedusers")
        if comment.count() > 0:
            comment.update({FOIRequestComment.isactive: True,
                            FOIRequestComment.comment: foirequestcomment["comment"],
                            FOIRequestComment.updatedby: userid,
                            FOIRequestComment.updated_at: datetime2.now(),
                            FOIRequestComment.taggedusers: taggedusers},
                           synchronize_session=False)
            db.session.commit()
            return DefaultMethodResult(True, 'Comment updated', commentid)
        # NOTE(review): as above, True even when no row matched.
        return DefaultMethodResult(True, 'No Comment found', commentid)

    @classmethod
    def getcomments(cls, ministryrequestid) -> DefaultMethodResult:
        """Return all active comments for a ministry request, newest first."""
        comment_schema = FOIRequestCommentSchema(many=True)
        query = db.session.query(FOIRequestComment).filter_by(
            ministryrequestid=ministryrequestid, isactive=True
        ).order_by(FOIRequestComment.commentid.desc()).all()
        return comment_schema.dump(query)
class FOIRequestCommentSchema(ma.Schema):
class Meta:
fields = ('commentid', 'ministryrequestid', 'parentcommentid','comment', 'commenttypeid','commenttype','isactive','created_at','createdby','updated_at','updatedby','taggedusers') | request-management-api/request_api/models/FOIRequestComments.py | from flask.app import Flask
from sqlalchemy.sql.schema import ForeignKey
from .db import db, ma
from datetime import datetime as datetime2
from sqlalchemy.orm import relationship,backref
from .default_method_result import DefaultMethodResult
from sqlalchemy.dialects.postgresql import JSON, UUID
from sqlalchemy.sql.expression import distinct
from sqlalchemy import text
import json
class FOIRequestComment(db.Model):
    """ORM model and query helpers for comments on FOI ministry requests."""
    # Name of the table in our database
    __tablename__ = 'FOIRequestComments'
    # Defining the columns
    commentid = db.Column(db.Integer, primary_key=True, autoincrement=True)
    ministryrequestid = db.Column(db.Integer, db.ForeignKey('FOIMinistryRequests.foiministryrequestid'))
    version = db.Column(db.Integer, db.ForeignKey('FOIMinistryRequests.version'))
    comment = db.Column(db.Text, unique=False, nullable=True)
    taggedusers = db.Column(JSON, unique=False, nullable=True)
    # Null for a top-level comment; the parent's commentid for replies.
    parentcommentid = db.Column(db.Integer, nullable=True)
    isactive = db.Column(db.Boolean, unique=False, nullable=False)
    # BUG FIX: pass the callable, not datetime2.now() — calling it here
    # evaluates once at import time, so every row would share that one
    # frozen timestamp as its default.
    created_at = db.Column(db.DateTime, default=datetime2.now)
    createdby = db.Column(db.String(120), unique=False, nullable=True)
    updated_at = db.Column(db.DateTime, nullable=True)
    updatedby = db.Column(db.String(120), unique=False, nullable=True)
    commenttypeid = db.Column(db.Integer, unique=False, nullable=False)

    @classmethod
    def savecomment(cls, commenttypeid, foirequestcomment, version, userid, commentcreatedate=None) -> DefaultMethodResult:
        """Insert a new comment row; returns the new commentid in the result.

        foirequestcomment is a dict with at least 'ministryrequestid' and
        'comment'; 'parentcommentid' and 'taggedusers' are optional.
        """
        parentcommentid = foirequestcomment.get("parentcommentid")
        taggedusers = foirequestcomment.get("taggedusers")
        _createddate = datetime2.now().isoformat() if commentcreatedate is None else commentcreatedate
        newcomment = FOIRequestComment(
            commenttypeid=commenttypeid,
            ministryrequestid=foirequestcomment["ministryrequestid"],
            version=version,
            comment=foirequestcomment["comment"],
            parentcommentid=parentcommentid,
            isactive=True,
            created_at=_createddate,
            createdby=userid,
            taggedusers=taggedusers,
        )
        db.session.add(newcomment)
        db.session.commit()
        return DefaultMethodResult(True, 'Comment added', newcomment.commentid)

    @classmethod
    def disablecomment(cls, commentid, userid):
        """Soft-delete a comment (isactive=False); the row is never removed."""
        comment = db.session.query(FOIRequestComment).filter_by(commentid=commentid)
        if comment.count() > 0:
            comment.update({FOIRequestComment.isactive: False,
                            FOIRequestComment.updatedby: userid,
                            FOIRequestComment.updated_at: datetime2.now()},
                           synchronize_session=False)
            db.session.commit()
            return DefaultMethodResult(True, 'Comment disabled', commentid)
        # NOTE(review): success flag is True even when nothing matched —
        # callers appear to distinguish on the message; confirm before changing.
        return DefaultMethodResult(True, 'No Comment found', commentid)

    @classmethod
    def updatecomment(cls, commentid, foirequestcomment, userid):
        """Replace the comment text/tagged users and re-activate the row."""
        comment = db.session.query(FOIRequestComment).filter_by(commentid=commentid)
        taggedusers = foirequestcomment.get("taggedusers")
        if comment.count() > 0:
            comment.update({FOIRequestComment.isactive: True,
                            FOIRequestComment.comment: foirequestcomment["comment"],
                            FOIRequestComment.updatedby: userid,
                            FOIRequestComment.updated_at: datetime2.now(),
                            FOIRequestComment.taggedusers: taggedusers},
                           synchronize_session=False)
            db.session.commit()
            return DefaultMethodResult(True, 'Comment updated', commentid)
        # NOTE(review): as above, True even when no row matched.
        return DefaultMethodResult(True, 'No Comment found', commentid)

    @classmethod
    def getcomments(cls, ministryrequestid) -> DefaultMethodResult:
        """Return all active comments for a ministry request, newest first."""
        comment_schema = FOIRequestCommentSchema(many=True)
        query = db.session.query(FOIRequestComment).filter_by(
            ministryrequestid=ministryrequestid, isactive=True
        ).order_by(FOIRequestComment.commentid.desc()).all()
        return comment_schema.dump(query)
class FOIRequestCommentSchema(ma.Schema):
class Meta:
fields = ('commentid', 'ministryrequestid', 'parentcommentid','comment', 'commenttypeid','commenttype','isactive','created_at','createdby','updated_at','updatedby','taggedusers') | 0.395835 | 0.05328 |
import urwid
from console.ui.images.pane import ImagePane
from console.ui.containers.pane import ContainerPane
from console.widgets.tabs import Tab, TabFrame
from console.modes import modemap
class ImagesTab(Tab):
    """The "images" tab: key-binding mode plus the image list pane."""

    label = "images"
    # Key chord -> (action-name, help-text); presumably dispatched through
    # the mode machinery in console.modes — TODO confirm.
    mode = {
        'ctrl n': ('next-image', 'set focus on the next image'),
        'ctrl p': ('prev-image', 'set focus on the previous image'),
        'ctrl d': ('delete-image', 'delete the selected image(s)'),
        'ctrl y': ('view-history', 'view history of selected image'),
        'ctrl a': ('toggle-show-all', 'toggle whether all image layers are shown'),
        'ctrl t': ('tag-image', 'tag the selected image'),
        'ctrl b': ('push-image', 'push the selected image'),
        'ctrl v': ('inspect-details', 'inspect the selected image'),
        'ctrl k': ('set-mark', 'select current image'),
        'ctrl u': ('unmark-images', 'unmark all selected images'),
        'ctrl l': ('pull-image', 'pull image from repository'),
    }

    def get_content(self):
        """Return the pane widget displayed while this tab is active."""
        return ImagePane()
class ContainersTab(Tab):
    """The "containers" tab: key-binding mode plus the container list pane."""

    label = "containers"
    # Key chord -> (action-name, help-text); presumably dispatched through
    # the mode machinery in console.modes — TODO confirm.
    mode = {
        'ctrl n': ('next-container', 'set focus on the next container'),
        'ctrl p': ('prev-container', 'set focus on the previous container'),
        'ctrl d': ('delete-container', 'delete the selected container(s)'),
        'ctrl a': ('toggle-show-all', 'toggle whether all containers are shown'),
        'ctrl t': ('commit-container', 'commit the selected container'),
        'ctrl v': ('inspect-details', 'inspect the selected container'),
        'ctrl k': ('set-mark', 'select current container'),
        'ctrl r': ('run-container(s)', 'run the selected container(s) in screen or tmux'),
        'ctrl u': ('unmark-containers', 'unmark all selected containers'),
        'ctrl e': ('rename-container', 'rename the selected container'),
        'ctrl f': ('inspect-changes', 'inspect changes on container filesystem'),
        'ctrl g': ('restart-container', 'restart the selected container'),
        'ctrl l': ('kill-container', 'kill the selected container'),
        'ctrl x': ('pause-container', 'pause the selected container'),
        'ctrl o': ('unpause-container', 'unpause the selected container'),
        'ctrl w': ('start-container', 'start the selected container'),
        'ctrl y': ('stop-container', 'stop the selected container'),
        'shift tab': ('top-container', 'display running processes'),
    }

    def get_content(self):
        """Return the pane widget displayed while this tab is active."""
        return ContainerPane()
class InfoTab(Tab):
    """Bare "info" tab; inherits all behavior from Tab.

    NOTE(review): not registered in RootFrame's tab tuple — appears
    unused/planned; confirm before removing.
    """
    label = "info"
class RootFrame(TabFrame):
    """
    The main frame of the application. It contains the tab header and the main
    content pane. Flipping through the tabs should cycle the content pane with
    the content of each respective tab content.
    """
    def __init__(self):
        # Tab order defines header order: containers first, then images.
        tabs = (ContainersTab(), ImagesTab(),)
        TabFrame.__init__(self, tabs)
    def make_header(self, tabs):
        """
        Generate the frame header.

        Lays out the application title (weight 1) beside the tab bar
        (weight 2) in one urwid Columns row; the title and tab widgets are
        also stashed as attributes on the Columns object.
        """
        columns = urwid.Columns([])
        columns.title = urwid.Text("docker-console 0.1.0")
        columns.tabs = TabFrame.make_header(self, tabs)
        columns.contents = [
            (columns.title, columns.options('weight', 1)),
            (columns.tabs, columns.options('weight', 2)),
        ]
        return columns
from console.ui.images.pane import ImagePane
from console.ui.containers.pane import ContainerPane
from console.widgets.tabs import Tab, TabFrame
from console.modes import modemap
class ImagesTab(Tab):
    """The "images" tab: key-binding mode plus the image list pane."""

    label = "images"
    # Key chord -> (action-name, help-text); presumably dispatched through
    # the mode machinery in console.modes — TODO confirm.
    mode = {
        'ctrl n': ('next-image', 'set focus on the next image'),
        'ctrl p': ('prev-image', 'set focus on the previous image'),
        'ctrl d': ('delete-image', 'delete the selected image(s)'),
        'ctrl y': ('view-history', 'view history of selected image'),
        'ctrl a': ('toggle-show-all', 'toggle whether all image layers are shown'),
        'ctrl t': ('tag-image', 'tag the selected image'),
        'ctrl b': ('push-image', 'push the selected image'),
        'ctrl v': ('inspect-details', 'inspect the selected image'),
        'ctrl k': ('set-mark', 'select current image'),
        'ctrl u': ('unmark-images', 'unmark all selected images'),
        'ctrl l': ('pull-image', 'pull image from repository'),
    }

    def get_content(self):
        """Return the pane widget displayed while this tab is active."""
        return ImagePane()
class ContainersTab(Tab):
    """The "containers" tab: key-binding mode plus the container list pane."""

    label = "containers"
    # Key chord -> (action-name, help-text); presumably dispatched through
    # the mode machinery in console.modes — TODO confirm.
    mode = {
        'ctrl n': ('next-container', 'set focus on the next container'),
        'ctrl p': ('prev-container', 'set focus on the previous container'),
        'ctrl d': ('delete-container', 'delete the selected container(s)'),
        'ctrl a': ('toggle-show-all', 'toggle whether all containers are shown'),
        'ctrl t': ('commit-container', 'commit the selected container'),
        'ctrl v': ('inspect-details', 'inspect the selected container'),
        'ctrl k': ('set-mark', 'select current container'),
        'ctrl r': ('run-container(s)', 'run the selected container(s) in screen or tmux'),
        'ctrl u': ('unmark-containers', 'unmark all selected containers'),
        'ctrl e': ('rename-container', 'rename the selected container'),
        'ctrl f': ('inspect-changes', 'inspect changes on container filesystem'),
        'ctrl g': ('restart-container', 'restart the selected container'),
        'ctrl l': ('kill-container', 'kill the selected container'),
        'ctrl x': ('pause-container', 'pause the selected container'),
        'ctrl o': ('unpause-container', 'unpause the selected container'),
        'ctrl w': ('start-container', 'start the selected container'),
        'ctrl y': ('stop-container', 'stop the selected container'),
        'shift tab': ('top-container', 'display running processes'),
    }

    def get_content(self):
        """Return the pane widget displayed while this tab is active."""
        return ContainerPane()
class InfoTab(Tab):
label = "info"
class RootFrame(TabFrame):
"""
The main frame of the application. It contains the tab header and the main
content pane. Flipping through the tabs should cycle the content pane with
the content of each respective tab content.
"""
def __init__(self):
tabs = (ContainersTab(), ImagesTab(),)
TabFrame.__init__(self, tabs)
def make_header(self, tabs):
"""
Generate the frame header.
"""
columns = urwid.Columns([])
columns.title = urwid.Text("docker-console 0.1.0")
columns.tabs = TabFrame.make_header(self, tabs)
columns.contents = [
(columns.title, columns.options('weight', 1)),
(columns.tabs, columns.options('weight', 2)),
]
return columns | 0.467089 | 0.223144 |
import unittest
# internal distend imports
from distend import serializer
class TestSerializer(unittest.TestCase):
def test_get_replace_function(self):
"""test serializer.get_replace_function,
returns replace_multiple function
"""
replace_multiple_true = True
replace_multiple_false = False
replace_true = serializer.get_replace_function(replace_multiple_true)
replace_false = serializer.get_replace_function(replace_multiple_false)
self.assertTrue(callable(replace_true), 'return is not callable')
self.assertTrue(callable(replace_false), 'return is not callable')
self.assertEqual(replace_true.__name__, 'replace_multiple',
"not replace_multiple when flag is true")
self.assertEqual(replace_false.__name__, 'replace_single',
"not replace_single when flag is false")
def test_get_pre_postpend_function_with_list_prepend_list_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_list_prepend_list_postpend
"""
prepend, postpend = (['1972', '1973'], ['1984', '1985'])
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_list_prepend_list_postpend',
"expected fuse_list_prepend_list_postpend")
def test_get_pre_postpend_function_with_list_prepend_str_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_list_prepend_str_postpend
"""
prepend, postpend = (['1972', '1973'], '1984')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_list_prepend_str_postpend',
"expected fuse_list_prepend_str_postpend")
def test_get_pre_postpend_function_with_str_prepend_str_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_str_prepend_str_postpend
"""
prepend, postpend = ('1972', '1984')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_str_prepend_str_postpend',
"expected fuse_str_prepend_str_postpend")
def test_get_pre_postpend_function_with_str_prepend_list_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_str_prepend_list_postpend
"""
prepend, postpend = ('1972', ['1984', '1985'])
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_str_prepend_list_postpend',
"expected fuse_str_prepend_list_postpend")
def test_get_pre_postpend_function_with_list_prepend_no_postpend(self):
"""test serializer.get_pre_postpend_function
returns fuse_list_prepend_no_postpend
"""
prepend, postpend = (['1972', '1973'], '')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_list_prepend_no_postpend',
"expected fuse_list_prepend_no_postpend")
def test_get_pre_postpend_function_with_str_prepend_no_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_str_prepend_no_postpend
"""
prepend, postpend = ('1972', '')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_str_prepend_no_postpend',
"expected fuse_str_prepend_no_postpend")
def test_get_pre_postpend_function_with_no_prepend_list_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_no_prepend_list_postpend
"""
prepend, postpend = ('', ['1984', '1985'])
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_no_prepend_list_postpend',
"expected fuse_no_prepend_list_postpend")
def test_get_pre_postpend_function_with_no_prepend_str_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_no_prepend_str_postpend
"""
prepend, postpend = ('', '1984')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_no_prepend_str_postpend',
"expected fuse_no_prepend_str_postpend")
def test_get_pre_postpend_function_with_no_prepend_no_postpend(self):
"""test serializer.get_pre_postpend_function, returns none"""
prepend, postpend = ('', '')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertIsNone(fuse, "expected none")
if __name__ == '__main__':
unittest.main() | tests/test_serializer.py | import unittest
# internal distend imports
from distend import serializer
class TestSerializer(unittest.TestCase):
def test_get_replace_function(self):
"""test serializer.get_replace_function,
returns replace_multiple function
"""
replace_multiple_true = True
replace_multiple_false = False
replace_true = serializer.get_replace_function(replace_multiple_true)
replace_false = serializer.get_replace_function(replace_multiple_false)
self.assertTrue(callable(replace_true), 'return is not callable')
self.assertTrue(callable(replace_false), 'return is not callable')
self.assertEqual(replace_true.__name__, 'replace_multiple',
"not replace_multiple when flag is true")
self.assertEqual(replace_false.__name__, 'replace_single',
"not replace_single when flag is false")
def test_get_pre_postpend_function_with_list_prepend_list_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_list_prepend_list_postpend
"""
prepend, postpend = (['1972', '1973'], ['1984', '1985'])
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_list_prepend_list_postpend',
"expected fuse_list_prepend_list_postpend")
def test_get_pre_postpend_function_with_list_prepend_str_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_list_prepend_str_postpend
"""
prepend, postpend = (['1972', '1973'], '1984')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_list_prepend_str_postpend',
"expected fuse_list_prepend_str_postpend")
def test_get_pre_postpend_function_with_str_prepend_str_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_str_prepend_str_postpend
"""
prepend, postpend = ('1972', '1984')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_str_prepend_str_postpend',
"expected fuse_str_prepend_str_postpend")
def test_get_pre_postpend_function_with_str_prepend_list_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_str_prepend_list_postpend
"""
prepend, postpend = ('1972', ['1984', '1985'])
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_str_prepend_list_postpend',
"expected fuse_str_prepend_list_postpend")
def test_get_pre_postpend_function_with_list_prepend_no_postpend(self):
"""test serializer.get_pre_postpend_function
returns fuse_list_prepend_no_postpend
"""
prepend, postpend = (['1972', '1973'], '')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_list_prepend_no_postpend',
"expected fuse_list_prepend_no_postpend")
def test_get_pre_postpend_function_with_str_prepend_no_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_str_prepend_no_postpend
"""
prepend, postpend = ('1972', '')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_str_prepend_no_postpend',
"expected fuse_str_prepend_no_postpend")
def test_get_pre_postpend_function_with_no_prepend_list_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_no_prepend_list_postpend
"""
prepend, postpend = ('', ['1984', '1985'])
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_no_prepend_list_postpend',
"expected fuse_no_prepend_list_postpend")
def test_get_pre_postpend_function_with_no_prepend_str_postpend(self):
"""test serializer.get_pre_postpend_function,
returns fuse_no_prepend_str_postpend
"""
prepend, postpend = ('', '1984')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertTrue(callable(fuse), 'return is not callable')
self.assertEqual(fuse.__name__, 'fuse_no_prepend_str_postpend',
"expected fuse_no_prepend_str_postpend")
def test_get_pre_postpend_function_with_no_prepend_no_postpend(self):
"""test serializer.get_pre_postpend_function, returns none"""
prepend, postpend = ('', '')
fuse = serializer.get_pre_postpend_function(prepend, postpend)
self.assertIsNone(fuse, "expected none")
if __name__ == '__main__':
unittest.main() | 0.643889 | 0.292734 |
from discord.ext.commands.errors import BadArgument, MissingRequiredArgument, CommandNotFound
import asyncio
class EventRegistry:
def __init__(self, instance):
self.instance = instance
self.add_handler(self._on_message_process_commands, 1000, event_name="on_message")
self.add_handler(self._on_ready_log, 1000, event_name="on_ready")
self.add_handler(self._on_command_error, 1000, event_name="on_command_error")
def add_handler(self, handler, priority, event_name=None):
if not event_name:
event_name = handler.__name__
if not hasattr(self, event_name):
setattr(self, event_name, [])
async def super_handler(*args, **kwargs):
for h, p in getattr(self, event_name):
try:
if await h(*args, **kwargs):
return
except Exception as e:
self.instance.logger.exception("Error running event {} on handler {}".format(event_name, handler.__name__))
super_handler.__name__ = event_name
self.instance.discord_bot.event(super_handler)
handler_list = getattr(self, event_name)
handler_list.append((handler, priority))
handler_list.sort(key=lambda x: x[1], reverse=True)
# Default event handlers
async def _on_message_process_commands(self, message):
await self.instance.discord_bot.process_commands(message)
async def _on_ready_log(self):
self.instance.logger.info('Successfully logged in. Name: "{0.name}". ID: {0.id}'.format(self.instance.discord_bot.user))
async def _on_command_error(self, ctx, error):
to_delete = []
raise_error = False
if isinstance(error, BadArgument) or isinstance(error, MissingRequiredArgument):
command = next(filter(lambda x: x.name == ctx.invoked_with, ctx.bot.commands))
to_delete.append(await ctx.send("**Error:** *{}*\n*This message will delete automatically*".format(error.args[0])))
for page in await ctx.bot.formatter.format_help_for(ctx, command):
to_delete.append(await ctx.send(page))
elif isinstance(error, CommandNotFound):
self.instance.logger.debug(error.args[0])
else:
to_delete.append(await ctx.send("Unknown error occurred when processing command *{}*.\n*This message will delete automatically*".format(ctx.invoked_with)))
raise_error = True
try:
if to_delete:
await asyncio.sleep(15)
await ctx.channel.delete_messages(to_delete)
except Exception as e:
pass
if raise_error:
raise Exception("Command {} raised an exception".format(ctx.invoked_with)) from error | src/bot/events.py | from discord.ext.commands.errors import BadArgument, MissingRequiredArgument, CommandNotFound
import asyncio
class EventRegistry:
def __init__(self, instance):
self.instance = instance
self.add_handler(self._on_message_process_commands, 1000, event_name="on_message")
self.add_handler(self._on_ready_log, 1000, event_name="on_ready")
self.add_handler(self._on_command_error, 1000, event_name="on_command_error")
def add_handler(self, handler, priority, event_name=None):
if not event_name:
event_name = handler.__name__
if not hasattr(self, event_name):
setattr(self, event_name, [])
async def super_handler(*args, **kwargs):
for h, p in getattr(self, event_name):
try:
if await h(*args, **kwargs):
return
except Exception as e:
self.instance.logger.exception("Error running event {} on handler {}".format(event_name, handler.__name__))
super_handler.__name__ = event_name
self.instance.discord_bot.event(super_handler)
handler_list = getattr(self, event_name)
handler_list.append((handler, priority))
handler_list.sort(key=lambda x: x[1], reverse=True)
# Default event handlers
async def _on_message_process_commands(self, message):
await self.instance.discord_bot.process_commands(message)
async def _on_ready_log(self):
self.instance.logger.info('Successfully logged in. Name: "{0.name}". ID: {0.id}'.format(self.instance.discord_bot.user))
async def _on_command_error(self, ctx, error):
to_delete = []
raise_error = False
if isinstance(error, BadArgument) or isinstance(error, MissingRequiredArgument):
command = next(filter(lambda x: x.name == ctx.invoked_with, ctx.bot.commands))
to_delete.append(await ctx.send("**Error:** *{}*\n*This message will delete automatically*".format(error.args[0])))
for page in await ctx.bot.formatter.format_help_for(ctx, command):
to_delete.append(await ctx.send(page))
elif isinstance(error, CommandNotFound):
self.instance.logger.debug(error.args[0])
else:
to_delete.append(await ctx.send("Unknown error occurred when processing command *{}*.\n*This message will delete automatically*".format(ctx.invoked_with)))
raise_error = True
try:
if to_delete:
await asyncio.sleep(15)
await ctx.channel.delete_messages(to_delete)
except Exception as e:
pass
if raise_error:
raise Exception("Command {} raised an exception".format(ctx.invoked_with)) from error | 0.510985 | 0.061989 |
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QPen
from PyQt5.QtCore import Qt, QRect
class Cardinal(QWidget):
def __init__(self):
super().__init__()
self.setGeometry(280, 170, 600, 600)
self.scale = 3
self.offset_X = 0
self.offset_Y = 0
self.show_text = True
self.centerY = self.height() / 2
self.centerX = self.width() / 2
self.qp = QPainter()
self.points = []
self.setFixedSize(self.width(), self.height())
self.center_point = None
self.detected = []
self.active = True
def deactivate(self):
"""
Hides and set the cardinal as inactive
:return:
"""
self.active = False
self.hide()
def activate(self):
"""
Shows and set the cardinal as active
:return:
"""
self.active = True
self.show()
def keyPressEvent(self, event):
"""
Handles the offset in the X and Y axis depends on the pressed key
and the scale
:param event:
:return:
"""
if event.key() == Qt.Key_Left:
self.offset_X += 1
self.repaint()
elif event.key() == Qt.Key_Up:
self.offset_Y += 1
self.repaint()
elif event.key() == Qt.Key_Down:
self.offset_Y -= 1
self.repaint()
elif event.key() == Qt.Key_Right:
self.offset_X -= 1
self.repaint()
elif event.key() == Qt.Key_Plus or event.key() == Qt.Key_Equal:
self.scale += 0.5
self.repaint()
elif event.key() == Qt.Key_Minus:
self.scale -= 0.5
self.repaint()
def wheelEvent(self, event):
"""
updates the scale attribute based on the wheel turning.
:param event:
:return:
"""
if event.angleDelta().y() > 0:
self.scale += 0.5
else:
if self.scale > 0:
self.scale -= 0.5
self.repaint()
def update(self, detected=None):
if detected:
self.detected = detected
self.repaint()
def translate_point(self, x, y, label_offset=0):
return self.centerX-label_offset+(x+self.offset_X)*self.scale, self.centerY-(y-self.offset_Y)*self.scale
def draw_tasks(self):
pass
def draw_rectangle(self, point1=None, point2=None):
t_point1 = self.translate_point(*point1)
width = point2[0] - point1[0]
height = point2[1] - point1[1]
self.qp.drawRect(*t_point1, width, height)
def draw_point(self, x, y, uiid=None):
self.qp.drawPoint(*self.translate_point(x, y))
if self.show_text:
label = "X:" + str(x) + " Y:" + str(y)
self.qp.drawText(*self.translate_point(x, y, label_offset=len(label)), label)
def paintEvent(self, event):
if self.active:
self.draw_cardinal_canvas(event)
def draw_cardinal_canvas(self, event):
pen = QPen()
pen.setWidth(2)
pen.setColor(QColor(0, 0, 0))
self.centerY = self.width() / 2
self.centerX = self.height() / 2
self.qp.begin(self)
self.qp.setPen(pen)
self.qp.fillRect(QRect(0, 0, self.height(), self.width()), Qt.white)
self.qp.drawLine(self.width() // 2 + self.offset_X * self.scale, 0,
self.width() // 2 + self.offset_X * self.scale, self.height())
self.qp.drawLine(0, self.height() // 2 + self.offset_Y * self.scale,
self.width(), self.height() // 2 + self.offset_Y * self.scale)
pen.setColor(QColor(156, 91, 28))
self.qp.setPen(pen)
# self.draw_rectangle(qp)
pen.setColor(QColor(0, 179, 0))
self.qp.setPen(pen)
if self.center_point:
self.draw_point(*self.center_point)
for i, p in enumerate(self.points):
if p in self.detected:
pen.setColor(QColor(153, 0, 0))
self.qp.setPen(pen)
self.draw_point(*p)
else:
pen.setColor(QColor(0, 69, 88))
self.qp.setPen(pen)
self.draw_point(*p)
self.qp.end() | core/cardinal.py | import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QPen
from PyQt5.QtCore import Qt, QRect
class Cardinal(QWidget):
def __init__(self):
super().__init__()
self.setGeometry(280, 170, 600, 600)
self.scale = 3
self.offset_X = 0
self.offset_Y = 0
self.show_text = True
self.centerY = self.height() / 2
self.centerX = self.width() / 2
self.qp = QPainter()
self.points = []
self.setFixedSize(self.width(), self.height())
self.center_point = None
self.detected = []
self.active = True
def deactivate(self):
"""
Hides and set the cardinal as inactive
:return:
"""
self.active = False
self.hide()
def activate(self):
"""
Shows and set the cardinal as active
:return:
"""
self.active = True
self.show()
def keyPressEvent(self, event):
"""
Handles the offset in the X and Y axis depends on the pressed key
and the scale
:param event:
:return:
"""
if event.key() == Qt.Key_Left:
self.offset_X += 1
self.repaint()
elif event.key() == Qt.Key_Up:
self.offset_Y += 1
self.repaint()
elif event.key() == Qt.Key_Down:
self.offset_Y -= 1
self.repaint()
elif event.key() == Qt.Key_Right:
self.offset_X -= 1
self.repaint()
elif event.key() == Qt.Key_Plus or event.key() == Qt.Key_Equal:
self.scale += 0.5
self.repaint()
elif event.key() == Qt.Key_Minus:
self.scale -= 0.5
self.repaint()
def wheelEvent(self, event):
"""
updates the scale attribute based on the wheel turning.
:param event:
:return:
"""
if event.angleDelta().y() > 0:
self.scale += 0.5
else:
if self.scale > 0:
self.scale -= 0.5
self.repaint()
def update(self, detected=None):
if detected:
self.detected = detected
self.repaint()
def translate_point(self, x, y, label_offset=0):
return self.centerX-label_offset+(x+self.offset_X)*self.scale, self.centerY-(y-self.offset_Y)*self.scale
def draw_tasks(self):
pass
def draw_rectangle(self, point1=None, point2=None):
t_point1 = self.translate_point(*point1)
width = point2[0] - point1[0]
height = point2[1] - point1[1]
self.qp.drawRect(*t_point1, width, height)
def draw_point(self, x, y, uiid=None):
self.qp.drawPoint(*self.translate_point(x, y))
if self.show_text:
label = "X:" + str(x) + " Y:" + str(y)
self.qp.drawText(*self.translate_point(x, y, label_offset=len(label)), label)
def paintEvent(self, event):
if self.active:
self.draw_cardinal_canvas(event)
def draw_cardinal_canvas(self, event):
pen = QPen()
pen.setWidth(2)
pen.setColor(QColor(0, 0, 0))
self.centerY = self.width() / 2
self.centerX = self.height() / 2
self.qp.begin(self)
self.qp.setPen(pen)
self.qp.fillRect(QRect(0, 0, self.height(), self.width()), Qt.white)
self.qp.drawLine(self.width() // 2 + self.offset_X * self.scale, 0,
self.width() // 2 + self.offset_X * self.scale, self.height())
self.qp.drawLine(0, self.height() // 2 + self.offset_Y * self.scale,
self.width(), self.height() // 2 + self.offset_Y * self.scale)
pen.setColor(QColor(156, 91, 28))
self.qp.setPen(pen)
# self.draw_rectangle(qp)
pen.setColor(QColor(0, 179, 0))
self.qp.setPen(pen)
if self.center_point:
self.draw_point(*self.center_point)
for i, p in enumerate(self.points):
if p in self.detected:
pen.setColor(QColor(153, 0, 0))
self.qp.setPen(pen)
self.draw_point(*p)
else:
pen.setColor(QColor(0, 69, 88))
self.qp.setPen(pen)
self.draw_point(*p)
self.qp.end() | 0.443841 | 0.254657 |
import hashlib
import base64
from Crypto import Random
from Crypto.Cipher import AES
from lib.crypt import crypt
from Crypto.Cipher import Blowfish
class AESCipher:
def __init__(self, key, src_filepath, dst_filepath):
self.src_filepath = src_filepath
self.dst_filepath = dst_filepath
self.bs = AES.block_size
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self):
with open(self.src_filepath, "rb") as f:
plaintext_base64 = base64.b64encode(f.read())
raw = self._pad(str(plaintext_base64, "latin-1"))
iv = Random.new().read(self.bs)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
f.write(iv + cipher.encrypt(bytes(raw, "latin-1")))
def decrypt(self):
with open(self.src_filepath, "rb") as f:
enc = f.read()
iv = enc[:self.bs]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
decrypted_base64 = cipher.decrypt(enc[self.bs:])
f.write(base64.b64decode(bytes(self._unpad(str(decrypted_base64, "latin-1")), "latin-1")))
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
class CryptCipher:
def __init__(self, key, src_filepath, dst_filepath):
self.src_filepath = src_filepath
self.dst_filepath = dst_filepath
self.key = key
def encrypt(self):
crypt.XORFile(self.src_filepath, self.key).encrypt(self.dst_filepath)
def decrypt(self):
crypt.XORFile(self.src_filepath, self.key).decrypt(self.dst_filepath)
class BlowfishCipher:
def __init__(self, key, src_filepath, dst_filepath):
self.src_filepath = src_filepath
self.dst_filepath = dst_filepath
self.bs = Blowfish.block_size
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self):
with open(self.src_filepath, "rb") as f:
plaintext_base64 = base64.b64encode(f.read())
raw = self._pad(str(plaintext_base64, "latin-1"))
iv = Random.new().read(self.bs)
cipher = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
f.write(iv + cipher.encrypt(bytes(raw, "latin-1")))
def decrypt(self):
with open(self.src_filepath, "rb") as f:
enc = f.read()
iv = enc[:self.bs]
cipher = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
decrypted_base64 = cipher.decrypt(enc[self.bs:])
f.write(base64.b64decode(bytes(self._unpad(str(decrypted_base64, "latin-1")), "latin-1")))
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])] | algorithms.py | import hashlib
import base64
from Crypto import Random
from Crypto.Cipher import AES
from lib.crypt import crypt
from Crypto.Cipher import Blowfish
class AESCipher:
def __init__(self, key, src_filepath, dst_filepath):
self.src_filepath = src_filepath
self.dst_filepath = dst_filepath
self.bs = AES.block_size
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self):
with open(self.src_filepath, "rb") as f:
plaintext_base64 = base64.b64encode(f.read())
raw = self._pad(str(plaintext_base64, "latin-1"))
iv = Random.new().read(self.bs)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
f.write(iv + cipher.encrypt(bytes(raw, "latin-1")))
def decrypt(self):
with open(self.src_filepath, "rb") as f:
enc = f.read()
iv = enc[:self.bs]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
decrypted_base64 = cipher.decrypt(enc[self.bs:])
f.write(base64.b64decode(bytes(self._unpad(str(decrypted_base64, "latin-1")), "latin-1")))
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
class CryptCipher:
def __init__(self, key, src_filepath, dst_filepath):
self.src_filepath = src_filepath
self.dst_filepath = dst_filepath
self.key = key
def encrypt(self):
crypt.XORFile(self.src_filepath, self.key).encrypt(self.dst_filepath)
def decrypt(self):
crypt.XORFile(self.src_filepath, self.key).decrypt(self.dst_filepath)
class BlowfishCipher:
def __init__(self, key, src_filepath, dst_filepath):
self.src_filepath = src_filepath
self.dst_filepath = dst_filepath
self.bs = Blowfish.block_size
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self):
with open(self.src_filepath, "rb") as f:
plaintext_base64 = base64.b64encode(f.read())
raw = self._pad(str(plaintext_base64, "latin-1"))
iv = Random.new().read(self.bs)
cipher = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
f.write(iv + cipher.encrypt(bytes(raw, "latin-1")))
def decrypt(self):
with open(self.src_filepath, "rb") as f:
enc = f.read()
iv = enc[:self.bs]
cipher = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)
with open(self.dst_filepath, "wb") as f:
decrypted_base64 = cipher.decrypt(enc[self.bs:])
f.write(base64.b64decode(bytes(self._unpad(str(decrypted_base64, "latin-1")), "latin-1")))
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])] | 0.591605 | 0.162181 |
import torch
import torch.nn.functional as F
import dgl
from dgl.nn import GraphConv, AvgPooling, MaxPooling
from utils import topk, get_batch_id
class SAGPool(torch.nn.Module):
"""The Self-Attention Pooling layer in paper
`Self Attention Graph Pooling <https://arxiv.org/pdf/1904.08082.pdf>`
Args:
in_dim (int): The dimension of node feature.
ratio (float, optional): The pool ratio which determines the amount of nodes
remain after pooling. (default: :obj:`0.5`)
conv_op (torch.nn.Module, optional): The graph convolution layer in dgl used to
compute scale for each node. (default: :obj:`dgl.nn.GraphConv`)
non_linearity (Callable, optional): The non-linearity function, a pytorch function.
(default: :obj:`torch.tanh`)
"""
def __init__(self, in_dim:int, ratio=0.5, conv_op=GraphConv, non_linearity=torch.tanh):
super(SAGPool, self).__init__()
self.in_dim = in_dim
self.ratio = ratio
self.score_layer = conv_op(in_dim, 1)
self.non_linearity = non_linearity
def forward(self, graph:dgl.DGLGraph, feature:torch.Tensor):
score = self.score_layer(graph, feature).squeeze()
perm, next_batch_num_nodes = topk(score, self.ratio, get_batch_id(graph.batch_num_nodes()), graph.batch_num_nodes())
feature = feature[perm] * self.non_linearity(score[perm]).view(-1, 1)
graph = dgl.node_subgraph(graph, perm)
# node_subgraph currently does not support batch-graph,
# the 'batch_num_nodes' of the result subgraph is None.
# So we manually set the 'batch_num_nodes' here.
# Since global pooling has nothing to do with 'batch_num_edges',
# we can leave it to be None or unchanged.
graph.set_batch_num_nodes(next_batch_num_nodes)
return graph, feature, perm
class ConvPoolBlock(torch.nn.Module):
"""A combination of GCN layer and SAGPool layer,
followed by a concatenated (mean||sum) readout operation.
"""
def __init__(self, in_dim:int, out_dim:int, pool_ratio=0.8):
super(ConvPoolBlock, self).__init__()
self.conv = GraphConv(in_dim, out_dim)
self.pool = SAGPool(out_dim, ratio=pool_ratio)
self.avgpool = AvgPooling()
self.maxpool = MaxPooling()
def forward(self, graph, feature):
out = F.relu(self.conv(graph, feature))
graph, out, _ = self.pool(graph, out)
g_out = torch.cat([self.avgpool(graph, out), self.maxpool(graph, out)], dim=-1)
return graph, out, g_out | examples/pytorch/sagpool/layer.py | import torch
import torch.nn.functional as F
import dgl
from dgl.nn import GraphConv, AvgPooling, MaxPooling
from utils import topk, get_batch_id
class SAGPool(torch.nn.Module):
"""The Self-Attention Pooling layer in paper
`Self Attention Graph Pooling <https://arxiv.org/pdf/1904.08082.pdf>`
Args:
in_dim (int): The dimension of node feature.
ratio (float, optional): The pool ratio which determines the amount of nodes
remain after pooling. (default: :obj:`0.5`)
conv_op (torch.nn.Module, optional): The graph convolution layer in dgl used to
compute scale for each node. (default: :obj:`dgl.nn.GraphConv`)
non_linearity (Callable, optional): The non-linearity function, a pytorch function.
(default: :obj:`torch.tanh`)
"""
def __init__(self, in_dim:int, ratio=0.5, conv_op=GraphConv, non_linearity=torch.tanh):
super(SAGPool, self).__init__()
self.in_dim = in_dim
self.ratio = ratio
self.score_layer = conv_op(in_dim, 1)
self.non_linearity = non_linearity
def forward(self, graph:dgl.DGLGraph, feature:torch.Tensor):
score = self.score_layer(graph, feature).squeeze()
perm, next_batch_num_nodes = topk(score, self.ratio, get_batch_id(graph.batch_num_nodes()), graph.batch_num_nodes())
feature = feature[perm] * self.non_linearity(score[perm]).view(-1, 1)
graph = dgl.node_subgraph(graph, perm)
# node_subgraph currently does not support batch-graph,
# the 'batch_num_nodes' of the result subgraph is None.
# So we manually set the 'batch_num_nodes' here.
# Since global pooling has nothing to do with 'batch_num_edges',
# we can leave it to be None or unchanged.
graph.set_batch_num_nodes(next_batch_num_nodes)
return graph, feature, perm
class ConvPoolBlock(torch.nn.Module):
"""A combination of GCN layer and SAGPool layer,
followed by a concatenated (mean||sum) readout operation.
"""
def __init__(self, in_dim:int, out_dim:int, pool_ratio=0.8):
super(ConvPoolBlock, self).__init__()
self.conv = GraphConv(in_dim, out_dim)
self.pool = SAGPool(out_dim, ratio=pool_ratio)
self.avgpool = AvgPooling()
self.maxpool = MaxPooling()
def forward(self, graph, feature):
out = F.relu(self.conv(graph, feature))
graph, out, _ = self.pool(graph, out)
g_out = torch.cat([self.avgpool(graph, out), self.maxpool(graph, out)], dim=-1)
return graph, out, g_out | 0.963239 | 0.649745 |
from hypothesis import given
from tests.utils import (KeysView,
KeysViewsPair,
KeysViewsTriplet,
is_left_subtree_less_than_right_subtree,
to_height,
to_max_binary_tree_height,
to_min_binary_tree_height)
from . import strategies
@given(strategies.keys_views_pairs)
def test_type(keys_views_pair: KeysViewsPair) -> None:
    """Intersection preserves the concrete keys-view type of the left operand."""
    first, second = keys_views_pair
    intersection = first & second
    assert isinstance(intersection, type(first))
@given(strategies.keys_views_pairs)
def test_properties(keys_views_pair: KeysViewsPair) -> None:
    """Structural invariants of an intersection: size and height bounds,
    membership of every resulting key in both operands, non-disjointness
    with each operand when non-empty, and the binary-search-tree ordering
    of the underlying tree."""
    first, second = keys_views_pair

    intersection = first & second

    tree = intersection.tree
    # Size is bounded by the smaller operand.
    assert len(intersection) <= min(len(first), len(second))
    # Height lies between the theoretical minimum and both operands'
    # heights capped by the theoretical maximum.
    height = to_height(tree)
    assert to_min_binary_tree_height(tree) <= height
    assert height <= min(to_height(first.tree),
                         to_height(second.tree),
                         to_max_binary_tree_height(tree))
    # Every surviving key belongs to both operands.
    assert all(key in first and key in second for key in intersection)
    # A non-empty intersection shares elements with each operand.
    if intersection:
        assert not intersection.isdisjoint(first)
        assert not intersection.isdisjoint(second)
    assert is_left_subtree_less_than_right_subtree(tree)
@given(strategies.keys_views)
def test_idempotence(keys_view: KeysView) -> None:
    """Intersecting a keys view with itself yields an equal view."""
    assert (keys_view & keys_view) == keys_view
@given(strategies.empty_keys_views_with_keys_views)
def test_left_absorbing_element(empty_tree_with_tree: KeysViewsPair) -> None:
    """An empty keys view on the left absorbs: the intersection is empty."""
    empty_view, keys_view = empty_tree_with_tree

    intersection = empty_view & keys_view

    assert not intersection
    assert len(intersection) == 0
@given(strategies.empty_keys_views_with_keys_views)
def test_right_absorbing_element(empty_tree_with_tree: KeysViewsPair) -> None:
empty_tree, keys_view = empty_tree_with_tree
result = keys_view & empty_tree
assert len(result) == 0
assert not result
@given(strategies.keys_views_pairs)
def test_absorption_identity(keys_views_pair: KeysViewsPair) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & (left_keys_view | right_keys_view)
assert result == left_keys_view
@given(strategies.keys_views_pairs)
def test_commutativity(keys_views_pair: KeysViewsPair) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & right_keys_view
assert result == right_keys_view & left_keys_view
@given(strategies.keys_views_triplets)
def test_associativity(keys_views_triplet: KeysViewsTriplet) -> None:
left_keys_view, mid_tree, right_keys_view = keys_views_triplet
result = (left_keys_view & mid_tree) & right_keys_view
assert result == left_keys_view & (mid_tree & right_keys_view)
@given(strategies.keys_views_triplets)
def test_difference_operand(keys_views_triplet: KeysViewsTriplet) -> None:
left_keys_view, mid_tree, right_keys_view = keys_views_triplet
result = (left_keys_view - mid_tree) & right_keys_view
assert result == (left_keys_view & right_keys_view) - mid_tree
@given(strategies.keys_views_triplets)
def test_distribution_over_union(keys_views_triplet: KeysViewsTriplet) -> None:
left_keys_view, mid_tree, right_keys_view = keys_views_triplet
result = left_keys_view & (mid_tree | right_keys_view)
assert result == ((left_keys_view & mid_tree)
| (left_keys_view & right_keys_view))
@given(strategies.keys_views_pairs)
def test_connection_with_subset_relation(keys_views_pair: KeysViewsPair
) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & right_keys_view
assert result <= left_keys_view and result <= right_keys_view | tests/views_tests/keys_view_tests/test_intersect.py | from hypothesis import given
from tests.utils import (KeysView,
KeysViewsPair,
KeysViewsTriplet,
is_left_subtree_less_than_right_subtree,
to_height,
to_max_binary_tree_height,
to_min_binary_tree_height)
from . import strategies
@given(strategies.keys_views_pairs)
def test_type(keys_views_pair: KeysViewsPair) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & right_keys_view
assert isinstance(result, type(left_keys_view))
@given(strategies.keys_views_pairs)
def test_properties(keys_views_pair: KeysViewsPair) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & right_keys_view
result_tree = result.tree
assert len(result) <= min(len(left_keys_view), len(right_keys_view))
assert (to_min_binary_tree_height(result_tree)
<= to_height(result_tree)
<= min(to_height(left_keys_view.tree),
to_height(right_keys_view.tree),
to_max_binary_tree_height(result_tree)))
assert all(key in left_keys_view and key in right_keys_view
for key in result)
assert (not result
or not result.isdisjoint(left_keys_view)
and not result.isdisjoint(right_keys_view))
assert is_left_subtree_less_than_right_subtree(result_tree)
@given(strategies.keys_views)
def test_idempotence(keys_view: KeysView) -> None:
result = keys_view & keys_view
assert result == keys_view
@given(strategies.empty_keys_views_with_keys_views)
def test_left_absorbing_element(empty_tree_with_tree: KeysViewsPair) -> None:
empty_tree, keys_view = empty_tree_with_tree
result = empty_tree & keys_view
assert len(result) == 0
assert not result
@given(strategies.empty_keys_views_with_keys_views)
def test_right_absorbing_element(empty_tree_with_tree: KeysViewsPair) -> None:
empty_tree, keys_view = empty_tree_with_tree
result = keys_view & empty_tree
assert len(result) == 0
assert not result
@given(strategies.keys_views_pairs)
def test_absorption_identity(keys_views_pair: KeysViewsPair) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & (left_keys_view | right_keys_view)
assert result == left_keys_view
@given(strategies.keys_views_pairs)
def test_commutativity(keys_views_pair: KeysViewsPair) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & right_keys_view
assert result == right_keys_view & left_keys_view
@given(strategies.keys_views_triplets)
def test_associativity(keys_views_triplet: KeysViewsTriplet) -> None:
left_keys_view, mid_tree, right_keys_view = keys_views_triplet
result = (left_keys_view & mid_tree) & right_keys_view
assert result == left_keys_view & (mid_tree & right_keys_view)
@given(strategies.keys_views_triplets)
def test_difference_operand(keys_views_triplet: KeysViewsTriplet) -> None:
left_keys_view, mid_tree, right_keys_view = keys_views_triplet
result = (left_keys_view - mid_tree) & right_keys_view
assert result == (left_keys_view & right_keys_view) - mid_tree
@given(strategies.keys_views_triplets)
def test_distribution_over_union(keys_views_triplet: KeysViewsTriplet) -> None:
left_keys_view, mid_tree, right_keys_view = keys_views_triplet
result = left_keys_view & (mid_tree | right_keys_view)
assert result == ((left_keys_view & mid_tree)
| (left_keys_view & right_keys_view))
@given(strategies.keys_views_pairs)
def test_connection_with_subset_relation(keys_views_pair: KeysViewsPair
) -> None:
left_keys_view, right_keys_view = keys_views_pair
result = left_keys_view & right_keys_view
assert result <= left_keys_view and result <= right_keys_view | 0.875335 | 0.696359 |
import argparse
import pathlib
import random
import sys
import time
import cv2 as cv
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from matplotlib import colors
filename2class = {}
filename2class["n"] = "background"
filename2class["y"] = "yellow"
filename2class["r"] = "red"
filename2class["m"] = "magenta"
filename2class["b"] = "blue"
filename2class["c"] = "cyan"
filename2class["g"] = "green"
def class2bgr(pixel_class):
if pixel_class == "background":
color = "black"
else:
color = pixel_class
rgb = np.array(colors.to_rgba(color)[:3])
rgb = (rgb * 255).astype("uint8")
return rgb[::-1]
def compute_identical_fraction(a, b):
assert a.shape == b.shape
identical_fraction = (a == b).sum() / a.size
return identical_fraction
def create_channels(image_bgr):
conversions = {
"hsv": cv.COLOR_BGR2HSV,
"xyz": cv.COLOR_BGR2XYZ,
"LAB": cv.COLOR_BGR2Lab,
# "LUV": cv.COLOR_BGR2Luv,
}
channels = {"bgr"[i]: image_bgr[:, :, i] for i in range(3)}
for key in conversions:
image = cv.cvtColor(image_bgr, conversions[key])
new_channels = {key[i]: image[:, :, i] for i in range(len(key))}
channels = {**channels, **new_channels}
return channels
def create_features(image_bgr, flatten=False):
image_bgr = cv.medianBlur(image_bgr, 7)
channels = create_channels(image_bgr=image_bgr)
if flatten:
channels = {key: channels[key].flatten() for key in channels}
return channels, image_bgr.shape[:2]
def load_segment(path: pathlib.Path, name: str) -> pd.DataFrame:
image = cv.imread(str(path / ("camera" + name[1:])))
features, shape = create_features(image_bgr=image, flatten=True)
data = pd.DataFrame(features)
mask = cv.imread(str(path / name))
mask = mask.sum(axis=2) != 0
mask = mask.flatten()
data = data[mask]
data["class"] = filename2class[name[0]]
return data
def balance_classes(data, background_ratio, random_state):
foreground = data[data["class"] != "background"]
min_class_size = foreground["class"].value_counts().min()
foreground = foreground.groupby("class").apply(
lambda d: d.sample(min_class_size, random_state=random_state)
)
foreground = foreground.reset_index(drop=True)
background = data[data["class"] == "background"]
n_background_points = int(background_ratio * foreground.shape[0])
background = background.sample(
n_background_points, random_state=random_state
)
return pd.concat([foreground, background])
def get_subdirectories(input_path: pathlib.Path):
return [f for f in input_path.iterdir() if f.is_dir()]
def load_images_and_create_data(
input_path: pathlib.Path, output_filename: str
):
# go through the folders and load all the annotated images
# then compute features and create a pandas frame
print("loading images")
data = []
for frame_folder in get_subdirectories(input_path):
segment_names = [
f.name
for f in frame_folder.iterdir()
if f.is_file() and f.name[1].isdigit()
]
for segment_name in segment_names:
segment_data = load_segment(path=frame_folder, name=segment_name)
segment_data["frame"] = frame_folder.name
data.append(segment_data)
pd_data = pd.concat(data, axis="index")
pd_data.to_pickle(output_filename)
print("done loading images")
def prepare_data(
data: pd.DataFrame, feature_names, train_fraction, background_ratio, seed
):
# create training and test data from entire data frame
# we split according to frames, not single pixels
# to properly test generalization to other frames
frames = sorted(list(set(data["frame"])))
random.seed(seed)
random.shuffle(frames)
n_train_frames = int(len(frames) * train_fraction)
train_frames = frames[:n_train_frames]
test_frames = frames[n_train_frames:]
train_set = data.loc[data["frame"].isin(train_frames)]
train_set = balance_classes(
data=train_set, background_ratio=background_ratio, random_state=seed
)
train_set = train_set.sample(frac=1, random_state=seed)
test_set = data.loc[data["frame"].isin(test_frames)]
test_set = balance_classes(
data=test_set, background_ratio=background_ratio, random_state=seed
)
test_set = test_set.sample(frac=1, random_state=seed)
target = "class"
X_train = train_set[feature_names]
y_train = train_set[target]
X_test = test_set[feature_names]
y_test = test_set[target]
assert not set(train_set["frame"]).intersection(set(test_set["frame"]))
print(train_set["class"].value_counts())
print(test_set["class"].value_counts())
return X_train, y_train, X_test, y_test
def fit_model(X_train, y_train, seed=42):
model = XGBClassifier(
learning_rate=1.0,
n_estimators=1, # only one tree
n_jobs=8,
max_depth=6, # maximum tree depth
random_state=seed,
)
model.fit(X_train, y_train)
return model
def evaluate(model, X, y):
# compute and print fraction of correct labels
# also measure time
print("success rate: ", compute_identical_fraction(model.predict(X), y))
start = time.time()
model.predict(X)
end = time.time()
print("n evaluations: ", X.shape[0])
print("elapsed time: ", end - start)
def load_data_and_fit_model(input_filename, output_filename, feature_names):
print("preparing training data")
data = pd.read_pickle(input_filename)
X_train, y_train, X_test, y_test = prepare_data(
data=data,
feature_names=feature_names,
train_fraction=0.8,
background_ratio=20,
seed=22,
)
print("done preparing training data")
print("fitting model")
model = fit_model(X_train, y_train)
model.save_model(output_filename)
model.get_booster().dump_model(output_filename + "_dump.txt")
print("done fitting model")
print("test data ------------------------")
evaluate(model, X_test, y_test)
print("train data -----------------------")
evaluate(model, X_train, y_train)
def load_model_and_generate_evaluation_images(
model_filename,
input_path: pathlib.Path,
output_path: pathlib.Path,
feature_names,
):
model = XGBClassifier()
model.load_model(model_filename)
for frame_folder in sorted(get_subdirectories(input_path)):
segment_names = [
f.name
for f in frame_folder.iterdir()
if f.is_file() and f.name[1].isdigit()
]
if len(segment_names) != 0:
continue
for camera_name in ["60", "180", "300"]:
image_name = "camera" + camera_name + ".png"
print(frame_folder / image_name)
image_bgr = cv.imread(str(frame_folder / image_name))
features, shape = create_features(
image_bgr=image_bgr, flatten=True
)
X = pd.DataFrame(features)[feature_names]
y = model.predict(X)
segments = y.reshape(shape)
segments_bgr = [class2bgr(idx) for idx in segments.flatten()]
segments_bgr = np.array(segments_bgr).reshape(*shape, 3)
path = output_path / frame_folder.name
if not path.exists():
path.mkdir(parents=True)
image_and_segments_bgr = np.concatenate(
[image_bgr, segments_bgr], axis=1
)
segments_filename = "camera" + camera_name + "_segments" + ".png"
cv.imwrite(
filename=str(path / segments_filename),
img=image_and_segments_bgr,
)
def main():
color_space_features = {
"bgr": ["b", "g", "r"],
"hsv": ["h", "s", "v"],
"xyz": ["x", "y", "z"],
"Lab": ["L", "A", "B"],
}
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--train",
action="store_true",
help="Train a new model.",
)
parser.add_argument(
"--image-dir",
required=True,
type=pathlib.Path,
help="Directory containing the training data.",
)
parser.add_argument(
"--output-dir",
type=pathlib.Path,
help="Output directory for test run.",
)
parser.add_argument(
"--color-spaces",
nargs="+",
type=str,
choices=color_space_features.keys(),
default=["bgr", "hsv"],
help="Color spaces that are used as features.",
)
args = parser.parse_args()
feature_names = []
for color_space in args.color_spaces:
feature_names += color_space_features[color_space]
if args.train:
load_images_and_create_data(
input_path=args.image_dir, output_filename="data.pkl"
)
load_data_and_fit_model(
input_filename="data.pkl",
output_filename="xgb_model.bin",
feature_names=feature_names,
)
if args.output_dir:
load_model_and_generate_evaluation_images(
model_filename="xgb_model.bin",
input_path=args.image_dir,
output_path=args.output_dir,
feature_names=feature_names,
)
return 0
if __name__ == "__main__":
sys.exit(main()) | scripts/train_xgb_tree.py | import argparse
import pathlib
import random
import sys
import time
import cv2 as cv
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from matplotlib import colors
filename2class = {}
filename2class["n"] = "background"
filename2class["y"] = "yellow"
filename2class["r"] = "red"
filename2class["m"] = "magenta"
filename2class["b"] = "blue"
filename2class["c"] = "cyan"
filename2class["g"] = "green"
def class2bgr(pixel_class):
if pixel_class == "background":
color = "black"
else:
color = pixel_class
rgb = np.array(colors.to_rgba(color)[:3])
rgb = (rgb * 255).astype("uint8")
return rgb[::-1]
def compute_identical_fraction(a, b):
assert a.shape == b.shape
identical_fraction = (a == b).sum() / a.size
return identical_fraction
def create_channels(image_bgr):
conversions = {
"hsv": cv.COLOR_BGR2HSV,
"xyz": cv.COLOR_BGR2XYZ,
"LAB": cv.COLOR_BGR2Lab,
# "LUV": cv.COLOR_BGR2Luv,
}
channels = {"bgr"[i]: image_bgr[:, :, i] for i in range(3)}
for key in conversions:
image = cv.cvtColor(image_bgr, conversions[key])
new_channels = {key[i]: image[:, :, i] for i in range(len(key))}
channels = {**channels, **new_channels}
return channels
def create_features(image_bgr, flatten=False):
image_bgr = cv.medianBlur(image_bgr, 7)
channels = create_channels(image_bgr=image_bgr)
if flatten:
channels = {key: channels[key].flatten() for key in channels}
return channels, image_bgr.shape[:2]
def load_segment(path: pathlib.Path, name: str) -> pd.DataFrame:
image = cv.imread(str(path / ("camera" + name[1:])))
features, shape = create_features(image_bgr=image, flatten=True)
data = pd.DataFrame(features)
mask = cv.imread(str(path / name))
mask = mask.sum(axis=2) != 0
mask = mask.flatten()
data = data[mask]
data["class"] = filename2class[name[0]]
return data
def balance_classes(data, background_ratio, random_state):
foreground = data[data["class"] != "background"]
min_class_size = foreground["class"].value_counts().min()
foreground = foreground.groupby("class").apply(
lambda d: d.sample(min_class_size, random_state=random_state)
)
foreground = foreground.reset_index(drop=True)
background = data[data["class"] == "background"]
n_background_points = int(background_ratio * foreground.shape[0])
background = background.sample(
n_background_points, random_state=random_state
)
return pd.concat([foreground, background])
def get_subdirectories(input_path: pathlib.Path):
return [f for f in input_path.iterdir() if f.is_dir()]
def load_images_and_create_data(
input_path: pathlib.Path, output_filename: str
):
# go through the folders and load all the annotated images
# then compute features and create a pandas frame
print("loading images")
data = []
for frame_folder in get_subdirectories(input_path):
segment_names = [
f.name
for f in frame_folder.iterdir()
if f.is_file() and f.name[1].isdigit()
]
for segment_name in segment_names:
segment_data = load_segment(path=frame_folder, name=segment_name)
segment_data["frame"] = frame_folder.name
data.append(segment_data)
pd_data = pd.concat(data, axis="index")
pd_data.to_pickle(output_filename)
print("done loading images")
def prepare_data(
data: pd.DataFrame, feature_names, train_fraction, background_ratio, seed
):
# create training and test data from entire data frame
# we split according to frames, not single pixels
# to properly test generalization to other frames
frames = sorted(list(set(data["frame"])))
random.seed(seed)
random.shuffle(frames)
n_train_frames = int(len(frames) * train_fraction)
train_frames = frames[:n_train_frames]
test_frames = frames[n_train_frames:]
train_set = data.loc[data["frame"].isin(train_frames)]
train_set = balance_classes(
data=train_set, background_ratio=background_ratio, random_state=seed
)
train_set = train_set.sample(frac=1, random_state=seed)
test_set = data.loc[data["frame"].isin(test_frames)]
test_set = balance_classes(
data=test_set, background_ratio=background_ratio, random_state=seed
)
test_set = test_set.sample(frac=1, random_state=seed)
target = "class"
X_train = train_set[feature_names]
y_train = train_set[target]
X_test = test_set[feature_names]
y_test = test_set[target]
assert not set(train_set["frame"]).intersection(set(test_set["frame"]))
print(train_set["class"].value_counts())
print(test_set["class"].value_counts())
return X_train, y_train, X_test, y_test
def fit_model(X_train, y_train, seed=42):
model = XGBClassifier(
learning_rate=1.0,
n_estimators=1, # only one tree
n_jobs=8,
max_depth=6, # maximum tree depth
random_state=seed,
)
model.fit(X_train, y_train)
return model
def evaluate(model, X, y):
# compute and print fraction of correct labels
# also measure time
print("success rate: ", compute_identical_fraction(model.predict(X), y))
start = time.time()
model.predict(X)
end = time.time()
print("n evaluations: ", X.shape[0])
print("elapsed time: ", end - start)
def load_data_and_fit_model(input_filename, output_filename, feature_names):
print("preparing training data")
data = pd.read_pickle(input_filename)
X_train, y_train, X_test, y_test = prepare_data(
data=data,
feature_names=feature_names,
train_fraction=0.8,
background_ratio=20,
seed=22,
)
print("done preparing training data")
print("fitting model")
model = fit_model(X_train, y_train)
model.save_model(output_filename)
model.get_booster().dump_model(output_filename + "_dump.txt")
print("done fitting model")
print("test data ------------------------")
evaluate(model, X_test, y_test)
print("train data -----------------------")
evaluate(model, X_train, y_train)
def load_model_and_generate_evaluation_images(
model_filename,
input_path: pathlib.Path,
output_path: pathlib.Path,
feature_names,
):
model = XGBClassifier()
model.load_model(model_filename)
for frame_folder in sorted(get_subdirectories(input_path)):
segment_names = [
f.name
for f in frame_folder.iterdir()
if f.is_file() and f.name[1].isdigit()
]
if len(segment_names) != 0:
continue
for camera_name in ["60", "180", "300"]:
image_name = "camera" + camera_name + ".png"
print(frame_folder / image_name)
image_bgr = cv.imread(str(frame_folder / image_name))
features, shape = create_features(
image_bgr=image_bgr, flatten=True
)
X = pd.DataFrame(features)[feature_names]
y = model.predict(X)
segments = y.reshape(shape)
segments_bgr = [class2bgr(idx) for idx in segments.flatten()]
segments_bgr = np.array(segments_bgr).reshape(*shape, 3)
path = output_path / frame_folder.name
if not path.exists():
path.mkdir(parents=True)
image_and_segments_bgr = np.concatenate(
[image_bgr, segments_bgr], axis=1
)
segments_filename = "camera" + camera_name + "_segments" + ".png"
cv.imwrite(
filename=str(path / segments_filename),
img=image_and_segments_bgr,
)
def main():
color_space_features = {
"bgr": ["b", "g", "r"],
"hsv": ["h", "s", "v"],
"xyz": ["x", "y", "z"],
"Lab": ["L", "A", "B"],
}
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--train",
action="store_true",
help="Train a new model.",
)
parser.add_argument(
"--image-dir",
required=True,
type=pathlib.Path,
help="Directory containing the training data.",
)
parser.add_argument(
"--output-dir",
type=pathlib.Path,
help="Output directory for test run.",
)
parser.add_argument(
"--color-spaces",
nargs="+",
type=str,
choices=color_space_features.keys(),
default=["bgr", "hsv"],
help="Color spaces that are used as features.",
)
args = parser.parse_args()
feature_names = []
for color_space in args.color_spaces:
feature_names += color_space_features[color_space]
if args.train:
load_images_and_create_data(
input_path=args.image_dir, output_filename="data.pkl"
)
load_data_and_fit_model(
input_filename="data.pkl",
output_filename="xgb_model.bin",
feature_names=feature_names,
)
if args.output_dir:
load_model_and_generate_evaluation_images(
model_filename="xgb_model.bin",
input_path=args.image_dir,
output_path=args.output_dir,
feature_names=feature_names,
)
return 0
if __name__ == "__main__":
sys.exit(main()) | 0.501221 | 0.279583 |
import numpy
from openfermioncirq.optimization import (
OptimizationParams,
OptimizationResult,
OptimizationTrialResult)
from openfermioncirq.testing import ExampleAlgorithm
def test_optimization_result_init():
result = OptimizationResult(
optimal_value=0.339,
optimal_parameters=numpy.array([-1.899, -0.549]),
num_evaluations=121,
cost_spent=1.426,
function_values=[(1.235, 4.119, None), (-2.452, 3.244, None)],
wait_times=[5.329],
time=0.423,
seed=77,
status=195,
message='fdjmolGSHM')
assert result.optimal_value == 0.339
numpy.testing.assert_allclose(result.optimal_parameters,
numpy.array([-1.899, -0.549]))
assert result.num_evaluations == 121
assert result.cost_spent == 1.426
assert result.function_values == [(1.235, 4.119, None),
(-2.452, 3.244, None)]
assert result.wait_times == [5.329]
assert result.time == 0.423
assert result.seed == 77
assert result.status == 195
assert result.message == 'fdjmolGSHM'
def test_optimization_trial_result_init():
result1 = OptimizationResult(
optimal_value=5.7,
optimal_parameters=numpy.array([1.3, 8.7]),
num_evaluations=59,
cost_spent=3.1,
seed=60,
status=54,
message='ZibVTBNe8')
result2 = OptimizationResult(
optimal_value=4.7,
optimal_parameters=numpy.array([1.7, 2.1]),
num_evaluations=57,
cost_spent=9.3,
seed=51,
status=32,
message='cicCZ8iCg0D')
trial = OptimizationTrialResult(
[result1, result2],
params=OptimizationParams(ExampleAlgorithm()))
assert all(trial.data_frame['optimal_value'] == [5.7, 4.7])
numpy.testing.assert_allclose(
trial.data_frame['optimal_parameters'][0], numpy.array([1.3, 8.7]))
numpy.testing.assert_allclose(
trial.data_frame['optimal_parameters'][1], numpy.array([1.7, 2.1]))
assert all(trial.data_frame['num_evaluations'] == [59, 57])
assert all(trial.data_frame['cost_spent'] == [3.1, 9.3])
assert all(trial.data_frame['seed'] == [60, 51])
assert all(trial.data_frame['status'] == [54, 32])
assert all(trial.data_frame['message'] == ['ZibVTBNe8', 'cicCZ8iCg0D'])
def test_optimization_trial_result_extend():
result1 = OptimizationResult(
optimal_value=4.7,
optimal_parameters=numpy.array([2.3, 2.7]),
num_evaluations=39,
cost_spent=3.9,
seed=63,
status=44,
message='di382j2f')
result2 = OptimizationResult(
optimal_value=3.7,
optimal_parameters=numpy.array([1.2, 3.1]),
num_evaluations=47,
cost_spent=9.9,
seed=21,
status=22,
message='i328d8ie3')
trial = OptimizationTrialResult(
[result1],
params=OptimizationParams(ExampleAlgorithm()))
assert len(trial.results) == 1
assert trial.repetitions == 1
trial.extend([result2])
assert len(trial.results) == 2
assert trial.repetitions == 2
def test_optimization_trial_result_data_methods():
result1 = OptimizationResult(
optimal_value=5.7,
optimal_parameters=numpy.array([1.3, 8.7]),
num_evaluations=59,
cost_spent=3.1,
seed=60,
status=54,
message='ZibVTBNe8',
time=0.1)
result2 = OptimizationResult(
optimal_value=4.7,
optimal_parameters=numpy.array([1.7, 2.1]),
num_evaluations=57,
cost_spent=9.3,
seed=51,
status=32,
message='cicCZ8iCg0D',
time=0.2)
trial = OptimizationTrialResult(
[result1, result2],
params=OptimizationParams(ExampleAlgorithm()))
assert trial.repetitions == 2
assert trial.optimal_value == 4.7
numpy.testing.assert_allclose(trial.optimal_parameters,
numpy.array([1.7, 2.1])) | openfermioncirq/optimization/result_test.py |
import numpy
from openfermioncirq.optimization import (
OptimizationParams,
OptimizationResult,
OptimizationTrialResult)
from openfermioncirq.testing import ExampleAlgorithm
def test_optimization_result_init():
result = OptimizationResult(
optimal_value=0.339,
optimal_parameters=numpy.array([-1.899, -0.549]),
num_evaluations=121,
cost_spent=1.426,
function_values=[(1.235, 4.119, None), (-2.452, 3.244, None)],
wait_times=[5.329],
time=0.423,
seed=77,
status=195,
message='fdjmolGSHM')
assert result.optimal_value == 0.339
numpy.testing.assert_allclose(result.optimal_parameters,
numpy.array([-1.899, -0.549]))
assert result.num_evaluations == 121
assert result.cost_spent == 1.426
assert result.function_values == [(1.235, 4.119, None),
(-2.452, 3.244, None)]
assert result.wait_times == [5.329]
assert result.time == 0.423
assert result.seed == 77
assert result.status == 195
assert result.message == 'fdjmolGSHM'
def test_optimization_trial_result_init():
result1 = OptimizationResult(
optimal_value=5.7,
optimal_parameters=numpy.array([1.3, 8.7]),
num_evaluations=59,
cost_spent=3.1,
seed=60,
status=54,
message='ZibVTBNe8')
result2 = OptimizationResult(
optimal_value=4.7,
optimal_parameters=numpy.array([1.7, 2.1]),
num_evaluations=57,
cost_spent=9.3,
seed=51,
status=32,
message='cicCZ8iCg0D')
trial = OptimizationTrialResult(
[result1, result2],
params=OptimizationParams(ExampleAlgorithm()))
assert all(trial.data_frame['optimal_value'] == [5.7, 4.7])
numpy.testing.assert_allclose(
trial.data_frame['optimal_parameters'][0], numpy.array([1.3, 8.7]))
numpy.testing.assert_allclose(
trial.data_frame['optimal_parameters'][1], numpy.array([1.7, 2.1]))
assert all(trial.data_frame['num_evaluations'] == [59, 57])
assert all(trial.data_frame['cost_spent'] == [3.1, 9.3])
assert all(trial.data_frame['seed'] == [60, 51])
assert all(trial.data_frame['status'] == [54, 32])
assert all(trial.data_frame['message'] == ['ZibVTBNe8', 'cicCZ8iCg0D'])
def test_optimization_trial_result_extend():
result1 = OptimizationResult(
optimal_value=4.7,
optimal_parameters=numpy.array([2.3, 2.7]),
num_evaluations=39,
cost_spent=3.9,
seed=63,
status=44,
message='di382j2f')
result2 = OptimizationResult(
optimal_value=3.7,
optimal_parameters=numpy.array([1.2, 3.1]),
num_evaluations=47,
cost_spent=9.9,
seed=21,
status=22,
message='i328d8ie3')
trial = OptimizationTrialResult(
[result1],
params=OptimizationParams(ExampleAlgorithm()))
assert len(trial.results) == 1
assert trial.repetitions == 1
trial.extend([result2])
assert len(trial.results) == 2
assert trial.repetitions == 2
def test_optimization_trial_result_data_methods():
result1 = OptimizationResult(
optimal_value=5.7,
optimal_parameters=numpy.array([1.3, 8.7]),
num_evaluations=59,
cost_spent=3.1,
seed=60,
status=54,
message='ZibVTBNe8',
time=0.1)
result2 = OptimizationResult(
optimal_value=4.7,
optimal_parameters=numpy.array([1.7, 2.1]),
num_evaluations=57,
cost_spent=9.3,
seed=51,
status=32,
message='cicCZ8iCg0D',
time=0.2)
trial = OptimizationTrialResult(
[result1, result2],
params=OptimizationParams(ExampleAlgorithm()))
assert trial.repetitions == 2
assert trial.optimal_value == 4.7
numpy.testing.assert_allclose(trial.optimal_parameters,
numpy.array([1.7, 2.1])) | 0.698432 | 0.608536 |
import logging
# logging.basicConfig(format='%(message)s', level=logging.INFO)
DEBUG_SUCCESS_NUM = 1001
DEBUG_FAILED_NUM = 1002
logging.addLevelName(DEBUG_SUCCESS_NUM, "SUCCESS")
logging.addLevelName(DEBUG_FAILED_NUM, "FAILED")
def debug_success(self, message, *args, **kws):
if self.isEnabledFor(DEBUG_SUCCESS_NUM):
self._log(DEBUG_SUCCESS_NUM, message, args, **kws)
def debug_failed(self, message, *args, **kws):
if self.isEnabledFor(DEBUG_FAILED_NUM):
self._log(DEBUG_FAILED_NUM, message, args, **kws)
logging.Logger.success = debug_success
logging.Logger.failed = debug_failed
class ColorFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors"""
blue = "\x1b[34m"
cyan = "\x1b[36;1m"
green = "\x1b[32;1m"
orange = "\x1b[33;21m"
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
time_prefix = "[%(asctime)s]"
filename_prefix = " (%(filename)s:%(lineno)d) "
msg = "%(message)s"
prefix = orange + time_prefix + reset + grey + filename_prefix + reset
FORMATS = {
logging.DEBUG: prefix + blue + msg + reset,
logging.INFO: prefix + cyan + msg + reset,
logging.WARNING: prefix + yellow + msg + reset,
logging.ERROR: prefix + red + msg + reset,
logging.CRITICAL: prefix + bold_red + msg + reset,
DEBUG_SUCCESS_NUM: prefix + green + msg + reset,
DEBUG_FAILED_NUM: prefix + bold_red + msg + reset,
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def Log(filename: str, name_scope="0", write_to_console=True):
"""Return instance of logger 统一的日志样式
Examples:
>>> from toolbox.utils.Log import Log
>>> log = Log("./train.log")
>>> log.debug("debug message")
>>> log.info("info message")
>>> log.warning("warning message")
>>> log.error("error message")
>>> log.critical("critical message")
"""
logger = logging.getLogger('log-%s' % name_scope)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('[%(asctime)s] p%(process)s (%(filename)s:%(lineno)d) - %(message)s', '%m-%d %H:%M:%S'))
logger.addHandler(file_handler)
if write_to_console:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(ColorFormatter())
logger.addHandler(console_handler)
return logger
def log_result(logger, result):
"""
:param logger: from toolbox.utils.Log()
:param result: from toolbox.Evaluate.evaluate()
"""
from toolbox.evaluate.Evaluate import pretty_print
pretty_print(result, logger.info) | toolbox/utils/Log.py | import logging
# logging.basicConfig(format='%(message)s', level=logging.INFO)
DEBUG_SUCCESS_NUM = 1001
DEBUG_FAILED_NUM = 1002
logging.addLevelName(DEBUG_SUCCESS_NUM, "SUCCESS")
logging.addLevelName(DEBUG_FAILED_NUM, "FAILED")
def debug_success(self, message, *args, **kws):
if self.isEnabledFor(DEBUG_SUCCESS_NUM):
self._log(DEBUG_SUCCESS_NUM, message, args, **kws)
def debug_failed(self, message, *args, **kws):
if self.isEnabledFor(DEBUG_FAILED_NUM):
self._log(DEBUG_FAILED_NUM, message, args, **kws)
logging.Logger.success = debug_success
logging.Logger.failed = debug_failed
class ColorFormatter(logging.Formatter):
    """Logging formatter that colorizes records with ANSI escape codes.

    Renders ``[time] (file:lineno) message`` with an orange time prefix, a
    grey location prefix, and a message color chosen per log level
    (including the custom SUCCESS/FAILED levels registered in this module).
    """

    # ANSI escape sequences used to build the per-level formats below.
    blue = "\x1b[34m"
    cyan = "\x1b[36;1m"
    green = "\x1b[32;1m"
    orange = "\x1b[33;21m"
    grey = "\x1b[38;21m"
    yellow = "\x1b[33;21m"
    red = "\x1b[31;21m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"
    time_prefix = "[%(asctime)s]"
    filename_prefix = " (%(filename)s:%(lineno)d) "
    msg = "%(message)s"
    prefix = orange + time_prefix + reset + grey + filename_prefix + reset
    FORMATS = {
        logging.DEBUG: prefix + blue + msg + reset,
        logging.INFO: prefix + cyan + msg + reset,
        logging.WARNING: prefix + yellow + msg + reset,
        logging.ERROR: prefix + red + msg + reset,
        logging.CRITICAL: prefix + bold_red + msg + reset,
        DEBUG_SUCCESS_NUM: prefix + green + msg + reset,
        DEBUG_FAILED_NUM: prefix + bold_red + msg + reset,
    }
    # PERF: pre-build one Formatter per level.  The original constructed a
    # brand-new logging.Formatter on every format() call, which is wasted
    # work on hot logging paths.  (FORMATS is visible here because it is the
    # comprehension's iterable, which is evaluated in class scope.)
    _LEVEL_FORMATTERS = {level: logging.Formatter(fmt)
                         for level, fmt in FORMATS.items()}

    def format(self, record):
        """Format *record* with the colored formatter for its level.

        Levels not present in FORMATS fall back to the default
        ``"%(message)s"`` layout, matching the original behaviour of
        ``logging.Formatter(None)``.
        """
        formatter = self._LEVEL_FORMATTERS.get(record.levelno)
        if formatter is None:
            formatter = logging.Formatter(None)
        return formatter.format(record)
def Log(filename: str, name_scope="0", write_to_console=True):
    """Return a logger instance configured with the unified log style.

    :param filename: path of the log file records are appended to.
    :param name_scope: suffix of the logger name; calls sharing a
        ``name_scope`` return the same underlying logger object.
    :param write_to_console: also emit colorized records to the console.

    Examples:
        >>> from toolbox.utils.Log import Log
        >>> log = Log("./train.log")
        >>> log.debug("debug message")
        >>> log.info("info message")
        >>> log.warning("warning message")
        >>> log.error("error message")
        >>> log.critical("critical message")
    """
    logger = logging.getLogger('log-%s' % name_scope)
    logger.setLevel(logging.DEBUG)
    # BUG FIX: logging.getLogger() returns the same object for the same name,
    # and the original attached a fresh pair of handlers on every call, so
    # calling Log() twice duplicated every log line.  Configure only once.
    if logger.handlers:
        return logger
    file_handler = logging.FileHandler(filename)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        '[%(asctime)s] p%(process)s (%(filename)s:%(lineno)d) - %(message)s',
        '%m-%d %H:%M:%S'))
    logger.addHandler(file_handler)
    if write_to_console:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(ColorFormatter())
        logger.addHandler(console_handler)
    return logger
def log_result(logger, result):
    """Pretty-print an evaluation result through *logger* at INFO level.

    :param logger: from toolbox.utils.Log()
    :param result: from toolbox.Evaluate.evaluate()
    """
    # Local import — presumably to avoid a circular dependency between the
    # Log and Evaluate modules at import time; TODO confirm.
    from toolbox.evaluate.Evaluate import pretty_print
    pretty_print(result, logger.info)
import numpy as np
from fusion_engine_client.analysis.file_reader import FileReader, MessageData, TimeAlignmentMode
from fusion_engine_client.messages import *
def setup():
    """Build a synthetic log: PoseMessages at t=1,2; PoseAux and GNSSInfo
    messages at t=2,3. Only t=2.0 is common to all three streams."""
    message_classes = (PoseMessage, PoseAuxMessage, GNSSInfoMessage)
    data = {cls.MESSAGE_TYPE: MessageData(cls.MESSAGE_TYPE, None)
            for cls in message_classes}

    def _append(cls, time_sec, **attrs):
        # Create one message at the given P1 time with the given payload.
        entry = cls()
        entry.p1_time = Timestamp(time_sec)
        for attr_name, attr_value in attrs.items():
            setattr(entry, attr_name, attr_value)
        data[cls.MESSAGE_TYPE].messages.append(entry)

    _append(PoseMessage, 1.0, velocity_body_mps=np.array([1.0, 2.0, 3.0]))
    _append(PoseMessage, 2.0, velocity_body_mps=np.array([4.0, 5.0, 6.0]))
    _append(PoseAuxMessage, 2.0, velocity_enu_mps=np.array([14.0, 15.0, 16.0]))
    _append(PoseAuxMessage, 3.0, velocity_enu_mps=np.array([17.0, 18.0, 19.0]))
    _append(GNSSInfoMessage, 2.0, gdop=5.0)
    _append(GNSSInfoMessage, 3.0, gdop=6.0)
    return data
def test_time_align_drop():
    """DROP alignment keeps only epochs shared by every stream (t=2.0)."""
    data = setup()
    FileReader.time_align_data(data, TimeAlignmentMode.DROP)
    all_types = (PoseMessage.MESSAGE_TYPE,
                 PoseAuxMessage.MESSAGE_TYPE,
                 GNSSInfoMessage.MESSAGE_TYPE)
    for message_type in all_types:
        entries = data[message_type].messages
        assert len(entries) == 1
        assert float(entries[0].p1_time) == 2.0
def test_time_align_insert():
    """INSERT alignment pads every stream to t=1,2,3, filling gaps with NaN."""
    data = setup()
    FileReader.time_align_data(data, TimeAlignmentMode.INSERT)
    # (message type, payload accessor, expected values at t=1,2,3; None = NaN)
    expectations = [
        (PoseMessage.MESSAGE_TYPE, lambda m: m.velocity_body_mps[0],
         [1.0, 4.0, None]),
        (PoseAuxMessage.MESSAGE_TYPE, lambda m: m.velocity_enu_mps[0],
         [None, 14.0, 17.0]),
        (GNSSInfoMessage.MESSAGE_TYPE, lambda m: m.gdop,
         [None, 5.0, 6.0]),
    ]
    for message_type, get_value, expected_values in expectations:
        entries = data[message_type].messages
        assert len(entries) == 3
        for index, (entry, expected) in enumerate(zip(entries, expected_values)):
            assert float(entry.p1_time) == float(index + 1)
            if expected is None:
                assert np.isnan(get_value(entry))
            else:
                assert get_value(entry) == expected
def test_time_align_specific():
    """DROP alignment restricted by ``message_types`` must leave the
    unlisted PoseAuxMessage stream untouched while the listed streams are
    reduced to their common t=2.0 epoch."""
    data = setup()
    FileReader.time_align_data(data, TimeAlignmentMode.DROP,
                               message_types=[PoseMessage.MESSAGE_TYPE, GNSSInfoMessage.MESSAGE_TYPE])
    # Aligned streams keep only the shared t=2.0 entry.
    assert len(data[PoseMessage.MESSAGE_TYPE].messages) == 1
    assert float(data[PoseMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
    # PoseAux was excluded from alignment, so both of its entries survive.
    assert len(data[PoseAuxMessage.MESSAGE_TYPE].messages) == 2
    assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
    assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[1].p1_time) == 3.0
    assert len(data[GNSSInfoMessage.MESSAGE_TYPE].messages) == 1
    assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
from fusion_engine_client.analysis.file_reader import FileReader, MessageData, TimeAlignmentMode
from fusion_engine_client.messages import *
def setup():
data = {
PoseMessage.MESSAGE_TYPE: MessageData(PoseMessage.MESSAGE_TYPE, None),
PoseAuxMessage.MESSAGE_TYPE: MessageData(PoseAuxMessage.MESSAGE_TYPE, None),
GNSSInfoMessage.MESSAGE_TYPE: MessageData(GNSSInfoMessage.MESSAGE_TYPE, None),
}
message = PoseMessage()
message.p1_time = Timestamp(1.0)
message.velocity_body_mps = np.array([1.0, 2.0, 3.0])
data[PoseMessage.MESSAGE_TYPE].messages.append(message)
message = PoseMessage()
message.p1_time = Timestamp(2.0)
message.velocity_body_mps = np.array([4.0, 5.0, 6.0])
data[PoseMessage.MESSAGE_TYPE].messages.append(message)
message = PoseAuxMessage()
message.p1_time = Timestamp(2.0)
message.velocity_enu_mps = np.array([14.0, 15.0, 16.0])
data[PoseAuxMessage.MESSAGE_TYPE].messages.append(message)
message = PoseAuxMessage()
message.p1_time = Timestamp(3.0)
message.velocity_enu_mps = np.array([17.0, 18.0, 19.0])
data[PoseAuxMessage.MESSAGE_TYPE].messages.append(message)
message = GNSSInfoMessage()
message.p1_time = Timestamp(2.0)
message.gdop = 5.0
data[GNSSInfoMessage.MESSAGE_TYPE].messages.append(message)
message = GNSSInfoMessage()
message.p1_time = Timestamp(3.0)
message.gdop = 6.0
data[GNSSInfoMessage.MESSAGE_TYPE].messages.append(message)
return data
def test_time_align_drop():
data = setup()
FileReader.time_align_data(data, TimeAlignmentMode.DROP)
assert len(data[PoseMessage.MESSAGE_TYPE].messages) == 1
assert float(data[PoseMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert len(data[PoseAuxMessage.MESSAGE_TYPE].messages) == 1
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert len(data[GNSSInfoMessage.MESSAGE_TYPE].messages) == 1
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
def test_time_align_insert():
data = setup()
FileReader.time_align_data(data, TimeAlignmentMode.INSERT)
assert len(data[PoseMessage.MESSAGE_TYPE].messages) == 3
assert float(data[PoseMessage.MESSAGE_TYPE].messages[0].p1_time) == 1.0
assert float(data[PoseMessage.MESSAGE_TYPE].messages[1].p1_time) == 2.0
assert float(data[PoseMessage.MESSAGE_TYPE].messages[2].p1_time) == 3.0
assert data[PoseMessage.MESSAGE_TYPE].messages[0].velocity_body_mps[0] == 1.0
assert data[PoseMessage.MESSAGE_TYPE].messages[1].velocity_body_mps[0] == 4.0
assert np.isnan(data[PoseMessage.MESSAGE_TYPE].messages[2].velocity_body_mps[0])
assert len(data[PoseAuxMessage.MESSAGE_TYPE].messages) == 3
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].p1_time) == 1.0
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[1].p1_time) == 2.0
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[2].p1_time) == 3.0
assert np.isnan(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].velocity_enu_mps[0])
assert data[PoseAuxMessage.MESSAGE_TYPE].messages[1].velocity_enu_mps[0] == 14.0
assert data[PoseAuxMessage.MESSAGE_TYPE].messages[2].velocity_enu_mps[0] == 17.0
assert len(data[GNSSInfoMessage.MESSAGE_TYPE].messages) == 3
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].p1_time) == 1.0
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[1].p1_time) == 2.0
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[2].p1_time) == 3.0
assert np.isnan(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].gdop)
assert data[GNSSInfoMessage.MESSAGE_TYPE].messages[1].gdop == 5.0
assert data[GNSSInfoMessage.MESSAGE_TYPE].messages[2].gdop == 6.0
def test_time_align_specific():
data = setup()
FileReader.time_align_data(data, TimeAlignmentMode.DROP,
message_types=[PoseMessage.MESSAGE_TYPE, GNSSInfoMessage.MESSAGE_TYPE])
assert len(data[PoseMessage.MESSAGE_TYPE].messages) == 1
assert float(data[PoseMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert len(data[PoseAuxMessage.MESSAGE_TYPE].messages) == 2
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0
assert float(data[PoseAuxMessage.MESSAGE_TYPE].messages[1].p1_time) == 3.0
assert len(data[GNSSInfoMessage.MESSAGE_TYPE].messages) == 1
assert float(data[GNSSInfoMessage.MESSAGE_TYPE].messages[0].p1_time) == 2.0 | 0.501953 | 0.418103 |
from __future__ import absolute_import, unicode_literals
import logging
import os.path
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
import polib
from .util import app_name_from_filepath
logger = logging.getLogger(__name__)
# UserModel represents the model used by the project
UserModel = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class TranslationFile(models.Model):
    """A gettext ``.po`` file under management.

    Tracks the file's location on disk and its language, and exposes
    helpers for compiling it to ``.mo`` and inspecting its contents.
    """

    name = models.CharField(max_length=512, blank=False, null=False)
    filepath = models.CharField(max_length=1024, blank=False, null=False)
    language_code = models.CharField(max_length=32, choices=settings.LANGUAGES, blank=False)
    created = models.DateTimeField(auto_now_add=True)
    # Set by save_mofile() when the .po file is successfully compiled.
    last_compiled = models.DateTimeField(null=True)
    # Cleared by save_mofile() when the .po file no longer exists on disk.
    is_valid = models.BooleanField(default=True)

    def __str__(self):
        return "{} ({})".format(self.name, self.filepath)

    @property
    def model_name(self):
        """Application name this file belongs to, derived from its path."""
        return app_name_from_filepath(self.filepath)

    def get_polib_object(self):
        """Parse the .po file and return it as a ``polib.POFile``."""
        return polib.pofile(self.filepath)

    def save_mofile(self):
        """Compile the .po file to a sibling .mo file and save the model.

        Records the compile time in ``last_compiled`` on success; if the
        .po file has disappeared, flags the record as invalid instead.
        """
        if os.path.isfile(self.filepath):
            pofile = polib.pofile(self.filepath)
            # Swap the trailing "po" for "mo" (assumes a ".po" suffix).
            mopath = "{}mo".format(self.filepath[:-2])
            pofile.save_as_mofile(mopath)
            self.last_compiled = timezone.now()
        else:
            self.is_valid = False
        self.save()

    def get_statistics(self):
        """
        Return statistics for this file:
        - % translated
        - total messages
        - messages translated
        - fuzzy messages
        - obsolete messages

        Returns all-zero statistics when the file cannot be parsed.
        """
        try:
            pofile = self.get_polib_object()
        except Exception:  # was ``as exc`` — the binding was never used
            logger.warning("Could not get polib object", exc_info=True)
            return {
                'percent_translated': 0,
                'total_messages': 0,
                'translated_messages': 0,
                'fuzzy_messages': 0,
                'obsolete_messages': 0
            }
        translated_entries = len(pofile.translated_entries())
        untranslated_entries = len(pofile.untranslated_entries())
        fuzzy_entries = len(pofile.fuzzy_entries())
        obsolete_entries = len(pofile.obsolete_entries())
        return {
            'percent_translated': pofile.percent_translated(),
            'total_messages': translated_entries + untranslated_entries,
            'translated_messages': translated_entries,
            'fuzzy_messages': fuzzy_entries,
            'obsolete_messages': obsolete_entries,
        }

    def get_language_name(self):
        """Human-readable language name looked up in settings.LANGUAGES."""
        return dict(settings.LANGUAGES)[self.language_code]
@python_2_unicode_compatible
class BaseEditLog(models.Model):
    """Abstract audit record of one field change made to a message.

    ``msghash`` is an md5 hash of the msgid and msgctxt, using
    util.get_hash_from_msgid_context.
    """

    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(UserModel, related_name='%(app_label)s_%(class)ss', on_delete=models.CASCADE)
    msgid = models.TextField()
    msghash = models.CharField(max_length=32, null=False, blank=False)
    fieldname = models.CharField(max_length=127, blank=False, null=False)
    old_value = models.CharField(max_length=255, blank=True, null=True)
    new_value = models.CharField(max_length=255, blank=True, null=True)

    class Meta:
        abstract = True
        ordering = ['created']

    # BUG FIX: this was ``__unicode__``, which Python 3 never calls, so
    # str(instance) fell back to the default Model repr.  Defining __str__
    # and decorating with @python_2_unicode_compatible (the convention
    # already used by TranslationFile) restores Python 2 behaviour too.
    def __str__(self):
        return u"[{}] Field {} | \"{}\" -> \"{}\" in {}".format(
            str(self.user),
            self.fieldname,
            self.old_value,
            self.new_value,
            self.file_edited.filepath,
        )
class EditLog(BaseEditLog):
    # Concrete edit-log entry, linked to the TranslationFile that was edited
    # (BaseEditLog's text representation reads ``self.file_edited.filepath``).
    file_edited = models.ForeignKey(
        TranslationFile, blank=False, null=False,
        related_name='edit_logs', on_delete=models.CASCADE
    )
@python_2_unicode_compatible
class BaseMessageComment(models.Model):
    """Abstract user comment attached to one translatable message.

    ``msghash`` is an md5 hash of the msgid and msgctxt, using
    util.get_hash_from_msgid_context.
    """

    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(UserModel, related_name='%(app_label)s_%(class)ss', on_delete=models.CASCADE)
    msghash = models.CharField(max_length=32, null=False, blank=False)
    body = models.CharField(max_length=1024, blank=False, null=False)

    class Meta:
        abstract = True
        ordering = ['created']

    # BUG FIX: this was ``__unicode__``, which Python 3 never calls; define
    # __str__ and decorate with @python_2_unicode_compatible instead,
    # matching the convention already used by TranslationFile.
    def __str__(self):
        return u"Comment by {} on \"{}\" ({}) at {}".format(
            str(self.user),
            self.msghash,
            self.translation_file.language_code,
            self.created.strftime('%d-%m-%Y')
        )
class MessageComment(BaseMessageComment):
    # Concrete comment, linked to the TranslationFile it belongs to
    # (BaseMessageComment's text representation reads
    # ``self.translation_file.language_code``).
    translation_file = models.ForeignKey(
        TranslationFile, blank=False, null=False,
        related_name='comments', on_delete=models.CASCADE
    )
import logging
import os.path
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
import polib
from .util import app_name_from_filepath
logger = logging.getLogger(__name__)
# UserModel represents the model used by the project
UserModel = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class TranslationFile(models.Model):
    """A gettext ``.po`` file under management.

    Tracks the file's location on disk and its language, and exposes
    helpers for compiling it to ``.mo`` and inspecting its contents.
    """

    name = models.CharField(max_length=512, blank=False, null=False)
    filepath = models.CharField(max_length=1024, blank=False, null=False)
    language_code = models.CharField(max_length=32, choices=settings.LANGUAGES, blank=False)
    created = models.DateTimeField(auto_now_add=True)
    # Set by save_mofile() when the .po file is successfully compiled.
    last_compiled = models.DateTimeField(null=True)
    # Cleared by save_mofile() when the .po file no longer exists on disk.
    is_valid = models.BooleanField(default=True)

    def __str__(self):
        return "{} ({})".format(self.name, self.filepath)

    @property
    def model_name(self):
        """Application name this file belongs to, derived from its path."""
        return app_name_from_filepath(self.filepath)

    def get_polib_object(self):
        """Parse the .po file and return it as a ``polib.POFile``."""
        return polib.pofile(self.filepath)

    def save_mofile(self):
        """Compile the .po file to a sibling .mo file and save the model.

        Records the compile time in ``last_compiled`` on success; if the
        .po file has disappeared, flags the record as invalid instead.
        """
        if os.path.isfile(self.filepath):
            pofile = polib.pofile(self.filepath)
            # Swap the trailing "po" for "mo" (assumes a ".po" suffix).
            mopath = "{}mo".format(self.filepath[:-2])
            pofile.save_as_mofile(mopath)
            self.last_compiled = timezone.now()
        else:
            self.is_valid = False
        self.save()

    def get_statistics(self):
        """
        Return statistics for this file:
        - % translated
        - total messages
        - messages translated
        - fuzzy messages
        - obsolete messages

        Returns all-zero statistics when the file cannot be parsed.
        """
        try:
            pofile = self.get_polib_object()
        except Exception:  # was ``as exc`` — the binding was never used
            logger.warning("Could not get polib object", exc_info=True)
            return {
                'percent_translated': 0,
                'total_messages': 0,
                'translated_messages': 0,
                'fuzzy_messages': 0,
                'obsolete_messages': 0
            }
        translated_entries = len(pofile.translated_entries())
        untranslated_entries = len(pofile.untranslated_entries())
        fuzzy_entries = len(pofile.fuzzy_entries())
        obsolete_entries = len(pofile.obsolete_entries())
        return {
            'percent_translated': pofile.percent_translated(),
            'total_messages': translated_entries + untranslated_entries,
            'translated_messages': translated_entries,
            'fuzzy_messages': fuzzy_entries,
            'obsolete_messages': obsolete_entries,
        }

    def get_language_name(self):
        """Human-readable language name looked up in settings.LANGUAGES."""
        return dict(settings.LANGUAGES)[self.language_code]
@python_2_unicode_compatible
class BaseEditLog(models.Model):
    """Abstract audit record of one field change made to a message.

    ``msghash`` is an md5 hash of the msgid and msgctxt, using
    util.get_hash_from_msgid_context.
    """

    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(UserModel, related_name='%(app_label)s_%(class)ss', on_delete=models.CASCADE)
    msgid = models.TextField()
    msghash = models.CharField(max_length=32, null=False, blank=False)
    fieldname = models.CharField(max_length=127, blank=False, null=False)
    old_value = models.CharField(max_length=255, blank=True, null=True)
    new_value = models.CharField(max_length=255, blank=True, null=True)

    class Meta:
        abstract = True
        ordering = ['created']

    # BUG FIX: this was ``__unicode__``, which Python 3 never calls, so
    # str(instance) fell back to the default Model repr.  Defining __str__
    # and decorating with @python_2_unicode_compatible (the convention
    # already used by TranslationFile) restores Python 2 behaviour too.
    def __str__(self):
        return u"[{}] Field {} | \"{}\" -> \"{}\" in {}".format(
            str(self.user),
            self.fieldname,
            self.old_value,
            self.new_value,
            self.file_edited.filepath,
        )
class EditLog(BaseEditLog):
file_edited = models.ForeignKey(
TranslationFile, blank=False, null=False,
related_name='edit_logs', on_delete=models.CASCADE
)
@python_2_unicode_compatible
class BaseMessageComment(models.Model):
    """Abstract user comment attached to one translatable message.

    ``msghash`` is an md5 hash of the msgid and msgctxt, using
    util.get_hash_from_msgid_context.
    """

    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(UserModel, related_name='%(app_label)s_%(class)ss', on_delete=models.CASCADE)
    msghash = models.CharField(max_length=32, null=False, blank=False)
    body = models.CharField(max_length=1024, blank=False, null=False)

    class Meta:
        abstract = True
        ordering = ['created']

    # BUG FIX: this was ``__unicode__``, which Python 3 never calls; define
    # __str__ and decorate with @python_2_unicode_compatible instead,
    # matching the convention already used by TranslationFile.
    def __str__(self):
        return u"Comment by {} on \"{}\" ({}) at {}".format(
            str(self.user),
            self.msghash,
            self.translation_file.language_code,
            self.created.strftime('%d-%m-%Y')
        )
class MessageComment(BaseMessageComment):
translation_file = models.ForeignKey(
TranslationFile, blank=False, null=False,
related_name='comments', on_delete=models.CASCADE
) | 0.657868 | 0.08163 |
from ..snescpu.addressing import get_addressing_mode
from ..snescpu.disasm import disassemble
from ..snescpu.instructions import get_instruction
from ..snescpu.states import (DisassembleState, DumpState)
# A dictionary of subroutines with following bytes as arguments.
# key: the address of subroutine
# value: the corresponding byte-partition for DumpState.byte_count
SPECIAL_SUBROUTINES = {
0xC90566: (1, 2, 3, 2, 3,),
0xC90572: (1, 2, 3, 2, 3,),
0xC9062D: (1, 2, 3, 2, 3,),
0xC77808: (2, 2, 2, 2,),
0xC903E2: (1, 2, 3, 2,),
0xC903EE: (1, 2, 3, 2,),
0xC90501: (1, 2, 3, 1,),
0xC9050D: (1, 2, 3, 1,),
0xC46F9B: (1, 2, 3,),
0xC90789: (3, 3,),
0xC907CC: (3, 3,),
0xC447D3: (3, 2,),
0xC44A72: (3, 2,),
0xC691AD: (3, 2,),
0xC69234: (3, 2,),
0xC6928E: (3, 2,),
0xC908F0: (3, 2,),
0xC90937: (3, 2,),
0xC909AE: (3, 2,),
0xC738E2: (2, 3,),
0xC04604: (3, 1,),
0xC2CAD9: (2, 2,),
0xC2CAE0: (2, 2,),
0xC2CB2B: (2, 2,),
0xC2CB32: (2, 2,),
0xC2CB70: (2, 2,),
0xC2CB79: (2, 2,),
0xC2CC25: (2, 2,),
0xC2CC2C: (2, 2,),
0xC2CC47: (2, 2,),
0xC2CC4E: (2, 2,),
0xC2CC8B: (2, 2,),
0xC2CC92: (2, 2,),
0xC2CCF8: (2, 2,),
0xC42B06: (2, 2,),
0xC43C07: (3, 1,),
0xC44739: (3, 1,),
0xC4487F: (3, 1,),
0xC44927: (3, 1,),
0xC4497B: (4,),
0xC44A03: (3, 1,),
0xC44BB9: (3, 1,),
0xC44C1B: (3, 1,),
0xC44D5E: (4,),
0xC44DC0: (3, 1,),
0xC451E2: (3, 1,),
0xC452E3: (3, 1,),
0xC77104: (4,),
0xC77791: (4,),
0xC027B4: (3,),
0xC027D8: (3,),
0xC02ABA: (3,),
0xC02C2D: (3,),
0xC02EC7: (3,),
0xC047B2: (3,),
0xC04835: (3,),
0xC32296: (3,),
0xC3230B: (3,),
0xC42777: (3,),
0xC429DA: (3,),
0xC42A12: (3,),
0xC42A6D: (3,),
0xC42DA1: (3,),
0xC42E19: (3,),
0xC42E53: (3,),
0xC42EA9: (3,),
0xC42ED6: (3,),
0xC42F5E: (3,),
0xC42FEB: (3,),
0xC43041: (3,),
0xC4307F: (3,),
0xC43115: (3,),
0xC43154: (3,),
0xC43193: (3,),
0xC431D7: (3,),
0xC43231: (3,),
0xC4326F: (3,),
0xC43305: (3,),
0xC43337: (3,),
0xC43376: (3,),
0xC433BA: (3,),
0xC43414: (3,),
0xC43468: (3,),
0xC4350C: (3,),
0xC4355D: (3,),
0xC435A2: (3,),
0xC43644: (3,),
0xC43672: (3,),
0xC4371F: (3,),
0xC437C1: (3,),
0xC43808: (3,),
0xC43859: (3,),
0xC438FB: (3,),
0xC43929: (3,),
0xC439A0: (3,),
0xC43A42: (3,),
0xC43AFA: (3,),
0xC43B5F: (3,),
0xC43BA6: (3,),
0xC43F87: (3,),
0xC446A4: (3,),
0xC446D6: (3,),
0xC44708: (3,),
0xC456BC: (3,),
0xC45796: (3,),
0xC457C1: (3,),
0xC459E4: (3,),
0xC45A16: (3,),
0xC45A4A: (3,),
0xC45A7C: (3,),
0xC46951: (3,),
0xC46987: (3,),
0xC46AFD: (3,),
0xC773FE: (3,),
0xC77470: (3,),
0xC774AA: (3,),
0xC774E4: (3,),
0xC77843: (3,),
0xC77851: (3,),
0xC7785F: (3,),
0xC7786D: (3,),
0xC7787B: (3,),
0xC77889: (3,),
0xC77897: (3,),
0xC778A5: (3,),
0xC778B3: (3,),
0xC1A867: (2,),
0xC1A8D4: (2,),
0xC1A92E: (2,),
0xC1A944: (2,),
0xC1A988: (2,),
0xC1A9D3: (2,),
0xC1E32E: (2,),
0xC1E59C: (2,),
0xC2CA5B: (2,),
0xC2CA62: (2,),
0xC2CA98: (2,),
0xC2CC03: (2,),
0xC2CC0A: (2,),
0xC2CC69: (2,),
0xC2CC70: (2,),
0xC3226F: (2,),
0xC322E4: (2,),
0xC32359: (2,),
0xC42763: (2,),
0xC4297C: (2,),
0xC42F28: (2,),
0xC42FAE: (2,),
0xC43C52: (2,),
0xC44011: (2,),
0xC44045: (2,),
0xC44078: (2,),
0xC440B0: (2,),
0xC440F1: (2,),
0xC44129: (2,),
0xC44566: (2,),
0xC445F8: (2,),
0xC44824: (2,),
0xC44E32: (2,),
0xC44E68: (2,),
0xC44EA6: (2,),
0xC44FE2: (2,),
0xC4501B: (2,),
0xC452AA: (2,),
0xC455FD: (2,),
0xC456E7: (2,),
0xC4691B: (2,),
0xC46A64: (2,),
0xC46BED: (2,),
0xC46C28: (2,),
0xC73C42: (2,),
0xC737BE: (2,),
0xC2BE8A: (1,),
0xC2C240: (1,),
0xC2C573: (1,),
0xC2C739: (1,),
0xC2C766: (1,),
0xC2C766: (1,),
0xC2C791: (1,),
0xC2C7C6: (1,),
0xC32251: (1,),
0xC322C6: (1,),
0xC322C6: (1,),
0xC3233B: (1,),
0xC323B0: (1,),
0xC32436: (1,),
0xC32569: (1,),
0xC4274F: (1,),
0xC42B9F: (1,),
0xC42BCE: (1,),
0xC42BFD: (1,),
0xC42C2C: (1,),
0xC42CF4: (1,),
0xC42D43: (1,),
0xC42D72: (1,),
0xC441AE: (1,),
0xC44F00: (1,),
0xC44F55: (1,),
0xC44FA5: (1,),
0xC4508A: (1,),
0xC451A3: (1,),
0xC45345: (1,),
0xC45399: (1,),
0xC453F7: (1,),
0xC45458: (1,),
0xC4559E: (1,),
0xC458C8: (1,),
0xC45AB0: (1,),
0xC45ADC: (1,),
0xC45B1A: (1,),
0xC45B66: (1,),
0xC45B66: (1,),
0xC45BEB: (1,),
0xC45C5A: (1,),
0xC4624E: (1,),
0xC463AC: (1,),
0xC464C9: (1,),
0xC466EA: (1,),}
class DisassembleStateDQ3(DisassembleState):
    """Disassembler state specialized for DRAGON QUEST 3.

    Overrides the BRK/COP operand sizes used by this ROM and intercepts
    JSR so that calls to subroutines listed in SPECIAL_SUBROUTINES switch
    to DumpState, consuming the in-line argument bytes that follow the
    call site.
    """

    def _init_instructions(self):
        immed = get_addressing_mode('Immediate')
        implied = get_addressing_mode('Implied')

        class BRK(get_instruction(0x00)):
            # In this ROM, BRK is followed by a 3-byte immediate operand.
            operand_size = 3
            addressing_mode = immed

        class COP(get_instruction(0x02)):
            # COP carries a single byte here.
            operand_size = 1
            addressing_mode = implied

        class JSR(get_instruction(0x22)):
            @staticmethod
            def execute(state, context):
                # BUG FIX: this staticmethod has no ``self`` — the original
                # read ``self.current_operand`` and would raise NameError.
                # The operand lives on the running *state* object.
                addr = state.current_operand
                byte_count = SPECIAL_SUBROUTINES.get(addr)
                if byte_count:
                    # Known argument-carrying subroutine: dump the in-line
                    # bytes, then resume normal disassembly.
                    context.update(
                        next_state='DisassembleStateDQ3',
                        byte_count=byte_count,
                        record_count=1,)
                    return context, 'DumpState'
                return context, None

        return {0x00: BRK, 0x02: COP, 0x22: JSR}
if __name__ == '__main__':
    # Script entry point: disassemble the DRAGON QUEST 3 ROM using the
    # specialized state machine defined above.
    disassemble('DRAGONQUEST3',
                [DisassembleStateDQ3, DumpState],
                'DisassembleStateDQ3')
from ..snescpu.disasm import disassemble
from ..snescpu.instructions import get_instruction
from ..snescpu.states import (DisassembleState, DumpState)
# A dictionary of subroutines with following bytes as arguments.
# key: the address of subroutine
# value: the corresponding byte-partition for DumpState.byte_count
SPECIAL_SUBROUTINES = {
0xC90566: (1, 2, 3, 2, 3,),
0xC90572: (1, 2, 3, 2, 3,),
0xC9062D: (1, 2, 3, 2, 3,),
0xC77808: (2, 2, 2, 2,),
0xC903E2: (1, 2, 3, 2,),
0xC903EE: (1, 2, 3, 2,),
0xC90501: (1, 2, 3, 1,),
0xC9050D: (1, 2, 3, 1,),
0xC46F9B: (1, 2, 3,),
0xC90789: (3, 3,),
0xC907CC: (3, 3,),
0xC447D3: (3, 2,),
0xC44A72: (3, 2,),
0xC691AD: (3, 2,),
0xC69234: (3, 2,),
0xC6928E: (3, 2,),
0xC908F0: (3, 2,),
0xC90937: (3, 2,),
0xC909AE: (3, 2,),
0xC738E2: (2, 3,),
0xC04604: (3, 1,),
0xC2CAD9: (2, 2,),
0xC2CAE0: (2, 2,),
0xC2CB2B: (2, 2,),
0xC2CB32: (2, 2,),
0xC2CB70: (2, 2,),
0xC2CB79: (2, 2,),
0xC2CC25: (2, 2,),
0xC2CC2C: (2, 2,),
0xC2CC47: (2, 2,),
0xC2CC4E: (2, 2,),
0xC2CC8B: (2, 2,),
0xC2CC92: (2, 2,),
0xC2CCF8: (2, 2,),
0xC42B06: (2, 2,),
0xC43C07: (3, 1,),
0xC44739: (3, 1,),
0xC4487F: (3, 1,),
0xC44927: (3, 1,),
0xC4497B: (4,),
0xC44A03: (3, 1,),
0xC44BB9: (3, 1,),
0xC44C1B: (3, 1,),
0xC44D5E: (4,),
0xC44DC0: (3, 1,),
0xC451E2: (3, 1,),
0xC452E3: (3, 1,),
0xC77104: (4,),
0xC77791: (4,),
0xC027B4: (3,),
0xC027D8: (3,),
0xC02ABA: (3,),
0xC02C2D: (3,),
0xC02EC7: (3,),
0xC047B2: (3,),
0xC04835: (3,),
0xC32296: (3,),
0xC3230B: (3,),
0xC42777: (3,),
0xC429DA: (3,),
0xC42A12: (3,),
0xC42A6D: (3,),
0xC42DA1: (3,),
0xC42E19: (3,),
0xC42E53: (3,),
0xC42EA9: (3,),
0xC42ED6: (3,),
0xC42F5E: (3,),
0xC42FEB: (3,),
0xC43041: (3,),
0xC4307F: (3,),
0xC43115: (3,),
0xC43154: (3,),
0xC43193: (3,),
0xC431D7: (3,),
0xC43231: (3,),
0xC4326F: (3,),
0xC43305: (3,),
0xC43337: (3,),
0xC43376: (3,),
0xC433BA: (3,),
0xC43414: (3,),
0xC43468: (3,),
0xC4350C: (3,),
0xC4355D: (3,),
0xC435A2: (3,),
0xC43644: (3,),
0xC43672: (3,),
0xC4371F: (3,),
0xC437C1: (3,),
0xC43808: (3,),
0xC43859: (3,),
0xC438FB: (3,),
0xC43929: (3,),
0xC439A0: (3,),
0xC43A42: (3,),
0xC43AFA: (3,),
0xC43B5F: (3,),
0xC43BA6: (3,),
0xC43F87: (3,),
0xC446A4: (3,),
0xC446D6: (3,),
0xC44708: (3,),
0xC456BC: (3,),
0xC45796: (3,),
0xC457C1: (3,),
0xC459E4: (3,),
0xC45A16: (3,),
0xC45A4A: (3,),
0xC45A7C: (3,),
0xC46951: (3,),
0xC46987: (3,),
0xC46AFD: (3,),
0xC773FE: (3,),
0xC77470: (3,),
0xC774AA: (3,),
0xC774E4: (3,),
0xC77843: (3,),
0xC77851: (3,),
0xC7785F: (3,),
0xC7786D: (3,),
0xC7787B: (3,),
0xC77889: (3,),
0xC77897: (3,),
0xC778A5: (3,),
0xC778B3: (3,),
0xC1A867: (2,),
0xC1A8D4: (2,),
0xC1A92E: (2,),
0xC1A944: (2,),
0xC1A988: (2,),
0xC1A9D3: (2,),
0xC1E32E: (2,),
0xC1E59C: (2,),
0xC2CA5B: (2,),
0xC2CA62: (2,),
0xC2CA98: (2,),
0xC2CC03: (2,),
0xC2CC0A: (2,),
0xC2CC69: (2,),
0xC2CC70: (2,),
0xC3226F: (2,),
0xC322E4: (2,),
0xC32359: (2,),
0xC42763: (2,),
0xC4297C: (2,),
0xC42F28: (2,),
0xC42FAE: (2,),
0xC43C52: (2,),
0xC44011: (2,),
0xC44045: (2,),
0xC44078: (2,),
0xC440B0: (2,),
0xC440F1: (2,),
0xC44129: (2,),
0xC44566: (2,),
0xC445F8: (2,),
0xC44824: (2,),
0xC44E32: (2,),
0xC44E68: (2,),
0xC44EA6: (2,),
0xC44FE2: (2,),
0xC4501B: (2,),
0xC452AA: (2,),
0xC455FD: (2,),
0xC456E7: (2,),
0xC4691B: (2,),
0xC46A64: (2,),
0xC46BED: (2,),
0xC46C28: (2,),
0xC73C42: (2,),
0xC737BE: (2,),
0xC2BE8A: (1,),
0xC2C240: (1,),
0xC2C573: (1,),
0xC2C739: (1,),
0xC2C766: (1,),
0xC2C766: (1,),
0xC2C791: (1,),
0xC2C7C6: (1,),
0xC32251: (1,),
0xC322C6: (1,),
0xC322C6: (1,),
0xC3233B: (1,),
0xC323B0: (1,),
0xC32436: (1,),
0xC32569: (1,),
0xC4274F: (1,),
0xC42B9F: (1,),
0xC42BCE: (1,),
0xC42BFD: (1,),
0xC42C2C: (1,),
0xC42CF4: (1,),
0xC42D43: (1,),
0xC42D72: (1,),
0xC441AE: (1,),
0xC44F00: (1,),
0xC44F55: (1,),
0xC44FA5: (1,),
0xC4508A: (1,),
0xC451A3: (1,),
0xC45345: (1,),
0xC45399: (1,),
0xC453F7: (1,),
0xC45458: (1,),
0xC4559E: (1,),
0xC458C8: (1,),
0xC45AB0: (1,),
0xC45ADC: (1,),
0xC45B1A: (1,),
0xC45B66: (1,),
0xC45B66: (1,),
0xC45BEB: (1,),
0xC45C5A: (1,),
0xC4624E: (1,),
0xC463AC: (1,),
0xC464C9: (1,),
0xC466EA: (1,),}
class DisassembleStateDQ3(DisassembleState):
    """Disassembler state specialized for DRAGON QUEST 3.

    Overrides the BRK/COP operand sizes used by this ROM and intercepts
    JSR so that calls to subroutines listed in SPECIAL_SUBROUTINES switch
    to DumpState, consuming the in-line argument bytes that follow the
    call site.
    """

    def _init_instructions(self):
        immed = get_addressing_mode('Immediate')
        implied = get_addressing_mode('Implied')

        class BRK(get_instruction(0x00)):
            # In this ROM, BRK is followed by a 3-byte immediate operand.
            operand_size = 3
            addressing_mode = immed

        class COP(get_instruction(0x02)):
            # COP carries a single byte here.
            operand_size = 1
            addressing_mode = implied

        class JSR(get_instruction(0x22)):
            @staticmethod
            def execute(state, context):
                # BUG FIX: this staticmethod has no ``self`` — the original
                # read ``self.current_operand`` and would raise NameError.
                # The operand lives on the running *state* object.
                addr = state.current_operand
                byte_count = SPECIAL_SUBROUTINES.get(addr)
                if byte_count:
                    # Known argument-carrying subroutine: dump the in-line
                    # bytes, then resume normal disassembly.
                    context.update(
                        next_state='DisassembleStateDQ3',
                        byte_count=byte_count,
                        record_count=1,)
                    return context, 'DumpState'
                return context, None

        return {0x00: BRK, 0x02: COP, 0x22: JSR}
if __name__ == '__main__':
disassemble('DRAGONQUEST3',
[DisassembleStateDQ3, DumpState],
'DisassembleStateDQ3') | 0.333612 | 0.552479 |
import copy
import os
import pytest
import salt.utils.files
from tests.support.mock import patch
def test_safe_rm():
    """safe_rm() must delegate file removal to os.remove."""
    with patch("os.remove") as mocked_remove:
        salt.utils.files.safe_rm("dummy_tgt")
    assert mocked_remove.called
def test_safe_rm_exceptions(tmp_path):
    """safe_rm() must swallow the error for a missing file and return None."""
    missing = str(tmp_path / "no_way_this_is_a_file_nope.sh")
    assert salt.utils.files.safe_rm(missing) is None
def test_safe_walk_symlink_recursion(tmp_path):
    """safe_walk must follow symlinks without looping forever.

    Builds a cycle — ``root -> foo`` and ``foo/bar/baz -> ../..`` (back to
    tmp_path) — then checks the walk visits each directory exactly once
    and terminates instead of descending through the cycle again.
    """
    # st_ino == 0 means the filesystem exposes no inode numbers —
    # presumably what safe_walk uses to detect already-visited
    # directories; TODO confirm against the implementation.
    if tmp_path.stat().st_ino == 0:
        pytest.xfail(reason="inodes not supported in {}".format(tmp_path))
    tmp_path = str(tmp_path)
    os.mkdir(os.path.join(tmp_path, "fax"))
    os.makedirs(os.path.join(tmp_path, "foo", "bar"))
    # baz points two levels up, back to tmp_path, creating the cycle.
    os.symlink(os.path.join("..", ".."), os.path.join(tmp_path, "foo", "bar", "baz"))
    os.symlink("foo", os.path.join(tmp_path, "root"))
    expected = [
        (os.path.join(tmp_path, "root"), ["bar"], []),
        (os.path.join(tmp_path, "root", "bar"), ["baz"], []),
        (os.path.join(tmp_path, "root", "bar", "baz"), ["fax", "foo", "root"], []),
        (os.path.join(tmp_path, "root", "bar", "baz", "fax"), [], []),
    ]
    paths = []
    for root, dirs, names in salt.utils.files.safe_walk(os.path.join(tmp_path, "root")):
        paths.append((root, sorted(dirs), names))
    assert paths == expected
def test_fopen_with_disallowed_fds():
    """
    This is safe to have as a unit test since we aren't going to actually
    try to read or write. We want to ensure that fopen() raises a
    TypeError for booleans and for fds 0/1/2: Python 3's open() builtin
    would treat them as file descriptor numbers and open
    stdin/stdout/stderr, which must never happen mid-run.
    """
    for invalid_fn in (False, True, 0, 1, 2):
        # pytest.raises fails the test if no TypeError is raised; if the
        # open somehow succeeded, the inner ``with`` still closes the
        # handle before the failure is reported, so nothing leaks.
        with pytest.raises(TypeError):
            with salt.utils.files.fopen(invalid_fn):
                pass
def _create_temp_structure(temp_directory, structure):
    """Materialize ``structure`` ({folder: {filename: content}}) on disk."""
    for folder_name, folder_files in structure.items():
        folder_path = os.path.join(temp_directory, folder_name)
        os.makedirs(folder_path)
        for file_name, file_content in folder_files.items():
            file_path = os.path.join(folder_path, file_name)
            with salt.utils.files.fopen(file_path, "w+") as handle:
                handle.write(file_content)
def _validate_folder_structure_and_contents(target_directory, desired_structure):
    """Assert every file in ``desired_structure`` exists with its content."""
    for folder_name, folder_files in desired_structure.items():
        for file_name, expected_content in folder_files.items():
            file_path = os.path.join(target_directory, folder_name, file_name)
            with salt.utils.files.fopen(file_path) as handle:
                assert handle.read().strip() == expected_content
def test_recursive_copy(tmp_path):
    """recursive_copy overlays src onto dest, keeping unrelated dest files."""
    src = str(tmp_path / "src")
    dest = str(tmp_path / "dest")
    src_structure = {
        "foo": {"foofile.txt": "fooSTRUCTURE"},
        "bar": {"barfile.txt": "barSTRUCTURE"},
    }
    dest_structure = {
        "foo": {"foo.txt": "fooTARGET_STRUCTURE"},
        "baz": {"baz.txt": "bazTARGET_STRUCTURE"},
    }
    # Create the file structures in both src and dest dirs
    _create_temp_structure(src, src_structure)
    _create_temp_structure(dest, dest_structure)
    # Perform the recursive copy
    salt.utils.files.recursive_copy(src, dest)
    # After the copy, dest should hold its original entries plus everything
    # from src (src entries win on collision).
    desired_structure = {**dest_structure, **src_structure}
    _validate_folder_structure_and_contents(dest, desired_structure)
@pytest.mark.skip_unless_on_windows
def test_case_sensitive_filesystem_win():
    """On Windows the filesystem should report as case-insensitive."""
    assert salt.utils.files.case_insensitive_filesystem() is True
@pytest.mark.skip_unless_on_linux
def test_case_sensitive_filesystem_lin():
    """On Linux the filesystem should report as case-sensitive."""
    assert salt.utils.files.case_insensitive_filesystem() is False
@pytest.mark.skip_unless_on_darwin
def test_case_sensitive_filesystem_dar():
    """
    Test case insensitivity on Darwin.
    """
    # Default macOS volumes are case-insensitive, hence True is expected.
    result = salt.utils.files.case_insensitive_filesystem()
    assert result is True
import os
import pytest
import salt.utils.files
from tests.support.mock import patch
def test_safe_rm():
with patch("os.remove") as os_remove_mock:
salt.utils.files.safe_rm("dummy_tgt")
assert os_remove_mock.called is True
def test_safe_rm_exceptions(tmp_path):
    """safe_rm must swallow the error (returning None) for a missing file."""
    missing = str(tmp_path / "no_way_this_is_a_file_nope.sh")
    assert salt.utils.files.safe_rm(missing) is None
def test_safe_walk_symlink_recursion(tmp_path):
    """safe_walk must follow symlinks but stop once a link cycles back to an
    already-visited directory instead of recursing forever."""
    # Loop detection is inode based, so bail out on filesystems that do not
    # report real inode numbers.
    if tmp_path.stat().st_ino == 0:
        pytest.xfail(reason="inodes not supported in {}".format(tmp_path))
    tmp_path = str(tmp_path)
    # Layout: fax/, foo/bar/, foo/bar/baz -> ../.. (cycles back to tmp_path),
    # root -> foo (the walk's entry point goes through a symlink).
    os.mkdir(os.path.join(tmp_path, "fax"))
    os.makedirs(os.path.join(tmp_path, "foo", "bar"))
    os.symlink(os.path.join("..", ".."), os.path.join(tmp_path, "foo", "bar", "baz"))
    os.symlink("foo", os.path.join(tmp_path, "root"))
    # "foo" and "root" are *listed* under baz but never descended into (no
    # tuples for them below) — that non-descent is the recursion guard
    # under test; only the fresh directory "fax" is walked.
    expected = [
        (os.path.join(tmp_path, "root"), ["bar"], []),
        (os.path.join(tmp_path, "root", "bar"), ["baz"], []),
        (os.path.join(tmp_path, "root", "bar", "baz"), ["fax", "foo", "root"], []),
        (os.path.join(tmp_path, "root", "bar", "baz", "fax"), [], []),
    ]
    paths = []
    for root, dirs, names in salt.utils.files.safe_walk(os.path.join(tmp_path, "root")):
        # Sort dirs so the comparison is deterministic across filesystems.
        paths.append((root, sorted(dirs), names))
    assert paths == expected
def test_fopen_with_disallowed_fds():
    """
    This is safe to have as a unit test since we aren't going to actually
    try to read or write. We want to ensure that we are raising a
    TypeError. Python 3's open() builtin will treat the booleans as file
    descriptor numbers and try to open stdin/stdout. We also want to test
    fd 2 which is stderr.
    """
    for invalid_fn in (False, True, 0, 1, 2):
        try:
            with salt.utils.files.fopen(invalid_fn):
                pass
        except TypeError:
            # Expected. Using try/except (rather than an assertRaises
            # helper) guarantees that a filehandle opened by mistake does
            # not stay open.
            continue
        # We probably won't even get this far if we actually opened
        # stdin/stdout as a file descriptor, since closing
        # stdin/stdout/stderr mid-run would likely kill the suite.
        pytest.fail(
            "fopen() should have been prevented from opening a file "
            "using {} as the filename".format(invalid_fn)
        )
def _create_temp_structure(temp_directory, structure):
    """Materialise ``structure`` ({folder: {filename: content}}) on disk
    under ``temp_directory``."""
    for subdir, files_map in structure.items():
        target_dir = os.path.join(temp_directory, subdir)
        os.makedirs(target_dir)
        for filename, body in files_map.items():
            file_path = os.path.join(target_dir, filename)
            with salt.utils.files.fopen(file_path, "w+") as handle:
                handle.write(body)
def _validate_folder_structure_and_contents(target_directory, desired_structure):
    """Check that ``target_directory`` contains exactly the files described
    by ``desired_structure`` ({folder: {filename: content}})."""
    for subdir, files_map in desired_structure.items():
        for filename, wanted in files_map.items():
            full_path = os.path.join(target_directory, subdir, filename)
            with salt.utils.files.fopen(full_path) as handle:
                assert handle.read().strip() == wanted
def test_recursive_copy(tmp_path):
    """Copying src over dest recursively yields the union of both trees."""
    src = str(tmp_path / "src")
    dest = str(tmp_path / "dest")
    src_structure = {
        "foo": {"foofile.txt": "fooSTRUCTURE"},
        "bar": {"barfile.txt": "barSTRUCTURE"},
    }
    dest_structure = {
        "foo": {"foo.txt": "fooTARGET_STRUCTURE"},
        "baz": {"baz.txt": "bazTARGET_STRUCTURE"},
    }
    # Build both trees on disk, then copy src into dest.
    _create_temp_structure(src, src_structure)
    _create_temp_structure(dest, dest_structure)
    salt.utils.files.recursive_copy(src, dest)
    # dest keeps its own entries and gains everything from src.
    merged = {**dest_structure, **src_structure}
    _validate_folder_structure_and_contents(dest, merged)
@pytest.mark.skip_unless_on_windows
def test_case_sensitive_filesystem_win():
    """
    Windows filesystems are expected to be case-insensitive.
    """
    assert salt.utils.files.case_insensitive_filesystem() is True
@pytest.mark.skip_unless_on_linux
def test_case_sensitive_filesystem_lin():
    """
    Linux filesystems are expected to be case-sensitive.
    """
    assert salt.utils.files.case_insensitive_filesystem() is False
@pytest.mark.skip_unless_on_darwin
def test_case_sensitive_filesystem_dar():
    """
    Test case insensitivity on Darwin.
    """
    # macOS filesystems are case-insensitive by default.
    result = salt.utils.files.case_insensitive_filesystem()
    assert result is True | 0.384565 | 0.424591 |
import io
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from pytoshop import codecs
from pytoshop import enums
@pytest.mark.parametrize("depth", (8, 16))
def test_zip_with_prediction(depth):
    """Random data must survive a zip-with-prediction round trip."""
    np.random.seed(0)
    dtype = codecs.color_depth_dtype_map[depth]
    image = np.random.randint(0, (2**depth) - 1, size=(255, 256), dtype=dtype)
    buf = io.BytesIO()
    codecs.compress_image(
        buf, image, enums.Compression.zip_prediction, (255, 256), 1, depth, 1)
    restored = codecs.decompress_image(
        buf.getvalue(), enums.Compression.zip_prediction, (255, 256), depth, 1)
    assert_array_equal(image, restored)
@pytest.mark.parametrize("depth", (1, 8, 16, 32))
def test_zip(depth):
    """Random data must survive a zip compress/decompress round trip."""
    np.random.seed(0)
    dtype = codecs.color_depth_dtype_map[depth]
    image = np.random.randint(0, (2**depth) - 1, size=(255, 256), dtype=dtype)
    buf = io.BytesIO()
    codecs.compress_image(
        buf, image, enums.Compression.zip, (255, 256), 1, depth, 1)
    restored = codecs.decompress_image(
        buf.getvalue(), enums.Compression.zip, (255, 256), depth, 1)
    assert_array_equal(image, restored)
@pytest.mark.parametrize("depth", (8, 16, 32))
@pytest.mark.parametrize("version", (1, 2))
def test_rle(depth, version):
    """Random data must survive an RLE round trip for both PSD versions."""
    np.random.seed(0)
    dtype = codecs.color_depth_dtype_map[depth]
    image = np.random.randint(0, (2**depth) - 1, size=(255, 256), dtype=dtype)
    buf = io.BytesIO()
    codecs.compress_image(
        buf, image, enums.Compression.rle, (255, 256), 1, depth, version)
    restored = codecs.decompress_image(
        buf.getvalue(), enums.Compression.rle, (255, 256), depth, version)
    assert_array_equal(image, restored)
@pytest.mark.parametrize("depth", (1, 8, 16, 32))
def test_raw_constant(depth):
    """A constant plane, passed to the codec as a bare scalar, must
    decompress to a full array filled with that value."""
    value = 1 if depth == 1 else 42
    dtype = codecs.color_depth_dtype_map[depth]
    expected = np.ones((255, 256), dtype=dtype) * value
    buf = io.BytesIO()
    # NOTE: the scalar `value` (not an array) is deliberately passed in —
    # presumably this exercises the constant-image path of compress_image.
    codecs.compress_image(
        buf, value, enums.Compression.raw, (255, 256), 1, depth, 1)
    restored = codecs.decompress_image(
        buf.getvalue(), enums.Compression.raw, (255, 256), depth, 1)
    assert_array_equal(expected, restored)
@pytest.mark.parametrize("depth", (8, 16))
def test_zip_with_prediction_constant(depth):
    """A constant plane given as a scalar must round-trip through
    zip-with-prediction to a full array of that value."""
    dtype = codecs.color_depth_dtype_map[depth]
    expected = np.ones((255, 256), dtype=dtype) * 42
    buf = io.BytesIO()
    codecs.compress_image(
        buf, 42, enums.Compression.zip_prediction, (255, 256), 1, depth, 1)
    restored = codecs.decompress_image(
        buf.getvalue(), enums.Compression.zip_prediction, (255, 256), depth, 1)
    assert_array_equal(expected, restored)
@pytest.mark.parametrize("depth", (1, 8, 16, 32))
def test_zip_constant(depth):
    """A constant plane given as a scalar must round-trip through zip."""
    value = 1 if depth == 1 else 42
    dtype = codecs.color_depth_dtype_map[depth]
    expected = np.ones((255, 256), dtype=dtype) * value
    buf = io.BytesIO()
    codecs.compress_image(
        buf, value, enums.Compression.zip, (255, 256), 1, depth, 1)
    restored = codecs.decompress_image(
        buf.getvalue(), enums.Compression.zip, (255, 256), depth, 1)
    assert_array_equal(expected, restored)
@pytest.mark.parametrize("depth", (8, 16, 32))
@pytest.mark.parametrize("version", (1, 2))
def test_rle_constant(depth, version):
    """A constant plane, passed as a bare scalar, must round-trip through
    RLE compression to a full array of that value."""
    dtype = codecs.color_depth_dtype_map[depth]
    x = np.ones((255, 256), dtype=dtype) * 42
    fd = io.BytesIO()
    # NOTE: the scalar 42 (not the array x) is passed in — presumably this
    # exercises the codec's constant-image path; confirm against codecs API.
    codecs.compress_image(
        fd, 42, enums.Compression.rle, (255, 256), 1, depth, version)
    y = codecs.decompress_image(
        fd.getvalue(), enums.Compression.rle, (255, 256), depth, version)
    assert_array_equal(x, y) | tests/test_codecs.py |
import io
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from pytoshop import codecs
from pytoshop import enums
@pytest.mark.parametrize("depth", (8, 16))
def test_zip_with_prediction(depth):
np.random.seed(0)
dtype = codecs.color_depth_dtype_map[depth]
x = np.random.randint(0, (2**depth) - 1, size=(255, 256), dtype=dtype)
fd = io.BytesIO()
codecs.compress_image(
fd, x, enums.Compression.zip_prediction, (255, 256), 1, depth, 1)
y = codecs.decompress_image(
fd.getvalue(), enums.Compression.zip_prediction, (255, 256), depth, 1)
assert_array_equal(x, y)
@pytest.mark.parametrize("depth", (1, 8, 16, 32))
def test_zip(depth):
np.random.seed(0)
dtype = codecs.color_depth_dtype_map[depth]
x = np.random.randint(0, (2**depth) - 1, size=(255, 256), dtype=dtype)
fd = io.BytesIO()
codecs.compress_image(
fd, x, enums.Compression.zip, (255, 256), 1, depth, 1)
y = codecs.decompress_image(
fd.getvalue(), enums.Compression.zip, (255, 256), depth, 1)
assert_array_equal(x, y)
@pytest.mark.parametrize("depth", (8, 16, 32))
@pytest.mark.parametrize("version", (1, 2))
def test_rle(depth, version):
np.random.seed(0)
dtype = codecs.color_depth_dtype_map[depth]
x = np.random.randint(0, (2**depth) - 1, size=(255, 256), dtype=dtype)
fd = io.BytesIO()
codecs.compress_image(
fd, x, enums.Compression.rle, (255, 256), 1, depth, version)
y = codecs.decompress_image(
fd.getvalue(), enums.Compression.rle, (255, 256), depth, version)
assert_array_equal(x, y)
@pytest.mark.parametrize("depth", (1, 8, 16, 32))
def test_raw_constant(depth):
if depth == 1:
value = 1
else:
value = 42
dtype = codecs.color_depth_dtype_map[depth]
x = np.ones((255, 256), dtype=dtype) * value
fd = io.BytesIO()
codecs.compress_image(
fd, value, enums.Compression.raw, (255, 256), 1, depth, 1)
y = codecs.decompress_image(
fd.getvalue(), enums.Compression.raw, (255, 256), depth, 1)
assert_array_equal(x, y)
@pytest.mark.parametrize("depth", (8, 16))
def test_zip_with_prediction_constant(depth):
dtype = codecs.color_depth_dtype_map[depth]
x = np.ones((255, 256), dtype=dtype) * 42
fd = io.BytesIO()
codecs.compress_image(
fd, 42, enums.Compression.zip_prediction, (255, 256), 1, depth, 1)
y = codecs.decompress_image(
fd.getvalue(), enums.Compression.zip_prediction, (255, 256), depth, 1)
assert_array_equal(x, y)
@pytest.mark.parametrize("depth", (1, 8, 16, 32))
def test_zip_constant(depth):
if depth == 1:
value = 1
else:
value = 42
dtype = codecs.color_depth_dtype_map[depth]
x = np.ones((255, 256), dtype=dtype) * value
fd = io.BytesIO()
codecs.compress_image(
fd, value, enums.Compression.zip, (255, 256), 1, depth, 1)
y = codecs.decompress_image(
fd.getvalue(), enums.Compression.zip, (255, 256), depth, 1)
assert_array_equal(x, y)
@pytest.mark.parametrize("depth", (8, 16, 32))
@pytest.mark.parametrize("version", (1, 2))
def test_rle_constant(depth, version):
dtype = codecs.color_depth_dtype_map[depth]
x = np.ones((255, 256), dtype=dtype) * 42
fd = io.BytesIO()
codecs.compress_image(
fd, 42, enums.Compression.rle, (255, 256), 1, depth, version)
y = codecs.decompress_image(
fd.getvalue(), enums.Compression.rle, (255, 256), depth, version)
assert_array_equal(x, y) | 0.598195 | 0.742865 |
import numpy as np
from ._base_network import _baseNetwork
class SoftmaxRegression(_baseNetwork):
    def __init__(self, input_size=28*28, num_classes=10):
        '''
        A single layer softmax regression. The network is composed by:
        a linear layer without bias => (optional ReLU activation) => Softmax
        :param input_size: the input dimension
        :param num_classes: the number of classes in total
        '''
        super().__init__(input_size, num_classes)
        self._weight_init()
        # Cached copies of the most recent forward batch (set in forward()).
        self.X = None
        self.y = None
    def _weight_init(self):
        '''
        initialize weights of the single layer regression network. No bias term included.
        :return: None; self.weights is filled based on method
        - W1: The weight matrix of the linear layer of shape (num_features, hidden_size)
        '''
        # Fixed seed keeps the initial weights reproducible across runs.
        np.random.seed(1024)
        self.weights['W1'] = 0.001 * np.random.randn(self.input_size, self.num_classes)
        self.gradients['W1'] = np.zeros((self.input_size, self.num_classes))
    def forward(self, X, y, mode='train'):
        '''
        Compute loss and gradients using softmax with vectorization.
        :param X: a batch of image (N, 28x28)
        :param y: labels of images in the batch (N,)
        :return:
            loss: the loss associated with the batch
            accuracy: the accuracy of the batch
        '''
        loss = None
        gradient = None  # NOTE(review): unused local, never assigned below
        accuracy = None
        # 1) Implement the forward process and compute the Cross-Entropy loss
        # 2) Compute the gradient with respect to the loss
        self.X = np.array(X)
        self.y = np.array(y)
        X = self.X
        y = self.y
        N = X.shape[0]  # batch size
        # Linear layer (no bias): class scores of shape (N, num_classes).
        z = X @ self.weights['W1']
        # One-hot encode the labels for the gradient computation below.
        y_hot = np.zeros((len(y), self.num_classes))
        y_hot[np.arange(len(y)), y] = 1
        # Optional ReLU activation, then softmax to class probabilities.
        y_hat = self.ReLU(z)
        y_til = self.softmax(y_hat)
        loss = self.cross_entropy_loss(y_til,y)
        accuracy = self.compute_accuracy(y_til,y)
        if mode != 'train':
            return loss, accuracy
        # Backward pass by the chain rule:
        # dL/dW1 = X^T . (ReLU'(z) * (softmax_probs - onehot)) / N
        self.gradients['W1'] = (1/N) * np.dot( X.T, self.ReLU_dev(z) * (y_til - y_hot))
        return loss, accuracy | hw1 Two-layer-network/models/softmax_regression.py | import numpy as np
from ._base_network import _baseNetwork
class SoftmaxRegression(_baseNetwork):
def __init__(self, input_size=28*28, num_classes=10):
'''
A single layer softmax regression. The network is composed by:
a linear layer without bias => (optional ReLU activation) => Softmax
:param input_size: the input dimension
:param num_classes: the number of classes in total
'''
super().__init__(input_size, num_classes)
self._weight_init()
self.X = None
self.y = None
def _weight_init(self):
'''
initialize weights of the single layer regression network. No bias term included.
:return: None; self.weights is filled based on method
- W1: The weight matrix of the linear layer of shape (num_features, hidden_size)
'''
np.random.seed(1024)
self.weights['W1'] = 0.001 * np.random.randn(self.input_size, self.num_classes)
self.gradients['W1'] = np.zeros((self.input_size, self.num_classes))
def forward(self, X, y, mode='train'):
'''
Compute loss and gradients using softmax with vectorization.
:param X: a batch of image (N, 28x28)
:param y: labels of images in the batch (N,)
:return:
loss: the loss associated with the batch
accuracy: the accuracy of the batch
'''
loss = None
gradient = None
accuracy = None
# 1) Implement the forward process and compute the Cross-Entropy loss
# 2) Compute the gradient with respect to the loss
self.X = np.array(X)
self.y = np.array(y)
X = self.X
y = self.y
N = X.shape[0]
z = X @ self.weights['W1']
y_hot = np.zeros((len(y), self.num_classes))
y_hot[np.arange(len(y)), y] = 1
y_hat = self.ReLU(z)
y_til = self.softmax(y_hat)
loss = self.cross_entropy_loss(y_til,y)
accuracy = self.compute_accuracy(y_til,y)
if mode != 'train':
return loss, accuracy
# 1) Implement the backward process:
# 1) Compute gradients of each weight and bias by chain rule
# 2) Store the gradients in self.gradients
self.gradients['W1'] = (1/N) * np.dot( X.T, self.ReLU_dev(z) * (y_til - y_hot))
return loss, accuracy | 0.883488 | 0.498901 |
import numpy as np
import pandas as pd
import pytest
from pydicom import dcmread
from dicom_csv.spatial import (
get_orientation_matrix,
get_image_position_patient,
get_slice_locations,
get_image_plane,
Plane,
_get_slices_deltas,
get_pixel_spacing,
get_image_size,
order_series
)
@pytest.fixture
def image(tests_folder):
    """Metadata rows (DataFrame) of one MRI series from the test CSV."""
    df = pd.read_csv(tests_folder / 'spatial/mri_data.csv')
    # TODO: add more series for diversity
    SERIES = '1.2.840.113619.2.374.2807.4233243.16142.1527731842.74'
    return df.query('SeriesInstanceUID == @SERIES')
@pytest.fixture
def series(tests_folder, image):
    """Load the DICOM datasets referenced by the ``image`` fixture's rows.

    Bug fix: ``image`` was referenced without being declared as a fixture
    parameter, so the comprehension iterated over the fixture *function*
    object instead of the DataFrame and failed at runtime (consistent with
    ``test_order_series`` being marked skip).
    """
    return [
        dcmread(tests_folder / 'spatial' / row.PathToFolder / row.FileName)
        for _, row in image.iterrows()
    ]
def test_get_orientation_matrix(image):
    """The orientation matrix is 3x3, its first two rows match the stored
    direction cosines, and those rows are orthogonal."""
    om = get_orientation_matrix(image)
    expected_rows = np.array([
        [0.9882921127294, 0.03687270420588, 0.14805101688742],
        [-0.0437989943104, 0.99807987034582, 0.04379749431055],
    ])
    assert om.shape == (3, 3)
    assert np.allclose(om[:2, :], expected_rows, atol=1e-5)
    assert np.allclose(om[0, :] @ om[1, :], 0, atol=1e-5)
def test_get_image_position_patient(image):
    """One (x, y, z) position per slice in the 216-slice series."""
    positions = get_image_position_patient(image)
    assert positions.shape == (216, 3)
    # TODO: add values, e.g. positions[0] check
def test_get_slice_locations(image):
    """Computed slice locations must order the slices the same way the
    SliceLocation tag does."""
    tag_locations = image.SliceLocation.values
    computed = get_slice_locations(image)
    assert len(computed) == 216
    assert np.allclose(np.argsort(computed), np.argsort(tag_locations))
def test_get_image_plane(image):
    """The test series is an axial acquisition."""
    assert get_image_plane(image) == Plane.Axial
def test_get_slice_spacing(image):
    """216 slices yield 215 inter-slice deltas averaging 0.8 (mm)."""
    deltas = _get_slices_deltas(image)
    assert deltas.shape == (215,)
    assert np.allclose(deltas.mean(), 0.8)
def test_get_pixel_spacing(image):
    """In-plane pixel spacing is isotropic 0.4688 (mm)."""
    spacing = get_pixel_spacing(image)
    assert spacing.shape == (2,)
    assert np.allclose(spacing, [0.4688, 0.4688])
def test_get_image_size(image):
    """512x512 in-plane, 216 slices."""
    rows, columns, slices = get_image_size(image)
    assert (rows, columns, slices) == (512, 512, 216)
@pytest.mark.skip
def test_order_series(series):
    # Smoke test only; currently skipped and makes no assertions.
    series = order_series(series)
    pass | tests/test_spatial.py | import numpy as np
import pandas as pd
import pytest
from pydicom import dcmread
from dicom_csv.spatial import (
get_orientation_matrix,
get_image_position_patient,
get_slice_locations,
get_image_plane,
Plane,
_get_slices_deltas,
get_pixel_spacing,
get_image_size,
order_series
)
@pytest.fixture
def image(tests_folder):
df = pd.read_csv(tests_folder / 'spatial/mri_data.csv')
# TODO: add more series for diversity
SERIES = '1.2.840.113619.2.374.2807.4233243.16142.1527731842.74'
return df.query('SeriesInstanceUID == @SERIES')
@pytest.fixture
def series(tests_folder):
return [dcmread(tests_folder / 'spatial' / file.PathToFolder / file.FileName) for _, file in image.iterrows()]
def test_get_orientation_matrix(image):
om = get_orientation_matrix(image)
target = np.array([0.9882921127294, 0.03687270420588, 0.14805101688742,
-0.0437989943104, 0.99807987034582, 0.04379749431055]).reshape(2, 3)
assert om.shape == (3, 3)
assert np.allclose(om[:2, :], target, atol=1e-5)
assert np.allclose(om[0, :] @ om[1, :], 0, atol=1e-5)
def test_get_image_position_patient(image):
pos = get_image_position_patient(image)
assert pos.shape == (216, 3)
# TODO: add values, e.g. pos[0] check
def test_get_slice_locations(image):
test_slice_loc = image.SliceLocation.values
loc = get_slice_locations(image)
order_loc = np.argsort(loc)
order_test = np.argsort(test_slice_loc)
assert len(loc) == 216
assert np.allclose(order_loc, order_test)
def test_get_image_plane(image):
plane = get_image_plane(image)
assert plane == Plane.Axial
def test_get_slice_spacing(image):
spacings = _get_slices_deltas(image)
assert spacings.shape == (215,)
assert np.allclose(spacings.mean(), 0.8)
def test_get_pixel_spacing(image):
xy_spacings = get_pixel_spacing(image)
assert xy_spacings.shape == (2,)
assert np.allclose(xy_spacings, [0.4688, 0.4688])
def test_get_image_size(image):
rows, columns, slices = get_image_size(image)
assert (rows, columns, slices) == (512, 512, 216)
@pytest.mark.skip
def test_order_series(series):
series = order_series(series)
pass | 0.275519 | 0.541348 |
from numpy import loadtxt, degrees, arcsin, arctan2, sort, unique, ones, zeros_like, array
from mpl_toolkits.basemap import Basemap
import reverse_geocoder as rg
import randomcolor
def domino(lol):
    """Order a list of 2-element lists like domino stones.

    Returns ``(order, links)`` where ``order`` is a permutation of indices
    into ``lol`` such that consecutive stones share an element, and
    ``links`` are the shared elements along the chain (the trailing link is
    dropped; for a closed ring it equals the first).

    Greedy: always extends from the current open end. If no remaining stone
    connects, a new chain is started from the lowest unused index — the
    original implementation looped forever in that case (bug fix).
    """
    n = len(lol)
    order = [0]  # Greedy: start the chain with the first stone
    link = lol[0][-1]
    links = [lol[0][0], lol[0][1]]
    while len(order) < n:
        for i in (j for j in range(n) if j not in order):
            if link in lol[i]:  # stone i connects to the current end
                order.append(i)  # Save the id of the "stone"
                # The new open end is the stone's other element
                link = lol[i][0] if lol[i][0] != link else lol[i][1]
                links.append(link)
                break
        else:
            # No unused stone shares the current link: begin a new chain
            # instead of spinning forever on the same open end.
            i = next(j for j in range(n) if j not in order)
            order.append(i)
            links.append(lol[i][0])
            link = lol[i][-1]
            links.append(link)
    return order, links[:-1]
def getpatches(color,quadrature):
    """Build one polygon (list of (x, y, z) vertices) per quadrature node.

    Each patch is the hexagon-like cell around a node, assembled from the
    midpoints of edges to neighbouring nodes and the centroids of the
    node's attached triangles. ``color`` is only used for its length
    (the number of nodes to process).
    """
    xyz,neighbours,triangles = quadrature["xyz"], quadrature["neighbours"], quadrature["triangles"]
    nq = len(color)
    patches = []
    for center in range(nq):
        # For every triangle attached to this node, collect the pair of
        # *other* vertices; domino() then orders these pairs into a ring.
        lol = [] # list of lists
        for i in neighbours[center,:]:
            if i>-1:  # -1 marks "no triangle" padding in neighbours
                lol.append(list(sort(triangles[i,triangles[i,:] != center])))
        order,links = domino(lol)
        neighx = [xyz[j,0] for j in links]
        neighy = [xyz[j,1] for j in links]
        neighz = [xyz[j,2] for j in links]
        # Get the actual hexagon that surrounds a center point: walk the
        # ring alternating edge midpoints ((c+n)/2) and triangle centroids
        # ((c+n_i+n_{i+1})/3).
        x = []
        y = []
        z = []
        for i in range(len(order)):
            x.append((xyz[center,0]+neighx[i]) / 2)
            x.append((xyz[center,0]+neighx[i]+neighx[(i+1)%len(order)])/3)
            y.append((xyz[center,1]+neighy[i]) / 2)
            y.append((xyz[center,1]+neighy[i]+neighy[(i+1)%len(order)])/3)
            z.append((xyz[center,2]+neighz[i]) / 2)
            z.append((xyz[center,2]+neighz[i]+neighz[(i+1)%len(order)])/3)
        verts = [list(zip(x,y,z))]
        patches.append(verts[0])
    return patches
def getquadrature(nq):
    """Load a precomputed spherical quadrature with ``nq`` nodes from disk.

    Returns a dict with node coordinates ("xyz"), weights, triangle
    topology, lat/lon of each node, and per-node neighbour connectivity.
    """
    quadrature = {}
    quadrature["nq"] = nq
    quadrature["xyz"] = loadtxt(f"quadrature/{nq}/points.txt")
    quadrature["weights"] = loadtxt(f"quadrature/{nq}/weights.txt")
    quadrature["neighbours"] = loadtxt(f"quadrature/{nq}/neighbours.txt",dtype=int)-1 # julia starts at 1
    quadrature["triangles"] = loadtxt(f"quadrature/{nq}/triangles.txt",dtype=int)-1 # julia starts at 1
    # Also convert to latitute, longitude (unit radius assumed: z/1).
    quadrature["lat"] = degrees(arcsin(quadrature["xyz"][:,2]/1))
    quadrature["lon"] = degrees(arctan2(quadrature["xyz"][:,1], quadrature["xyz"][:,0]))
    # Compute connectivity between nodes: up to 6 neighbours per node,
    # with -100 padding for unused slots.
    connection = -100*ones((quadrature["nq"],6),dtype=int)
    for qp in range(quadrature["nq"]):
        attachedtriangles = quadrature["neighbours"][qp]
        attachedtriangles = attachedtriangles[attachedtriangles>-1] # drop padding entries
        lol = []
        for at in attachedtriangles:
            tmp = quadrature["triangles"][at]
            tmp = tmp[tmp != qp ]
            lol.append(list(tmp))
        # domino() orders the neighbouring node ids into a ring around qp.
        _,x = domino(lol)
        connection[qp,:len(x)] = x
    quadrature["connection"] = connection
    return quadrature
def get_land(quadrature):
    """Boolean array: True for quadrature nodes that fall on land."""
    bm = Basemap()
    flags = []
    for lat_pt, lon_pt in zip(quadrature["lat"], quadrature["lon"]):
        flags.append(bm.is_land(lon_pt, lat_pt))
    return array(flags)
def color_land(quadrature):
    """Map each node to 'tab:green' (land) or 'tab:blue' (ocean)."""
    return ["tab:green" if on_land else "tab:blue"
            for on_land in get_land(quadrature)]
def color_country(quadrature):
    """Assign one random dark colour per country; ocean nodes stay 'tab:blue'.

    Uses reverse_geocoder to resolve each node's country code.
    """
    coords = [(la, lo) for la, lo in zip(quadrature["lat"], quadrature["lon"])]
    results = rg.search(coords)  # default mode = 2
    countries = [entry["cc"] for entry in results]
    distinct = unique(countries)
    raco = randomcolor.RandomColor()
    # options: https://github.com/kevinwuhoo/randomcolor-py
    randomcolors = raco.generate(luminosity="dark", count=len(distinct))
    colordict = dict(zip(distinct, randomcolors))
    # Reuse the land/ocean colouring so ocean nodes stay "tab:blue".
    colorland = color_land(quadrature)
    return [colordict[country] if colorland[i] != "tab:blue" else "tab:blue"
            for i, country in enumerate(countries)]
def applyupdate(quadrature,rule,states):
    """One cellular-automaton step: each node's next state is computed by
    ``rule(state, neighbour_states)`` over the node's connectivity ring."""
    nextstate = zeros_like(states)
    for i,(state, neighbours) in enumerate(zip(states,quadrature["connection"])):
        idx = neighbours[neighbours>-1]  # drop the -100 padding entries
        stateneighbours = states[idx]
        nextstate[i] = rule(state,stateneighbours)
    return nextstate | helpers.py | from numpy import loadtxt, degrees, arcsin, arctan2, sort, unique, ones, zeros_like, array
from mpl_toolkits.basemap import Basemap
import reverse_geocoder as rg
import randomcolor
def domino(lol):
# Takes a list (length n) of lists (length 2)
# and returns a list of indices order,
# such that lol[order[i]] and lol[order[i+1]]
# have at least one element in common.
# If that is not possible, multiple
# domino chains will be created.
# This works in a greedy way.
n = len(lol)
order = [0] # Greedy
link = lol[0][-1]
links = [lol[0][0],lol[0][1]]
while len(order)<n:
for i in [j for j in range(n) if not j in order]:
if link in lol[i]: # They connect
order.append(i) # Save the id of the "stone"
link = lol[i][0] if not(lol[i][0]==link) else lol[i][1] # The new link is the other element
links.append(link)
break
return order,links[:-1]
def getpatches(color,quadrature):
xyz,neighbours,triangles = quadrature["xyz"], quadrature["neighbours"], quadrature["triangles"]
nq = len(color)
patches = []
for center in range(nq):
lol = [] # list of lists
for i in neighbours[center,:]:
if i>-1:
lol.append(list(sort(triangles[i,triangles[i,:] != center])))
order,links = domino(lol)
neighx = [xyz[j,0] for j in links]
neighy = [xyz[j,1] for j in links]
neighz = [xyz[j,2] for j in links]
# Get the actual hexagon that surrounds a center point
x = []
y = []
z = []
for i in range(len(order)):
x.append((xyz[center,0]+neighx[i]) / 2)
x.append((xyz[center,0]+neighx[i]+neighx[(i+1)%len(order)])/3)
y.append((xyz[center,1]+neighy[i]) / 2)
y.append((xyz[center,1]+neighy[i]+neighy[(i+1)%len(order)])/3)
z.append((xyz[center,2]+neighz[i]) / 2)
z.append((xyz[center,2]+neighz[i]+neighz[(i+1)%len(order)])/3)
verts = [list(zip(x,y,z))]
patches.append(verts[0])
return patches
def getquadrature(nq):
quadrature = {}
quadrature["nq"] = nq
quadrature["xyz"] = loadtxt(f"quadrature/{nq}/points.txt")
quadrature["weights"] = loadtxt(f"quadrature/{nq}/weights.txt")
quadrature["neighbours"] = loadtxt(f"quadrature/{nq}/neighbours.txt",dtype=int)-1 # julia starts at 1
quadrature["triangles"] = loadtxt(f"quadrature/{nq}/triangles.txt",dtype=int)-1 # julia starts at 1
# Also convert to latitute, longitude
quadrature["lat"] = degrees(arcsin(quadrature["xyz"][:,2]/1))
quadrature["lon"] = degrees(arctan2(quadrature["xyz"][:,1], quadrature["xyz"][:,0]))
# Compute connectivity between nodes
connection = -100*ones((quadrature["nq"],6),dtype=int)
for qp in range(quadrature["nq"]):
attachedtriangles = quadrature["neighbours"][qp]
attachedtriangles = attachedtriangles[attachedtriangles>-1] # drop
lol = []
for at in attachedtriangles:
tmp = quadrature["triangles"][at]
tmp = tmp[tmp != qp ]
lol.append(list(tmp))
_,x = domino(lol)
connection[qp,:len(x)] = x
quadrature["connection"] = connection
return quadrature
def get_land(quadrature):
bm = Basemap()
island = []
for i,(ypt, xpt) in enumerate(zip(quadrature["lat"],quadrature["lon"])):
land = (bm.is_land(xpt,ypt))
island.append(land)
return array(island)
def color_land(quadrature):
island = get_land(quadrature)
colors = ["tab:green" if land else "tab:blue" for land in island]
return colors
def color_country(quadrature):
# uses reverse_geocoder
results = rg.search([(la,lo) for la,lo in zip(quadrature["lat"],quadrature["lon"])]) # default mode = 2
countries = []
for i in range(len(results)):
c = results[i]["cc"]
countries.append(c)
nunique = len(unique(countries))
raco = randomcolor.RandomColor()
randomcolors = raco.generate(luminosity="dark", count=nunique) # options: https://github.com/kevinwuhoo/randomcolor-py
colordict = dict(zip(unique(countries),randomcolors))
colorland = color_land(quadrature) # so we can color the ocean also in "tab:blue"
colorcountries = [colordict[country] if colorland[i]!="tab:blue" else "tab:blue" for i,country in enumerate(countries) ]
return colorcountries
def applyupdate(quadrature,rule,states):
nextstate = zeros_like(states)
for i,(state, neighbours) in enumerate(zip(states,quadrature["connection"])):
idx = neighbours[neighbours>-1]
stateneighbours = states[idx]
nextstate[i] = rule(state,stateneighbours)
return nextstate | 0.375134 | 0.447641 |
import json
import requests
# Constants for base profile URL's.
# Might be worth adding config for this,
# but as this is only used here and none
# of this is sensitive, no need for now
BASE_URL = 'https://hl7.org/fhir/'
BASE_FILE_TYPE = '.profile.json'
# Again, only need a cache here, slightly more cohesive and would be easy to move if needed
resource_cache = {}
# TODO. Could make this more flexible to allow for using cache as opposed to web
# Could use npm
# Naming convention seems to be much messier
# For simplicity leaving this for now
def get_base_component(element_operands, component, version):
    """Look up ``component`` for an element: first via the element's own
    key, then via its declared 'base' path."""
    from_key = check_base_definition(element_operands, component, version)
    if from_key != {}:
        return from_key
    return check_defined_base_path(element_operands, component, version)
def check_base_definition(element_operands, component, version):
    """Resolve ``component`` from the base profile of the element's own key."""
    element_key = str(*element_operands.keys())
    resource_type = element_key.split('.')[0]
    # TODO pull fhirVersion out of operands
    definition = json.loads(get_definition(resource_type, version))
    return search_definition(definition, element_key, component)
def check_defined_base_path(element_operands, component, version):
    """Resolve ``component`` via the element's explicit 'base' path, if any."""
    base_path = get_element_base_path(element_operands)
    if not base_path:
        return {}
    resource_type = base_path['path'].split('.')[0]
    definition = json.loads(get_definition(resource_type, version))
    return search_definition(definition, base_path['path'], component)
def get_element_base_path(element_operands):
    """Return the shared 'base' definition of a pair of elements, or None.

    ``element_operands`` maps a single key to a pair of element dicts.
    Raises ValueError when both elements declare a 'base' and they disagree.
    """
    left_element, right_element = tuple(*element_operands.values())[:2]
    both_have_base = 'base' in left_element and 'base' in right_element
    if both_have_base and left_element and right_element and \
            left_element['base'] != right_element['base']:
        raise ValueError('Corresponding elements do not have the same base path definition\n\n' +
                         'Left element -->\n\n' + str(left_element) +
                         'Right element -->\n\n' + str(right_element))
    if 'base' in left_element:
        return left_element['base']
    if 'base' in right_element:
        return right_element['base']
    return None
def search_definition(base_definition, element, component):
    """Find ``element`` (case-insensitive id match) in the definition's
    snapshot and return its ``component`` value, or {} if absent.

    Raises ValueError when the definition lacks a snapshot/element section.
    """
    if not base_definition:
        return {}
    if 'snapshot' not in base_definition:
        raise ValueError('Snapshot is missing from base definition.\n\nBase definition -->\n\n' +
                         str(base_definition))
    if 'element' not in base_definition['snapshot']:
        raise ValueError('No elements found in base definition.\n\nBase definition -->\n\n' +
                         str(base_definition))
    wanted = element.lower()
    for entry in base_definition['snapshot']['element']:
        if 'id' in entry and entry['id'].lower() == wanted and component in entry:
            return entry[component]
    return {}
def get_definition(resource_type, version):
    """Return the (memoised) profile JSON text for resource_type/version."""
    cache_key = resource_type + version
    if cache_key in resource_cache:
        return resource_cache[cache_key]
    return download_definition(resource_type, version)
def download_definition(resource_type, version):
    """Fetch a profile over HTTP and memoise the result.

    Connection-level failures propagate as exceptions; a non-2xx response
    is cached as '{}' so invalid types are not re-requested.
    """
    cache_key = resource_type + version
    # Allow exceptions to be raised, i.e. connection failure etc...
    response = requests.get(get_profile_url(resource_type, version))
    if response.ok:
        resource_cache[cache_key] = response.content.decode('utf-8')
    else:
        # requests raises on connection errors, so this branch only marks
        # well-formed-but-unknown types with an empty JSON object.
        resource_cache[cache_key] = '{}'
    return resource_cache[cache_key]
def get_profile_url(resource_type, version):
    """Build the hl7.org profile URL for a resource type and FHIR version.

    Raises ValueError for missing/non-string arguments, a version that
    does not start with a digit, or an unmapped major version.
    """
    if not resource_type or \
            not version or \
            not str(version)[0].isnumeric() or \
            not isinstance(resource_type, str) or \
            not isinstance(version, str):
        raise ValueError('Unknown FHIR version and resourceType\nVersion: ' +
                         str(version) + ', resourceType: ' + str(resource_type))
    # Leading major-version digit -> hl7.org release path segment.
    version_map = {
        0: 'DSTU1/',
        1: 'DSTU2/',
        3: 'STU3/',
        4: 'R4/'
    }
    fhir_version = version_map.get(int(version[0]), None)
    if not fhir_version:
        raise ValueError('Unknown FHIR version: ' + version)
    return BASE_URL + fhir_version + resource_type + BASE_FILE_TYPE | src/lib/base_definitions.py | import json
import requests
# Constants for base profile URL's.
# Might be worth adding config for this,
# but as this is only used here and none
# of this is sensitive, no need for now
BASE_URL = 'https://hl7.org/fhir/'
BASE_FILE_TYPE = '.profile.json'
# Again, only need a cache here, slightly more cohesive and would be easy to move if needed
resource_cache = {}
# TODO. Could make this more flexible to allow for using cache as opposed to web
# Could use npm
# Naming convention seems to be much messier
# For simplicity leaving this for now
def get_base_component(element_operands, component, version):
base_component = check_base_definition(element_operands, component, version)
if base_component != {}:
return base_component
return check_defined_base_path(element_operands, component, version)
def check_base_definition(element_operands, component, version):
element_key = str(*element_operands.keys())
resource_type = element_key.split('.')[0]
base_definition = json.loads(get_definition(resource_type, version)) # TODO pull fhirVersion out of operands
return search_definition(base_definition, element_key, component)
def check_defined_base_path(element_operands, component, version):
element_base_path = get_element_base_path(element_operands)
if element_base_path:
resource_type = element_base_path['path'].split('.')[0]
base_element_definition = json.loads(get_definition(resource_type, version))
return search_definition(base_element_definition, element_base_path['path'], component)
return {}
def get_element_base_path(element_operands):
    """Return the shared 'base' entry of an element pair, or None if neither side has one.

    Raises ValueError when both sides declare a 'base' but the declarations differ.
    """
    operand_pair = tuple(*element_operands.values())
    left, right = operand_pair[0], operand_pair[1]
    both_declare_base = 'base' in left and 'base' in right
    if both_declare_base and left and right and left['base'] != right['base']:
        raise ValueError('Corresponding elements do not have the same base path definition\n\n' +
                         'Left element -->\n\n' + str(left) +
                         'Right element -->\n\n' + str(right))
    # Prefer the left side's declaration, then the right's.
    for element in (left, right):
        if 'base' in element:
            return element['base']
    return None
def search_definition(base_definition, element, component):
    """Find *element* (case-insensitive id match) in the definition's snapshot
    and return its *component* entry, or {} when absent.

    Raises ValueError if the definition lacks a snapshot or snapshot elements.
    """
    if not base_definition:
        return {}
    if 'snapshot' not in base_definition:
        raise ValueError('Snapshot is missing from base definition.\n\nBase definition -->\n\n' +
                         str(base_definition))
    if 'element' not in base_definition['snapshot']:
        raise ValueError('No elements found in base definition.\n\nBase definition -->\n\n' +
                         str(base_definition))
    wanted = element.lower()
    for entry in base_definition['snapshot']['element']:
        if 'id' not in entry or entry['id'].lower() != wanted:
            continue
        if component in entry:
            return entry[component]
    return {}
def get_definition(resource_type, version):
    """Return the cached profile JSON string for resource_type/version,
    downloading it on a cache miss."""
    cache_key = resource_type + version
    if cache_key in resource_cache:
        return resource_cache[cache_key]
    return download_definition(resource_type, version)
def download_definition(resource_type, version):
    """Fetch the profile over HTTP, store it in resource_cache, and return it.

    Connection-level failures propagate from requests; a non-OK HTTP status is
    cached as '{}' so invalid resource types are not re-fetched on every lookup.
    """
    cache_key = resource_type + version
    response = requests.get(get_profile_url(resource_type, version))
    resource_cache[cache_key] = response.content.decode('utf-8') if response.ok else '{}'
    return resource_cache[cache_key]
def get_profile_url(resource_type, version):
    """Build the canonical hl7.org URL for a resource type's base profile.

    :param resource_type: FHIR resource type name, e.g. 'Patient'
    :param version: FHIR version string; its leading digit selects the release
    :raises ValueError: if the arguments are not non-empty strings or the
        leading digit of *version* is not a known FHIR release
    """
    # Validate types/emptiness before indexing version[0] so malformed input
    # always yields this ValueError rather than a confusing secondary error.
    if not isinstance(resource_type, str) or not isinstance(version, str) or \
            not resource_type or not version or not version[0].isnumeric():
        raise ValueError('Unknown FHIR version and resourceType\nVersion: ' +
                         str(version) + ', resourceType: ' + str(resource_type))
    # Leading major-version digit -> URL path segment of the release.
    version_map = {
        0: 'DSTU1/',
        1: 'DSTU2/',
        3: 'STU3/',
        4: 'R4/'
    }
    fhir_version = version_map.get(int(version[0]))
    if not fhir_version:
        raise ValueError('Unknown FHIR version: ' + version)
    return BASE_URL + fhir_version + resource_type + BASE_FILE_TYPE
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the checkout app: Basket, BasketItem, Wishlist, Order."""
    initial = True
    dependencies = [
        ('product', '0009_review_product_created_at'),
        ('account', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Basket',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Wishlist',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): null=True has no effect on ManyToManyField (Django fields.W340) — confirm intent.
                ('product_list', models.ManyToManyField(blank=True, null=True, related_name='wishlist_of_products', to='product.ProductVersion')),
                # NOTE(review): default=1 presumes a user with pk=1 exists when migrating — verify.
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='wishlist_of_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('basket', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='order_of_basket', to='checkout.basket')),
                ('billing_address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.billingaddress')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_of_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='BasketItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('basket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checkout.basket')),
                ('product_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.productversion')),
            ],
        ),
        # Deferred Basket fields: added after BasketItem exists in app state.
        migrations.AddField(
            model_name='basket',
            name='product_list',
            # NOTE(review): null=True has no effect on ManyToManyField (fields.W340).
            field=models.ManyToManyField(blank=True, null=True, related_name='basket_of_products', to='checkout.BasketItem'),
        ),
        migrations.AddField(
            model_name='basket',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='basket_of_user', to=settings.AUTH_USER_MODEL),
        ),
] | Project/checkout/migrations/0001_initial.py |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the checkout app: Basket, BasketItem, Wishlist, Order."""
    initial = True
    dependencies = [
        ('product', '0009_review_product_created_at'),
        ('account', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Basket',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Wishlist',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): null=True has no effect on ManyToManyField (Django fields.W340) — confirm intent.
                ('product_list', models.ManyToManyField(blank=True, null=True, related_name='wishlist_of_products', to='product.ProductVersion')),
                # NOTE(review): default=1 presumes a user with pk=1 exists when migrating — verify.
                ('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='wishlist_of_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('basket', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='order_of_basket', to='checkout.basket')),
                ('billing_address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.billingaddress')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_of_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='BasketItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('basket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checkout.basket')),
                ('product_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.productversion')),
            ],
        ),
        # Deferred Basket fields: added after BasketItem exists in app state.
        migrations.AddField(
            model_name='basket',
            name='product_list',
            # NOTE(review): null=True has no effect on ManyToManyField (fields.W340).
            field=models.ManyToManyField(blank=True, null=True, related_name='basket_of_products', to='checkout.BasketItem'),
        ),
        migrations.AddField(
            model_name='basket',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='basket_of_user', to=settings.AUTH_USER_MODEL),
        ),
] | 0.541894 | 0.149656 |
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import numpy as np
from hierarchical_transformer_memory.hierarchical_attention import htm_attention
def _build_queries_and_memory(query_length, num_memories, mem_chunk_size,
                              batch_size=2, embedding_size=12):
    """Builds dummy queries + memory contents for tests."""
    queries = np.random.random([batch_size, query_length, embedding_size])
    contents = np.random.random(
        [batch_size, num_memories, mem_chunk_size, embedding_size])
    # Each chunk's summary key is its mean embedding.
    keys = contents.mean(axis=2)
    # Fresh (all-zero) accumulator shaped like a single memory chunk,
    # used to gather the newest memories before writing.
    accumulator = np.zeros_like(contents[:, -1, :, :])
    memory = htm_attention.HierarchicalMemory(
        keys=keys,
        contents=contents,
        accumulator=accumulator,
        steps_since_last_write=np.zeros([batch_size], dtype=np.int32))
    return queries, memory
class HierarchicalAttentionTest(parameterized.TestCase):
    """Shape and masking smoke tests for HierarchicalMemoryAttention."""
    @parameterized.parameters([
        {
            'query_length': 1,
            'num_memories': 7,
            'mem_chunk_size': 5,
            'mem_k': 4,
        },
        {
            'query_length': 9,
            'num_memories': 7,
            'mem_chunk_size': 5,
            'mem_k': 4,
        },
    ])
    @hk.testing.transform_and_run
    def test_output_shapes(self, query_length, num_memories, mem_chunk_size,
                           mem_k):
        """Output must be (batch, query_length, embedding) and finite."""
        np.random.seed(0)
        batch_size = 2
        embedding_size = 12
        num_heads = 3
        queries, memory = _build_queries_and_memory(
            query_length=query_length, num_memories=num_memories,
            mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
        hm_att = htm_attention.HierarchicalMemoryAttention(
            feature_size=embedding_size,
            k=mem_k,
            num_heads=num_heads)
        results = hm_att(queries, memory)
        self.assertEqual(results.shape,
                         (batch_size, query_length, embedding_size))
        self.assertTrue(np.all(np.isfinite(results)))
    @hk.testing.transform_and_run
    def test_masking(self):
        """Attention must accept a boolean (batch, query, memory) mask."""
        np.random.seed(0)
        batch_size = 2
        embedding_size = 12
        num_heads = 3
        query_length = 5
        num_memories = 7
        mem_chunk_size = 6
        mem_k = 4
        queries, memory = _build_queries_and_memory(
            query_length=query_length, num_memories=num_memories,
            mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
        hm_att = htm_attention.HierarchicalMemoryAttention(
            feature_size=embedding_size,
            k=mem_k,
            num_heads=num_heads)
        # get a random boolean mask
        mask = np.random.binomial(
            1, 0.5, [batch_size, query_length, num_memories]).astype(bool)
        results = hm_att(queries, memory, hm_mask=mask)
        self.assertEqual(results.shape,
                         (batch_size, query_length, embedding_size))
        self.assertTrue(np.all(np.isfinite(results)))
if __name__ == '__main__':
absltest.main() | hierarchical_transformer_memory/hierarchical_attention/htm_attention_test.py | from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import numpy as np
from hierarchical_transformer_memory.hierarchical_attention import htm_attention
def _build_queries_and_memory(query_length, num_memories, mem_chunk_size,
                              batch_size=2, embedding_size=12):
    """Builds dummy queries + memory contents for tests."""
    queries = np.random.random([batch_size, query_length, embedding_size])
    contents = np.random.random(
        [batch_size, num_memories, mem_chunk_size, embedding_size])
    # Each chunk's summary key is its mean embedding.
    keys = contents.mean(axis=2)
    # Fresh (all-zero) accumulator shaped like a single memory chunk,
    # used to gather the newest memories before writing.
    accumulator = np.zeros_like(contents[:, -1, :, :])
    memory = htm_attention.HierarchicalMemory(
        keys=keys,
        contents=contents,
        accumulator=accumulator,
        steps_since_last_write=np.zeros([batch_size], dtype=np.int32))
    return queries, memory
class HierarchicalAttentionTest(parameterized.TestCase):
    """Shape and masking smoke tests for HierarchicalMemoryAttention."""
    @parameterized.parameters([
        {
            'query_length': 1,
            'num_memories': 7,
            'mem_chunk_size': 5,
            'mem_k': 4,
        },
        {
            'query_length': 9,
            'num_memories': 7,
            'mem_chunk_size': 5,
            'mem_k': 4,
        },
    ])
    @hk.testing.transform_and_run
    def test_output_shapes(self, query_length, num_memories, mem_chunk_size,
                           mem_k):
        """Output must be (batch, query_length, embedding) and finite."""
        np.random.seed(0)
        batch_size = 2
        embedding_size = 12
        num_heads = 3
        queries, memory = _build_queries_and_memory(
            query_length=query_length, num_memories=num_memories,
            mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
        hm_att = htm_attention.HierarchicalMemoryAttention(
            feature_size=embedding_size,
            k=mem_k,
            num_heads=num_heads)
        results = hm_att(queries, memory)
        self.assertEqual(results.shape,
                         (batch_size, query_length, embedding_size))
        self.assertTrue(np.all(np.isfinite(results)))
    @hk.testing.transform_and_run
    def test_masking(self):
        """Attention must accept a boolean (batch, query, memory) mask."""
        np.random.seed(0)
        batch_size = 2
        embedding_size = 12
        num_heads = 3
        query_length = 5
        num_memories = 7
        mem_chunk_size = 6
        mem_k = 4
        queries, memory = _build_queries_and_memory(
            query_length=query_length, num_memories=num_memories,
            mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
        hm_att = htm_attention.HierarchicalMemoryAttention(
            feature_size=embedding_size,
            k=mem_k,
            num_heads=num_heads)
        # get a random boolean mask
        mask = np.random.binomial(
            1, 0.5, [batch_size, query_length, num_memories]).astype(bool)
        results = hm_att(queries, memory, hm_mask=mask)
        self.assertEqual(results.shape,
                         (batch_size, query_length, embedding_size))
        self.assertTrue(np.all(np.isfinite(results)))
if __name__ == '__main__':
absltest.main() | 0.831554 | 0.514095 |
from PIL import Image, ImageFilter, ImageDraw, ImageFont # image manipulation
import os # For dir making
import secrets # For Handling FileExisting Error
import cv2
import numpy
class ImageHandler:
    """Applies image filters/conversions and text drawing to an image file.

    Every result is written to ./imagesfromimagehandler with a short random
    token in the file name to avoid collisions.
    """

    # Filter options handled uniformly by PIL; insertion order matches the
    # original `options` list so the public error payload stays identical.
    _PIL_FILTERS = {
        'blur': ImageFilter.BLUR,
        'contour': ImageFilter.CONTOUR,
        'detail': ImageFilter.DETAIL,
        'edge_enhance': ImageFilter.EDGE_ENHANCE,
        'emboss': ImageFilter.EMBOSS,
        'edge_enhance_more': ImageFilter.EDGE_ENHANCE_MORE,
        'find_edges': ImageFilter.FIND_EDGES,
        'sharpen': ImageFilter.SHARPEN,
        'smooth': ImageFilter.SMOOTH,
        'smooth_more': ImageFilter.SMOOTH_MORE,
    }
    # Options implemented via OpenCV.
    _CV2_OPTIONS = ('color_2_grayscale', 'color_2_HSV', 'resize')

    def __init__(self, _image_file: str) -> None:
        """Remember the source image path and ensure the output dir exists."""
        self._image_file = _image_file
        # Bug fix: the old rfind('.') slice returned the last character for
        # extension-less paths; splitext correctly yields '' in that case.
        self._image_file_extension = os.path.splitext(_image_file)[1]
        os.makedirs('./imagesfromimagehandler', exist_ok=True)

    def _save(self, image, prefix: str) -> None:
        """Save *image* under a collision-resistant name with the given prefix."""
        token = secrets.token_urlsafe(4)
        image.save(f'./imagesfromimagehandler/{prefix}Image{token}{self._image_file_extension}')

    def filter_image(self, option, size=(64, 64)):
        """Apply the named filter/conversion to the image and save the result.

        :param option: one of the supported option names (see _PIL_FILTERS and
            _CV2_OPTIONS)
        :param size: target (width, height) for the 'resize' option
        :return: None on success, or the tuple ('Not a valid Image option',
            options) for an unknown option (kept for backward compatibility).
        """
        image = Image.open(self._image_file)
        options = list(self._PIL_FILTERS) + list(self._CV2_OPTIONS)
        if option in self._PIL_FILTERS:
            # Bug fix: cv2-based options previously saved under the
            # 'smooth_more' prefix (options[9] copy-paste); every option now
            # saves under its own name.
            self._save(image.filter(self._PIL_FILTERS[option]), option)
        elif option == 'color_2_grayscale':
            # PIL gives RGB; convert straight to grayscale (the old RGB2BGR
            # intermediate was computed and then discarded).
            gray = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2GRAY)
            self._save(Image.fromarray(gray), option)
        elif option == 'color_2_HSV':
            # Bug fix: the old code applied COLOR_BGR2HSV to RGB data,
            # swapping hue channels; convert from RGB directly.
            hsv = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2HSV)
            self._save(Image.fromarray(hsv), option)
        elif option == 'resize':
            # Bug fix: cv2 has no 'reshape'; cv2.resize scales to *size*.
            # Keep the array in RGB so the saved colors are unchanged.
            resized = cv2.resize(numpy.array(image), size)
            self._save(Image.fromarray(resized), option)
        else:
            return 'Not a valid Image option', options

    def draw_text(self, x, y, text: str, fontsize: int, rgb=(0, 0, 0), font="arial.ttf"):
        """Draw *text* at (x, y) in the given color/font and save a copy."""
        truetype = ImageFont.truetype(font, fontsize)
        image = Image.open(self._image_file)
        ImageDraw.Draw(image).text((x, y), text, rgb, font=truetype)
        self._save(image, 'drawed')
import os # For dir making
import secrets # For Handling FileExisting Error
import cv2
import numpy
class ImageHandler:
    """Applies image filters/conversions and text drawing to an image file.

    Every result is written to ./imagesfromimagehandler with a short random
    token in the file name to avoid collisions.
    """

    # Filter options handled uniformly by PIL; insertion order matches the
    # original `options` list so the public error payload stays identical.
    _PIL_FILTERS = {
        'blur': ImageFilter.BLUR,
        'contour': ImageFilter.CONTOUR,
        'detail': ImageFilter.DETAIL,
        'edge_enhance': ImageFilter.EDGE_ENHANCE,
        'emboss': ImageFilter.EMBOSS,
        'edge_enhance_more': ImageFilter.EDGE_ENHANCE_MORE,
        'find_edges': ImageFilter.FIND_EDGES,
        'sharpen': ImageFilter.SHARPEN,
        'smooth': ImageFilter.SMOOTH,
        'smooth_more': ImageFilter.SMOOTH_MORE,
    }
    # Options implemented via OpenCV.
    _CV2_OPTIONS = ('color_2_grayscale', 'color_2_HSV', 'resize')

    def __init__(self, _image_file: str) -> None:
        """Remember the source image path and ensure the output dir exists."""
        self._image_file = _image_file
        # Bug fix: the old rfind('.') slice returned the last character for
        # extension-less paths; splitext correctly yields '' in that case.
        self._image_file_extension = os.path.splitext(_image_file)[1]
        os.makedirs('./imagesfromimagehandler', exist_ok=True)

    def _save(self, image, prefix: str) -> None:
        """Save *image* under a collision-resistant name with the given prefix."""
        token = secrets.token_urlsafe(4)
        image.save(f'./imagesfromimagehandler/{prefix}Image{token}{self._image_file_extension}')

    def filter_image(self, option, size=(64, 64)):
        """Apply the named filter/conversion to the image and save the result.

        :param option: one of the supported option names (see _PIL_FILTERS and
            _CV2_OPTIONS)
        :param size: target (width, height) for the 'resize' option
        :return: None on success, or the tuple ('Not a valid Image option',
            options) for an unknown option (kept for backward compatibility).
        """
        image = Image.open(self._image_file)
        options = list(self._PIL_FILTERS) + list(self._CV2_OPTIONS)
        if option in self._PIL_FILTERS:
            # Bug fix: cv2-based options previously saved under the
            # 'smooth_more' prefix (options[9] copy-paste); every option now
            # saves under its own name.
            self._save(image.filter(self._PIL_FILTERS[option]), option)
        elif option == 'color_2_grayscale':
            # PIL gives RGB; convert straight to grayscale (the old RGB2BGR
            # intermediate was computed and then discarded).
            gray = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2GRAY)
            self._save(Image.fromarray(gray), option)
        elif option == 'color_2_HSV':
            # Bug fix: the old code applied COLOR_BGR2HSV to RGB data,
            # swapping hue channels; convert from RGB directly.
            hsv = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2HSV)
            self._save(Image.fromarray(hsv), option)
        elif option == 'resize':
            # Bug fix: cv2 has no 'reshape'; cv2.resize scales to *size*.
            # Keep the array in RGB so the saved colors are unchanged.
            resized = cv2.resize(numpy.array(image), size)
            self._save(Image.fromarray(resized), option)
        else:
            return 'Not a valid Image option', options

    def draw_text(self, x, y, text: str, fontsize: int, rgb=(0, 0, 0), font="arial.ttf"):
        """Draw *text* at (x, y) in the given color/font and save a copy."""
        truetype = ImageFont.truetype(font, fontsize)
        image = Image.open(self._image_file)
        ImageDraw.Draw(image).text((x, y), text, rgb, font=truetype)
        self._save(image, 'drawed')
import logging
import numpy as np
import hypothesis.strategies as hst
from hypothesis import HealthCheck, given, example, settings
from qcodes.dataset.measurements import Measurement
@given(n_points=hst.integers(min_value=1, max_value=100))
@example(n_points=5)
@settings(deadline=None, suppress_health_check=(HealthCheck.function_scoped_fixture,))
def test_datasaver_1d(experiment, DAC, DMM, caplog,
                      n_points):
    """Saving a different number of points than the declared shape must log a
    warning per parameter when the data is loaded."""
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,))
    # Shape metadata deliberately fixed at 5 so hypothesis exercises the
    # matching, smaller and larger cases.
    n_points_expected = 5
    meas.set_shapes({DMM.v1.full_name: (n_points_expected,)})
    with meas.run() as datasaver:
        for set_v in np.linspace(0, 1, n_points):
            # NOTE(review): DAC.ch1() only *reads* the parameter here;
            # presumably DAC.ch1(set_v) was intended — confirm.
            DAC.ch1()
            datasaver.add_result((DAC.ch1, set_v),
                                 (DMM.v1, DMM.v1()))
    ds = datasaver.dataset
    caplog.clear()
    data = ds.get_parameter_data()
    # Loaded arrays always reflect the actual number of saved points.
    for dataarray in data[DMM.v1.full_name].values():
        assert dataarray.shape == (n_points,)
    if n_points == n_points_expected:
        assert len(caplog.record_tuples) == 0
    elif n_points > n_points_expected:
        # One warning each for the dependent (DMM.v1) and its setpoint (DAC.ch1).
        assert len(caplog.record_tuples) == 2
        exp_module = "qcodes.dataset.sqlite.queries"
        exp_level = logging.WARNING
        exp_msg = ("Tried to set data shape for {} in "
                   "dataset {} "
                   "from metadata when loading "
                   "but found inconsistent lengths {} and {}")
        assert caplog.record_tuples[0] == (exp_module,
                                           exp_level,
                                           exp_msg.format(DMM.v1.full_name,
                                                          DMM.v1.full_name,
                                                          n_points,
                                                          n_points_expected))
        assert caplog.record_tuples[1] == (exp_module,
                                           exp_level,
                                           exp_msg.format(DAC.ch1.full_name,
                                                          DMM.v1.full_name,
                                                          n_points,
                                                          n_points_expected))
@settings(deadline=None, suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(n_points_1=hst.integers(min_value=1, max_value=50),
       n_points_2=hst.integers(min_value=1, max_value=50))
@example(n_points_1=5, n_points_2=10)
def test_datasaver_2d(experiment, DAC, DMM, caplog,
                      n_points_1, n_points_2):
    """2D sweep: a mismatched declared shape must log one warning per
    parameter tree member when the data is loaded."""
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DAC.ch2)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,
                                               DAC.ch2))
    # Declared shape fixed at 5 x 10 so hypothesis hits equal/larger totals.
    n_points_expected_1 = 5
    n_points_expected_2 = 10
    meas.set_shapes({DMM.v1.full_name: (n_points_expected_1,
                                        n_points_expected_2,)})
    with meas.run() as datasaver:
        for set_v_1 in np.linspace(0, 1, n_points_1):
            for set_v_2 in np.linspace(0, 1, n_points_2):
                datasaver.add_result((DAC.ch1, set_v_1),
                                     (DAC.ch2, set_v_2),
                                     (DMM.v1, DMM.v1()))
    ds = datasaver.dataset
    caplog.clear()
    data = ds.get_parameter_data()
    if n_points_1*n_points_2 == n_points_expected_1*n_points_expected_2:
        # Totals agree: data is reshaped to the declared shape, no warnings.
        assert len(caplog.record_tuples) == 0
        for dataarray in data[DMM.v1.full_name].values():
            assert dataarray.shape == (n_points_expected_1, n_points_expected_2)
    elif n_points_1*n_points_2 > n_points_expected_1*n_points_expected_2:
        # Too many points: one warning each for DMM.v1, DAC.ch1 and DAC.ch2.
        assert len(caplog.record_tuples) == 3
        exp_module = "qcodes.dataset.sqlite.queries"
        exp_level = logging.WARNING
        exp_msg = ("Tried to set data shape for {} in "
                   "dataset {} "
                   "from metadata when loading "
                   "but found inconsistent lengths {} and {}")
        assert caplog.record_tuples[0] == (
            exp_module,
            exp_level,
            exp_msg.format(
                DMM.v1.full_name,
                DMM.v1.full_name,
                n_points_1*n_points_2,
                n_points_expected_1*n_points_expected_2
            )
        )
        assert caplog.record_tuples[1] == (
            exp_module,
            exp_level,
            exp_msg.format(
                DAC.ch1.full_name,
                DMM.v1.full_name,
                n_points_1*n_points_2,
                n_points_expected_1*n_points_expected_2)
        )
        assert caplog.record_tuples[2] == (
            exp_module,
            exp_level,
            exp_msg.format(
                DAC.ch2.full_name,
                DMM.v1.full_name,
                n_points_1*n_points_2,
                n_points_expected_1*n_points_expected_2
            )
) | qcodes/tests/dataset/measurement/test_shapes.py | import logging
import numpy as np
import hypothesis.strategies as hst
from hypothesis import HealthCheck, given, example, settings
from qcodes.dataset.measurements import Measurement
@given(n_points=hst.integers(min_value=1, max_value=100))
@example(n_points=5)
@settings(deadline=None, suppress_health_check=(HealthCheck.function_scoped_fixture,))
def test_datasaver_1d(experiment, DAC, DMM, caplog,
                      n_points):
    """Saving a different number of points than the declared shape must log a
    warning per parameter when the data is loaded."""
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,))
    # Shape metadata deliberately fixed at 5 so hypothesis exercises the
    # matching, smaller and larger cases.
    n_points_expected = 5
    meas.set_shapes({DMM.v1.full_name: (n_points_expected,)})
    with meas.run() as datasaver:
        for set_v in np.linspace(0, 1, n_points):
            # NOTE(review): DAC.ch1() only *reads* the parameter here;
            # presumably DAC.ch1(set_v) was intended — confirm.
            DAC.ch1()
            datasaver.add_result((DAC.ch1, set_v),
                                 (DMM.v1, DMM.v1()))
    ds = datasaver.dataset
    caplog.clear()
    data = ds.get_parameter_data()
    # Loaded arrays always reflect the actual number of saved points.
    for dataarray in data[DMM.v1.full_name].values():
        assert dataarray.shape == (n_points,)
    if n_points == n_points_expected:
        assert len(caplog.record_tuples) == 0
    elif n_points > n_points_expected:
        # One warning each for the dependent (DMM.v1) and its setpoint (DAC.ch1).
        assert len(caplog.record_tuples) == 2
        exp_module = "qcodes.dataset.sqlite.queries"
        exp_level = logging.WARNING
        exp_msg = ("Tried to set data shape for {} in "
                   "dataset {} "
                   "from metadata when loading "
                   "but found inconsistent lengths {} and {}")
        assert caplog.record_tuples[0] == (exp_module,
                                           exp_level,
                                           exp_msg.format(DMM.v1.full_name,
                                                          DMM.v1.full_name,
                                                          n_points,
                                                          n_points_expected))
        assert caplog.record_tuples[1] == (exp_module,
                                           exp_level,
                                           exp_msg.format(DAC.ch1.full_name,
                                                          DMM.v1.full_name,
                                                          n_points,
                                                          n_points_expected))
@settings(deadline=None, suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(n_points_1=hst.integers(min_value=1, max_value=50),
       n_points_2=hst.integers(min_value=1, max_value=50))
@example(n_points_1=5, n_points_2=10)
def test_datasaver_2d(experiment, DAC, DMM, caplog,
                      n_points_1, n_points_2):
    """2D sweep: a mismatched declared shape must log one warning per
    parameter tree member when the data is loaded."""
    meas = Measurement()
    meas.register_parameter(DAC.ch1)
    meas.register_parameter(DAC.ch2)
    meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,
                                               DAC.ch2))
    # Declared shape fixed at 5 x 10 so hypothesis hits equal/larger totals.
    n_points_expected_1 = 5
    n_points_expected_2 = 10
    meas.set_shapes({DMM.v1.full_name: (n_points_expected_1,
                                        n_points_expected_2,)})
    with meas.run() as datasaver:
        for set_v_1 in np.linspace(0, 1, n_points_1):
            for set_v_2 in np.linspace(0, 1, n_points_2):
                datasaver.add_result((DAC.ch1, set_v_1),
                                     (DAC.ch2, set_v_2),
                                     (DMM.v1, DMM.v1()))
    ds = datasaver.dataset
    caplog.clear()
    data = ds.get_parameter_data()
    if n_points_1*n_points_2 == n_points_expected_1*n_points_expected_2:
        # Totals agree: data is reshaped to the declared shape, no warnings.
        assert len(caplog.record_tuples) == 0
        for dataarray in data[DMM.v1.full_name].values():
            assert dataarray.shape == (n_points_expected_1, n_points_expected_2)
    elif n_points_1*n_points_2 > n_points_expected_1*n_points_expected_2:
        # Too many points: one warning each for DMM.v1, DAC.ch1 and DAC.ch2.
        assert len(caplog.record_tuples) == 3
        exp_module = "qcodes.dataset.sqlite.queries"
        exp_level = logging.WARNING
        exp_msg = ("Tried to set data shape for {} in "
                   "dataset {} "
                   "from metadata when loading "
                   "but found inconsistent lengths {} and {}")
        assert caplog.record_tuples[0] == (
            exp_module,
            exp_level,
            exp_msg.format(
                DMM.v1.full_name,
                DMM.v1.full_name,
                n_points_1*n_points_2,
                n_points_expected_1*n_points_expected_2
            )
        )
        assert caplog.record_tuples[1] == (
            exp_module,
            exp_level,
            exp_msg.format(
                DAC.ch1.full_name,
                DMM.v1.full_name,
                n_points_1*n_points_2,
                n_points_expected_1*n_points_expected_2)
        )
        assert caplog.record_tuples[2] == (
            exp_module,
            exp_level,
            exp_msg.format(
                DAC.ch2.full_name,
                DMM.v1.full_name,
                n_points_1*n_points_2,
                n_points_expected_1*n_points_expected_2
            )
) | 0.620737 | 0.478163 |
import copy
import tempfile
from pathlib import Path
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from joblib import Memory
from sklearn.base import BaseEstimator, TransformerMixin
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import FEATURE_EXTRACTOR_FEATURE_NAME_SEPARATOR
from python.util.util import get_dict_hash
class FeatureExtractorMixin(BaseEstimator, TransformerMixin):
"""
Abstract class for custom mention pair features.
See https://scikit-learn.org/0.19/auto_examples/hetero_feature_union.html#sphx-glr-auto-examples-hetero-feature-union-py
"""
def __init__(self,
name: str,
use_cache: bool,
features_to_select: Optional[List[str]]):
"""
:param name: name of this feature extractor
:param use_cache: enable caching for transform() calls
:param features_to_select: The names of features to return in transform() -> these should not be prefixed with
the name of the feature extractor! If None, all features will be returned.
"""
self.name = name
self.use_cache = use_cache
self.features_to_select = features_to_select
@property
def dtype(self):
return np.dtype("float32")
@staticmethod
def from_np_array_back_to_list_of_tuples(pairs: np.array) -> List[Tuple[Tuple, Tuple]]:
"""
Convert pairs of mention identifiers from a numpy array back into the list of tuples of tuples format we have
been using for features all the time. This method makes strong assumptions over the input (and thereby the
whole dataset) format, which is good. If it leads to a crash, we're in trouble.
:param pairs:
:return:
"""
return [((pair[0], int(pair[1])), (pair[2], int(pair[3]))) for pair in pairs]
def fit(self, X, y=None):
dataset, pairs, labels, unique_mentions = X
self._fit(dataset, FeatureExtractorMixin.from_np_array_back_to_list_of_tuples(pairs), unique_mentions)
return self
def _fit(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
pass
def transform(self, X: Tuple):
dataset, pairs, labels, unique_mentions = X
if self.use_cache:
# We want to cache feature transformation outputs similar to what is asked for / proposed here:
# (1) https://mail.python.org/pipermail/scikit-learn/2017-August/001828.html
# (2) https://gist.github.com/jnothman/019d594d197c98a3d6192fa0cb19c850
# We cannot implement the caching 1:1 as in the github gist because our feature extractors have constructor
# parameters which change the output of transform(), i.e. we want one cache for each set of parameters. To
# do this conveniently, we take the __dict__ of a feature extractor, remove irrelevant entries and hash the
# result. Irrelevant entries are the features to select (read-only modification) and any data-dependent
# attributes ending with an underscore (see https://scikit-learn.org/stable/developers/develop.html#estimated-attributes)
attrs = copy.deepcopy(self.__dict__)
attrs = {k:v for k,v in attrs.items() if not k.endswith("_") and not k in ["name", "features_to_select"]}
cache_key = get_dict_hash(attrs)
cache_location = Path(tempfile.gettempdir()) / f"feature_{self.name}_{cache_key}"
memory = Memory(cache_location, verbose=0)
feature_matrix = memory.cache(self._transform)(dataset, FeatureExtractorMixin.from_np_array_back_to_list_of_tuples(pairs), unique_mentions)
else:
feature_matrix = self._transform(dataset, FeatureExtractorMixin.from_np_array_back_to_list_of_tuples(pairs), unique_mentions)
# filter feature matrix according to feature selection
if self.features_to_select:
all_feature_names = self._get_plain_names_of_all_features()
# sanity check: we can only select what we can extract
for fname in self.features_to_select:
if not fname in all_feature_names:
raise ValueError("Cannot select unknown feature name: " + fname)
mask = np.array([fname in self.features_to_select for fname in all_feature_names])
filtered_feature_matrix = feature_matrix[:, mask]
return filtered_feature_matrix
else:
return feature_matrix
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
    # Subclasses implement the actual feature computation here and return a
    # matrix with one row per mention pair.
    raise NotImplementedError
def get_feature_names(self) -> List[str]:
    """
    Return the feature names this extractor will actually emit (the selected
    subset if one was configured, otherwise every feature it can extract),
    each prefixed with the extractor's own name.

    :return: list of prefixed feature names
    """
    if self.features_to_select is not None:
        plain_names = self.features_to_select
    else:
        plain_names = self._get_plain_names_of_all_features()
    # The separator must not occur inside a plain name, otherwise the prefix
    # could not be split off again later.
    for plain_name in plain_names:
        assert FEATURE_EXTRACTOR_FEATURE_NAME_SEPARATOR not in plain_name
    return [FEATURE_EXTRACTOR_FEATURE_NAME_SEPARATOR.join((self.name, plain_name))
            for plain_name in plain_names]
def _get_plain_names_of_all_features(self) -> List[str]:
    """
    Returns the names of all features this feature extractor can extract,
    without the extractor-name prefix.

    :return: list of plain feature names
    """
    raise NotImplementedError
@classmethod
def from_params(cls, config: Dict):
raise NotImplementedError | python/handwritten_baseline/pipeline/model/feature_extr/base_mixin.py | import copy
import tempfile
from pathlib import Path
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from joblib import Memory
from sklearn.base import BaseEstimator, TransformerMixin
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import FEATURE_EXTRACTOR_FEATURE_NAME_SEPARATOR
from python.util.util import get_dict_hash
class FeatureExtractorMixin(BaseEstimator, TransformerMixin):
"""
Abstract class for custom mention pair features.
See https://scikit-learn.org/0.19/auto_examples/hetero_feature_union.html#sphx-glr-auto-examples-hetero-feature-union-py
"""
def __init__(self,
             name: str,
             use_cache: bool,
             features_to_select: Optional[List[str]]):
    """
    Store constructor parameters without further processing (scikit-learn
    convention: __init__ only assigns, any real work happens in fit()).

    :param name: name of this feature extractor
    :param use_cache: enable caching for transform() calls
    :param features_to_select: The names of features to return in transform() -> these should not be prefixed with
        the name of the feature extractor! If None, all features will be returned.
    """
    self.name = name
    self.use_cache = use_cache
    self.features_to_select = features_to_select
@property
def dtype(self):
    # All feature matrices produced by extractors use 32-bit floats.
    return np.dtype("float32")
@staticmethod
def from_np_array_back_to_list_of_tuples(pairs: np.array) -> List[Tuple[Tuple, Tuple]]:
    """
    Convert mention-pair identifiers from a numpy array back into the
    list-of-tuples-of-tuples format used for features throughout. Each row is
    assumed to have the layout (doc_a, idx_a, doc_b, idx_b); if the dataset
    format ever changes this will fail loudly, which is intended.

    :param pairs: array with one mention pair per row
    :return: list of ((doc_a, idx_a), (doc_b, idx_b)) tuples
    """
    converted = []
    for doc_a, idx_a, doc_b, idx_b in pairs:
        converted.append(((doc_a, int(idx_a)), (doc_b, int(idx_b))))
    return converted
def fit(self, X, y=None):
    """
    Unpack the pipeline input tuple and delegate to the _fit hook.

    :param X: 4-tuple of (dataset, pairs, labels, unique_mentions)
    :param y: unused, present for scikit-learn API compatibility
    :return: self
    """
    dataset, pairs, labels, unique_mentions = X
    self._fit(dataset, FeatureExtractorMixin.from_np_array_back_to_list_of_tuples(pairs), unique_mentions)
    return self
def _fit(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
pass
def transform(self, X: Tuple):
    """
    Compute the feature matrix for the given mention pairs, optionally
    memoizing the computation on disk.

    :param X: 4-tuple of (dataset, pairs, labels, unique_mentions); labels are
              not used here.
    :return: feature matrix with one row per mention pair, filtered to the
             selected features if a selection was configured
    """
    dataset, pairs, labels, unique_mentions = X
    if self.use_cache:
        # We want to cache feature transformation outputs similar to what is asked for / proposed here:
        # (1) https://mail.python.org/pipermail/scikit-learn/2017-August/001828.html
        # (2) https://gist.github.com/jnothman/019d594d197c98a3d6192fa0cb19c850
        # We cannot implement the caching 1:1 as in the github gist because our feature extractors have constructor
        # parameters which change the output of transform(), i.e. we want one cache for each set of parameters. To
        # do this conveniently, we take the __dict__ of a feature extractor, remove irrelevant entries and hash the
        # result. Irrelevant entries are the features to select (read-only modification) and any data-dependent
        # attributes ending with an underscore (see https://scikit-learn.org/stable/developers/develop.html#estimated-attributes)
        attrs = copy.deepcopy(self.__dict__)
        attrs = {k:v for k,v in attrs.items() if not k.endswith("_") and not k in ["name", "features_to_select"]}
        cache_key = get_dict_hash(attrs)
        cache_location = Path(tempfile.gettempdir()) / f"feature_{self.name}_{cache_key}"
        memory = Memory(cache_location, verbose=0)
        feature_matrix = memory.cache(self._transform)(dataset, FeatureExtractorMixin.from_np_array_back_to_list_of_tuples(pairs), unique_mentions)
    else:
        feature_matrix = self._transform(dataset, FeatureExtractorMixin.from_np_array_back_to_list_of_tuples(pairs), unique_mentions)
    # filter feature matrix according to feature selection
    if self.features_to_select:
        all_feature_names = self._get_plain_names_of_all_features()
        # sanity check: we can only select what we can extract
        for fname in self.features_to_select:
            if not fname in all_feature_names:
                raise ValueError("Cannot select unknown feature name: " + fname)
        mask = np.array([fname in self.features_to_select for fname in all_feature_names])
        filtered_feature_matrix = feature_matrix[:, mask]
        return filtered_feature_matrix
    else:
        return feature_matrix
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
    # Subclasses implement the actual feature computation here and return a
    # matrix with one row per mention pair.
    raise NotImplementedError
def get_feature_names(self) -> List[str]:
    """
    Returns the names of all features this feature extractor will extract (== not all features, only the ones
    specified in the constructor), prefixed with the name of this feature extractor.

    :return: list of prefixed feature names
    """
    feature_names = self.features_to_select if self.features_to_select is not None else self._get_plain_names_of_all_features()
    # The separator must not occur in a plain name, otherwise the prefix could
    # not be split off again later.
    assert not any(FEATURE_EXTRACTOR_FEATURE_NAME_SEPARATOR in fname for fname in feature_names)
    feature_names_with_extractor_prefix = [self.name + FEATURE_EXTRACTOR_FEATURE_NAME_SEPARATOR + fname for fname in
                                           feature_names]
    return feature_names_with_extractor_prefix
def _get_plain_names_of_all_features(self) -> List[str]:
    """
    Returns the names of all features this feature extractor can extract,
    without the extractor-name prefix.

    :return: list of plain feature names
    """
    raise NotImplementedError
@classmethod
def from_params(cls, config: Dict):
raise NotImplementedError | 0.827061 | 0.527864 |
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Dict, List
from nuplan.planning.metrics.metric_result import MetricStatistics
@dataclass
class MetricFileKey:
    """Identifying key of a stored metric result file."""

    metric_name: str     # name of the metric
    scenario_name: str   # name of the evaluated scenario
    scenario_type: str   # type/category of the scenario
    planner_name: str    # planner that produced the result

    def serialize(self) -> Dict[str, str]:
        """Serialize the key to a plain dictionary."""
        field_names = ('metric_name', 'scenario_name', 'scenario_type', 'planner_name')
        return {name: getattr(self, name) for name in field_names}

    @classmethod
    def deserialize(cls, data: Dict[str, str]) -> MetricFileKey:
        """
        Rebuild a key from a dictionary produced by serialize().
        :param data: serialized key dictionary.
        :return: reconstructed MetricFileKey.
        """
        return MetricFileKey(metric_name=data['metric_name'],
                             scenario_name=data['scenario_name'],
                             scenario_type=data['scenario_type'],
                             planner_name=data['planner_name'])
@dataclass
class MetricFile:
    """Serializable container for the metric statistics of one metric file."""

    # Identifying key of this metric file.
    key: MetricFileKey
    # Maps a metric statistics name to the list of statistics recorded for it.
    metric_statistics: Dict[str, List[MetricStatistics]] = field(default_factory=dict)

    def serialize(self) -> Dict[str, Any]:
        """Serialize the file, recursively serializing key and statistics."""
        serialized_statistics = {}
        for statistic_name, statistics in self.metric_statistics.items():
            serialized_statistics[statistic_name] = [statistic.serialize() for statistic in statistics]
        return {'key': self.key.serialize(), 'metric_statistics': serialized_statistics}

    @classmethod
    def deserialize(cls, data: Dict[str, Any]) -> MetricFile:
        """
        Rebuild a MetricFile from a dictionary produced by serialize().
        :param data: serialized representation with 'key' and 'metric_statistics' entries.
        :return: reconstructed MetricFile.
        """
        statistics_by_name = {}
        for statistic_name, statistics in data['metric_statistics'].items():
            statistics_by_name[statistic_name] = [MetricStatistics.deserialize(s) for s in statistics]
        metric_file_key = MetricFileKey.deserialize(data['key'])
        return MetricFile(key=metric_file_key,
                          metric_statistics=statistics_by_name)
from dataclasses import dataclass, field
from typing import Any, Dict, List
from nuplan.planning.metrics.metric_result import MetricStatistics
@dataclass
class MetricFileKey:
    """Identifying key of a stored metric result file."""

    metric_name: str     # name of the metric
    scenario_name: str   # name of the evaluated scenario
    scenario_type: str   # type/category of the scenario
    planner_name: str    # planner that produced the result

    def serialize(self) -> Dict[str, str]:
        """ Serialization of metric result key. """
        return {'metric_name': self.metric_name,
                'scenario_name': self.scenario_name,
                'scenario_type': self.scenario_type,
                'planner_name': self.planner_name}

    @classmethod
    def deserialize(cls, data: Dict[str, str]) -> MetricFileKey:
        """ Deserialization of a metric file key.
        :param data: A dictionary of data,
        :return A MetricFileKey data class.
        """
        return MetricFileKey(metric_name=data['metric_name'],
                             scenario_name=data['scenario_name'],
                             scenario_type=data['scenario_type'],
                             planner_name=data['planner_name'])
@dataclass
class MetricFile:
    """ Metric storage result. """

    key: MetricFileKey  # Metric file key
    # Maps a metric statistics name to the list of statistics recorded for it.
    metric_statistics: Dict[str, List[MetricStatistics]] = field(default_factory=dict)

    def serialize(self) -> Dict[str, Any]:
        """ Serialization of metric storage result (key plus every contained statistic). """
        return {
            'key': self.key.serialize(),
            'metric_statistics': {statistic_name: [metric_statistic.serialize() for metric_statistic
                                                   in metric_statistics] for statistic_name, metric_statistics
                                  in self.metric_statistics.items()}
        }

    @classmethod
    def deserialize(cls, data: Dict[str, Any]) -> MetricFile:
        """
        Deserialization of metric storage result.
        :param data: A dictionary of data,
        :return A MetricFile data class.
        """
        # Rebuild the statistics first, then the key, mirroring serialize().
        metric_statistics = {
            statistic_name: [MetricStatistics.deserialize(statistic) for statistic in statistics]
            for statistic_name, statistics in data['metric_statistics'].items()
        }
        metric_file_key = MetricFileKey.deserialize(data['key'])
        return MetricFile(key=metric_file_key,
                          metric_statistics=metric_statistics)
import glob
import json
import os
from setuptools import find_packages
from setuptools import setup
# python3 setup.py register -r pypitest
# UNIX:
# rm -rf ./dist
# python3 setup.py sdist bdist_wheel
# twine upload dist/measure*
# python3 conda-recipe/conda-builder.py
# WINDOWS:
# rmdir dist /s /q
# python setup.py sdist bdist_wheel
# twine upload dist/measure*
# python conda-recipe\conda-builder.py
# Resolve paths relative to this file so the build works from any CWD.
my_directory = os.path.realpath(os.path.dirname(__file__))
settings_path = os.path.join(my_directory, 'measurement_stats', 'settings.json')

# Read-only mode is sufficient; the previous 'r+' required write permission on
# the file for no reason. Explicit encoding keeps the parse platform-independent.
with open(settings_path, 'r', encoding='utf-8') as f:
    settings = json.load(f)
def read_me():
    """Return the contents of README.rst for use as the long description."""
    # An explicit encoding makes the build independent of the platform default
    # (e.g. cp1252 on Windows), which would otherwise break on non-ASCII text.
    with open('README.rst', encoding='utf-8') as f:
        return f.read()
def populate_extra_files():
    """Return the non-python data files to bundle with the package distribution."""
    # settings.json first, then everything under resources/ (recursively).
    resource_entries = glob.iglob('measurement_stats/resources/**/*', recursive=True)
    return ['measurement_stats/settings.json', *resource_entries]
setup(
    name='measurement_stats',
    # Version is single-sourced from measurement_stats/settings.json (loaded above).
    version=settings['version'],
    description=(
        'Measurement statistics with uncertainties and error propagation'
    ),
    long_description=read_me(),
    url='https://github.com/sernst/Measurement_Statistics',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # Ship settings.json and everything under resources/ with the package.
    package_data={'': populate_extra_files()},
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Physics'
    ],
    install_requires=[
        'pandas',
        'numpy',
        'six',
        'scipy'
    ],
    test_suite='nose.collector',
    # NOTE(review): 'pytest-cover' may be intended as 'pytest-cov' (the
    # mainstream coverage plugin) - confirm before changing.
    tests_require=['pytest', 'pytest-cover'],
    keywords='measurements statistics uncertainty error propagation',
)
import json
import os
from setuptools import find_packages
from setuptools import setup
# python3 setup.py register -r pypitest
# UNIX:
# rm -rf ./dist
# python3 setup.py sdist bdist_wheel
# twine upload dist/measure*
# python3 conda-recipe/conda-builder.py
# WINDOWS:
# rmdir dist /s /q
# python setup.py sdist bdist_wheel
# twine upload dist/measure*
# python conda-recipe\conda-builder.py
my_directory = os.path.realpath(os.path.dirname(__file__))
settings_path = os.path.join(my_directory, 'measurement_stats', 'settings.json')
with open(settings_path, 'r+') as f:
settings = json.load(f)
def read_me():
with open('README.rst') as f:
return f.read()
def populate_extra_files():
"""
Creates a list of non-python data files to include in package distribution
"""
out = ['measurement_stats/settings.json']
for entry in glob.iglob('measurement_stats/resources/**/*', recursive=True):
out.append(entry)
return out
setup(
name='measurement_stats',
version=settings['version'],
description=(
'Measurement statistics with uncertainties and error propagation'
),
long_description=read_me(),
url='https://github.com/sernst/Measurement_Statistics',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
package_data={'': populate_extra_files()},
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics'
],
install_requires=[
'pandas',
'numpy',
'six',
'scipy'
],
test_suite='nose.collector',
tests_require=['pytest', 'pytest-cover'],
keywords='measurements statistics uncertainty error propagation',
) | 0.438304 | 0.122025 |
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordResetForm
from allauth.account.forms import EmailAwarePasswordResetTokenGenerator
from allauth.account.models import EmailAddress
from allauth.account.utils import user_pk_to_url_str
from rest_framework import serializers
from .models import Account
default_token_generator = EmailAwarePasswordResetTokenGenerator()
class AccountSerializer(serializers.ModelSerializer):
    """
    Full read/write serializer for Account.

    Password-related fields are declared write-only so they are accepted on
    create/update but never echoed back in API responses.
    """
    password = serializers.CharField(write_only=True, required=False)
    confirm_password = serializers.CharField(write_only=True, required=False)
    verified_email = serializers.BooleanField(write_only=True, required=False)
    captcha_token = serializers.CharField(required=False)
    avatar = serializers.SerializerMethodField()

    class Meta:
        model = Account
        fields = ('id', 'email', 'username', 'created_at', 'updated_at',
                  'name', 'tagline', 'avatar', 'password',
                  'confirm_password', 'verified_email', 'slug', 'is_staff', 'captcha_token')
        read_only_fields = ('created_at', 'updated_at', 'avatar', 'slug', 'is_staff',)
        # The original keys here were mangled to '<PASSWORD>' by an automated
        # secret-scrubbing pass (duplicate dict keys would overwrite each
        # other). Restored to name the two password fields, matching the
        # explicit write_only field declarations above - confirm against VCS
        # history.
        extra_kwargs = {
            'password': {'write_only': True},
            'confirm_password': {'write_only': True},
            'verified_email': {'write_only': True},
        }

    def to_representation(self, obj):
        # No-op override kept for interface stability with existing callers.
        data = super(AccountSerializer, self).to_representation(obj)
        return data

    def get_avatar(self, obj):
        """Gravatar URLs in three sizes for this account."""
        return {
            'tiny': obj.get_gravatar_tiny_url(),
            'thumbnail': obj.get_gravatar_thumbnail_url(),
            'medium': obj.get_gravatar_medium_url(),
        }

    def validate_username(self, username):
        # Check that the username does not have a space in it
        if ' ' in username:
            raise serializers.ValidationError("Username cannot have spaces")
        return username
class AccountReadOnlyLightSerializer(serializers.ModelSerializer):
    """Read-only, lightweight Account representation for embedding in other payloads."""
    avatar = serializers.SerializerMethodField()

    class Meta:
        model = Account
        fields = ('email', 'username', 'slug', 'name', 'tagline', 'avatar',)
        read_only_fields = ('email', 'username', 'slug', 'name', 'tagline', 'avatar',)

    def get_avatar(self, obj):
        # Only the small avatar sizes are exposed here to keep payloads light.
        return {
            'tiny': obj.get_gravatar_tiny_url(),
            'thumbnail': obj.get_gravatar_thumbnail_url(),
        }
class LoginCustomSerializer(serializers.Serializer):
    """Plain (non-model) serializer validating login credentials."""
    email = serializers.EmailField(max_length=200)
    password = serializers.CharField(max_length=200)
class PasswordCustomSerializer(serializers.Serializer):
    """Plain serializer validating a single password value (e.g. for confirmation)."""
    password = serializers.CharField(max_length=200)
class PasswordResetSerializer(serializers.Serializer):
    """
    Serializer for requesting a password reset e-mail.
    """
    email = serializers.EmailField()
    password_reset_form_class = PasswordResetForm
    # Evaluated once at class-definition time; changing settings later has no effect.
    domain = getattr(settings, 'DOMAIN_BASE_URL')

    def validate_email(self, value):
        # Create PasswordResetForm with the serializer.
        # Side effect: the bound form is kept on the instance for save().
        self.reset_form = self.password_reset_form_class(data=self.initial_data)
        if not self.reset_form.is_valid():
            raise serializers.ValidationError(self.reset_form.errors)
        return value

    def save(self):
        """Send the reset e-mail via the validated Django PasswordResetForm."""
        request = self.context.get('request')
        # Set some values to trigger the send_email method.
        opts = {
            'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
            'request': request
        }
        # NOTE(review): assumes validate_email() ran first (self.reset_form and
        # its cleaned_data exist) and that the address belongs to an existing
        # Account - Account.objects.get raises otherwise; confirm callers
        # guarantee both.
        user = Account.objects.get(email=self.reset_form.cleaned_data['email'])
        self.reset_form.save(
            domain_override=getattr(settings, 'DOMAIN_BASE_URL'),
            html_email_template_name='registration/password_reset_email_html.html',
            extra_email_context={
                # uidb36/key follow allauth's reset-URL token format.
                'uidb36': user_pk_to_url_str(user),
                'key': default_token_generator.make_token(user),
                'site_name': getattr(settings, 'SITE_NAME'),
                'site_domain': getattr(settings, 'DOMAIN_NAME'),
            },
            **opts
        )
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordResetForm
from allauth.account.forms import EmailAwarePasswordResetTokenGenerator
from allauth.account.models import EmailAddress
from allauth.account.utils import user_pk_to_url_str
from rest_framework import serializers
from .models import Account
default_token_generator = EmailAwarePasswordResetTokenGenerator()
class AccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=False)
confirm_password = serializers.CharField(write_only=True, required=False)
verified_email = serializers.BooleanField(write_only=True, required=False)
captcha_token = serializers.CharField(required=False)
avatar = serializers.SerializerMethodField()
class Meta:
model = Account
fields = ('id', 'email', 'username', 'created_at', 'updated_at',
'name', 'tagline', 'avatar', 'password',
'confirm_password', 'verified_email', 'slug', 'is_staff', 'captcha_token')
read_only_fields = ('created_at', 'updated_at', 'avatar', 'slug', 'is_staff',)
extra_kwargs = {
'password': {'<PASSWORD>': True},
'confirm_password': {'<PASSWORD>': True},
'verified_email': {'write_only': True},
}
def to_representation(self, obj):
data = super(AccountSerializer, self).to_representation(obj)
return data
def get_avatar(self, obj):
return {
'tiny': obj.get_gravatar_tiny_url(),
'thumbnail': obj.get_gravatar_thumbnail_url(),
'medium': obj.get_gravatar_medium_url(),
}
def validate_username(self, username):
# Check that the username does not have a space in it
if ' ' in username:
raise serializers.ValidationError("Username cannot have spaces")
return username
class AccountReadOnlyLightSerializer(serializers.ModelSerializer):
avatar = serializers.SerializerMethodField()
class Meta:
model = Account
fields = ('email', 'username', 'slug', 'name', 'tagline', 'avatar',)
read_only_fields = ('email', 'username', 'slug', 'name', 'tagline', 'avatar',)
def get_avatar(self, obj):
return {
'tiny': obj.get_gravatar_tiny_url(),
'thumbnail': obj.get_gravatar_thumbnail_url(),
}
class LoginCustomSerializer(serializers.Serializer):
email = serializers.EmailField(max_length=200)
password = serializers.CharField(max_length=200)
class PasswordCustomSerializer(serializers.Serializer):
password = serializers.CharField(max_length=200)
class PasswordResetSerializer(serializers.Serializer):
"""
Serializer for requesting a password reset e-mail.
"""
email = serializers.EmailField()
password_reset_form_class = PasswordResetForm
domain = getattr(settings, 'DOMAIN_BASE_URL')
def validate_email(self, value):
# Create PasswordResetForm with the serializer
self.reset_form = self.password_reset_form_class(data=self.initial_data)
if not self.reset_form.is_valid():
raise serializers.ValidationError(self.reset_form.errors)
return value
def save(self):
request = self.context.get('request')
# Set some values to trigger the send_email method.
opts = {
'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
'request': request
}
user = Account.objects.get(email=self.reset_form.cleaned_data['email'])
self.reset_form.save(
domain_override=getattr(settings, 'DOMAIN_BASE_URL'),
html_email_template_name='registration/password_reset_email_html.html',
extra_email_context={
'uidb36': user_pk_to_url_str(user),
'key': default_token_generator.make_token(user),
'site_name': getattr(settings, 'SITE_NAME'),
'site_domain': getattr(settings, 'DOMAIN_NAME'),
},
**opts
) | 0.60964 | 0.099383 |
from db_works import db_connect, db_tables
import datetime
def get_settings(interval_param_):
    """
    Fetch and lock the next download-settings row matching the requested mode.

    :param interval_param_: one of "current" (API data), "daily_hist" (data
        from daily files) or "monthly_hist" (data from monthly files); any
        other value terminates the process, as does an empty result set.
    :return: tuple of the selected row's columns in the order of the SELECT
        list below.
    """
    db_schema_name, db_table_name, db_settings_table_name = db_tables()
    cursor, cnxn = db_connect()

    # Schema/table names come from our own configuration (db_tables), so
    # concatenating them is acceptable; the timestamp is bound as a query
    # parameter instead of being spliced into the SQL string.
    table = db_schema_name + "." + db_settings_table_name
    columns = ("download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
               "current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, "
               "monthly_update_from_files, start_hist_download_ux_timestamp")
    now_ts = int(datetime.datetime.utcnow().timestamp())

    if interval_param_ == "current":
        where = ("current_update_from_api = 1 and download_setting_status_id = 0 and "
                 "daily_hist_complete = 1 AND monthly_hist_complete = 1 AND "
                 "coalesce(next_download_ux_timestamp, 0) <= %s")
        order_column = "next_download_ux_timestamp"
    elif interval_param_ == "daily_hist":
        where = ("daily_update_from_files = 1 and download_setting_status_id = 0 and "
                 "daily_hist_complete = 0 AND monthly_hist_complete = 1 AND "
                 "coalesce(start_hist_download_ux_timestamp, 0) <= %s")
        order_column = "start_hist_download_ux_timestamp"
    elif interval_param_ == "monthly_hist":
        where = ("monthly_update_from_files = 1 and download_setting_status_id = 0 and "
                 "monthly_hist_complete = 0 AND "
                 "coalesce(start_hist_download_ux_timestamp, 0) <= %s")
        order_column = "start_hist_download_ux_timestamp"
    else:
        exit()

    cursor.execute("SELECT " + columns + " FROM " + table + " WHERE " + where +
                   " order by " + order_column + " asc limit 1", (now_ts,))
    download_setting = cursor.fetchall()
    if not download_setting:
        print("no data to download")
        exit()

    (download_settings_id, market, tick_interval, data_granulation, stock_type,
     stock_exchange, range_to_download, download_api_interval_sec,
     daily_update_from_files, monthly_update_from_files,
     start_hist_download_ux_timestamp) = download_setting[0]

    # Block the current setting by changing its status so concurrent workers
    # do not pick up the same row.
    cursor.execute("UPDATE " + table + " SET download_setting_status_id = %s where download_settings_id = %s",
                   (1, download_settings_id))
    cnxn.commit()
    print("settings blocked")
    return (download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange,
            range_to_download, download_api_interval_sec, daily_update_from_files,
            monthly_update_from_files, start_hist_download_ux_timestamp)
print() | stock_dwh_functions.py |
from db_works import db_connect, db_tables
import datetime
def get_settings(interval_param_):
db_schema_name, db_table_name, db_settings_table_name = db_tables()
cursor, cnxn = db_connect()
# interval parameter: current - API data; daily_hist - data from daily files; monthly_hist - data from monthly files
if interval_param_ == "current":
cursor.execute(
"SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
"current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
"FROM " + db_schema_name + "." + db_settings_table_name + " WHERE current_update_from_api = 1 and "
"download_setting_status_id = 0 and "
"daily_hist_complete = 1 AND "
"monthly_hist_complete = 1 AND "
"coalesce(next_download_ux_timestamp, 0) <= "
+ str(int(datetime.datetime.utcnow().timestamp())) + " order by next_download_ux_timestamp asc limit 1")
elif interval_param_ == "daily_hist":
cursor.execute("SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
"current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
"FROM " + db_schema_name + "." + db_settings_table_name + " WHERE daily_update_from_files = 1 and "
"download_setting_status_id = 0 and "
"daily_hist_complete = 0 AND "
"monthly_hist_complete = 1 AND "
"coalesce(start_hist_download_ux_timestamp, 0) <= "
+ str(int(datetime.datetime.utcnow().timestamp())) + " order by start_hist_download_ux_timestamp asc limit 1")
elif interval_param_ == "monthly_hist":
cursor.execute("SELECT download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, "
"current_range_to_overwrite, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp "
"FROM " + db_schema_name + "." + db_settings_table_name + " WHERE monthly_update_from_files = 1 and "
"download_setting_status_id = 0 and "
"monthly_hist_complete = 0 AND "
"coalesce(start_hist_download_ux_timestamp, 0) <= "
+ str(int(datetime.datetime.utcnow().timestamp())) + " order by start_hist_download_ux_timestamp asc limit 1")
else:
exit()
download_setting = cursor.fetchall()
if len(download_setting) > 0:
download_settings_id = download_setting[0][0]
market = download_setting[0][1]
tick_interval = download_setting[0][2]
data_granulation = download_setting[0][3]
stock_type = download_setting[0][4]
stock_exchange = download_setting[0][5]
range_to_download = download_setting[0][6]
download_api_interval_sec = download_setting[0][7]
daily_update_from_files = download_setting[0][8]
monthly_update_from_files = download_setting[0][9]
start_hist_download_ux_timestamp = download_setting[0][10]
else:
print("no data to download")
exit()
# block current setting changing its status
cursor.execute("UPDATE " + db_schema_name + "." + db_settings_table_name + " SET download_setting_status_id = %s where download_settings_id = %s", (1, download_settings_id))
cnxn.commit()
print("settings blocked")
return download_settings_id, market, tick_interval, data_granulation, stock_type, stock_exchange, range_to_download, download_api_interval_sec, daily_update_from_files, monthly_update_from_files, start_hist_download_ux_timestamp
print() | 0.199191 | 0.250311 |
from unittest import TestCase
from chibi.snippet.xml import guaranteed_list, compress_dummy_list
class test_guaranteed_list(TestCase):
    """guaranteed_list should wrap a scalar value under the given key in a list."""

    @staticmethod
    def _nmap_scan(host):
        # Build the nmap scan fixture with *host* under the 'host' key; used for
        # both input and expected output so the large literal exists only once.
        return {
            'args': 'nmap -oX - -sn 172.16.58.3/24',
            'debugging': {'level': '0'},
            'host': host,
            'runstats': {
                'finished': {
                    'elapsed': '6.82', 'exit': 'success',
                    'summary': (
                        'Nmap done at Thu May 2 15:24:38 '
                        '2019; 256 IP addresses (1 host up) '
                        'scanned in 6.82 seconds' ),
                    'time': '1556828678',
                    'timestr': 'Thu May 2 15:24:38 2019'},
                'hosts': {'down': '255', 'total': '256', 'up': '1'}},
            'scanner': 'nmap', 'start': '1556828671',
            'startstr': 'Thu May 2 15:24:31 2019', 'verbose': {'level': '0'},
            'version': '7.70', 'xmloutputversion': '1.04'}

    def setUp(self):
        host = {
            'address': {'addr': '172.16.58.3', 'addrtype': 'ipv4'},
            'hostnames': None,
            'status': {'reason': 'conn-refused', 'reason_ttl': '0',
                       'state': 'up'},
            'times': {'rttvar': '3768', 'srtt': '204', 'to': '100000'}}
        self.example = self._nmap_scan(host)
        # Expected output is identical except that the host dict is wrapped in
        # a list; previously the whole fixture literal was duplicated verbatim.
        self.expected = self._nmap_scan([dict(host)])

    def test_should_convert_host_in_list(self):
        result = guaranteed_list(self.example, 'host')
        self.assertEqual(self.expected, result)
class test_compress_dummy_list(TestCase):
    """compress_dummy_list should collapse singular wrapper keys (e.g.
    {'regions': {'region': x}} -> {'regions': x}), recursing into nested
    dicts and lists."""
    def setUp( self ):
        # Input mixes plain wrappers, a wrapper-of-dict, and wrappers nested
        # at various depths inside lists (including empty containers).
        self.example = {
            'regions': { 'region': 'asdf' },
            'attrs': { 'attr': { 'asdf': 'asdf' } },
            'lists': [ '', [], [ { 'regions': { 'region': 'qq' } } ] ],
            'list': [
                '',
                [ { 'regions': { 'region': 'qq' } } ],
                [
                    {
                        'regions': {
                            'region': [ { 'asdfs': { 'asdf': 1 } } ]
                        }
                    },
                ],
            ],
        }
        # Every dummy single-key wrapper is expected to be collapsed away.
        self.expected = {
            'attrs': {'asdf': 'asdf'},
            'list': [
                '',
                [ { 'regions': 'qq' } ],
                [ { 'regions': [ { 'asdfs': 1 } ] } ] ],
            'lists': [ '', [], [ { 'regions': 'qq' } ] ],
            'regions': 'asdf', }
    def test_should_convert_host_in_list( self ):
        # NOTE(review): test name is copy-pasted from the guaranteed_list test
        # above; something like test_should_compress_dummy_lists would fit better.
        result = compress_dummy_list( self.example )
        self.assertEqual( self.expected, result )
from chibi.snippet.xml import guaranteed_list, compress_dummy_list
class test_guaranteed_list(TestCase):
def setUp( self ):
self.example = {
'args': 'nmap -oX - -sn 172.16.58.3/24',
'debugging': {'level': '0'},
'host': {
'address': {'addr': '172.16.58.3', 'addrtype': 'ipv4'},
'hostnames': None,
'status': {'reason': 'conn-refused', 'reason_ttl': '0',
'state': 'up'},
'times': {'rttvar': '3768', 'srtt': '204', 'to': '100000'}},
'runstats': {
'finished': {
'elapsed': '6.82', 'exit': 'success',
'summary': (
'Nmap done at Thu May 2 15:24:38 '
'2019; 256 IP addresses (1 host up) '
'scanned in 6.82 seconds' ),
'time': '1556828678',
'timestr': 'Thu May 2 15:24:38 2019'},
'hosts': {'down': '255', 'total': '256', 'up': '1'}},
'scanner': 'nmap', 'start': '1556828671',
'startstr': 'Thu May 2 15:24:31 2019', 'verbose': {'level': '0'},
'version': '7.70', 'xmloutputversion': '1.04'}
self.expected = {
'args': 'nmap -oX - -sn 172.16.58.3/24',
'debugging': {'level': '0'},
'host': [ {
'address': {'addr': '172.16.58.3', 'addrtype': 'ipv4'},
'hostnames': None,
'status': {'reason': 'conn-refused', 'reason_ttl': '0',
'state': 'up'},
'times': {'rttvar': '3768', 'srtt': '204', 'to': '100000'}} ],
'runstats': {
'finished': {
'elapsed': '6.82', 'exit': 'success',
'summary': (
'Nmap done at Thu May 2 15:24:38 '
'2019; 256 IP addresses (1 host up) '
'scanned in 6.82 seconds' ),
'time': '1556828678',
'timestr': 'Thu May 2 15:24:38 2019'},
'hosts': {'down': '255', 'total': '256', 'up': '1'}},
'scanner': 'nmap', 'start': '1556828671',
'startstr': 'Thu May 2 15:24:31 2019', 'verbose': {'level': '0'},
'version': '7.70', 'xmloutputversion': '1.04'}
def test_should_convert_host_in_list( self ):
result = guaranteed_list( self.example, 'host' )
self.assertEqual( self.expected, result )
class test_compress_dummy_list(TestCase):
def setUp( self ):
self.example = {
'regions': { 'region': 'asdf' },
'attrs': { 'attr': { 'asdf': 'asdf' } },
'lists': [ '', [], [ { 'regions': { 'region': 'qq' } } ] ],
'list': [
'',
[ { 'regions': { 'region': 'qq' } } ],
[
{
'regions': {
'region': [ { 'asdfs': { 'asdf': 1 } } ]
}
},
],
],
}
self.expected = {
'attrs': {'asdf': 'asdf'},
'list': [
'',
[ { 'regions': 'qq' } ],
[ { 'regions': [ { 'asdfs': 1 } ] } ] ],
'lists': [ '', [], [ { 'regions': 'qq' } ] ],
'regions': 'asdf', }
def test_should_convert_host_in_list( self ):
result = compress_dummy_list( self.example )
self.assertEqual( self.expected, result ) | 0.487063 | 0.305386 |
import httplib2
import json
import random
import requests
import string
from functools import wraps
from database_setup import Base, Category, Item, User
from flask import (Flask,
flash,
jsonify,
make_response,
render_template,
request,
redirect,
session as login_session,
url_for,)
from sqlalchemy import create_engine, asc, desc
from sqlalchemy.orm import sessionmaker
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
app = Flask(__name__)
# Get client id from the json file provided by google.
CLIENT_ID = json.loads(open("client_secrets.json",
"r").read())["web"]["client_id"]
APPLICATION_NAME = "Item Catalog App"
# Connect to Database and create database session
engine = create_engine("sqlite:///itemCatalog.db")
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route("/login")
def showLogin():
"""It randomly generate 32 chars to prevent CSRF."""
state = "".join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session["state"] = state
return render_template("login.html", STATE=state)
@app.route("/gconnect", methods=["POST"])
def gconnect():
"""It will allow user to sign in the application with google account."""
# Validate state token
if request.args.get("state") != login_session["state"]:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers["Content-Type"] = "application/json"
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets("client_secrets.json", scope="")
oauth_flow.redirect_uri = "postmessage"
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps("Failed to upgrade the authorization code."), 401)
response.headers["Content-Type"] = "application/json"
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ("https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s"
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, "GET")[1])
# If there was an error in the access token info, abort.
if result.get("error") is not None:
response = make_response(json.dumps(result.get("error")), 500)
response.headers["Content-Type"] = "application/json"
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token["sub"]
if result["user_id"] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers["Content-Type"] = "application/json"
return response
# Verify that the access token is valid for this app.
if result["issued_to"] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers["Content-Type"] = "application/json"
return response
stored_access_token = login_session.get("access_token")
stored_gplus_id = login_session.get("gplus_id")
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps("This user's already connected."),
200)
response.headers["Content-Type"] = "application/json"
return response
# Store the access token in the session for later use.
login_session["access_token"] = credentials.access_token
login_session["gplus_id"] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/userinfo/v2/me"
params = {"access_token": credentials.access_token, "alt": "json"}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
print answer.json()
login_session["provider"] = "google"
login_session["username"] = data["name"]
login_session["picture"] = data["picture"]
login_session["email"] = data["email"]
# see if user exists, if it doesn"t make a new one
user_id = getUserID(login_session["email"])
if not user_id:
user_id = createUser(login_session)
login_session["user_id"] = user_id
output = ""
output += "<h1>Welcome, "
output += login_session["username"]
output += "!</h1>"
output += "<img src='"
output += login_session["picture"]
output += ("""'style='width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;'>""")
flash("you are now logged in as %s" % login_session["username"])
print "done!"
return output
def getUserID(email):
"""It checks if the given email address is already in database.
If yes, it will return the user id.
"""
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
def getUserInfo(user_id):
"""It return the user object by checking the user id."""
user = session.query(User).filter_by(id=user_id).one()
return user
def createUser(login_session):
"""It checks if the user has stored in the database.
If not, it will create a new one."""
newUser = User(name=login_session["username"],
email=login_session["email"],
picture=login_session["picture"])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session["email"]).one()
return user.id
def login_required(f):
"""This checks whether the user has signed in or not"""
@wraps(f)
def decorated_function(*args, **kwargs):
if "username" not in login_session:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route("/gdisconnect")
def gdisconnect():
"""It will clear login_session when logging out google account"""
access_token = login_session["access_token"]
print "In gdisconnect access token is %s" % access_token
print "User name is: "
print login_session["username"]
if access_token is None:
print "Access Token is None"
response = make_response(json.dumps("Current user not connected."),
401)
response.headers["Content-Type"] = "application/json"
return response
url = ("https://accounts.google.com/o/oauth2/revoke?token=%s"
% login_session["access_token"])
h = httplib2.Http()
result = h.request(url, "GET")[0]
print "result is "
print result
if result["status"] == "200":
del login_session["access_token"]
del login_session["gplus_id"]
del login_session["username"]
del login_session["user_id"]
del login_session["email"]
del login_session["picture"]
response = make_response(json.dumps("Successfully disconnected."), 200)
response.headers["Content-Type"] = "application/json"
return response
else:
response = make_response(json.dumps("Failed to revoke user's token.",
400))
response.headers["Content-Type"] = "application/json"
return response
@app.route("/fbconnect", methods=["POST"])
def fbconnect():
"""This allows users to use facebook account to sign in."""
if request.args.get("state") != login_session["state"]:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers["Content-Type"] = "application/json"
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_id"]
app_secret = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_secret"]
url = ("https://graph.facebook.com/v2.8/oauth/access_token?"
"grant_type=fb_exchange_token&client_id=%s&client_secret=%s"
"&fb_exchange_token=%s") % (app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
token = data["access_token"]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.8/me"
url = userinfo_url + "?access_token=%s&fields=name,id,email" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
print data
login_session["provider"] = "facebook"
login_session["username"] = data["name"]
login_session["email"] = data["email"]
login_session["facebook_id"] = data["id"]
login_session["access_token"] = token
# Get user picture
url = userinfo_url + \
"/picture?access_token=%s&redirect=0&height=200&width=200" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
login_session["picture"] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session["email"])
if not user_id:
user_id = createUser(login_session)
login_session["user_id"] = user_id
output = ""
output += "<h1>Welcome, "
output += login_session["username"]
output += "!</h1>"
output += "<img src='"
output += login_session["picture"]
output += ("""'style='width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;'>""")
flash("Now logged in as %s" % login_session["username"])
return output
@app.route("/fbdisconnect")
def fbdisconnect():
"""It will clear login_session when logging out facebook account"""
facebook_id = login_session["facebook_id"]
# The access token must me included to successfully logout
access_token = login_session["access_token"]
url = ("https://graph.facebook.com/%s/permissions?access_token=%s"
% (facebook_id, access_token))
h = httplib2.Http()
result = h.request(url, "DELETE")[1]
del login_session["access_token"]
del login_session["username"]
del login_session["user_id"]
del login_session["facebook_id"]
del login_session["email"]
del login_session["picture"]
return "You have been logged out"
# Disconnect based on provider
@app.route("/disconnect")
def disconnect():
"""This is the logout function for facebook and google account"""
if "provider" in login_session:
if login_session["provider"] == "google":
gdisconnect()
if login_session["provider"] == "facebook":
fbdisconnect()
del login_session["provider"]
flash("You have successfully been logged out.")
return redirect(url_for("showCategories"))
else:
flash("You were not logged in")
return redirect(url_for("showCategories"))
# View the whole database
@app.route("/category/JSON")
def categoriesJSON():
categories = session.query(Category).all()
serialized_categories = []
for i in categories:
new_serialized_category = i.serialize
items = session.query(Item).filter_by(category_id=i.id).all()
serialized_items = []
for j in items:
serialized_items.append(j.serialize)
new_serialized_category["items"] = serialized_items
serialized_categories.append(new_serialized_category)
return jsonify(categories=serialized_categories)
# JSON APIs to view Category Information
@app.route("/category/<int:category_id>/item/JSON")
def categoryItemJSON(category_id):
items = session.query(Item).filter_by(
category_id=category_id).all()
return jsonify(items=[i.serialize for i in items]), 200
# View a Item Information
@app.route("/category/<int:category_id>/item/<int:item_id>/JSON")
def itemJSON(category_id, item_id):
item = session.query(Item).filter_by(id=item_id).one()
return jsonify(item=item.serialize)
# View all users
@app.route("/user/JSON")
def userJSON():
users = session.query(User).all()
return jsonify(users=[i.serialize for i in users])
# Show all categories
@app.route("/")
@app.route("/category/")
def showCategories():
categories = session.query(Category).order_by(desc(Category.id)).all()
if "username" not in login_session:
return render_template("publicCategories.html", categories=categories)
else:
return render_template("categories.html", categories=categories)
# Create a new category
@app.route("/category/new/", methods=["GET", "POST"])
@login_required
def newCategory():
if request.method == "POST":
newCategory = Category(
name=request.form["name"], user_id=login_session["user_id"])
session.add(newCategory)
flash("New Category %s Successfully Created" % newCategory.name)
session.commit()
return redirect(url_for("showCategories"))
else:
return render_template("newCategory.html")
# Edit a category
@app.route("/category/<int:category_id>/edit/", methods=["GET", "POST"])
@login_required
def editCategory(category_id):
editedCategory = session.query(
Category).filter_by(id=category_id).one()
if editedCategory.user_id != login_session["user_id"]:
return "<script>function myFunction()\
{alert('You are not authorized to edit this category.\
Please create your own category in order to delete.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
if request.form["name"]:
editedCategory.name = request.form["name"]
flash("Category Successfully Updated to %s" % editedCategory.name)
return redirect(url_for("showCategories"))
else:
return render_template("editCategory.html", category=editedCategory)
# Delete a category
@app.route("/category/<int:category_id>/delete/", methods=["GET", "POST"])
@login_required
def deleteCategory(category_id):
categoryToDelete = session.query(
Category).filter_by(id=category_id).one()
if categoryToDelete.user_id != login_session["user_id"]:
return "<script>function myFunction()\
{alert('You are not authorized to delete this category.\
Please create your own category in order to delete.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
session.delete(categoryToDelete)
flash("%s Successfully Deleted" % categoryToDelete.name)
session.commit()
return redirect(url_for("showCategories"))
else:
return render_template("deleteCategory.html",
category=categoryToDelete)
# Show all items for a category
@app.route("/category/<int:category_id>/")
@app.route("/category/<int:category_id>/item/")
def showItems(category_id):
category = session.query(Category).filter_by(id=category_id).one()
creator = getUserInfo(category.user_id)
items = session.query(Item).filter_by(
category_id=category_id).order_by(desc('id')).all()
# either one condition is true, the statement will be executed
if ("username" not in login_session or
creator.id != login_session["user_id"]):
return render_template("publicItems.html",
items=items,
category=category,
creator=creator)
else:
return render_template("items.html",
items=items,
category=category,
creator=creator)
# Create a new item for a category
@app.route("/category/<int:category_id>/item/new/", methods=["GET", "POST"])
@login_required
def newItem(category_id):
category = session.query(Category).filter_by(id=category_id).one()
if login_session["user_id"] != category.user_id:
return "<script>function myFunction()\
{alert('You are not authorized to add items to this category.\
Please create your own category in order to add items.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
newItem = Item(name=request.form["name"],
category_id=category_id,
user_id=category.user_id)
session.add(newItem)
session.commit()
flash("New Item %s Successfully Created" % (newItem.name))
return redirect(url_for("showItems", category_id=category_id))
else:
return render_template("newItem.html", category_id=category_id)
# Edit a item
@app.route("/category/<int:category_id>/item/<int:item_id>/edit",
methods=["GET", "POST"])
@login_required
def editItem(category_id, item_id):
editedItem = session.query(Item).filter_by(id=item_id).one()
category = session.query(Category).filter_by(id=category_id).one()
if login_session["user_id"] != category.user_id:
return "<script>function myFunction()\
{alert('You are not authorized to edit items to this category.\
Please create your own category in order to edit items.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
if request.form["name"]:
editedItem.name = request.form["name"]
session.add(editedItem)
session.commit()
flash("Item Successfully Updated to %s" % editedItem.name)
return redirect(url_for("showItems", category_id=category_id))
else:
return render_template("editItem.html",
category_id=category_id,
item_id=item_id,
item=editedItem)
# Delete a item
@app.route("/category/<int:category_id>/item/<int:item_id>/delete",
methods=["GET", "POST"])
@login_required
def deleteItem(category_id, item_id):
category = session.query(Category).filter_by(id=category_id).one()
itemToDelete = session.query(Item).filter_by(id=item_id).one()
if login_session["user_id"] != category.user_id:
return "<script>function myFunction()\
{alert('You are not authorized to delete items from category.\
Please create your own category in order to delete items.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
session.delete(itemToDelete)
session.commit()
flash("Item Successfully Deleted")
return redirect(url_for("showItems", category_id=category_id))
else:
return render_template("deleteItem.html",
item=itemToDelete,
category=category)
if __name__ == "__main__":
app.secret_key = "super_secret_key"
app.debug = True
app.run(host="0.0.0.0", port=8080) | item-catalog/project.py | import httplib2
import json
import random
import requests
import string
from functools import wraps
from database_setup import Base, Category, Item, User
from flask import (Flask,
flash,
jsonify,
make_response,
render_template,
request,
redirect,
session as login_session,
url_for,)
from sqlalchemy import create_engine, asc, desc
from sqlalchemy.orm import sessionmaker
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
app = Flask(__name__)
# Get client id from the json file provided by google.
CLIENT_ID = json.loads(open("client_secrets.json",
"r").read())["web"]["client_id"]
APPLICATION_NAME = "Item Catalog App"
# Connect to Database and create database session
engine = create_engine("sqlite:///itemCatalog.db")
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route("/login")
def showLogin():
"""It randomly generate 32 chars to prevent CSRF."""
state = "".join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session["state"] = state
return render_template("login.html", STATE=state)
@app.route("/gconnect", methods=["POST"])
def gconnect():
"""It will allow user to sign in the application with google account."""
# Validate state token
if request.args.get("state") != login_session["state"]:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers["Content-Type"] = "application/json"
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets("client_secrets.json", scope="")
oauth_flow.redirect_uri = "postmessage"
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps("Failed to upgrade the authorization code."), 401)
response.headers["Content-Type"] = "application/json"
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ("https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s"
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, "GET")[1])
# If there was an error in the access token info, abort.
if result.get("error") is not None:
response = make_response(json.dumps(result.get("error")), 500)
response.headers["Content-Type"] = "application/json"
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token["sub"]
if result["user_id"] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers["Content-Type"] = "application/json"
return response
# Verify that the access token is valid for this app.
if result["issued_to"] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers["Content-Type"] = "application/json"
return response
stored_access_token = login_session.get("access_token")
stored_gplus_id = login_session.get("gplus_id")
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps("This user's already connected."),
200)
response.headers["Content-Type"] = "application/json"
return response
# Store the access token in the session for later use.
login_session["access_token"] = credentials.access_token
login_session["gplus_id"] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/userinfo/v2/me"
params = {"access_token": credentials.access_token, "alt": "json"}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
print answer.json()
login_session["provider"] = "google"
login_session["username"] = data["name"]
login_session["picture"] = data["picture"]
login_session["email"] = data["email"]
# see if user exists, if it doesn"t make a new one
user_id = getUserID(login_session["email"])
if not user_id:
user_id = createUser(login_session)
login_session["user_id"] = user_id
output = ""
output += "<h1>Welcome, "
output += login_session["username"]
output += "!</h1>"
output += "<img src='"
output += login_session["picture"]
output += ("""'style='width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;'>""")
flash("you are now logged in as %s" % login_session["username"])
print "done!"
return output
def getUserID(email):
"""It checks if the given email address is already in database.
If yes, it will return the user id.
"""
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
def getUserInfo(user_id):
"""It return the user object by checking the user id."""
user = session.query(User).filter_by(id=user_id).one()
return user
def createUser(login_session):
"""It checks if the user has stored in the database.
If not, it will create a new one."""
newUser = User(name=login_session["username"],
email=login_session["email"],
picture=login_session["picture"])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session["email"]).one()
return user.id
def login_required(f):
"""This checks whether the user has signed in or not"""
@wraps(f)
def decorated_function(*args, **kwargs):
if "username" not in login_session:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route("/gdisconnect")
def gdisconnect():
"""It will clear login_session when logging out google account"""
access_token = login_session["access_token"]
print "In gdisconnect access token is %s" % access_token
print "User name is: "
print login_session["username"]
if access_token is None:
print "Access Token is None"
response = make_response(json.dumps("Current user not connected."),
401)
response.headers["Content-Type"] = "application/json"
return response
url = ("https://accounts.google.com/o/oauth2/revoke?token=%s"
% login_session["access_token"])
h = httplib2.Http()
result = h.request(url, "GET")[0]
print "result is "
print result
if result["status"] == "200":
del login_session["access_token"]
del login_session["gplus_id"]
del login_session["username"]
del login_session["user_id"]
del login_session["email"]
del login_session["picture"]
response = make_response(json.dumps("Successfully disconnected."), 200)
response.headers["Content-Type"] = "application/json"
return response
else:
response = make_response(json.dumps("Failed to revoke user's token.",
400))
response.headers["Content-Type"] = "application/json"
return response
@app.route("/fbconnect", methods=["POST"])
def fbconnect():
"""This allows users to use facebook account to sign in."""
if request.args.get("state") != login_session["state"]:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers["Content-Type"] = "application/json"
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_id"]
app_secret = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_secret"]
url = ("https://graph.facebook.com/v2.8/oauth/access_token?"
"grant_type=fb_exchange_token&client_id=%s&client_secret=%s"
"&fb_exchange_token=%s") % (app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
token = data["access_token"]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.8/me"
url = userinfo_url + "?access_token=%s&fields=name,id,email" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
print data
login_session["provider"] = "facebook"
login_session["username"] = data["name"]
login_session["email"] = data["email"]
login_session["facebook_id"] = data["id"]
login_session["access_token"] = token
# Get user picture
url = userinfo_url + \
"/picture?access_token=%s&redirect=0&height=200&width=200" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
login_session["picture"] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session["email"])
if not user_id:
user_id = createUser(login_session)
login_session["user_id"] = user_id
output = ""
output += "<h1>Welcome, "
output += login_session["username"]
output += "!</h1>"
output += "<img src='"
output += login_session["picture"]
output += ("""'style='width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;'>""")
flash("Now logged in as %s" % login_session["username"])
return output
@app.route("/fbdisconnect")
def fbdisconnect():
"""It will clear login_session when logging out facebook account"""
facebook_id = login_session["facebook_id"]
# The access token must me included to successfully logout
access_token = login_session["access_token"]
url = ("https://graph.facebook.com/%s/permissions?access_token=%s"
% (facebook_id, access_token))
h = httplib2.Http()
result = h.request(url, "DELETE")[1]
del login_session["access_token"]
del login_session["username"]
del login_session["user_id"]
del login_session["facebook_id"]
del login_session["email"]
del login_session["picture"]
return "You have been logged out"
# Disconnect based on provider
@app.route("/disconnect")
def disconnect():
"""This is the logout function for facebook and google account"""
if "provider" in login_session:
if login_session["provider"] == "google":
gdisconnect()
if login_session["provider"] == "facebook":
fbdisconnect()
del login_session["provider"]
flash("You have successfully been logged out.")
return redirect(url_for("showCategories"))
else:
flash("You were not logged in")
return redirect(url_for("showCategories"))
# View the whole database
@app.route("/category/JSON")
def categoriesJSON():
categories = session.query(Category).all()
serialized_categories = []
for i in categories:
new_serialized_category = i.serialize
items = session.query(Item).filter_by(category_id=i.id).all()
serialized_items = []
for j in items:
serialized_items.append(j.serialize)
new_serialized_category["items"] = serialized_items
serialized_categories.append(new_serialized_category)
return jsonify(categories=serialized_categories)
# JSON APIs to view Category Information
@app.route("/category/<int:category_id>/item/JSON")
def categoryItemJSON(category_id):
items = session.query(Item).filter_by(
category_id=category_id).all()
return jsonify(items=[i.serialize for i in items]), 200
# View a Item Information
@app.route("/category/<int:category_id>/item/<int:item_id>/JSON")
def itemJSON(category_id, item_id):
item = session.query(Item).filter_by(id=item_id).one()
return jsonify(item=item.serialize)
# View all users
@app.route("/user/JSON")
def userJSON():
users = session.query(User).all()
return jsonify(users=[i.serialize for i in users])
# Show all categories
@app.route("/")
@app.route("/category/")
def showCategories():
categories = session.query(Category).order_by(desc(Category.id)).all()
if "username" not in login_session:
return render_template("publicCategories.html", categories=categories)
else:
return render_template("categories.html", categories=categories)
# Create a new category
@app.route("/category/new/", methods=["GET", "POST"])
@login_required
def newCategory():
if request.method == "POST":
newCategory = Category(
name=request.form["name"], user_id=login_session["user_id"])
session.add(newCategory)
flash("New Category %s Successfully Created" % newCategory.name)
session.commit()
return redirect(url_for("showCategories"))
else:
return render_template("newCategory.html")
# Edit a category
@app.route("/category/<int:category_id>/edit/", methods=["GET", "POST"])
@login_required
def editCategory(category_id):
editedCategory = session.query(
Category).filter_by(id=category_id).one()
if editedCategory.user_id != login_session["user_id"]:
return "<script>function myFunction()\
{alert('You are not authorized to edit this category.\
Please create your own category in order to delete.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
if request.form["name"]:
editedCategory.name = request.form["name"]
flash("Category Successfully Updated to %s" % editedCategory.name)
return redirect(url_for("showCategories"))
else:
return render_template("editCategory.html", category=editedCategory)
# Delete a category
@app.route("/category/<int:category_id>/delete/", methods=["GET", "POST"])
@login_required
def deleteCategory(category_id):
categoryToDelete = session.query(
Category).filter_by(id=category_id).one()
if categoryToDelete.user_id != login_session["user_id"]:
return "<script>function myFunction()\
{alert('You are not authorized to delete this category.\
Please create your own category in order to delete.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
session.delete(categoryToDelete)
flash("%s Successfully Deleted" % categoryToDelete.name)
session.commit()
return redirect(url_for("showCategories"))
else:
return render_template("deleteCategory.html",
category=categoryToDelete)
# Show all items for a category
@app.route("/category/<int:category_id>/")
@app.route("/category/<int:category_id>/item/")
def showItems(category_id):
category = session.query(Category).filter_by(id=category_id).one()
creator = getUserInfo(category.user_id)
items = session.query(Item).filter_by(
category_id=category_id).order_by(desc('id')).all()
# either one condition is true, the statement will be executed
if ("username" not in login_session or
creator.id != login_session["user_id"]):
return render_template("publicItems.html",
items=items,
category=category,
creator=creator)
else:
return render_template("items.html",
items=items,
category=category,
creator=creator)
# Create a new item for a category
@app.route("/category/<int:category_id>/item/new/", methods=["GET", "POST"])
@login_required
def newItem(category_id):
category = session.query(Category).filter_by(id=category_id).one()
if login_session["user_id"] != category.user_id:
return "<script>function myFunction()\
{alert('You are not authorized to add items to this category.\
Please create your own category in order to add items.');\
window.location.href='/category/%s/';}\
</script><body onload='myFunction()''>" % category_id
if request.method == "POST":
newItem = Item(name=request.form["name"],
category_id=category_id,
user_id=category.user_id)
session.add(newItem)
session.commit()
flash("New Item %s Successfully Created" % (newItem.name))
return redirect(url_for("showItems", category_id=category_id))
else:
return render_template("newItem.html", category_id=category_id)
# Edit an item
@app.route("/category/<int:category_id>/item/<int:item_id>/edit",
           methods=["GET", "POST"])
@login_required
def editItem(category_id, item_id):
    """Rename an item; only the owner of its category may edit it."""
    editedItem = session.query(Item).filter_by(id=item_id).one()
    category = session.query(Category).filter_by(id=category_id).one()
    if login_session["user_id"] != category.user_id:
        # Fixed: removed the stray extra quote after onload='myFunction()'.
        return "<script>function myFunction()\
        {alert('You are not authorized to edit items to this category.\
        Please create your own category in order to edit items.');\
        window.location.href='/category/%s/';}\
        </script><body onload='myFunction()'>" % category_id
    if request.method == "POST":
        # Only overwrite the name when a non-empty value was submitted.
        if request.form["name"]:
            editedItem.name = request.form["name"]
        session.add(editedItem)
        session.commit()
        flash("Item Successfully Updated to %s" % editedItem.name)
        return redirect(url_for("showItems", category_id=category_id))
    else:
        return render_template("editItem.html",
                               category_id=category_id,
                               item_id=item_id,
                               item=editedItem)
# Delete an item
@app.route("/category/<int:category_id>/item/<int:item_id>/delete",
           methods=["GET", "POST"])
@login_required
def deleteItem(category_id, item_id):
    """Delete an item; only the owner of its category may delete it."""
    category = session.query(Category).filter_by(id=category_id).one()
    itemToDelete = session.query(Item).filter_by(id=item_id).one()
    if login_session["user_id"] != category.user_id:
        # Fixed: removed the stray extra quote after onload='myFunction()'.
        return "<script>function myFunction()\
        {alert('You are not authorized to delete items from category.\
        Please create your own category in order to delete items.');\
        window.location.href='/category/%s/';}\
        </script><body onload='myFunction()'>" % category_id
    if request.method == "POST":
        session.delete(itemToDelete)
        session.commit()
        flash("Item Successfully Deleted")
        return redirect(url_for("showItems", category_id=category_id))
    else:
        return render_template("deleteItem.html",
                               item=itemToDelete,
                               category=category)
if __name__ == "__main__":
    # Development-server configuration only: the secret key is hardcoded and
    # debug mode is on -- neither is suitable for production.
    app.secret_key = "super_secret_key"
    app.debug = True
app.run(host="0.0.0.0", port=8080) | 0.438184 | 0.079032 |
import numpy as np
import matplotlib.pyplot as plt
class Error(Exception):
    """Base class for exceptions raised by this module."""
class InitializationError(Error):
    """Raised when the fractal has not been generated before use."""

    def __init__(self, message):
        # Forward to Exception so str(exc) and exc.args carry the message
        # (the original stored only self.message, leaving str(exc) empty).
        super().__init__(message)
        self.message = message
class SierpinskiTriangle(object):
    """
    Sierpinski-triangle plotter built on matplotlib.

    Call :meth:`fractal` to recursively subdivide the outer triangle, then
    call the instance itself to show the figure.

    Reference http://bopace.github.io/python/2016/06/09/python-turtle-sierpinski/
    """
    def __init__(self, **options):
        '''Set up the outer triangle and plot its (closed) outline.

        Options: ``x``/``y`` -- lower-left corner (default 0, 0);
        ``length`` -- side length (default 1).
        '''
        self._x_ = options.get('x', 0)
        self._y_ = options.get('y', 0)
        self._length_ = options.get('length', 1)
        self._centers_ = []  # centroids of every triangle drawn so far
        self._flag_ = 0      # becomes 1 once fractal() has been called
        # Four points: the three corners plus the first corner repeated so the
        # plotted outline is closed.
        # NOTE(review): the apex y-coordinate does not add self._y_, so a
        # non-zero ``y`` option shifts only the base -- confirm intended.
        vertices =\
        np.array\
        (
            [
                [self._x_, self._y_],
                [self._x_ + self._length_ / 2, 0.5 * (3 ** 0.5) * self._length_],
                [self._x_ + self._length_, self._y_],
                [self._x_, self._y_]
            ]
        )
        self._vertices_ = vertices[:-1]
        plt.plot(vertices[:, 0], vertices[:, 1], color = 'C0')
        self._points_ = []
    def fractal(self, **options):
        '''Recursively draw the fractal and return the drawn points.

        Options: ``depth`` -- recursion depth (default 3).
        '''
        self._flag_ = 1
        self._depth_ = options.get('depth', 3)
        self._draw_fractal_(self._vertices_, self._depth_)
        # NOTE(review): the first recorded point is dropped here -- presumably
        # to skip a duplicate of the outer corner; confirm.
        return np.asarray(self._points_)[1:]
    def draw_triangle(self, vertices):
        '''Plot one triangle, recording its corner points and centroid.'''
        self._points_.append(vertices[0])
        self._points_.append(vertices[1])
        self._points_.append(vertices[2])
        self._centers_.append((vertices[0] + vertices[1] + vertices[2]) / 3)
        # Only three points are plotted, so this outline is left open.
        plt.plot(vertices[:, 0], vertices[:, 1], color = 'C0')
    def _midpoint_(self, point1, point2):
        '''Return the midpoint of segment point1-point2 as a plain [x, y].'''
        return [(point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2]
    def _draw_fractal_(self, vertices, level):
        '''Draw ``vertices`` then recurse into its three corner sub-triangles.'''
        self.draw_triangle(vertices)
        if level > 0:
            # Sub-triangle at the first corner.
            v =\
            np.array\
            (
                [
                    vertices[0],
                    self._midpoint_(vertices[0], vertices[1]),
                    self._midpoint_(vertices[0], vertices[2])
                ]
            )
            self._draw_fractal_(v, level - 1)
            # Sub-triangle at the second corner.
            v =\
            np.array\
            (
                [
                    vertices[1],
                    self._midpoint_(vertices[0], vertices[1]),
                    self._midpoint_(vertices[1], vertices[2])
                ]
            )
            self._draw_fractal_(v, level - 1)
            # Sub-triangle at the third corner.
            v =\
            np.array\
            (
                [
                    vertices[2],
                    self._midpoint_(vertices[2], vertices[1]),
                    self._midpoint_(vertices[0], vertices[2])
                ]
            )
            self._draw_fractal_(v, level - 1)
    def __call__(self, **options):
        '''Show the figure; optionally scatter the recorded centroids.

        Options: ``with_centers`` -- also plot centroids (default False).
        Raises InitializationError if fractal() has not been called yet.
        '''
        if self._flag_ == 0:
            raise InitializationError("Function not initialized")
        with_centers = options.get('with_centers', False)
        if with_centers:
            centers = np.asarray(self._centers_)
            plt.scatter(centers[:, 0], centers[:, 1])
        plt.show()
def run():
    """Demo entry point: draw a depth-2 Sierpinski triangle and show it."""
    triangle = SierpinskiTriangle()
    triangle.fractal(depth=2)
    triangle()
if __name__ == '__main__':
run() | pyfractals/sierpinski_triangle.py | import numpy as np
import matplotlib.pyplot as plt
class Error(Exception):
    """Base class for exceptions raised by this module."""
class InitializationError(Error):
    """Raised when the fractal has not been generated before use."""

    def __init__(self, message):
        # Forward to Exception so str(exc) and exc.args carry the message
        # (the original stored only self.message, leaving str(exc) empty).
        super().__init__(message)
        self.message = message
class SierpinskiTriangle(object):
    """
    Matplotlib-based Sierpinski triangle renderer.

    Reference http://bopace.github.io/python/2016/06/09/python-turtle-sierpinski/
    """

    def __init__(self, **options):
        '''Prepare the outer triangle and plot its (closed) outline.'''
        self._x_ = options.get('x', 0)
        self._y_ = options.get('y', 0)
        self._length_ = options.get('length', 1)
        self._centers_ = []
        self._flag_ = 0
        left = [self._x_, self._y_]
        apex = [self._x_ + self._length_ / 2, 0.5 * (3 ** 0.5) * self._length_]
        right = [self._x_ + self._length_, self._y_]
        # Repeat the first corner so the plotted outline closes.
        vertices = np.array([left, apex, right, left])
        self._vertices_ = vertices[:-1]
        plt.plot(vertices[:, 0], vertices[:, 1], color='C0')
        self._points_ = []

    def fractal(self, **options):
        '''Draw the fractal to the requested depth and return its points.'''
        self._flag_ = 1
        self._depth_ = options.get('depth', 3)
        self._draw_fractal_(self._vertices_, self._depth_)
        return np.asarray(self._points_)[1:]

    def draw_triangle(self, vertices):
        '''Plot one triangle, recording its corners and centroid.'''
        for corner in (vertices[0], vertices[1], vertices[2]):
            self._points_.append(corner)
        self._centers_.append((vertices[0] + vertices[1] + vertices[2]) / 3)
        plt.plot(vertices[:, 0], vertices[:, 1], color='C0')

    def _midpoint_(self, point1, point2):
        '''Midpoint of the segment point1-point2, as a plain [x, y] list.'''
        return [(point1[0] + point2[0]) / 2,
                (point1[1] + point2[1]) / 2]

    def _draw_fractal_(self, vertices, level):
        '''Draw ``vertices`` then recurse into its three corner triangles.'''
        self.draw_triangle(vertices)
        if level <= 0:
            return
        mid01 = self._midpoint_(vertices[0], vertices[1])
        mid12 = self._midpoint_(vertices[1], vertices[2])
        mid02 = self._midpoint_(vertices[0], vertices[2])
        sub_triangles = (
            np.array([vertices[0], mid01, mid02]),
            np.array([vertices[1], mid01, mid12]),
            np.array([vertices[2], mid12, mid02]),
        )
        for sub in sub_triangles:
            self._draw_fractal_(sub, level - 1)

    def __call__(self, **options):
        '''Show the figure, optionally with the triangle centroids.'''
        if self._flag_ == 0:
            raise InitializationError("Function not initialized")
        if options.get('with_centers', False):
            centers = np.asarray(self._centers_)
            plt.scatter(centers[:, 0], centers[:, 1])
        plt.show()
def run():
    '''Demo entry point: draw a depth-2 Sierpinski triangle and show it.'''
    spt = SierpinskiTriangle()
    points = spt.fractal(depth = 2)  # returned vertex array (unused here)
    spt()
if __name__ == '__main__':
run() | 0.574753 | 0.325869 |
def dem_from_bbox(bbox, crs=3857, resolution=2048, path=None):
    """Download a DEM for a bounding box from the USGS 3DEP WMS service.

    :param bbox: bounding box as [xmin, ymin, xmax, ymax]
    :param crs: EPSG code of the CRS the bounding box is expressed in
    :param resolution: pixel height of the requested image
    :param path: optional local filepath with extension .tif to save to
    :return: DEM image bytes, or None when *path* is given
    """
    import requests
    from pyproj import Transformer

    height = resolution
    # Reproject the bbox corners into web mercator (EPSG:3857).
    # NOTE(review): Transformer.from_crs is used without always_xy=True, so
    # axis order follows each CRS definition -- confirm inputs match.
    transformer = Transformer.from_crs(crs, 3857)
    # Renamed from min/max: those shadowed the builtins.
    lower = transformer.transform(bbox[0], bbox[1])
    upper = transformer.transform(bbox[2], bbox[3])
    bbox = [lower[0], lower[1], upper[0], upper[1]]
    x_meters = bbox[2] - bbox[0]
    y_meters = bbox[3] - bbox[1]
    print(f'x_meters = {x_meters}')
    print(f'y_meters = {y_meters}')
    # Keep the pixel aspect equal to the geographic aspect; the WMS expects
    # integer pixel dimensions, so round instead of sending a float.
    width = int(round(height * (x_meters / y_meters)))
    # Request the elevation layer from the National Map WMS endpoint.
    bboxstr = ','.join(str(v) for v in bbox)
    demURL = f'https://elevation.nationalmap.gov/arcgis/services/3DEPElevation/ImageServer/WMSServer?SERVICE=WMS&VERSION=1.3.0&REQUEST=GetMap&BBOX={bboxstr}&FORMAT=tiff&WIDTH={width}&HEIGHT={height}&CRS=EPSG:3857&LAYERS=3DEPElevation:None'
    response = requests.get(demURL)
    response.raise_for_status()  # fail fast instead of saving an error page
    if path is not None:
        with open(path, 'wb') as f:
            f.write(response.content)
        return
    return response.content
def img_from_bbox(bbox, crs=3857, resolution=2048, path=None, format='png', cir=False, dtype='U8'):
    """Download NAIP imagery for a bounding box from the USDA image server.

    :param bbox: bounding box as [xmin, ymin, xmax, ymax]
    :param crs: EPSG code of the CRS the bounding box is expressed in
    :param resolution: vertical resolution of output image. Int, max 4096.
    :param path: optional local filepath (extension matching *format*) to save to
    :param format: output image format. 'png', 'jpg', or 'tiff'
    :param cir: True for a color-infrared image with band order 4,1,2
    :param dtype: pixel type, e.g. 'U8' (8-bit unsigned) or 'F32' (32-bit float)
    :return: image bytes, or None when *path* is given
    :raises Exception: when the response is suspiciously small (likely an error page)
    """
    import requests
    from pyproj import Transformer
    from sys import getsizeof

    height = resolution
    # Reproject the bbox corners into web mercator (EPSG:3857).
    # NOTE(review): from_crs is used without always_xy=True, so axis order
    # follows each CRS definition -- confirm inputs match.
    transformer = Transformer.from_crs(crs, 3857)
    # Renamed from min/max: those shadowed the builtins.
    lower = transformer.transform(bbox[0], bbox[1])
    upper = transformer.transform(bbox[2], bbox[3])
    bbox = [lower[0], lower[1], upper[0], upper[1]]
    x_meters = bbox[2] - bbox[0]
    y_meters = bbox[3] - bbox[1]
    print(f'x_meters = {x_meters}')
    print(f'y_meters = {y_meters}')
    # Match pixel aspect to geographic aspect; the server expects integers,
    # so round instead of embedding a float in the URL.
    width = int(round(height * (x_meters / y_meters)))
    bboxstr = ','.join(str(v) for v in bbox)
    imgURL = f'https://gis.apfo.usda.gov/arcgis/rest/services/NAIP/USDA_CONUS_PRIME/ImageServer/exportImage?bbox={bboxstr}&size={width}%2C{height}&pixelType={dtype}&f=image'
    if format == 'tiff':
        imgURL += '&format=tiff'
    elif format == 'jpg':
        imgURL += '&format=jpg'
    else:
        imgURL += '&format=png'
    if cir:
        imgURL += '&bandIds=4%2C1%2C2'
    response = requests.get(imgURL)
    # A tiny payload almost certainly means the server returned an error body.
    if getsizeof(response.content) < 1000000:
        raise Exception("Resulting file size indicates an error. Try a different resolution in the request")
    if path is not None:
        with open(path, 'wb') as f:
            f.write(response.content)
        return
    return response.content
def bounds_from_coordinate(x, y, acres, crs_in=4326, crs_out=3857):
    """Return a square bounding box centered on (x, y) covering *acres* acres.

    (Docstring fixed: the previous one described a DEM download, but this
    helper only computes and returns a bounding box.)

    :param x: x coordinate (longitude for EPSG:4326) in *crs_in*
    :param y: y coordinate (latitude for EPSG:4326) in *crs_in*
    :param acres: area of the square, in acres
    :param crs_in: EPSG code of the input coordinate
    :param crs_out: EPSG code of the output bounding box
    :return: [xmin, ymin, xmax, ymax] in *crs_out*
    """
    from pyproj import Transformer

    # always_xy=True forces (x, y) axis order regardless of CRS definition.
    transformer = Transformer.from_crs(crs_in, crs_out, always_xy=True)
    coord = transformer.transform(x, y)
    # acres -> square feet (43560 ft^2/acre) -> side length -> meters -> half side.
    # NOTE(review): 3.28 approximates feet per meter (3.2808); kept as-is.
    halfside = ((acres * 43560) ** .5) / 3.28 / 2
    return [coord[0] - halfside, coord[1] - halfside,
            coord[0] + halfside, coord[1] + halfside]
def tif_to_unity(img, output_path):
    """Convert a (Geo)TIFF DEM to a Unity-friendly 16-bit grayscale PNG.

    :param img: raw image bytes, or a path/file object Pillow can open
    :param output_path: destination path for the 16-bit PNG
    """
    from PIL import Image
    import io
    import numpy as np
    # Accept either raw bytes or a path/file object.
    if isinstance(img, (bytes, bytearray)):
        img = Image.open(io.BytesIO(img))
    else:
        img = Image.open(img)
    data = np.array(img)
    elev_min = data.min()
    elev_max = data.max()
    elev_range = elev_max - elev_min
    if elev_range == 0:
        # Flat DEM: the original divided by zero here; emit an all-zero map.
        data = np.zeros_like(data, dtype=np.uint16)
    else:
        # Stretch onto the 16-bit range (65534 leaves one step of headroom).
        data = ((data - elev_min) * (65534 / elev_range)).astype(np.uint16)
    img = Image.fromarray(data)
    img.save(output_path)
print(f'Elevation range={elev_range}') | pyvf/dems.py | def dem_from_bbox(bbox, crs=3857, resolution=2048, path=None):
"""
:param bbox: bounding box as [xmin,ymin,xmax,ymax]
:param crs: crs for bounding box
:param path: As an option, save to a local filepath with extension .tif
:return: DEM image
"""
import requests
from pyproj import Transformer
bbox_in = bbox
height = resolution
# Convert coordinates from WGS 84 to web mercator
transformer = Transformer.from_crs(crs, 3857)
min = transformer.transform(bbox[0], bbox[1])
max = transformer.transform(bbox[2],bbox[3])
bbox= [min[0],min[1],max[0],max[1]]
x_meters=bbox[2]-bbox[0]
y_meters=bbox[3]-bbox[1]
print(f'x_meters = {bbox[2] - bbox[0]}')
print(f'y_meters = {bbox[3] - bbox[1]}')
width=height*(x_meters/y_meters)
# Download data from National Map webserver
bboxstr = str(bbox[0]) + ',' + str(bbox[1]) + ',' + str(bbox[2]) + ',' + str(bbox[3])
demURL = f'https://elevation.nationalmap.gov/arcgis/services/3DEPElevation/ImageServer/WMSServer?SERVICE=WMS&VERSION=1.3.0&REQUEST=GetMap&BBOX={bboxstr}&FORMAT=tiff&WIDTH={width}&HEIGHT={height}&CRS=EPSG:3857&LAYERS=3DEPElevation:None'
response = requests.get(demURL)
if path != None:
with open(path, 'wb') as f:
f.write(response.content)
return
else:
return response.content
def img_from_bbox(bbox, crs=3857, resolution=2048, path=None, format='png', cir=False, dtype='U8'):
    """Download NAIP imagery for a bounding box from the USDA image server.

    :param bbox: bounding box as [xmin, ymin, xmax, ymax]
    :param crs: EPSG code of the CRS the bounding box is expressed in
    :param resolution: vertical resolution of output image. Int, max 4096.
    :param path: optional local filepath (extension matching *format*) to save to
    :param format: output image format. 'png', 'jpg', or 'tiff'
    :param cir: True for a color-infrared image with band order 4,1,2
    :param dtype: pixel type, e.g. 'U8' (8-bit unsigned) or 'F32' (32-bit float)
    :return: image bytes, or None when *path* is given
    :raises Exception: when the response is suspiciously small (likely an error page)
    """
    import requests
    from pyproj import Transformer
    from sys import getsizeof

    height = resolution
    # Reproject the bbox corners into web mercator (EPSG:3857).
    # NOTE(review): from_crs is used without always_xy=True, so axis order
    # follows each CRS definition -- confirm inputs match.
    transformer = Transformer.from_crs(crs, 3857)
    # Renamed from min/max: those shadowed the builtins.
    lower = transformer.transform(bbox[0], bbox[1])
    upper = transformer.transform(bbox[2], bbox[3])
    bbox = [lower[0], lower[1], upper[0], upper[1]]
    x_meters = bbox[2] - bbox[0]
    y_meters = bbox[3] - bbox[1]
    print(f'x_meters = {x_meters}')
    print(f'y_meters = {y_meters}')
    # Match pixel aspect to geographic aspect; the server expects integers,
    # so round instead of embedding a float in the URL.
    width = int(round(height * (x_meters / y_meters)))
    bboxstr = ','.join(str(v) for v in bbox)
    imgURL = f'https://gis.apfo.usda.gov/arcgis/rest/services/NAIP/USDA_CONUS_PRIME/ImageServer/exportImage?bbox={bboxstr}&size={width}%2C{height}&pixelType={dtype}&f=image'
    if format == 'tiff':
        imgURL += '&format=tiff'
    elif format == 'jpg':
        imgURL += '&format=jpg'
    else:
        imgURL += '&format=png'
    if cir:
        imgURL += '&bandIds=4%2C1%2C2'
    response = requests.get(imgURL)
    # A tiny payload almost certainly means the server returned an error body.
    if getsizeof(response.content) < 1000000:
        raise Exception("Resulting file size indicates an error. Try a different resolution in the request")
    if path is not None:
        with open(path, 'wb') as f:
            f.write(response.content)
        return
    return response.content
def bounds_from_coordinate(x, y, acres, crs_in=4326, crs_out=3857):
    """Return a square bounding box centered on (x, y) covering *acres* acres.

    (Docstring fixed: the previous one described a DEM download, but this
    helper only computes and returns a bounding box.)

    :param x: x coordinate (longitude for EPSG:4326) in *crs_in*
    :param y: y coordinate (latitude for EPSG:4326) in *crs_in*
    :param acres: area of the square, in acres
    :param crs_in: EPSG code of the input coordinate
    :param crs_out: EPSG code of the output bounding box
    :return: [xmin, ymin, xmax, ymax] in *crs_out*
    """
    from pyproj import Transformer

    # always_xy=True forces (x, y) axis order regardless of CRS definition.
    transformer = Transformer.from_crs(crs_in, crs_out, always_xy=True)
    coord = transformer.transform(x, y)
    # acres -> square feet (43560 ft^2/acre) -> side length -> meters -> half side.
    # NOTE(review): 3.28 approximates feet per meter (3.2808); kept as-is.
    halfside = ((acres * 43560) ** .5) / 3.28 / 2
    return [coord[0] - halfside, coord[1] - halfside,
            coord[0] + halfside, coord[1] + halfside]
def tif_to_unity(img, output_path):
    """Convert a (Geo)TIFF DEM to a Unity-friendly 16-bit grayscale PNG.

    :param img: raw image bytes, or a path/file object Pillow can open
    :param output_path: destination path for the 16-bit PNG
    """
    from PIL import Image
    import io
    import numpy as np
    # Accept either raw bytes or a path/file object.
    if isinstance(img, (bytes, bytearray)):
        img = Image.open(io.BytesIO(img))
    else:
        img = Image.open(img)
    data = np.array(img)
    elev_min = data.min()
    elev_max = data.max()
    elev_range = elev_max - elev_min
    if elev_range == 0:
        # Flat DEM: the original divided by zero here; emit an all-zero map.
        data = np.zeros_like(data, dtype=np.uint16)
    else:
        # Stretch onto the 16-bit range (65534 leaves one step of headroom).
        data = ((data - elev_min) * (65534 / elev_range)).astype(np.uint16)
    img = Image.fromarray(data)
    img.save(output_path)
print(f'Elevation range={elev_range}') | 0.706798 | 0.571288 |
from django.http import JsonResponse
from ..models import Project, ModelClass, DefaultAttribute
import json
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from pprint import pprint
import pathlib
import os
@csrf_exempt
@require_POST
def create_or_update_model_class(request):
    """Create or update a ModelClass (and its default attributes) from JSON.

    Expected body keys: ``project`` (id), ``name``, ``description``,
    ``run_step_code``, ``parameters`` (list), ``states`` (list).
    Returns ``{"id": ...}`` on success or ``{"error": ...}`` when the
    project cannot be found; also mirrors the model class to a source file.
    """
    body = json.loads(request.body)
    project = Project.objects.filter(id=int(body['project'])).first()
    if project is None:
        return JsonResponse({'error': 'project could not be found'})
    # The key doubles as a Python class name, so spaces/dashes become underscores.
    key = body['name'].replace(' ', '_').replace('-','_')
    model_class = ModelClass.objects.filter(key=key, project=project).first()
    if model_class is None:
        model_class = ModelClass.objects.create(
            key=key,
            label=body['name'],
            description=body['description'],
            project=project,
            run_step_code=body['run_step_code'])
    else:
        model_class.key = key
        model_class.label = body['name']
        model_class.description = body['description']
        # project=project
        print("inputed runstep code:")
        model_class.run_step_code = body['run_step_code']
        model_class.save()
    # Tag each incoming attribute with its kind before upserting.
    for param in body['parameters']:
        param['kind'] = 'param'
    for state in body['states']:
        state['kind'] = 'state'
    for item in body['parameters'] + body['states']:
        # Upsert keyed on (model_class, key, kind).
        default_attr = DefaultAttribute.objects.filter(model_class=model_class, key=item['key'], kind=item['kind']).first()
        if default_attr is None:
            DefaultAttribute.objects.create(
                key=item['key'],
                label=item['label'],
                dtype=item['dtype'],
                units=item.get('units'),
                kind=item['kind'],
                is_private=item.get('private', False),
                value=str(item['value']),
                confidence=item.get('confidence', 0),
                notes=item.get('notes', ''),
                source=item.get('source', ''),
                model_class=model_class
            )
        else:
            default_attr.key=item['key']
            default_attr.label=item['label']
            default_attr.dtype=item['dtype']
            default_attr.units=item.get('units')
            default_attr.kind=item['kind']
            default_attr.is_private=item.get('private', False)
            default_attr.value=str(item['value'])
            default_attr.confidence=item.get('confidence', 0)
            default_attr.notes=item.get('notes', '')
            default_attr.source=item.get('source', '')
            default_attr.save()
    # https://stackoverflow.com/questions/5362771/how-to-load-a-module-from-code-in-a-string
    # Note: we probably want to save the code file here as that can then help with local iteration... but then we risk getting out of sync with the database...
    # Note: We could check to see when running whether the code is equal to the file!
    # Then ask the user to either upload or overwrite.
    # Build <modelflow root>/projects/<project>/model_classes, creating each
    # level only if it does not exist yet.
    modelflow_root = pathlib.Path(__file__).parents[5]
    projects_folder = os.path.join(modelflow_root, 'projects')
    if not os.path.exists(projects_folder):
        os.mkdir(projects_folder)
    project_folder = os.path.join(projects_folder, project.name)
    if not os.path.exists(project_folder):
        os.mkdir(project_folder)
    model_classes_dir = os.path.join(project_folder, 'model_classes')
    if not os.path.exists(model_classes_dir):
        os.mkdir(model_classes_dir)
    write_file_for_model_class(model_classes_dir, model_class)
    return JsonResponse({'id': model_class.id})
def write_file_for_model_class(model_classes_dir, model_class):
    """Render *model_class* as Python source text under *model_classes_dir*.

    Emits a class with ``name``, ``params``/``states`` literals built from the
    stored DefaultAttribute rows, and the user's run-step code as a
    staticmethod.
    """
    model_class_text = ''
    # TODO: Handle imports
    model_class_text += f'class {model_class.key}:\n'
    model_class_text += f' name = "{model_class.label}"\n'
    default_params = []
    default_states = []
    # Collect attribute dicts, coercing stored string values back to their
    # declared dtype.
    for attribute in DefaultAttribute.objects.filter(model_class=model_class):
        value = attribute.value
        dtype = attribute.dtype
        if dtype in ['int']:
            value = int(value)
        elif dtype in ['float']:
            value = float(value)
        obj = dict(
            key=attribute.key,
            label=attribute.label,
            units=attribute.units,
            private=attribute.is_private,
            value=value,
            confidence=attribute.confidence,
            notes=attribute.notes,
            source=attribute.source
        )
        if attribute.kind == 'param':
            default_params.append(obj)
        else:
            default_states.append(obj)
    # Turn each list into Python-literal text: JSON is close enough once the
    # JSON spellings of booleans/null are rewritten to Python's.
    for part in [['params', default_params], ['states', default_states]]:
        json_str = json.dumps(part[1], indent=4)
        json_str = json_str.replace(': false', ': False')
        json_str = json_str.replace(': true', ': True')
        json_str = json_str.replace(': null', ': ""')
        json_str = part[0] + ' = ' + json_str
        lines = json_str.split('\n')
        new_lines = []
        # Indent every line so the literal sits inside the class body.
        for line in lines:
            new_lines.append(' ' + line)
        model_class_text += '\n'.join(new_lines)
        model_class_text += '\n'
    # Append the user-supplied run-step code as a staticmethod.
    model_class_text += '\n @staticmethod\n'
    for line in model_class.run_step_code.split('\n'):
        model_class_text += ' ' + line + '\n'
    with open(os.path.join(model_classes_dir, f'{model_class.key}.py'), 'w') as f:
f.write(model_class_text) | website/backend/webserver/api/views/model_classes.py | from django.http import JsonResponse
from ..models import Project, ModelClass, DefaultAttribute
import json
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from pprint import pprint
import pathlib
import os
@csrf_exempt
@require_POST
def create_or_update_model_class(request):
    """Create or update a ModelClass (and its default attributes) from JSON.

    Expected body keys: ``project`` (id), ``name``, ``description``,
    ``run_step_code``, ``parameters`` (list), ``states`` (list).
    Returns ``{"id": ...}`` on success or ``{"error": ...}`` when the
    project cannot be found; also mirrors the model class to a source file.
    """
    body = json.loads(request.body)
    project = Project.objects.filter(id=int(body['project'])).first()
    if project is None:
        return JsonResponse({'error': 'project could not be found'})
    # The key doubles as a Python class name, so spaces/dashes become underscores.
    key = body['name'].replace(' ', '_').replace('-', '_')
    model_class = ModelClass.objects.filter(key=key, project=project).first()
    if model_class is None:
        model_class = ModelClass.objects.create(
            key=key,
            label=body['name'],
            description=body['description'],
            project=project,
            run_step_code=body['run_step_code'])
    else:
        model_class.key = key
        model_class.label = body['name']
        model_class.description = body['description']
        model_class.run_step_code = body['run_step_code']
        model_class.save()
    # Tag each incoming attribute with its kind before upserting.
    for param in body['parameters']:
        param['kind'] = 'param'
    for state in body['states']:
        state['kind'] = 'state'
    for item in body['parameters'] + body['states']:
        # Upsert keyed on (model_class, key, kind).
        default_attr = DefaultAttribute.objects.filter(
            model_class=model_class, key=item['key'], kind=item['kind']).first()
        if default_attr is None:
            DefaultAttribute.objects.create(
                key=item['key'],
                label=item['label'],
                dtype=item['dtype'],
                units=item.get('units'),
                kind=item['kind'],
                is_private=item.get('private', False),
                value=str(item['value']),
                confidence=item.get('confidence', 0),
                notes=item.get('notes', ''),
                source=item.get('source', ''),
                model_class=model_class
            )
        else:
            default_attr.key = item['key']
            default_attr.label = item['label']
            default_attr.dtype = item['dtype']
            default_attr.units = item.get('units')
            default_attr.kind = item['kind']
            default_attr.is_private = item.get('private', False)
            default_attr.value = str(item['value'])
            default_attr.confidence = item.get('confidence', 0)
            default_attr.notes = item.get('notes', '')
            default_attr.source = item.get('source', '')
            default_attr.save()
    # Mirror the model class to a source file to help local iteration.
    # NOTE: the file can drift from the database if edited by hand; we could
    # compare on run and ask the user to upload or overwrite.
    modelflow_root = pathlib.Path(__file__).parents[5]
    projects_folder = os.path.join(modelflow_root, 'projects')
    project_folder = os.path.join(projects_folder, project.name)
    model_classes_dir = os.path.join(project_folder, 'model_classes')
    # makedirs creates all intermediate directories in one race-free call,
    # replacing the previous three exists()/mkdir() pairs.
    os.makedirs(model_classes_dir, exist_ok=True)
    write_file_for_model_class(model_classes_dir, model_class)
    return JsonResponse({'id': model_class.id})
def write_file_for_model_class(model_classes_dir, model_class):
model_class_text = ''
# TODO: Handle imports
model_class_text += f'class {model_class.key}:\n'
model_class_text += f' name = "{model_class.label}"\n'
default_params = []
default_states = []
for attribute in DefaultAttribute.objects.filter(model_class=model_class):
value = attribute.value
dtype = attribute.dtype
if dtype in ['int']:
value = int(value)
elif dtype in ['float']:
value = float(value)
obj = dict(
key=attribute.key,
label=attribute.label,
units=attribute.units,
private=attribute.is_private,
value=value,
confidence=attribute.confidence,
notes=attribute.notes,
source=attribute.source
)
if attribute.kind == 'param':
default_params.append(obj)
else:
default_states.append(obj)
for part in [['params', default_params], ['states', default_states]]:
json_str = json.dumps(part[1], indent=4)
json_str = json_str.replace(': false', ': False')
json_str = json_str.replace(': true', ': True')
json_str = json_str.replace(': null', ': ""')
json_str = part[0] + ' = ' + json_str
lines = json_str.split('\n')
new_lines = []
for line in lines:
new_lines.append(' ' + line)
model_class_text += '\n'.join(new_lines)
model_class_text += '\n'
model_class_text += '\n @staticmethod\n'
for line in model_class.run_step_code.split('\n'):
model_class_text += ' ' + line + '\n'
with open(os.path.join(model_classes_dir, f'{model_class.key}.py'), 'w') as f:
f.write(model_class_text) | 0.227041 | 0.08043 |
# Task 1: print a random number in [0, 100].
import random
number = random.randrange(0,101)
print(number)
# Task 2: guessing game -- the user guesses a random number in [0, 10].
numb = int(input("Введіть рандомне число від 0 до 10: "))
numb2 = random.randrange(0,11)
print("Випадає число: " ,numb2)
if numb == numb2:
    print("Ти вийграв!!!")
else:
    print("Ти програв =(")
# Task 3: compute a difference and report whether its absolute value is positive.
pryclad = 100 - 50
print(pryclad)
# Bug fix: the original did `modul = print(abs(...))`, which stores None and
# then crashes on `None > 0` in Python 3. Store the value, then print it.
modul = abs(pryclad)
print(modul)
if modul > 0:
    print("Число додатне")
else:
    print("Завдання зроблено неправильно")
#4 задача
from random import choice, random
from turtle import *
from freegames import vector
def value():
    "Randomly generate value between (-5, -3) or (3, 5)."
    magnitude = 3 + random() * 2
    sign = choice((1, -1))
    return magnitude * sign
# the ball's starting position
ball = vector(5, 0)
# the ball moves in a random direction generated by value()
aim = vector(value(), value())
# vertical positions of the two paddles, keyed by player number
state = {1: 0, 2: 0}
def move(player, change):
    "Shift *player*'s paddle vertically by *change* pixels."
    state[player] = state[player] + change
def rectangle(x, y, width, height):
    "Draw a filled width x height rectangle with lower-left corner (x, y)."
    up()
    goto(x, y)
    down()
    begin_fill()
    # Two passes of (width, turn, height, turn) trace the full outline.
    for _ in range(2):
        forward(width)
        left(90)
        forward(height)
        left(90)
    end_fill()
def draw():
    "Draw game and move pong ball."
    clear()
    # Paddles: left at x=-200, right at x=190, each 10x50.
    rectangle(-200, state[1], 10, 50)
    rectangle(190, state[2], 10, 50)
    ball.move(aim)
    x = ball.x
    y = ball.y
    up()
    goto(x, y)
    # ball size
    dot(20)
    update()
    # bounce off the top/bottom walls
    if y < -200 or y > 200:
        aim.y = -aim.y
    if x < -185:
        low = state[1]
        high = state[1] + 50
        if low <= y <= high:
            aim.x = -aim.x
        else:
            # missed the left paddle: stop the game loop
            return
    if x > 185:
        low = state[2]
        high = state[2] + 50
        if low <= y <= high:
            aim.x = -aim.x
        else:
            # missed the right paddle: stop the game loop
            return
    # ball speed (redraw every 50 ms)
    ontimer(draw, 50)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
listen()
# control keys: w/s/a/d for the left paddle, numpad 8/5/6/4 for the right
onkey(lambda: move(1, 30), 'w')
onkey(lambda: move(1, -30), 's')
onkey(lambda: move(1, -15), 'd')
onkey(lambda: move(1, 15), 'a')
onkey(lambda: move(2, 30), '8')
onkey(lambda: move(2, -30), '5')
onkey(lambda: move(2, -15), '6')
onkey(lambda: move(2, 15), '4')
draw()
done()
# Task 5: turtle-graphics "tron" with two steerable players plus a third snake.
from turtle import *
from freegames import square, vector
# player 1
# starting point
p1xy = vector(-10, 0)
# direction
p1aim = vector(4, 0)
p1body = set()
# player 2
# starting point
p2xy = vector(100, 0)
# direction
p2aim = vector(-4, 0)
p2body = set()
# player 3 (original comment said "player 1" -- copy/paste leftover)
# starting point
p3xy = vector(0, 10)
# direction
p3aim = vector(0, 4)
p3body = set()
# are we still within the bounds of the map?
def inside(head):
    "Return True if head inside screen."
    return abs(head.x) < 200 and abs(head.y) < 200
def draw():
    "Advance players and draw game."
    # move player 1
    p1xy.move(p1aim)
    p1head = p1xy.copy()
    # move player 2
    p2xy.move(p2aim)
    p2head = p2xy.copy()
    # move player 3
    p3xy.move(p3aim)
    p3head = p3xy.copy()
    # player 1 loses on leaving the screen or hitting player 2's trail
    if not inside(p1head) or p1head in p2body:
        print('Player blue wins!')
        return
    # player 2 loses on leaving the screen or hitting player 1's trail
    if not inside(p2head) or p2head in p1body:
        print('Player red wins!')
        return
    # NOTE(review): player 3 is never checked for collisions, and no player is
    # tested against its own trail or player 3's -- confirm this is intended.
    # grow player 1's trail
    p1body.add(p1head)
    # grow player 2's trail
    p2body.add(p2head)
    # grow player 3's trail
    p3body.add(p3head)
    # draw the players: 3 is the square size, colours are configurable
    square(p1xy.x, p1xy.y, 3, 'yellow')
    square(p2xy.x, p2xy.y, 3, 'blue')
    square(p3xy.x, p3xy.y, 3, 'black')
    update()
    # player speed (redraw every 100 ms)
    ontimer(draw, 100)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
listen()
# steering keys: a/d for player 1, numpad 4/6 for player 2, g/j for player 3
onkey(lambda: p1aim.rotate(90), 'a')
onkey(lambda: p1aim.rotate(-90), 'd')
onkey(lambda: p2aim.rotate(90), '4')
onkey(lambda: p2aim.rotate(-90), '6')
onkey(lambda: p3aim.rotate(90), 'g')
# Bug fix: 'j' must steer player 3 (paired with 'g'), not player 2.
onkey(lambda: p3aim.rotate(-90), 'j')
draw()
done()
# Tasks 7-10: pacman with randomized maze tiles.
import random
from random import choice
from turtle import *
from freegames import floor, vector
# score
state = {'score': 0}
path = Turtle(visible=False)
writer = Turtle(visible=False)
# direction
aim = vector(5, 0)
# pacman's starting position
pacman = vector(-40, -80)
# ghosts: [position, direction] pairs
ghosts = [
    [vector(-180, 160), vector(5, 0)],
    [vector(-180, -160), vector(0, 5)],
    [vector(100, 160), vector(0, -5)],
    [vector(100, -160), vector(-5, 0)],
    [vector(-180, 160), vector(5, 0)],
]
def rand_om():
    """Return 0 or 1 at random; used to randomize selected maze tiles."""
    return random.randrange(2)
# map: 0 is a wall, 1 is a path.  Cells built with rand_om() are randomly a
# wall or a path each run.
# NOTE(review): the randomized cells can wall off parts of the maze, leaving
# some food unreachable -- confirm this is acceptable.
tiles = [
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 1, rand_om(), 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0,
    0, 1, 0, 1, 1, rand_om(), 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
    0, 1, 1, 1, rand_om(), 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0,
    0, 1, 0, 0, 1, rand_om(), 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0,
    0, 1, 1, 1, 1, 0, 1, rand_om(), 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0,
    0, 1, 0, 0, 1, 0, 0, rand_om(), 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0,
    0, 1, 0, rand_om(), 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0,
    0, 1, 1, 1, 1, 1, 1, rand_om(), 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,
    0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, rand_om(), 0, 1, 0,
    0, 0, 0, 0, 1, rand_om(), 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,
    0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, rand_om(), 0,
    0, 1, 0, rand_om(), 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0,
    0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, rand_om(), 1, 1, 0, 1, 1, 0,
    0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, rand_om(), 1, 0, 0, 1, 0, 0,
    0, 1, 1, 1, 1, 0, 1, 1, 0, 1, rand_om(), 0, 1, 1, 1, 1, 0, 1, 1, 0,
    0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, rand_om(), 0,
    0, 1, 1, 1, 1, 1, 1, 1, 1, rand_om(), 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, rand_om(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]
def square(x, y):
    "Draw a filled 20x20 square using path at (x, y)."
    path.up()
    path.goto(x, y)
    path.down()
    path.begin_fill()
    for _ in range(4):
        path.forward(20)
        path.left(90)
    path.end_fill()
def offset(point):
    "Return the index of *point* in the flattened 20x20 tile grid."
    col = (floor(point.x, 20) + 200) / 20
    row = (180 - floor(point.y, 20)) / 20
    return int(col + row * 20)
def valid(point):
    "Return True if pacman or a ghost may occupy *point* on the tile map."
    # Both the top-left and bottom-right tiles under the sprite must be open.
    if tiles[offset(point)] == 0:
        return False
    if tiles[offset(point + 19)] == 0:
        return False
    # Only grid-aligned positions (multiple of 20 on an axis) are valid.
    return point.x % 20 == 0 or point.y % 20 == 0
def world():
    "Draw world using path."
    bgcolor('black')
    path.color('blue')
    # Draw a blue square for every non-wall tile; tiles flagged 1 also get
    # a food dot in their centre.
    for index in range(len(tiles)):
        tile = tiles[index]
        if tile > 0:
            x = (index % 20) * 20 - 200
            y = 180 - (index // 20) * 20
            square(x, y)
            if tile == 1:
                path.up()
                path.goto(x + 10, y + 10)
                # food size and colour
                path.dot(2, 'yellow')
def move():
    "Move pacman and all ghosts."
    # Refresh the on-screen score.
    writer.undo()
    writer.write(state['score'])
    clear()
    if valid(pacman + aim):
        pacman.move(aim)
    index = offset(pacman)
    # Eat the food on the current tile (1 -> 2 marks it consumed).
    if tiles[index] == 1:
        tiles[index] = 2
        state['score'] += 1
        x = (index % 20) * 20 - 200
        y = 180 - (index // 20) * 20
        square(x, y)
    up()
    goto(pacman.x + 10, pacman.y + 10)
    # pacman size and colour
    dot(20, 'red')
    for point, course in ghosts:
        if valid(point + course):
            point.move(course)
        else:
            # Blocked: pick a new random direction for this ghost.
            options = [
                vector(5, 0),
                vector(-5, 0),
                vector(0, 5),
                vector(0, -5),
            ]
            plan = choice(options)
            course.x = plan.x
            course.y = plan.y
        up()
        goto(point.x + 10, point.y + 10)
        # ghost size and colour
        dot(20, 'white')
    update()
    # Stop the game loop when any ghost catches pacman.
    for point, course in ghosts:
        if abs(pacman - point) < 20:
            return
    ontimer(move, 100)
def change(x, y):
    "Steer pacman toward (x, y) when the resulting position is valid."
    if not valid(pacman + vector(x, y)):
        return
    aim.x = x
    aim.y = y
# Screen, score display, and arrow-key bindings; then start the game loop.
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
writer.goto(160, 160)
writer.color('white')
writer.write(state['score'])
listen()
onkey(lambda: change(5, 0), 'Right')
onkey(lambda: change(-5, 0), 'Left')
onkey(lambda: change(0, 5), 'Up')
onkey(lambda: change(0, -5), 'Down')
world()
move()
done() | classwork11.py | import random
# Task 1: print a random number in [0, 100].
number = random.randrange(0,101)
print(number)
# Task 2: guessing game -- the user guesses a random number in [0, 10].
numb = int(input("Введіть рандомне число від 0 до 10: "))
numb2 = random.randrange(0,11)
print("Випадає число: " ,numb2)
if numb == numb2:
    print("Ти вийграв!!!")
else:
    print("Ти програв =(")
# Task 3: compute a difference and report whether its absolute value is positive.
pryclad = 100 - 50
print(pryclad)
# BUG FIX: the old `modul = print(abs(pryclad))` bound None (print's return
# value), so `modul > 0` below raised TypeError. Bind the value, then print it.
modul = abs(pryclad)
print(modul)
if modul > 0:
    print("Число додатне")
else:
    print("Завдання зроблено неправильно")
#4 задача
from random import choice, random
from turtle import *
from freegames import vector
def value():
    "Randomly generate value between (-5, -3) or (3, 5)."
    # Magnitude drawn uniformly from [3, 5); sign chosen uniformly.
    return (3 + random() * 2) * choice([1, -1])
# initial position where the ball appears
ball = vector(5, 0)
# the ball starts in a random direction generated by value()
aim = vector(value(), value())
# paddle vertical offsets, keyed by player number
state = {1: 0, 2: 0}
def move(player, change):
    """Shift *player*'s paddle vertically by *change* pixels."""
    new_position = state[player] + change
    state[player] = new_position
def rectangle(x, y, width, height):
    """Draw a filled width-by-height rectangle with lower-left corner at (x, y)."""
    up()
    goto(x, y)
    down()
    begin_fill()
    # Two width/height leg pairs trace the full perimeter.
    for _ in range(2):
        forward(width)
        left(90)
        forward(height)
        left(90)
    end_fill()
def draw():
    "Draw game and move pong ball."
    clear()
    # Left and right paddles at each player's current offset.
    rectangle(-200, state[1], 10, 50)
    rectangle(190, state[2], 10, 50)
    ball.move(aim)
    x = ball.x
    y = ball.y
    up()
    goto(x, y)
    # ball size
    dot(20)
    update()
    # bounce off the top/bottom walls
    if y < -200 or y > 200:
        aim.y = -aim.y
    if x < -185:
        low = state[1]
        high = state[1] + 50
        if low <= y <= high:
            # Left paddle hit: reflect horizontally.
            aim.x = -aim.x
        else:
            # Missed: stop scheduling frames (game over).
            return
    if x > 185:
        low = state[2]
        high = state[2] + 50
        if low <= y <= high:
            aim.x = -aim.x
        else:
            return
    # ball speed (frame interval in ms)
    ontimer(draw, 50)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
listen()
# control keys: w/s/a/d for player 1, numpad 8/5/4/6 for player 2
onkey(lambda: move(1, 30), 'w')
onkey(lambda: move(1, -30), 's')
onkey(lambda: move(1, -15), 'd')
onkey(lambda: move(1, 15), 'a')
onkey(lambda: move(2, 30), '8')
onkey(lambda: move(2, -30), '5')
onkey(lambda: move(2, -15), '6')
onkey(lambda: move(2, 15), '4')
draw()
done()
#5 задача
from turtle import *
from freegames import square, vector
# player one
# starting point
p1xy = vector(-10, 0)
# direction
p1aim = vector(4, 0)
p1body = set()
# player two
# starting point
p2xy = vector(100, 0)
# direction
p2aim = vector(-4, 0)
p2body = set()
# player three (original comment said "player one" -- copy-paste slip)
# starting point
p3xy = vector(0, 10)
# direction
p3aim = vector(0, 4)
p3body = set()
# чи ми знаходимся в межах карти
def inside(head):
    """Return True when *head* lies strictly within the 400x400 playing field."""
    within_x = -200 < head.x < 200
    within_y = -200 < head.y < 200
    return within_x and within_y
def draw():
    "Advance players and draw game."
    # advance player 1
    p1xy.move(p1aim)
    p1head = p1xy.copy()
    # advance player 2
    p2xy.move(p2aim)
    p2head = p2xy.copy()
    # advance player 3
    p3xy.move(p3aim)
    p3head = p3xy.copy()
    # player 1 loses on leaving the board or hitting player 2's trail
    if not inside(p1head) or p1head in p2body:
        print('Player blue wins!')
        return
    # player 2 loses on leaving the board or hitting player 1's trail
    if not inside(p2head) or p2head in p1body:
        print('Player red wins!')
        return
    # NOTE(review): player 3 has no win/lose check, and nobody collides with
    # p3body -- confirm whether this is intentional.
    # grow player 1's trail
    p1body.add(p1head)
    # grow player 2's trail
    p2body.add(p2head)
    # grow player 3's trail
    p3body.add(p3head)
    # draw the players; 3 is the square size, colours are adjustable
    square(p1xy.x, p1xy.y, 3, 'yellow')
    square(p2xy.x, p2xy.y, 3, 'blue')
    square(p3xy.x, p3xy.y, 3, 'black')
    update()
    # player movement speed (frame interval in ms)
    ontimer(draw, 100)
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
listen()
# a/d turn player 1, numpad 4/6 turn player 2, g/j turn player 3
onkey(lambda: p1aim.rotate(90), 'a')
onkey(lambda: p1aim.rotate(-90), 'd')
onkey(lambda: p2aim.rotate(90), '4')
onkey(lambda: p2aim.rotate(-90), '6')
onkey(lambda: p3aim.rotate(90), 'g')
# BUG FIX: 'j' previously rotated p2aim, so player 3 could never turn right;
# it must steer p3aim to mirror the 'g' binding.
onkey(lambda: p3aim.rotate(-90), 'j')
draw()
done()
#7 - 10 завдання
import random
from random import choice
from turtle import *
from freegames import floor, vector
# score
state = {'score': 0}
path = Turtle(visible=False)
writer = Turtle(visible=False)
# direction
aim = vector(5, 0)
# pacman's starting coordinates
pacman = vector(-40, -80)
# ghosts: [position, direction] pairs
ghosts = [
    [vector(-180, 160), vector(5, 0)],
    [vector(-180, -160), vector(0, 5)],
    [vector(100, 160), vector(0, -5)],
    [vector(100, -160), vector(-5, 0)],
    [vector(-180, 160), vector(5, 0)],
]
def rand_om():
    """Return 0 or 1 at random (used to randomly wall off map cells)."""
    return random.randrange(2)
# карта, 0 - це стіна, 1 - це дорога
tiles = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, rand_om(), 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0,
0, 1, 0, 1, 1, rand_om(), 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
0, 1, 1, 1, rand_om(), 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0,
0, 1, 0, 0, 1, rand_om(), 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0,
0, 1, 1, 1, 1, 0, 1, rand_om(), 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0,
0, 1, 0, 0, 1, 0, 0, rand_om(), 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0,
0, 1, 0, rand_om(), 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0,
0, 1, 1, 1, 1, 1, 1, rand_om(), 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,
0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, rand_om(), 0, 1, 0,
0, 0, 0, 0, 1, rand_om(), 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0,
0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, rand_om(), 0,
0, 1, 0, rand_om(), 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0,
0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, rand_om(), 1, 1, 0, 1, 1, 0,
0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, rand_om(), 1, 0, 0, 1, 0, 0,
0, 1, 1, 1, 1, 0, 1, 1, 0, 1, rand_om(), 0, 1, 1, 1, 1, 0, 1, 1, 0,
0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, rand_om(), 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, rand_om(), 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, rand_om(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]
def square(x, y):
    """Fill one 20x20 board tile whose lower-left corner is at (x, y)."""
    path.up()
    path.goto(x, y)
    path.down()
    path.begin_fill()
    for _ in range(4):
        path.forward(20)
        path.left(90)
    path.end_fill()
def offset(point):
    """Return the flat tile-array index for *point* on the 20px grid.

    The board's top-left tile is at screen (-200, 180); rows grow downward.
    NOTE(review): `floor` here is freegames.floor(value, step), which floors
    to a multiple of 20 -- confirm against the freegames docs.
    """
    column = (floor(point.x, 20) + 200) / 20
    row = (180 - floor(point.y, 20)) / 20
    return int(column + row * 20)
def valid(point):
    "Return True if point is valid in tiles."
    # The tile under the sprite's reference corner must not be a wall (0).
    index = offset(point)
    if tiles[index] == 0:
        return False
    # Check the opposite corner too; 19 keeps a 20px sprite inside one tile.
    index = offset(point + 19)
    if tiles[index] == 0:
        return False
    # Movement is only allowed when aligned to the 20px grid on some axis.
    return point.x % 20 == 0 or point.y % 20 == 0
def world():
    "Draw world using path."
    bgcolor('black')
    path.color('blue')
    for index in range(len(tiles)):
        tile = tiles[index]
        if tile > 0:
            # Map the flat tile index onto 20x20 screen coordinates.
            x = (index % 20) * 20 - 200
            y = 180 - (index // 20) * 20
            square(x, y)
            if tile == 1:
                # Tile value 1 still holds food: draw a pellet at the centre.
                path.up()
                path.goto(x + 10, y + 10)
                # food size and colour
                path.dot(2, 'yellow')
def move():
    "Move pacman and all ghosts."
    # Redraw the score in place.
    writer.undo()
    writer.write(state['score'])
    clear()
    if valid(pacman + aim):
        pacman.move(aim)
    index = offset(pacman)
    if tiles[index] == 1:
        # Eat the pellet: mark the tile eaten, bump the score, repaint it.
        tiles[index] = 2
        state['score'] += 1
        x = (index % 20) * 20 - 200
        y = 180 - (index // 20) * 20
        square(x, y)
    up()
    goto(pacman.x + 10, pacman.y + 10)
    # pacman size and colour
    dot(20, 'red')
    for point, course in ghosts:
        if valid(point + course):
            point.move(course)
        else:
            # Blocked: pick a new random direction for this ghost.
            options = [
                vector(5, 0),
                vector(-5, 0),
                vector(0, 5),
                vector(0, -5),
            ]
            plan = choice(options)
            course.x = plan.x
            course.y = plan.y
        up()
        goto(point.x + 10, point.y + 10)
        # ghost size and colour
        dot(20, 'white')
    update()
    for point, course in ghosts:
        if abs(pacman - point) < 20:
            # A ghost caught pacman: stop scheduling frames (game over).
            return
    ontimer(move, 100)
def change(x, y):
    """Point pacman toward (x, y), but only when that direction is walkable."""
    if not valid(pacman + vector(x, y)):
        return
    aim.x = x
    aim.y = y
setup(420, 420, 370, 0)
hideturtle()
tracer(False)
# Score readout in the top-right corner.
writer.goto(160, 160)
writer.color('white')
writer.write(state['score'])
listen()
# Arrow keys steer pacman via change().
onkey(lambda: change(5, 0), 'Right')
onkey(lambda: change(-5, 0), 'Left')
onkey(lambda: change(0, 5), 'Up')
onkey(lambda: change(0, -5), 'Down')
world()
move()
done() | 0.195479 | 0.389198 |
import os
import autoprocess.errors
from autoprocess.parsers import distl
from autoprocess.utils import log, misc, programs, xdsio
_logger = log.get_module_logger(__name__)
def harvest_initialize():
    """Check that XDS initialization produced every expected correction file."""
    required = ('X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BKGINIT.cbf', 'BLANK.cbf', 'GAIN.cbf')
    if not misc.file_requirements(*required):
        return {'step': 'initialize', 'success': False, 'reason': 'Initialization unsuccessful!'}
    return {'step': 'initialize', 'success': True}
def initialize(data_info, options=None):
    """Run the XDS XYCORR/INIT steps in the dataset's working directory.

    :param data_info: dataset description; must contain 'working_directory'
        plus the fields xdsio.write_xds_input expects
    :param options: optional dict; only 'mode' is read here
    :return: harvest dict with 'step', 'success' and optionally 'reason'
    """
    options = options or {}
    os.chdir(data_info['working_directory'])
    run_info = {'mode': options.get('mode')}
    run_info.update(data_info)
    xdsio.write_xds_input('XYCORR INIT', run_info)
    try:
        programs.xds_par('Initializing')
    except autoprocess.errors.ProcessError as e:
        return {'step': 'initialize', 'success': False, 'reason': str(e)}
    return harvest_initialize()
def analyse_image(data_info, options=None):
    """Analyse the reference image with distl and return the parsed results.

    :param data_info: dataset description; needs 'working_directory' and
        'reference_image'
    :param options: normalised but otherwise unused; kept for interface symmetry
    :return: dict with 'step', 'success', and on success 'data' (parsed distl log)
    """
    options = options or {}
    os.chdir(data_info['working_directory'])
    _logger.info('Analyzing reference image ...')
    try:
        programs.distl(data_info['reference_image'])
    except autoprocess.errors.ProcessError as e:
        return {'step': 'image_analysis', 'success': False, 'reason': str(e)}
    if not misc.file_requirements('distl.log'):
        return {'step': 'image_analysis', 'success': False, 'reason': 'Could not analyse reference image'}
    info = distl.parse_distl('distl.log')
    return {'step': 'image_analysis', 'success': True, 'data': info}
def harvest_spots():
    """Report whether the spot search produced a SPOT.XDS file."""
    if not misc.file_requirements('SPOT.XDS'):
        return {'step': 'spot_search', 'success': False, 'reason': 'Could not find spots.'}
    return {'step': 'spot_search', 'success': True}
def find_spots(data_info, options=None):
    """Run the XDS COLSPOT step to search for strong diffraction spots.

    :param data_info: dataset description; must contain 'working_directory'
    :param options: optional dict; only 'mode' is read here
    :return: harvest dict describing the outcome of the spot search
    """
    options = options or {}
    os.chdir(data_info['working_directory'])
    run_info = {'mode': options.get('mode')}
    run_info.update(data_info)
    xdsio.write_xds_input('COLSPOT', run_info)
    try:
        programs.xds_par('Searching for strong spots')
    except autoprocess.errors.ProcessError as e:
        return {'step': 'spot_search', 'success': False, 'reason': str(e)}
return harvest_spots() | autoprocess/engine/spots.py | import os
import autoprocess.errors
from autoprocess.parsers import distl
from autoprocess.utils import log, misc, programs, xdsio
_logger = log.get_module_logger(__name__)
def harvest_initialize():
    """Check that XDS initialization produced every expected correction file."""
    required = ('X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BKGINIT.cbf', 'BLANK.cbf', 'GAIN.cbf')
    if not misc.file_requirements(*required):
        return {'step': 'initialize', 'success': False, 'reason': 'Initialization unsuccessful!'}
    return {'step': 'initialize', 'success': True}
def initialize(data_info, options=None):
    """Run the XDS XYCORR/INIT steps in the dataset's working directory.

    :param data_info: dataset description; must contain 'working_directory'
        plus the fields xdsio.write_xds_input expects
    :param options: optional dict; only 'mode' is read here
    :return: harvest dict with 'step', 'success' and optionally 'reason'
    """
    options = options or {}
    os.chdir(data_info['working_directory'])
    run_info = {'mode': options.get('mode')}
    run_info.update(data_info)
    xdsio.write_xds_input('XYCORR INIT', run_info)
    try:
        programs.xds_par('Initializing')
    except autoprocess.errors.ProcessError as e:
        return {'step': 'initialize', 'success': False, 'reason': str(e)}
    return harvest_initialize()
def analyse_image(data_info, options=None):
    """Analyse the reference image with distl and return the parsed results.

    :param data_info: dataset description; needs 'working_directory' and
        'reference_image'
    :param options: normalised but otherwise unused; kept for interface symmetry
    :return: dict with 'step', 'success', and on success 'data' (parsed distl log)
    """
    options = options or {}
    os.chdir(data_info['working_directory'])
    _logger.info('Analyzing reference image ...')
    try:
        programs.distl(data_info['reference_image'])
    except autoprocess.errors.ProcessError as e:
        return {'step': 'image_analysis', 'success': False, 'reason': str(e)}
    if not misc.file_requirements('distl.log'):
        return {'step': 'image_analysis', 'success': False, 'reason': 'Could not analyse reference image'}
    info = distl.parse_distl('distl.log')
    return {'step': 'image_analysis', 'success': True, 'data': info}
def harvest_spots():
    """Report whether the spot search produced a SPOT.XDS file."""
    if not misc.file_requirements('SPOT.XDS'):
        return {'step': 'spot_search', 'success': False, 'reason': 'Could not find spots.'}
    return {'step': 'spot_search', 'success': True}
def find_spots(data_info, options=None):
    """Run the XDS COLSPOT step to search for strong diffraction spots.

    :param data_info: dataset description; must contain 'working_directory'
    :param options: optional dict; only 'mode' is read here
    :return: harvest dict describing the outcome of the spot search
    """
    options = options or {}
    os.chdir(data_info['working_directory'])
    run_info = {'mode': options.get('mode')}
    run_info.update(data_info)
    xdsio.write_xds_input('COLSPOT', run_info)
    try:
        programs.xds_par('Searching for strong spots')
    except autoprocess.errors.ProcessError as e:
        return {'step': 'spot_search', 'success': False, 'reason': str(e)}
return harvest_spots() | 0.278747 | 0.078997 |
from sims4.tuning.tunable import HasTunableSingletonFactory, AutoFactoryInit
from sims4.tuning.tunable_base import GroupNames
import services, sims4.tuning.tunable
class AllCompletionType(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'description': '\n All of the Objectives as part of this Milestone must be completed\n in order for this Milestone to be considered complete.\n '}
    def completion_requirement(self):
        # None signals "complete every objective" (no numeric subset).
        pass
class SubsetCompletionType(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'description':'\n A numerical subset of the total Objectives need to be complete for\n this Milestone to be considered complete.\n ',
'number_required':sims4.tuning.tunable.TunableRange(description='\n The number of objectives as part of this Milestone that must be\n completed until this Milestone is considered complete.\n ',
tunable_type=int,
default=1,
minimum=1)}
    def completion_requirement(self):
        # The tuned number of objectives needed for the milestone to count as done.
        return self.number_required
class Milestone:
INSTANCE_TUNABLES = {'objectives':sims4.tuning.tunable.TunableList(description='\n A list of all of the Objectives that will be tracked in order for\n this Milestone to be completed. Using the Objective Completion Type\n we will determine the action number of Objectives that need to be\n completed.\n ',
tunable=sims4.tuning.tunable.TunableReference(description='\n An Objective that is one of the requirements for this Milestone\n to be completed.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.OBJECTIVE)),
pack_safe=True),
export_modes=sims4.tuning.tunable_base.ExportModes.All,
tuning_group=GroupNames.CORE),
'objective_completion_type':sims4.tuning.tunable.TunableVariant(description='\n A requirement of what objectives need to be completed. \n ',
complete_all=AllCompletionType.TunableFactory(),
complete_subset=SubsetCompletionType.TunableFactory(),
default='complete_all',
tuning_group=GroupNames.CORE),
'track_completion_count':sims4.tuning.tunable.Tunable(description="\n If checked, this Milestone will track how many times it's been\n completed, even through resets. For instance, GP09 Missions reuse the \n same Aspiration but still need to track how many times the Aspiration\n has been completed.\n ",
tunable_type=bool,
default=False),
'can_complete_without_objectives':sims4.tuning.tunable.Tunable(description="\n If checked, this Milestone can have 0 objectives and be completed.\n If unchecked, having zero objectives won't complete this Milestone. \n This can be used for Milestones like Missions that have dynamically-\n added Objectives that might not be available when the Milestone is \n tested for completion.\n ",
tunable_type=bool,
default=True)}
    @classmethod
    def objective_completion_count(cls):
        # Delegates to the tuned completion type: None means "all", an int a subset.
        return cls.objective_completion_type.completion_requirement()
@classmethod
def should_test_on_zone_load(cls):
for objective in cls.objectives:
if objective.should_test_on_zone_load():
return True
return False | Scripts/simulation/event_testing/milestone.py | from sims4.tuning.tunable import HasTunableSingletonFactory, AutoFactoryInit
from sims4.tuning.tunable_base import GroupNames
import services, sims4.tuning.tunable
class AllCompletionType(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'description': '\n All of the Objectives as part of this Milestone must be completed\n in order for this Milestone to be considered complete.\n '}
    def completion_requirement(self):
        # None signals "complete every objective" (no numeric subset).
        pass
class SubsetCompletionType(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'description':'\n A numerical subset of the total Objectives need to be complete for\n this Milestone to be considered complete.\n ',
'number_required':sims4.tuning.tunable.TunableRange(description='\n The number of objectives as part of this Milestone that must be\n completed until this Milestone is considered complete.\n ',
tunable_type=int,
default=1,
minimum=1)}
    def completion_requirement(self):
        # The tuned number of objectives needed for the milestone to count as done.
        return self.number_required
class Milestone:
INSTANCE_TUNABLES = {'objectives':sims4.tuning.tunable.TunableList(description='\n A list of all of the Objectives that will be tracked in order for\n this Milestone to be completed. Using the Objective Completion Type\n we will determine the action number of Objectives that need to be\n completed.\n ',
tunable=sims4.tuning.tunable.TunableReference(description='\n An Objective that is one of the requirements for this Milestone\n to be completed.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.OBJECTIVE)),
pack_safe=True),
export_modes=sims4.tuning.tunable_base.ExportModes.All,
tuning_group=GroupNames.CORE),
'objective_completion_type':sims4.tuning.tunable.TunableVariant(description='\n A requirement of what objectives need to be completed. \n ',
complete_all=AllCompletionType.TunableFactory(),
complete_subset=SubsetCompletionType.TunableFactory(),
default='complete_all',
tuning_group=GroupNames.CORE),
'track_completion_count':sims4.tuning.tunable.Tunable(description="\n If checked, this Milestone will track how many times it's been\n completed, even through resets. For instance, GP09 Missions reuse the \n same Aspiration but still need to track how many times the Aspiration\n has been completed.\n ",
tunable_type=bool,
default=False),
'can_complete_without_objectives':sims4.tuning.tunable.Tunable(description="\n If checked, this Milestone can have 0 objectives and be completed.\n If unchecked, having zero objectives won't complete this Milestone. \n This can be used for Milestones like Missions that have dynamically-\n added Objectives that might not be available when the Milestone is \n tested for completion.\n ",
tunable_type=bool,
default=True)}
    @classmethod
    def objective_completion_count(cls):
        # Delegates to the tuned completion type: None means "all", an int a subset.
        return cls.objective_completion_type.completion_requirement()
@classmethod
def should_test_on_zone_load(cls):
for objective in cls.objectives:
if objective.should_test_on_zone_load():
return True
return False | 0.588889 | 0.493714 |
import pyexasol
import _config as config
import multiprocessing
import pyexasol.callback as cb
import pandas
import pprint
printer = pprint.PrettyPrinter(indent=4, width=140)
class ImportProc(multiprocessing.Process):
    """Worker process that streams a small DataFrame shard into one Exasol node.

    The parent gathers each worker's HTTP transport address and feeds them all
    to a single ExaConnection.import_parallel() call.
    """
    def __init__(self, node):
        # Node descriptor from ExaConnection.get_nodes(): 'ipaddr', 'port', 'idx'.
        self.node = node
        # One-way pipe: child publishes its internal Exasol address to the parent.
        self.read_pipe, self.write_pipe = multiprocessing.Pipe(False)
        super().__init__()
    def start(self):
        super().start()
        # Parent keeps only the read end; the child owns the write end.
        self.write_pipe.close()
    @property
    def exa_address(self):
        # Blocks until the child has published its HTTP transport address.
        return self.read_pipe.recv()
    def run(self):
        # Child side: the read end belongs to the parent.
        self.read_pipe.close()
        # Init HTTP transport connection
        http = pyexasol.http_transport(self.node['ipaddr'], self.node['port'])
        # Send internal Exasol address to parent process
        self.write_pipe.send(http.exa_address)
        self.write_pipe.close()
        data = [
            {'user_id': 1, 'user_name': 'John', 'shard_id': self.node['idx']},
            {'user_id': 2, 'user_name': 'Foo', 'shard_id': self.node['idx']},
            {'user_id': 3, 'user_name': 'Bar', 'shard_id': self.node['idx']},
        ]
        pd = pandas.DataFrame(data, columns=['user_id', 'user_name', 'shard_id'])
        # Send data from DataFrame to HTTP transport
        http.import_from_callback(cb.import_from_pandas, pd)
        print(f"Child process {self.node['idx']} finished, imported rows: {len(pd)}")
if __name__ == '__main__':
    # One import worker per reserved Exasol node.
    pool_size = 5
    pool = []
    exa_address_list = []
    C = pyexasol.connect(dsn=config.dsn, user=config.user, password=config.password, schema=config.schema)
    C.execute('TRUNCATE TABLE parallel_import')
    # Spawn workers and collect each one's HTTP transport address.
    for n in C.get_nodes(pool_size):
        proc = ImportProc(n)
        proc.start()
        pool.append(proc)
        exa_address_list.append(proc.exa_address)
    printer.pprint(pool)
    printer.pprint(exa_address_list)
    try:
        # A single IMPORT statement fed by all workers in parallel.
        C.import_parallel(exa_address_list, 'parallel_import')
    except (Exception, KeyboardInterrupt):
        # On failure, kill the workers so they don't hang on the transport.
        for p in pool:
            p.terminate()
    else:
        stmt = C.last_statement()
        print(f'IMPORTED {stmt.rowcount()} rows in {stmt.execution_time}s')
    finally:
        # Always reap the worker processes.
        for p in pool:
p.join() | examples/b04_parallel_import.py | import pyexasol
import _config as config
import multiprocessing
import pyexasol.callback as cb
import pandas
import pprint
printer = pprint.PrettyPrinter(indent=4, width=140)
class ImportProc(multiprocessing.Process):
    """Worker process that streams a small DataFrame shard into one Exasol node.

    The parent gathers each worker's HTTP transport address and feeds them all
    to a single ExaConnection.import_parallel() call.
    """
    def __init__(self, node):
        # Node descriptor from ExaConnection.get_nodes(): 'ipaddr', 'port', 'idx'.
        self.node = node
        # One-way pipe: child publishes its internal Exasol address to the parent.
        self.read_pipe, self.write_pipe = multiprocessing.Pipe(False)
        super().__init__()
    def start(self):
        super().start()
        # Parent keeps only the read end; the child owns the write end.
        self.write_pipe.close()
    @property
    def exa_address(self):
        # Blocks until the child has published its HTTP transport address.
        return self.read_pipe.recv()
    def run(self):
        # Child side: the read end belongs to the parent.
        self.read_pipe.close()
        # Init HTTP transport connection
        http = pyexasol.http_transport(self.node['ipaddr'], self.node['port'])
        # Send internal Exasol address to parent process
        self.write_pipe.send(http.exa_address)
        self.write_pipe.close()
        data = [
            {'user_id': 1, 'user_name': 'John', 'shard_id': self.node['idx']},
            {'user_id': 2, 'user_name': 'Foo', 'shard_id': self.node['idx']},
            {'user_id': 3, 'user_name': 'Bar', 'shard_id': self.node['idx']},
        ]
        pd = pandas.DataFrame(data, columns=['user_id', 'user_name', 'shard_id'])
        # Send data from DataFrame to HTTP transport
        http.import_from_callback(cb.import_from_pandas, pd)
        print(f"Child process {self.node['idx']} finished, imported rows: {len(pd)}")
if __name__ == '__main__':
    # One import worker per reserved Exasol node.
    pool_size = 5
    pool = []
    exa_address_list = []
    C = pyexasol.connect(dsn=config.dsn, user=config.user, password=config.password, schema=config.schema)
    C.execute('TRUNCATE TABLE parallel_import')
    # Spawn workers and collect each one's HTTP transport address.
    for n in C.get_nodes(pool_size):
        proc = ImportProc(n)
        proc.start()
        pool.append(proc)
        exa_address_list.append(proc.exa_address)
    printer.pprint(pool)
    printer.pprint(exa_address_list)
    try:
        # A single IMPORT statement fed by all workers in parallel.
        C.import_parallel(exa_address_list, 'parallel_import')
    except (Exception, KeyboardInterrupt):
        # On failure, kill the workers so they don't hang on the transport.
        for p in pool:
            p.terminate()
    else:
        stmt = C.last_statement()
        print(f'IMPORTED {stmt.rowcount()} rows in {stmt.execution_time}s')
    finally:
        # Always reap the worker processes.
        for p in pool:
            p.join()
import pandas as pd
import matplotlib.pyplot as plt
from pandas.core.frame import DataFrame
from alphax.src.api.base import BaseAPI, TimeSeriesAPI, TechIndicatorsAPI
from copy import copy, deepcopy
class MACD:
    """ Return the moving average convergence/divergence time series in two
    json objects as data and meta_data. It raises ValueError when problems
    arise

    Keyword Arguments:
        symbol: the symbol for the equity we want to get its data
        interval: time interval between two conscutive values,
            supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
            'weekly', 'monthly' (default 'daily'
        series_type: The desired price type in the time series. Four types
            are supported: 'close', 'open', 'high', 'low' (default 'close')
        fastperiod: Positive integers are accepted (default=None)
        slowperiod: Positive integers are accepted (default=None)
        signalperiod: Positive integers are accepted (default=None)
    """
    def __init__(self, symbol) -> None:
        # Attrs for alpha vantage api
        self.symbol = symbol
        self.interval = "daily"
        self.series_type = "close"
        # None lets the API fall back to its own MACD period defaults.
        self.fastperiod = None
        self.slowperiod = None
        self.signalperiod = None
        self.ti = TechIndicatorsAPI()
    def get_MACD(self):
        # Delegates to the indicators API wrapper; returns (DataFrame, metadata).
        return self.ti.api_delegation("get_macd", symbol=self.symbol, interval=self.interval,
                                      series_type=self.series_type, fastperiod=self.fastperiod,
                                      slowperiod=self.slowperiod, signalperiod=self.signalperiod)
    def plot(self, df: DataFrame, **kwargs):
        # Optional kwargs: time_start / time_end (index slice bounds),
        # title (plot title), cols (subset of columns to plot).
        time_start = kwargs.get("time_start")
        time_end = kwargs.get("time_end")
        title = kwargs.get("title")
        cols= kwargs.get("cols")
        # NOTE(review): df[:time_start] keeps rows *up to* time_start and
        # df[time_end:] keeps rows *from* time_end -- these bounds look
        # swapped; confirm against the index order the API returns.
        if time_start:
            df = df[:time_start]
        if time_end:
            df = df[time_end:]
        if cols:
            df[cols].plot()
        else:
            df.plot()
        if title:
            plt.title(title)
        plt.show()
if __name__ == "__main__":
macd = MACD("AAPL")
df, metadata = macd.get_MACD()
macd.plot(df) | alphax/src/tools/indicator/macd.py | import pandas as pd
import matplotlib.pyplot as plt
from pandas.core.frame import DataFrame
from alphax.src.api.base import BaseAPI, TimeSeriesAPI, TechIndicatorsAPI
from copy import copy, deepcopy
class MACD:
    """ Return the moving average convergence/divergence time series in two
    json objects as data and meta_data. It raises ValueError when problems
    arise

    Keyword Arguments:
        symbol: the symbol for the equity we want to get its data
        interval: time interval between two conscutive values,
            supported values are '1min', '5min', '15min', '30min', '60min', 'daily',
            'weekly', 'monthly' (default 'daily'
        series_type: The desired price type in the time series. Four types
            are supported: 'close', 'open', 'high', 'low' (default 'close')
        fastperiod: Positive integers are accepted (default=None)
        slowperiod: Positive integers are accepted (default=None)
        signalperiod: Positive integers are accepted (default=None)
    """
    def __init__(self, symbol) -> None:
        # Attrs for alpha vantage api
        self.symbol = symbol
        self.interval = "daily"
        self.series_type = "close"
        # None lets the API fall back to its own MACD period defaults.
        self.fastperiod = None
        self.slowperiod = None
        self.signalperiod = None
        self.ti = TechIndicatorsAPI()
    def get_MACD(self):
        # Delegates to the indicators API wrapper; returns (DataFrame, metadata).
        return self.ti.api_delegation("get_macd", symbol=self.symbol, interval=self.interval,
                                      series_type=self.series_type, fastperiod=self.fastperiod,
                                      slowperiod=self.slowperiod, signalperiod=self.signalperiod)
    def plot(self, df: DataFrame, **kwargs):
        # Optional kwargs: time_start / time_end (index slice bounds),
        # title (plot title), cols (subset of columns to plot).
        time_start = kwargs.get("time_start")
        time_end = kwargs.get("time_end")
        title = kwargs.get("title")
        cols= kwargs.get("cols")
        # NOTE(review): df[:time_start] keeps rows *up to* time_start and
        # df[time_end:] keeps rows *from* time_end -- these bounds look
        # swapped; confirm against the index order the API returns.
        if time_start:
            df = df[:time_start]
        if time_end:
            df = df[time_end:]
        if cols:
            df[cols].plot()
        else:
            df.plot()
        if title:
            plt.title(title)
        plt.show()
if __name__ == "__main__":
macd = MACD("AAPL")
df, metadata = macd.get_MACD()
macd.plot(df) | 0.852199 | 0.469155 |
import torchvision.transforms as transforms
from typing import Callable
from .norm import normalize
from .utils import MultiCropTransform
from .mocov2 import MocoTransform
__all__ = ['ressl_transform']
class ReSSLTransform(MocoTransform):
    """ReSSL augmentations: a weak 'teacher' view paired with a strong 'student' view."""
    def large(self, split: str = 'train', norm: str = 'imagenet') -> Callable:
        """224px dataset: teacher/student crop pair for 'ssl', base transform otherwise."""
        if split == 'ssl':
            return MultiCropTransform([self._aug_t(224, norm), self._aug_s(224, norm, 0.5)])
        else:
            return super().large(split, norm)
    def medium(self, split: str = 'train', norm: str = 'imagenet') -> Callable:
        """64px dataset: teacher/student crop pair for 'ssl', base transform otherwise."""
        if split == 'ssl':
            return MultiCropTransform([self._aug_t(64, norm), self._aug_s(64, norm, 0.5)])
        else:
            # BUG FIX: previously delegated to super().large(), handing 224px
            # transforms to a 64px dataset (assumes MocoTransform defines
            # medium(), implied by this override -- confirm).
            return super().medium(split, norm)
    def small(self, split: str = 'train', norm: str = 'cifar10') -> Callable:
        """32px dataset: teacher/student crop pair for 'ssl', base transform otherwise."""
        if split == 'ssl':
            return MultiCropTransform([self._aug_t(32, norm), self._aug_s(32, norm, 0.)])
        else:
            # BUG FIX: was super().large(); delegate to the matching 32px split
            # (assumes MocoTransform defines small() -- confirm).
            return super().small(split, norm)
    # ========================================================================
    # PRIVATE FUNCTIONS
    # ========================================================================
    def _aug_t(self, size: int, norm: str = 'imagenet') -> Callable:
        '''Teacher augmentations / weak augmentations'''
        return transforms.Compose([
            transforms.RandomResizedCrop(size, scale=(0.2, 1.)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize(norm)
        ])
    def _aug_s(self, size: int, norm: str = 'imagenet', blur_chance: float = 0.5) -> Callable:
        '''Student augmentations / hard augmentations'''
        # Odd blur kernel roughly 10% of the image size.
        kernel_size = int((size // 20) * 2) + 1
        return transforms.Compose([
            transforms.RandomResizedCrop(size, scale=(0.2, 1.)),
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([transforms.GaussianBlur(kernel_size, [.1, 2.])], p=blur_chance),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize(norm)
        ])
def ressl_transform(*args, **kwargs):
return ReSSLTransform(*args, **kwargs) | sslic/data/transforms/ressl.py | import torchvision.transforms as transforms
from typing import Callable
from .norm import normalize
from .utils import MultiCropTransform
from .mocov2 import MocoTransform
__all__ = ['ressl_transform']
class ReSSLTransform(MocoTransform):
    """ReSSL augmentations: a weak 'teacher' view paired with a strong 'student' view."""
    def large(self, split: str = 'train', norm: str = 'imagenet') -> Callable:
        """224px dataset: teacher/student crop pair for 'ssl', base transform otherwise."""
        if split == 'ssl':
            return MultiCropTransform([self._aug_t(224, norm), self._aug_s(224, norm, 0.5)])
        else:
            return super().large(split, norm)
    def medium(self, split: str = 'train', norm: str = 'imagenet') -> Callable:
        """64px dataset: teacher/student crop pair for 'ssl', base transform otherwise."""
        if split == 'ssl':
            return MultiCropTransform([self._aug_t(64, norm), self._aug_s(64, norm, 0.5)])
        else:
            # BUG FIX: previously delegated to super().large(), handing 224px
            # transforms to a 64px dataset (assumes MocoTransform defines
            # medium(), implied by this override -- confirm).
            return super().medium(split, norm)
    def small(self, split: str = 'train', norm: str = 'cifar10') -> Callable:
        """32px dataset: teacher/student crop pair for 'ssl', base transform otherwise."""
        if split == 'ssl':
            return MultiCropTransform([self._aug_t(32, norm), self._aug_s(32, norm, 0.)])
        else:
            # BUG FIX: was super().large(); delegate to the matching 32px split
            # (assumes MocoTransform defines small() -- confirm).
            return super().small(split, norm)
    # ========================================================================
    # PRIVATE FUNCTIONS
    # ========================================================================
    def _aug_t(self, size: int, norm: str = 'imagenet') -> Callable:
        '''Teacher augmentations / weak augmentations'''
        return transforms.Compose([
            transforms.RandomResizedCrop(size, scale=(0.2, 1.)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize(norm)
        ])
    def _aug_s(self, size: int, norm: str = 'imagenet', blur_chance: float = 0.5) -> Callable:
        '''Student augmentations / hard augmentations'''
        # Odd blur kernel roughly 10% of the image size.
        kernel_size = int((size // 20) * 2) + 1
        return transforms.Compose([
            transforms.RandomResizedCrop(size, scale=(0.2, 1.)),
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([transforms.GaussianBlur(kernel_size, [.1, 2.])], p=blur_chance),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize(norm)
        ])
def ressl_transform(*args, **kwargs):
    """Factory entry point (exported via __all__); builds a ReSSLTransform."""
    return ReSSLTransform(*args, **kwargs)
from . import Reaction
class Enzyme:
"""
The enzyme class has a few components:
* The subunit(s) that make up the enzyme
* The genes that encode those subunit(s)
* The reactions that this enzyme is connected to.
:ivar name: the name of the enzyme object
:type name: str
:ivar roles: the set of roles associated with the enzyme object
:type roles: set
:ivar pegs: a dict of pegs associated with the enzyme object and their associated roles
:type pegs: dict
:ivar roles_w_pegs: a dict of roles associated with the enzyme and their pegs
:type roles_w_pegs: dict
:ivar reactions: a set of reaction IDs that this enzyme connects to
:type reactions: set
:ivar ec_number: one or more EC numbers associated with this Enzyme. We only store the numeric part (not the EC part)
:type ec_number: set
"""
def __init__(self, name):
"""
Instantiate the enzyme
:param name: the name of the enzyme
:type name: str
"""
self.name = name # whatever name we give to this thing!
self.roles = set() # Roles (text strings)
self.pegs = {} # a hash that connects Roles to PEGs
self.roles_w_pegs = {} # which roles have pegs
self.reactions = set() # RIDs that the enzyme connects to
self.ec_number = set() # one or more EC numbers associated with this Enzyme. We only store the numeric part (not the EC part)
def __eq__(self, other):
"""
Is this enzyme the same as another one?
:param other: The other enzyme
:type other: Enzyme
:return: Whether the two enzymes are the same
:rtype: bool
"""
if isinstance(other, Enzyme):
return (self.name, self.roles) == (other.name, other.roles)
else:
return NotImplemented
def __ne__(self, other):
"""
Are these not equal?
:param other: The other enzyme
:type other: Enzyme
:return: Whether the two enzymes are not equal
:rtype: bool
"""
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
    def __hash__(self):
        """
        The hash function is based on the name of the enzyme (the
        original docstring said "compound"; this class hashes name).
        :rtype: int
        """
        return hash(self.name)
def __str__(self):
"""
The string representation of the enzyme
:rtype: str
"""
return "ENZYME: " + self.name + " (roles: " + "; ".join([x for x in self.roles]) + ")"
def add_roles(self, roles):
"""
Add roles to this enzyme or complex
:param roles: A set of functional roles that encode the enzyme
:type roles: set
"""
if not isinstance(roles, set):
raise TypeError("Roles must be a set")
self.roles.update(roles)
    def has_role(self, role):
        """
        Does this enzyme have this role?
        :param role: A functional role
        :type role: str
        :returns: True when the role is part of this enzyme
        :rtype: bool
        """
        return role in self.roles
    def number_of_roles(self):
        """
        How many roles does this enzyme have?
        :returns: the count of distinct functional roles
        :rtype: int
        """
        return len(self.roles)
def add_pegs(self, pegs):
"""
Add a hash of pegs and roles. Keys must be pegs, values must be roles.
Will throw a KeyError if the Role is not present
:param pegs: A hash of pegs and roles that encode the enzyme (e.g. from the assigned functions file)
:type pegs: dict
:raises: KeyError
"""
if not isinstance(pegs, dict):
raise TypeError("pegs must be a hash to add more than one")
for p in pegs:
if pegs[p] in self.roles:
self.pegs[p] = pegs[p]
if pegs[p] not in self.roles_w_pegs:
self.roles_w_pegs[pegs[p]] = []
self.roles_w_pegs[pegs[p]].append(p)
else:
raise KeyError("Role " + pegs[p] + " not found")
def add_a_peg(self, peg, role):
"""
Add a single peg and the role that it connects to.
:param peg: The peg id
:type peg: str
:param role: The role it connects to
:type role: str
:raises: KeyError
"""
if not isinstance(peg, str):
raise TypeError("peg must be a string. Did you mean to use add_pegs?")
if role in self.roles:
self.pegs[peg] = role
if role not in self.roles_w_pegs:
self.roles_w_pegs[role] = []
self.roles_w_pegs[role].append(peg)
else:
raise KeyError("Role " + role + " not found")
def number_of_pegs(self):
"""
The number of pegs assocaited with this enzyme.
:rtype: int
"""
return len(self.pegs)
def number_of_roles_with_pegs(self):
"""
How many of our roles have pegs associated with them?
:rtype: int
"""
return len(self.roles_w_pegs)
def has_peg_for_role(self, role):
"""
Do we have at least one peg for this role?
:param role: The role we are looking for
:type role: str
:return: If a peg is present
:rtype: bool
"""
return role in self.roles_w_pegs
def add_reaction(self, reaction):
"""
Add a reaction that this enzyme is inolved in.
:param reaction: The reaction object that this is involved with
:type reaction: Reaction
"""
if not isinstance(reaction, str):
raise TypeError("reaction must be a string not a " + str(type(reaction)))
self.reactions.add(reaction)
def number_of_reactions(self):
"""
The number of reactions that this enzyme is involved in
:rtype: int
"""
return len(self.reactions)
def add_ec(self, ecnumber):
"""
Add an EC number to the Enzyme complex. We just store the 1.2.3.4 or 1.2.-.- part, not the EC part.
:param ecnumber: The EC number
:type ecnumber: str
"""
self.ec_number.add(ecnumber)
def probability(self):
"""
The probability that this reaction occurs in the cell.
Currently the number of pegs/number of roles. Thus if most of
the pegs are present then the enzyme is likely to function
:returns: the probability that this reaction is complete
:rtype: float
"""
# Initially we had this, but note that a peg can have two roles
# (joined together with " / " or " @ ", and so we can't just use
# this simple calculation. We need to know thenumber of pegroles
# / number of roles - roles with pegs!
return 1.0 * self.number_of_roles_with_pegs() / self.number_of_roles() | PyFBA/metabolism/enzyme.py | from . import Reaction
class Enzyme:
    """
    An enzyme complex and its connections into the metabolic network.

    The enzyme class has a few components:
     * The subunit(s) (functional roles) that make up the enzyme
     * The genes (pegs) that encode those subunit(s)
     * The reactions that this enzyme is connected to

    :ivar name: the name of the enzyme object
    :type name: str
    :ivar roles: the set of roles associated with the enzyme object
    :type roles: set
    :ivar pegs: a dict mapping each peg to its associated role
    :type pegs: dict
    :ivar roles_w_pegs: a dict mapping each role to the list of pegs encoding it
    :type roles_w_pegs: dict
    :ivar reactions: a set of reaction IDs that this enzyme connects to
    :type reactions: set
    :ivar ec_number: EC numbers for this enzyme (numeric part only, no "EC")
    :type ec_number: set
    """

    def __init__(self, name):
        """
        Instantiate the enzyme.

        :param name: the name of the enzyme
        :type name: str
        """
        self.name = name
        self.roles = set()
        self.pegs = {}
        self.roles_w_pegs = {}
        self.reactions = set()
        self.ec_number = set()

    def __eq__(self, other):
        """
        Two enzymes are equal when both name and role set match.

        :param other: The other enzyme
        :type other: Enzyme
        :return: Whether the two enzymes are the same
        :rtype: bool
        """
        if isinstance(other, Enzyme):
            return (self.name, self.roles) == (other.name, other.roles)
        return NotImplemented

    def __ne__(self, other):
        """
        Inequality, the negation of __eq__.

        :param other: The other enzyme
        :type other: Enzyme
        :return: Whether the two enzymes are not equal
        :rtype: bool
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        """
        The hash is based on the name of the enzyme.

        :rtype: int
        """
        return hash(self.name)

    def __str__(self):
        """
        The string representation of the enzyme.

        :rtype: str
        """
        # join() accepts the set directly; the original wrapped it in a
        # redundant list comprehension.
        return "ENZYME: " + self.name + " (roles: " + "; ".join(self.roles) + ")"

    def add_roles(self, roles):
        """
        Add roles to this enzyme or complex.

        :param roles: A set of functional roles that encode the enzyme
        :type roles: set
        :raises TypeError: if roles is not a set
        """
        if not isinstance(roles, set):
            raise TypeError("Roles must be a set")
        self.roles.update(roles)

    def has_role(self, role):
        """
        Does this enzyme have this role?

        :param role: A functional role
        :type role: str
        :rtype: bool
        """
        return role in self.roles

    def number_of_roles(self):
        """
        How many roles does this enzyme have?

        :rtype: int
        """
        return len(self.roles)

    def add_pegs(self, pegs):
        """
        Add a dict of pegs and roles. Keys must be pegs, values must be roles.

        :param pegs: A dict of pegs and roles (e.g. from the assigned
            functions file)
        :type pegs: dict
        :raises TypeError: if pegs is not a dict
        :raises KeyError: if a role is not present on this enzyme
        """
        if not isinstance(pegs, dict):
            raise TypeError("pegs must be a hash to add more than one")
        for p in pegs:
            if pegs[p] in self.roles:
                self.pegs[p] = pegs[p]
                # Maintain the reverse role -> [pegs] index as well.
                if pegs[p] not in self.roles_w_pegs:
                    self.roles_w_pegs[pegs[p]] = []
                self.roles_w_pegs[pegs[p]].append(p)
            else:
                raise KeyError("Role " + pegs[p] + " not found")

    def add_a_peg(self, peg, role):
        """
        Add a single peg and the role that it connects to.

        :param peg: The peg id
        :type peg: str
        :param role: The role it connects to
        :type role: str
        :raises TypeError: if peg is not a string
        :raises KeyError: if the role is not present on this enzyme
        """
        if not isinstance(peg, str):
            raise TypeError("peg must be a string. Did you mean to use add_pegs?")
        if role in self.roles:
            self.pegs[peg] = role
            if role not in self.roles_w_pegs:
                self.roles_w_pegs[role] = []
            self.roles_w_pegs[role].append(peg)
        else:
            raise KeyError("Role " + role + " not found")

    def number_of_pegs(self):
        """
        The number of pegs associated with this enzyme.

        :rtype: int
        """
        return len(self.pegs)

    def number_of_roles_with_pegs(self):
        """
        How many of our roles have pegs associated with them?

        :rtype: int
        """
        return len(self.roles_w_pegs)

    def has_peg_for_role(self, role):
        """
        Do we have at least one peg for this role?

        :param role: The role we are looking for
        :type role: str
        :rtype: bool
        """
        return role in self.roles_w_pegs

    def add_reaction(self, reaction):
        """
        Add a reaction ID that this enzyme is involved in.

        BUG FIX (docs): the original docstring claimed a Reaction object is
        expected, but the code enforces (and callers must pass) a string ID.

        :param reaction: The reaction ID this enzyme is involved with
        :type reaction: str
        :raises TypeError: if reaction is not a string
        """
        if not isinstance(reaction, str):
            raise TypeError("reaction must be a string not a " + str(type(reaction)))
        self.reactions.add(reaction)

    def number_of_reactions(self):
        """
        The number of reactions that this enzyme is involved in.

        :rtype: int
        """
        return len(self.reactions)

    def add_ec(self, ecnumber):
        """
        Add an EC number to the enzyme complex. We store just the
        1.2.3.4 or 1.2.-.- part, not the "EC" prefix.

        :param ecnumber: The EC number
        :type ecnumber: str
        """
        self.ec_number.add(ecnumber)

    def probability(self):
        """
        The probability that this enzyme is functional in the cell:
        the fraction of its roles that have at least one peg.

        :returns: the probability that this enzyme complex is complete
        :rtype: float
        """
        # BUG FIX: guard against ZeroDivisionError for an enzyme with no
        # roles; an enzyme with no known roles cannot be complete.
        if self.number_of_roles() == 0:
            return 0.0
        # A peg can have two roles (joined with " / " or " @ "), so a
        # simple pegs/roles calculation is not usable; we use the number
        # of roles that have pegs instead.
        # NOTE(review): the trailing " | 0.82… | 0.66…" below looks like
        # dataset-dump columns fused onto this line — confirm.
        return 1.0 * self.number_of_roles_with_pegs() / self.number_of_roles() | 0.824144 | 0.665546
from wiserHeatingAPI import wiserHub
import json
import sys
dev="false" # set to true to see raw data
# Get Wiser Parameters from keyfile
try:
with open('wiserkeys.params', 'r') as f:
data = f.read().split('\n')
except FileNotFoundError as e:
print("{}, {}/{}".format(e.strerror, 'wiserkeys.params', keyfile) )
else:
wiserkey=""
wiserip=""
for lines in data:
line=lines.split('=')
if line[0]=='wiserkey':
wiserkey=line[1]
if line[0]=='wiserhubip':
wiserip=line[1]
try:
#
try:
wh = wiserHub.wiserHub(wiserip,wiserkey)
except:
print("Unable to connect to Wiser Hub {}".format(sys.exc_info()[1]) )
print (' Wiser Hub IP= {}'.format(wiserip))
print (' WiserKey= {}'.format(wiserkey))
else:
if dev=="true":
# Heating State
print("--------------------------------")
print ("System Data {} ".format(wh.getSystem()))
print("--------------------------------")
print("--------------------------------")
print ("Hub Data {} ".format(wh.getHubData()))
print("--------------------------------")
print("--------------------------------")
print ("Raw Room Data {} ".format(wh.getRooms()))
print("--------------------------------")
print("--------------------------------")
print ("Device Data {} ".format(wh.getDevices()))
print ("--------------------------------")
system=wh.getSystem()
print ("System\n {}".format(system.get("LocalDateAndTime") ) )
print (" Heating: {}, HeatingButtonOverride: {}".format(wh.getHeatingRelayStatus(),system.get("HeatingButtonOverrideState") ) )
if wh.getHotwater():
print (" Hot Water: {}, HotWaterButtonOverride: {}\n".format(wh.getHotwaterRelayStatus(),system.get("HotWaterButtonOverrideState") ) )
print (" Pairing: {}, CloudConnection: {}, OpenThermConnection: {}\n".format(system.get("PairingStatus"),system.get("CloudConnectionStatus"),system.get("OpenThermConnectionStatus") ) )
print ("Controller")
dev=wh.getDevice(0)
print (" {}, F/W: {}, Locked: {}".format(dev.get("ModelIdentifier"),system.get("ActiveSystemVersion"),dev.get("DeviceLockEnabled") ) )
print (" WiFi Signal: {}, ReceiveCont: {}".format(dev.get("DisplayedSignalStrength"),dev.get("ReceptionOfController") ) )
zig=wh.getHubData().get("Zigbee")
print (" Zigbee: {}".format(zig ) )
print (" UpgradeInfo:")
for firm in wh.getHubData().get("UpgradeInfo"):
print (" {}".format(firm))
# List all Rooms
findValve=0
roomName=None
print()
for room in wh.getRooms():
smartValves=room.get("SmartValveIds")
roomStat=room.get("RoomStatId")
print ("{} - setpoint: {}C, current temp: {}C, Demand: {}%, OutputState: {}".format(room.get("Name"),room.get("CurrentSetPoint")/10,room.get("CalculatedTemperature")/10,room.get("PercentageDemand"),room.get("ControlOutputState") ) )
if roomStat:
# print ("\troomStatId: {}".format(roomStat))
dev=wh.getDevice(roomStat)
bat = dev.get("BatteryVoltage")
if bat != None:
bat = bat/10
else:
bat = "?.?"
batlevel=dev.get("BatteryLevel")
if batlevel == None:
batlevel = "Unknown"
print (" {} H/W: {}, SerialNo: {}, F/W: {}, Batt: {}V {}, Locked: {}".format(dev.get("ProductType"),dev.get("HardwareVersion"),dev.get("SerialNumber"),dev.get("ActiveFirmwareVersion"),bat,batlevel,dev.get("DeviceLockEnabled") ) )
print (" Signal: {}, ReceiveCont: {}, 'ReceiveDev: {}".format(dev.get("DisplayedSignalStrength"),dev.get("ReceptionOfController"),dev.get("ReceptionOfDevice") ) )
if smartValves:
# print (" SmartValveIds: {}".format(smartValves))
for smartvalve in smartValves:
dev=wh.getDevice(smartvalve)
bat = dev.get("BatteryVoltage")
if bat != None:
bat = bat/10
else:
bat = "?.?"
batlevel=dev.get("BatteryLevel")
if batlevel == None:
batlevel = "Unknown"
print (" {} H/W: {}, SerialNo: {}, F/W: {}, Batt: {}V {}, Locked: {}".format(dev.get("ProductType"),dev.get("HardwareVersion"),dev.get("SerialNumber"),dev.get("ActiveFirmwareVersion"),bat,batlevel,dev.get("DeviceLockEnabled") ) )
print (" Signal: {}, ReceiveCont: {}, 'ReceiveDev: {}".format(dev.get("DisplayedSignalStrength"),dev.get("ReceptionOfController"),dev.get("ReceptionOfDevice") ) )
except json.decoder.JSONDecodeError as ex:
print("JSON Exception") | systemstatus.py | from wiserHeatingAPI import wiserHub
import json
import sys

# Set to "true" to dump the raw hub/room/device JSON as well.
dev = "false"


def _print_device_details(device):
    """Print battery / firmware / signal details for one device record.

    Factored out: the original duplicated this block for roomstats and
    smart valves.
    """
    bat = device.get("BatteryVoltage")
    if bat is not None:
        bat = bat / 10
    else:
        bat = "?.?"
    batlevel = device.get("BatteryLevel")
    if batlevel is None:
        batlevel = "Unknown"
    print("    {} H/W: {}, SerialNo: {}, F/W: {}, Batt: {}V {}, Locked: {}".format(device.get("ProductType"), device.get("HardwareVersion"), device.get("SerialNumber"), device.get("ActiveFirmwareVersion"), bat, batlevel, device.get("DeviceLockEnabled")))
    print("    Signal: {}, ReceiveCont: {}, 'ReceiveDev: {}".format(device.get("DisplayedSignalStrength"), device.get("ReceptionOfController"), device.get("ReceptionOfDevice")))


# Get Wiser parameters from the keyfile.  Defaults keep the names defined
# even when the file is missing (the original left them undefined on that
# path and then crashed later).
wiserkey = ""
wiserip = ""
try:
    with open('wiserkeys.params', 'r') as f:
        data = f.read().split('\n')
except FileNotFoundError as e:
    # BUG FIX: the original formatted an undefined name `keyfile` here,
    # raising NameError whenever wiserkeys.params was absent.
    print("{}, {}".format(e.strerror, 'wiserkeys.params'))
else:
    for lines in data:
        line = lines.split('=')
        if line[0] == 'wiserkey':
            wiserkey = line[1]
        if line[0] == 'wiserhubip':
            wiserip = line[1]

try:
    try:
        wh = wiserHub.wiserHub(wiserip, wiserkey)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        print("Unable to connect to Wiser Hub {}".format(sys.exc_info()[1]))
        print(' Wiser Hub IP= {}'.format(wiserip))
        print(' WiserKey= {}'.format(wiserkey))
    else:
        if dev == "true":
            # Raw dump of everything the hub reports, one section per call.
            for label, payload in (("System Data", wh.getSystem()),
                                   ("Hub Data", wh.getHubData()),
                                   ("Raw Room Data", wh.getRooms()),
                                   ("Device Data", wh.getDevices())):
                print("--------------------------------")
                print("{} {} ".format(label, payload))
                print("--------------------------------")
        system = wh.getSystem()
        print("System\n {}".format(system.get("LocalDateAndTime")))
        print(" Heating: {}, HeatingButtonOverride: {}".format(wh.getHeatingRelayStatus(), system.get("HeatingButtonOverrideState")))
        if wh.getHotwater():
            print(" Hot Water: {}, HotWaterButtonOverride: {}\n".format(wh.getHotwaterRelayStatus(), system.get("HotWaterButtonOverrideState")))
        print(" Pairing: {}, CloudConnection: {}, OpenThermConnection: {}\n".format(system.get("PairingStatus"), system.get("CloudConnectionStatus"), system.get("OpenThermConnectionStatus")))
        print("Controller")
        # BUG FIX: the original reassigned the raw-dump flag `dev` here;
        # the controller record gets its own name.
        controller = wh.getDevice(0)
        print(" {}, F/W: {}, Locked: {}".format(controller.get("ModelIdentifier"), system.get("ActiveSystemVersion"), controller.get("DeviceLockEnabled")))
        print(" WiFi Signal: {}, ReceiveCont: {}".format(controller.get("DisplayedSignalStrength"), controller.get("ReceptionOfController")))
        zig = wh.getHubData().get("Zigbee")
        print(" Zigbee: {}".format(zig))
        print(" UpgradeInfo:")
        for firm in wh.getHubData().get("UpgradeInfo"):
            print(" {}".format(firm))
        # List all rooms with their setpoints and attached devices.
        print()
        for room in wh.getRooms():
            smartValves = room.get("SmartValveIds")
            roomStat = room.get("RoomStatId")
            print("{} - setpoint: {}C, current temp: {}C, Demand: {}%, OutputState: {}".format(room.get("Name"), room.get("CurrentSetPoint") / 10, room.get("CalculatedTemperature") / 10, room.get("PercentageDemand"), room.get("ControlOutputState")))
            if roomStat:
                _print_device_details(wh.getDevice(roomStat))
            if smartValves:
                for smartvalve in smartValves:
                    _print_device_details(wh.getDevice(smartvalve))
except json.decoder.JSONDecodeError:
    print("JSON Exception") | 0.145996 | 0.072472
import numpy as np
import PointwiseFunctions.AnalyticSolutions.Hydro.SmoothFlow as hydro
import Evolution.Systems.NewtonianEuler.TimeDerivative as flux
def soln_error(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return None
_soln_pressure = 1.0
_soln_adiabatic_index = 5.0 / 3.0
_soln_perturbation_size = 0.2
def _soln_mean_velocity(dim):
mean_v = []
for i in range(0, dim):
mean_v.append(0.9 - i * 0.5)
return np.asarray(mean_v)
def _soln_wave_vector(dim):
wave_vector = []
for i in range(0, dim):
wave_vector.append(0.1 + i)
return np.asarray(wave_vector)
def soln_mass_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return hydro.rest_mass_density(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def soln_momentum_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return hydro.rest_mass_density(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index,
_soln_perturbation_size) * hydro.spatial_velocity(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index, _soln_perturbation_size)
def soln_energy_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
int_energy = hydro.specific_internal_energy(coords, time,
_soln_mean_velocity(dim),
_soln_wave_vector(dim),
_soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
return hydro.rest_mass_density(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index, _soln_perturbation_size) * (
0.5 * np.dot(velocity, velocity) + int_energy)
def soln_flux_mass_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
return soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim)
def soln_flux_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
pressure = hydro.pressure(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index, _soln_perturbation_size)
return flux.momentum_density_flux_impl(
soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
soln_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def soln_flux_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
pressure = hydro.pressure(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index, _soln_perturbation_size)
return flux.energy_density_flux_impl(
soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
soln_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def soln_velocity(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def soln_specific_internal_energy(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim):
return hydro.specific_internal_energy(coords, time,
_soln_mean_velocity(dim),
_soln_wave_vector(dim),
_soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def data_error(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return None
_data_adiabatic_index = 1.4
_data_strip_bimedian_height = 0.5
_data_strip_thickness = 0.5
_data_strip_density = 2.0
_data_strip_velocity = 0.5
_data_background_density = 1.0
_data_background_velocity = -0.5
_data_pressure = 2.5
_data_perturb_amplitude = 0.1
_data_perturb_width = 0.03
def data_mass_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
if np.abs(coords[-1] -
_data_strip_bimedian_height) < 0.5 * _data_strip_thickness:
return _data_strip_density
else:
return _data_background_density
def data_velocity(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
velocity = np.zeros([dim])
if np.abs(coords[-1] -
_data_strip_bimedian_height) < 0.5 * _data_strip_thickness:
velocity[0] = _data_strip_velocity
else:
velocity[0] = _data_background_velocity
one_over_two_sigma_squared = 0.5 / (_data_perturb_width)**2
strip_lower_bound = (_data_strip_bimedian_height -
0.5 * _data_strip_thickness)
strip_upper_bound = (_data_strip_bimedian_height +
0.5 * _data_strip_thickness)
velocity[-1] = (np.exp(-one_over_two_sigma_squared *
(coords[-1] - strip_lower_bound)**2) +
np.exp(-one_over_two_sigma_squared *
(coords[-1] - strip_upper_bound)**2))
velocity[-1] *= _data_perturb_amplitude * np.sin(4.0 * np.pi * coords[0])
return np.asarray(velocity)
def data_momentum_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return data_mass_density(
face_mesh_velocity,
outward_directed_normal_covector, coords, time, dim) * data_velocity(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
def data_pressure(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return _data_pressure
def data_specific_internal_energy(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim):
return 1.0 / (_data_adiabatic_index - 1.0) * data_pressure(
face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim) / data_mass_density(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
def data_energy_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
int_energy = data_specific_internal_energy(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
return data_mass_density(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim) * (0.5 * np.dot(velocity, velocity) + int_energy)
def data_flux_mass_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
return data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim)
def data_flux_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
pressure = data_pressure(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
return flux.momentum_density_flux_impl(
data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
data_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def data_flux_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
pressure = data_pressure(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
return flux.energy_density_flux_impl(
data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
data_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure) | tests/Unit/Evolution/Systems/NewtonianEuler/BoundaryConditions/DirichletAnalytic.py |
import numpy as np
import PointwiseFunctions.AnalyticSolutions.Hydro.SmoothFlow as hydro
import Evolution.Systems.NewtonianEuler.TimeDerivative as flux
def soln_error(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return None
_soln_pressure = 1.0
_soln_adiabatic_index = 5.0 / 3.0
_soln_perturbation_size = 0.2
def _soln_mean_velocity(dim):
mean_v = []
for i in range(0, dim):
mean_v.append(0.9 - i * 0.5)
return np.asarray(mean_v)
def _soln_wave_vector(dim):
wave_vector = []
for i in range(0, dim):
wave_vector.append(0.1 + i)
return np.asarray(wave_vector)
def soln_mass_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return hydro.rest_mass_density(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def soln_momentum_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return hydro.rest_mass_density(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index,
_soln_perturbation_size) * hydro.spatial_velocity(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index, _soln_perturbation_size)
def soln_energy_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
int_energy = hydro.specific_internal_energy(coords, time,
_soln_mean_velocity(dim),
_soln_wave_vector(dim),
_soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
return hydro.rest_mass_density(
coords, time, _soln_mean_velocity(dim), _soln_wave_vector(dim),
_soln_pressure, _soln_adiabatic_index, _soln_perturbation_size) * (
0.5 * np.dot(velocity, velocity) + int_energy)
def soln_flux_mass_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
return soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim)
def soln_flux_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
pressure = hydro.pressure(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index, _soln_perturbation_size)
return flux.momentum_density_flux_impl(
soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
soln_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def soln_flux_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
pressure = hydro.pressure(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index, _soln_perturbation_size)
return flux.energy_density_flux_impl(
soln_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
soln_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def soln_velocity(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return hydro.spatial_velocity(coords, time, _soln_mean_velocity(dim),
_soln_wave_vector(dim), _soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def soln_specific_internal_energy(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim):
return hydro.specific_internal_energy(coords, time,
_soln_mean_velocity(dim),
_soln_wave_vector(dim),
_soln_pressure,
_soln_adiabatic_index,
_soln_perturbation_size)
def data_error(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return None
_data_adiabatic_index = 1.4
_data_strip_bimedian_height = 0.5
_data_strip_thickness = 0.5
_data_strip_density = 2.0
_data_strip_velocity = 0.5
_data_background_density = 1.0
_data_background_velocity = -0.5
_data_pressure = 2.5
_data_perturb_amplitude = 0.1
_data_perturb_width = 0.03
def data_mass_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
if np.abs(coords[-1] -
_data_strip_bimedian_height) < 0.5 * _data_strip_thickness:
return _data_strip_density
else:
return _data_background_density
def data_velocity(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
velocity = np.zeros([dim])
if np.abs(coords[-1] -
_data_strip_bimedian_height) < 0.5 * _data_strip_thickness:
velocity[0] = _data_strip_velocity
else:
velocity[0] = _data_background_velocity
one_over_two_sigma_squared = 0.5 / (_data_perturb_width)**2
strip_lower_bound = (_data_strip_bimedian_height -
0.5 * _data_strip_thickness)
strip_upper_bound = (_data_strip_bimedian_height +
0.5 * _data_strip_thickness)
velocity[-1] = (np.exp(-one_over_two_sigma_squared *
(coords[-1] - strip_lower_bound)**2) +
np.exp(-one_over_two_sigma_squared *
(coords[-1] - strip_upper_bound)**2))
velocity[-1] *= _data_perturb_amplitude * np.sin(4.0 * np.pi * coords[0])
return np.asarray(velocity)
def data_momentum_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
return data_mass_density(
face_mesh_velocity,
outward_directed_normal_covector, coords, time, dim) * data_velocity(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
def data_pressure(face_mesh_velocity, outward_directed_normal_covector, coords,
time, dim):
return _data_pressure
def data_specific_internal_energy(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim):
return 1.0 / (_data_adiabatic_index - 1.0) * data_pressure(
face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim) / data_mass_density(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
def data_energy_density(face_mesh_velocity, outward_directed_normal_covector,
coords, time, dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
int_energy = data_specific_internal_energy(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim)
return data_mass_density(
face_mesh_velocity, outward_directed_normal_covector, coords, time,
dim) * (0.5 * np.dot(velocity, velocity) + int_energy)
def data_flux_mass_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
return data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords,
time, dim)
def data_flux_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
pressure = data_pressure(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
return flux.momentum_density_flux_impl(
data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
data_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure)
def data_flux_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim):
velocity = data_velocity(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
pressure = data_pressure(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim)
return flux.energy_density_flux_impl(
data_momentum_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim),
data_energy_density(face_mesh_velocity,
outward_directed_normal_covector, coords, time,
dim), velocity, pressure) | 0.734596 | 0.369116 |
import torch
from torch import nn
import torch.nn.functional as F
import os
import numpy as np
from utility import *
import pandas as pd
from kornia.filters import filter2D
import imageio
import math
from collections import OrderedDict
import random
def save_models(gs, ds, location):
folder = create_folder("SavedModels", location)
path_to_save = os.path.join("SavedModels", folder)
print("Saving model to %s" % (path_to_save))
optimal_noises = {}
gen_states = {}
for i in range(len(gs)):
gen_states[str(i)] = gs[i].state_dict()
torch.save(gen_states, os.path.join(path_to_save, "SinGAN.generators"))
discrim_states = {}
for i in range(len(ds)):
discrim_states[str(i)] = ds[i].state_dict()
torch.save(discrim_states, os.path.join(path_to_save, "SinGAN.discriminators"))
def load_models(gs, ds, folder, device):
gen_params = torch.load(os.path.join(folder, "SinGAN.generators"),
map_location=device)
discrim_params = torch.load(os.path.join(folder, "SinGAN.discriminators"),
map_location=device)
for i in range(len(gs)):
gen_params_compat = OrderedDict()
gs[i].load_state_dict(gen_params[str(i)])
gs[i].to(device)
discrim_params_compat = OrderedDict()
ds[i].load_state_dict(discrim_params[str(i)])
return gs, ds
def laplace_pyramid_downscale2D(frame, level, downscale_per_level, device):
kernel_size = 5
sigma = 2 * (1 / downscale_per_level) / 6
x_cord = torch.arange(kernel_size)
x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = torch.transpose(x_grid, 0, 1)
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (kernel_size - 1)/2.
variance = sigma**2.
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xy_grid - mean)**2., dim=-1) /\
(2*variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
# Reshape to 2d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size).to(device)
gaussian_kernel = gaussian_kernel.repeat(frame.shape[1], 1, 1, 1)
input_size = np.array(list(frame.shape[2:]))
with torch.no_grad():
for i in range(level):
s = (input_size * (downscale_per_level**(i+1))).astype(int)
frame = F.conv2d(frame, gaussian_kernel, groups=frame.shape[1])
frame = F.interpolate(frame, size = list(s), mode='nearest')
del gaussian_kernel
return frame
def calc_gradient_penalty(discrim, real_data, fake_data, device):
#print real_data.size()
alpha = torch.rand(1, 1, device=device)
alpha = alpha.expand(real_data.size())
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
#interpolates = interpolates.to(device)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = discrim(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def feature_distance(img1, img2, device):
if(features_model is None):
model = models.vgg19(pretrained=True).to(device=device)
model.eval()
layer = model.features
if(img1.shape[1] == 1):
img1 = torch.repeat(img1, 3, axis=1)
if(img2.shape[1] == 1):
img2 = torch.repeat(img2, 3, axis=1)
img1_feature_vector = layer(img1_tensor)
img2_feature_vector = layer(img2_tensor)
return ((img1_feature_vector - img2_feature_vector) ** 2).mean()
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
elif type(m) == nn.Linear:
torch.nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0.01)
elif classname.find('Norm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def generate(generators, batch_size, device):
with torch.no_grad():
generated_image = torch.zeros(batch_size, 1,
generators[0].resolution[0], generators[0].resolution[1]).to(device)
for i in range(0, len(generators)):
generated_image = F.interpolate(generated_image,
size=generators[i].resolution, mode="bilinear",
align_corners=False)
generated_image = generators[i](generated_image)
return generated_image
def init_scales(dataset, device, min_dim_size = 25, downscale_ratio = 0.75):
n = round(math.log(min_dim_size / \
dataset.resolution[0]) / math.log(downscale_ratio))+1
gs = []
ds = []
print("The model will have %i scales" % (n))
for i in range(n):
scaling = []
factor = downscale_ratio**(n - i - 1)
for j in range(len(dataset.resolution)):
x = int(dataset.resolution[j] * factor)
scaling.append(x)
print("Scale %i: %s" % (i, str(scaling)))
num_kernels = int((2 ** (5 + (i / 4))) / 3)
g = SinGAN_Generator(num_kernels, scaling, device).to(device)
g.apply(weights_init)
d = SinGAN_Discriminator(num_kernels, device).to(device)
d.apply(weights_init)
gs.append(g)
ds.append(d)
return n, gs, ds
class SinGAN_Generator(nn.Module):
def __init__ (self, num_kernels, resolution, device):
super(SinGAN_Generator, self).__init__()
self.resolution = resolution
self.num_kernels = num_kernels
self.device = device
modules = []
for i in range(5):
# The head goes from 1 channels to num_kernels
if i == 0:
modules.append(nn.Sequential(
nn.Conv2d(1, num_kernels, kernel_size=3,
stride=1, padding=0),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
# The tail will go from kernel_size to num_channels before tanh [-1,1]
elif i == 4:
tail = nn.Sequential(
nn.Conv2d(num_kernels, 1, kernel_size=3,
stride=1, padding=0),
nn.Tanh()
)
modules.append(tail)
# Other layers will have 32 channels for the 32 kernels
else:
modules.append(nn.Sequential(
nn.Conv2d(num_kernels, num_kernels, kernel_size=3,
stride=1, padding=0),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
self.model = nn.Sequential(*modules)
def forward(self, data, noise=None):
data_padded = F.pad(data, [5, 5, 5, 5])
noise = torch.randn(data_padded.shape, device=self.device)
noisePlusData = data_padded + noise
output = self.model(noisePlusData)
return output + data
class SinGAN_Discriminator(nn.Module):
def __init__ (self, num_kernels, device):
super(SinGAN_Discriminator, self).__init__()
self.device=device
self.num_kernels = num_kernels
modules = []
for i in range(5):
# The head goes from 3 channels (RGB) to num_kernels
if i == 0:
modules.append(nn.Sequential(
nn.Conv2d(1, num_kernels,
kernel_size=3, stride=1),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
# The tail will go from num_kernels to 1 channel for discriminator optimization
elif i == 4:
tail = nn.Sequential(
nn.Conv2d(num_kernels, 1,
kernel_size=3, stride=1)
)
modules.append(tail)
# Other layers will have 32 channels for the 32 kernels
else:
modules.append(nn.Sequential(
nn.Conv2d(num_kernels, num_kernels,
kernel_size=3, stride=1),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
self.model = nn.Sequential(*modules)
self.model = self.model.to(device)
def forward(self, x):
return self.model(x)
class Dataset(torch.utils.data.Dataset):
def __init__(self, dataset_location):
self.dataset_location = dataset_location
self.items = os.listdir(dataset_location)
self.resolution = [128, 128]
def __len__(self):
return len(self.items)
def __getitem__(self, index):
data = imageio.imread(os.path.join(self.dataset_location,
self.items[index])).astype(np.float32)
data = np2torch(data, "cpu")
data *= (2.0/255.0)
data -= 1
x = random.randint(0, data.shape[0]-128)
y = random.randint(0, data.shape[1]-128)
return data.unsqueeze(0)[:,x:x+128, y:y+128] | TestModels/SinGAN_model.py | import torch
from torch import nn
import torch.nn.functional as F
import os
import numpy as np
from utility import *
import pandas as pd
from kornia.filters import filter2D
import imageio
import math
from collections import OrderedDict
import random
def save_models(gs, ds, location):
folder = create_folder("SavedModels", location)
path_to_save = os.path.join("SavedModels", folder)
print("Saving model to %s" % (path_to_save))
optimal_noises = {}
gen_states = {}
for i in range(len(gs)):
gen_states[str(i)] = gs[i].state_dict()
torch.save(gen_states, os.path.join(path_to_save, "SinGAN.generators"))
discrim_states = {}
for i in range(len(ds)):
discrim_states[str(i)] = ds[i].state_dict()
torch.save(discrim_states, os.path.join(path_to_save, "SinGAN.discriminators"))
def load_models(gs, ds, folder, device):
gen_params = torch.load(os.path.join(folder, "SinGAN.generators"),
map_location=device)
discrim_params = torch.load(os.path.join(folder, "SinGAN.discriminators"),
map_location=device)
for i in range(len(gs)):
gen_params_compat = OrderedDict()
gs[i].load_state_dict(gen_params[str(i)])
gs[i].to(device)
discrim_params_compat = OrderedDict()
ds[i].load_state_dict(discrim_params[str(i)])
return gs, ds
def laplace_pyramid_downscale2D(frame, level, downscale_per_level, device):
kernel_size = 5
sigma = 2 * (1 / downscale_per_level) / 6
x_cord = torch.arange(kernel_size)
x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = torch.transpose(x_grid, 0, 1)
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (kernel_size - 1)/2.
variance = sigma**2.
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xy_grid - mean)**2., dim=-1) /\
(2*variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
# Reshape to 2d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size).to(device)
gaussian_kernel = gaussian_kernel.repeat(frame.shape[1], 1, 1, 1)
input_size = np.array(list(frame.shape[2:]))
with torch.no_grad():
for i in range(level):
s = (input_size * (downscale_per_level**(i+1))).astype(int)
frame = F.conv2d(frame, gaussian_kernel, groups=frame.shape[1])
frame = F.interpolate(frame, size = list(s), mode='nearest')
del gaussian_kernel
return frame
def calc_gradient_penalty(discrim, real_data, fake_data, device):
#print real_data.size()
alpha = torch.rand(1, 1, device=device)
alpha = alpha.expand(real_data.size())
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
#interpolates = interpolates.to(device)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = discrim(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def feature_distance(img1, img2, device):
if(features_model is None):
model = models.vgg19(pretrained=True).to(device=device)
model.eval()
layer = model.features
if(img1.shape[1] == 1):
img1 = torch.repeat(img1, 3, axis=1)
if(img2.shape[1] == 1):
img2 = torch.repeat(img2, 3, axis=1)
img1_feature_vector = layer(img1_tensor)
img2_feature_vector = layer(img2_tensor)
return ((img1_feature_vector - img2_feature_vector) ** 2).mean()
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
elif type(m) == nn.Linear:
torch.nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0.01)
elif classname.find('Norm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def generate(generators, batch_size, device):
with torch.no_grad():
generated_image = torch.zeros(batch_size, 1,
generators[0].resolution[0], generators[0].resolution[1]).to(device)
for i in range(0, len(generators)):
generated_image = F.interpolate(generated_image,
size=generators[i].resolution, mode="bilinear",
align_corners=False)
generated_image = generators[i](generated_image)
return generated_image
def init_scales(dataset, device, min_dim_size = 25, downscale_ratio = 0.75):
n = round(math.log(min_dim_size / \
dataset.resolution[0]) / math.log(downscale_ratio))+1
gs = []
ds = []
print("The model will have %i scales" % (n))
for i in range(n):
scaling = []
factor = downscale_ratio**(n - i - 1)
for j in range(len(dataset.resolution)):
x = int(dataset.resolution[j] * factor)
scaling.append(x)
print("Scale %i: %s" % (i, str(scaling)))
num_kernels = int((2 ** (5 + (i / 4))) / 3)
g = SinGAN_Generator(num_kernels, scaling, device).to(device)
g.apply(weights_init)
d = SinGAN_Discriminator(num_kernels, device).to(device)
d.apply(weights_init)
gs.append(g)
ds.append(d)
return n, gs, ds
class SinGAN_Generator(nn.Module):
def __init__ (self, num_kernels, resolution, device):
super(SinGAN_Generator, self).__init__()
self.resolution = resolution
self.num_kernels = num_kernels
self.device = device
modules = []
for i in range(5):
# The head goes from 1 channels to num_kernels
if i == 0:
modules.append(nn.Sequential(
nn.Conv2d(1, num_kernels, kernel_size=3,
stride=1, padding=0),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
# The tail will go from kernel_size to num_channels before tanh [-1,1]
elif i == 4:
tail = nn.Sequential(
nn.Conv2d(num_kernels, 1, kernel_size=3,
stride=1, padding=0),
nn.Tanh()
)
modules.append(tail)
# Other layers will have 32 channels for the 32 kernels
else:
modules.append(nn.Sequential(
nn.Conv2d(num_kernels, num_kernels, kernel_size=3,
stride=1, padding=0),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
self.model = nn.Sequential(*modules)
def forward(self, data, noise=None):
data_padded = F.pad(data, [5, 5, 5, 5])
noise = torch.randn(data_padded.shape, device=self.device)
noisePlusData = data_padded + noise
output = self.model(noisePlusData)
return output + data
class SinGAN_Discriminator(nn.Module):
def __init__ (self, num_kernels, device):
super(SinGAN_Discriminator, self).__init__()
self.device=device
self.num_kernels = num_kernels
modules = []
for i in range(5):
# The head goes from 3 channels (RGB) to num_kernels
if i == 0:
modules.append(nn.Sequential(
nn.Conv2d(1, num_kernels,
kernel_size=3, stride=1),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
# The tail will go from num_kernels to 1 channel for discriminator optimization
elif i == 4:
tail = nn.Sequential(
nn.Conv2d(num_kernels, 1,
kernel_size=3, stride=1)
)
modules.append(tail)
# Other layers will have 32 channels for the 32 kernels
else:
modules.append(nn.Sequential(
nn.Conv2d(num_kernels, num_kernels,
kernel_size=3, stride=1),
nn.BatchNorm2d(num_kernels),
nn.LeakyReLU(0.2, inplace=True)
))
self.model = nn.Sequential(*modules)
self.model = self.model.to(device)
def forward(self, x):
return self.model(x)
class Dataset(torch.utils.data.Dataset):
def __init__(self, dataset_location):
self.dataset_location = dataset_location
self.items = os.listdir(dataset_location)
self.resolution = [128, 128]
def __len__(self):
return len(self.items)
def __getitem__(self, index):
data = imageio.imread(os.path.join(self.dataset_location,
self.items[index])).astype(np.float32)
data = np2torch(data, "cpu")
data *= (2.0/255.0)
data -= 1
x = random.randint(0, data.shape[0]-128)
y = random.randint(0, data.shape[1]-128)
return data.unsqueeze(0)[:,x:x+128, y:y+128] | 0.526586 | 0.371507 |
import gitFunctions
import actions
from termcolor import colored
path = "../data/paths.txt"
def print_all_paths():
try:
with open(path) as file:
for line in file:
print(line, end="")
except Exception as e:
print(e)
def close_app():
pass
def print_help():
for key, val in actions.actions.items():
print(str(key) + ": " + val["description"])
def add_path():
with open(path, "a") as file:
val = input("enter the path: ")
file.write(val + "\n")
def delete_path():
print("Select number of line you want to delete")
print("To delete all enter '000'")
dir_list = []
with open(path, "r") as file:
counter = 1
for line in file:
dir_list.append(line)
print(str(counter) + ". " + line, end="")
counter += 1
decision = input("Enter your choice: ")
if decision == "000":
print(colored("!!! YOU ARE ABOUT TO DELETE ALL PATHS !!!", "red"))
else:
try:
decision = int(decision)
if int(decision) <= len(dir_list):
print(
colored(
"You are about to delete: " + dir_list[int(decision) - 1], "red"
),
end="",
)
else:
print("Wrong input")
pass
except ValueError:
print("Wrong input")
agreement = input("(Y/n) ")
if agreement in ["Y", "y", "yes", "Yes", "YES"]:
if decision == "000":
file = open(path, "w")
file.close()
else:
dir_list.pop(int(decision - 1))
file = open(path, "w")
for line in dir_list:
file.write(line)
file.close()
def projects_pull():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.pull_all(line[:-1])
counter += 1
if success:
print(colored("Pull - {}".format(line), "green"), end="")
else:
print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("You are up to date!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter))
def projects_status():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.status_all(line[:-1])
counter += 1
if success:
pass
else:
print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("Status checked!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter))
def projects_push():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.push_all(line[:-1])
counter += 1
if success:
print(colored("Push - {}".format(line), "green"), end="")
else:
print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("All pushed!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter))
def projects_commit():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.commit_all_git(line[:-1])
counter += 1
if success:
print(colored("Commit - {}".format(line)), "green", end="")
else:
# print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("Commits done!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter)) | pythonGitManager/comands.py | import gitFunctions
import actions
from termcolor import colored
path = "../data/paths.txt"
def print_all_paths():
try:
with open(path) as file:
for line in file:
print(line, end="")
except Exception as e:
print(e)
def close_app():
pass
def print_help():
for key, val in actions.actions.items():
print(str(key) + ": " + val["description"])
def add_path():
with open(path, "a") as file:
val = input("enter the path: ")
file.write(val + "\n")
def delete_path():
print("Select number of line you want to delete")
print("To delete all enter '000'")
dir_list = []
with open(path, "r") as file:
counter = 1
for line in file:
dir_list.append(line)
print(str(counter) + ". " + line, end="")
counter += 1
decision = input("Enter your choice: ")
if decision == "000":
print(colored("!!! YOU ARE ABOUT TO DELETE ALL PATHS !!!", "red"))
else:
try:
decision = int(decision)
if int(decision) <= len(dir_list):
print(
colored(
"You are about to delete: " + dir_list[int(decision) - 1], "red"
),
end="",
)
else:
print("Wrong input")
pass
except ValueError:
print("Wrong input")
agreement = input("(Y/n) ")
if agreement in ["Y", "y", "yes", "Yes", "YES"]:
if decision == "000":
file = open(path, "w")
file.close()
else:
dir_list.pop(int(decision - 1))
file = open(path, "w")
for line in dir_list:
file.write(line)
file.close()
def projects_pull():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.pull_all(line[:-1])
counter += 1
if success:
print(colored("Pull - {}".format(line), "green"), end="")
else:
print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("You are up to date!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter))
def projects_status():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.status_all(line[:-1])
counter += 1
if success:
pass
else:
print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("Status checked!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter))
def projects_push():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.push_all(line[:-1])
counter += 1
if success:
print(colored("Push - {}".format(line), "green"), end="")
else:
print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("All pushed!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter))
def projects_commit():
counter = 0
counter_errors = 0
with open(path, "r") as file:
for line in file:
success, error = gitFunctions.commit_all_git(line[:-1])
counter += 1
if success:
print(colored("Commit - {}".format(line)), "green", end="")
else:
# print(colored("ERROR - {}".format(line), "red"), end="")
counter_errors += 1
print("Commits done!")
print("Correct operations: {}/{}".format(counter - counter_errors, counter)) | 0.081182 | 0.126246 |
import os,unittest
import pandas as pd
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.utils.fileutils import get_temp_dir,remove_dir
from igf_data.utils.samplesheet_utils import get_formatted_samplesheet_per_lane
from igf_data.utils.samplesheet_utils import samplesheet_validation_and_metadata_checking
class SamplesheetUtils_testA(unittest.TestCase):
def setUp(self):
self.temp_dir = get_temp_dir()
self.platform_name = 'HISEQ4000'
self.samplesheet_file = 'data/singlecell_data/SampleSheet_dual.csv'
self.sc_index_json = 'data/singlecell_data/chromium-shared-sample-indexes-plate_20180301.json'
self.sc_dual_index_json = 'data/singlecell_data/chromium_dual_indexes_plate_TT_NT_20210209.json'
def tearDown(self):
remove_dir(self.temp_dir)
def test_get_formatted_samplesheet_per_lane1(self):
output_list = \
get_formatted_samplesheet_per_lane(
samplesheet_file=self.samplesheet_file,
singlecell_barcode_json=self.sc_index_json,
singlecell_dual_barcode_json=self.sc_dual_index_json,
runinfo_file='data/singlecell_data/RunInfo_dual.xml',
output_dir=self.temp_dir,
platform=self.platform_name,
filter_lane=None,
single_cell_tag='10X',
index1_rule=None,
index2_rule=None)
df = pd.DataFrame(output_list)
sa = SampleSheet(df[df['lane_id']=='5']['samplesheet_file'].values[0])
sdf = pd.DataFrame(sa._data)
#print(sdf.to_dict(orient='records'))
self.assertEqual(df[df['lane_id']=='5']['bases_mask'].values[0],'y150n1,i10,i10,y150n1')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index'].values[0],'GTGGCCTCAT')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index2'].values[0],'TCACTTTCGA')
sa = SampleSheet(df[df['lane_id']=='3']['samplesheet_file'].values[0])
self.assertEqual(df[df['lane_id']=='3']['bases_mask'].values[0],'y150n1,i8n2,i8n2,y150n1')
sdf = pd.DataFrame(sa._data)
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index'].values[0],'ATTACTCG')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index2'].values[0],'AGGCTATA')
def test_get_formatted_samplesheet_per_lane2(self):
output_list = \
get_formatted_samplesheet_per_lane(
samplesheet_file=self.samplesheet_file,
singlecell_barcode_json=self.sc_index_json,
singlecell_dual_barcode_json=self.sc_dual_index_json,
runinfo_file='data/singlecell_data/RunInfo_dual.xml',
output_dir=self.temp_dir,
platform=self.platform_name,
filter_lane=None,
single_cell_tag='10X',
index1_rule=None,
index2_rule='REVCOMP')
df = pd.DataFrame(output_list)
#print(df.to_dict(orient='records'))
sa = SampleSheet(df[df['lane_id']=='5']['samplesheet_file'].values[0])
sdf = pd.DataFrame(sa._data)
#print(sdf.to_dict(orient='records'))
self.assertEqual(df[df['lane_id']=='5']['bases_mask'].values[0],'y150n1,i10,i10,y150n1')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index'].values[0],'GTGGCCTCAT')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index2'].values[0],'TCACTTTCGA')
sa = SampleSheet(df[df['lane_id']=='3']['samplesheet_file'].values[0])
self.assertEqual(df[df['lane_id']=='3']['bases_mask'].values[0],'y150n1,i8n2,i8n2,y150n1')
sdf = pd.DataFrame(sa._data)
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index'].values[0],'ATTACTCG')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index2'].values[0],'TATAGCCT')
if __name__=='__main__':
unittest.main() | test/utils/samplesheet_utils_test.py | import os,unittest
import pandas as pd
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.utils.fileutils import get_temp_dir,remove_dir
from igf_data.utils.samplesheet_utils import get_formatted_samplesheet_per_lane
from igf_data.utils.samplesheet_utils import samplesheet_validation_and_metadata_checking
class SamplesheetUtils_testA(unittest.TestCase):
def setUp(self):
self.temp_dir = get_temp_dir()
self.platform_name = 'HISEQ4000'
self.samplesheet_file = 'data/singlecell_data/SampleSheet_dual.csv'
self.sc_index_json = 'data/singlecell_data/chromium-shared-sample-indexes-plate_20180301.json'
self.sc_dual_index_json = 'data/singlecell_data/chromium_dual_indexes_plate_TT_NT_20210209.json'
def tearDown(self):
remove_dir(self.temp_dir)
def test_get_formatted_samplesheet_per_lane1(self):
output_list = \
get_formatted_samplesheet_per_lane(
samplesheet_file=self.samplesheet_file,
singlecell_barcode_json=self.sc_index_json,
singlecell_dual_barcode_json=self.sc_dual_index_json,
runinfo_file='data/singlecell_data/RunInfo_dual.xml',
output_dir=self.temp_dir,
platform=self.platform_name,
filter_lane=None,
single_cell_tag='10X',
index1_rule=None,
index2_rule=None)
df = pd.DataFrame(output_list)
sa = SampleSheet(df[df['lane_id']=='5']['samplesheet_file'].values[0])
sdf = pd.DataFrame(sa._data)
#print(sdf.to_dict(orient='records'))
self.assertEqual(df[df['lane_id']=='5']['bases_mask'].values[0],'y150n1,i10,i10,y150n1')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index'].values[0],'GTGGCCTCAT')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index2'].values[0],'TCACTTTCGA')
sa = SampleSheet(df[df['lane_id']=='3']['samplesheet_file'].values[0])
self.assertEqual(df[df['lane_id']=='3']['bases_mask'].values[0],'y150n1,i8n2,i8n2,y150n1')
sdf = pd.DataFrame(sa._data)
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index'].values[0],'ATTACTCG')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index2'].values[0],'AGGCTATA')
def test_get_formatted_samplesheet_per_lane2(self):
output_list = \
get_formatted_samplesheet_per_lane(
samplesheet_file=self.samplesheet_file,
singlecell_barcode_json=self.sc_index_json,
singlecell_dual_barcode_json=self.sc_dual_index_json,
runinfo_file='data/singlecell_data/RunInfo_dual.xml',
output_dir=self.temp_dir,
platform=self.platform_name,
filter_lane=None,
single_cell_tag='10X',
index1_rule=None,
index2_rule='REVCOMP')
df = pd.DataFrame(output_list)
#print(df.to_dict(orient='records'))
sa = SampleSheet(df[df['lane_id']=='5']['samplesheet_file'].values[0])
sdf = pd.DataFrame(sa._data)
#print(sdf.to_dict(orient='records'))
self.assertEqual(df[df['lane_id']=='5']['bases_mask'].values[0],'y150n1,i10,i10,y150n1')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index'].values[0],'GTGGCCTCAT')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index2'].values[0],'TCACTTTCGA')
sa = SampleSheet(df[df['lane_id']=='3']['samplesheet_file'].values[0])
self.assertEqual(df[df['lane_id']=='3']['bases_mask'].values[0],'y150n1,i8n2,i8n2,y150n1')
sdf = pd.DataFrame(sa._data)
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index'].values[0],'ATTACTCG')
self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index2'].values[0],'TATAGCCT')
if __name__=='__main__':
unittest.main() | 0.106848 | 0.293354 |
from datetime import datetime
from . import db
from .relations import UserFeatures
from .mixins import GenericMixin, NameMixin
class Role(db.Model, GenericMixin, NameMixin):
'''User roles table'''
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), unique=True)
def __init__(self, name):
''' Add a new role.
Parameters
----------
name : str
new role's name.
'''
self.name = name
db.session.add(self)
class User(db.Model, GenericMixin, NameMixin):
'''Users table'''
__tablename__ = 'users'
props = ['role']
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
address = db.Column(db.String(200), nullable=True)
created = db.Column(db.DateTime(), default=datetime.utcnow)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
features = db.relationship('Feature',
secondary=UserFeatures,
lazy='dynamic',
backref=db.backref('users', lazy='dynamic'))
@property
def role(self):
role = Role.get(self.role_id)
if role:
return role.name
def __init__(self, name, role, address=None):
''' Add a new user.
Parameters
----------
name : str
new user's name.
role : str
new user's role.
address : str
new user's address.
'''
roleRecord = Role.get_by_name(role)
if not roleRecord:
raise AttributeError('Users role not found')
self.name = name
self.role_id = roleRecord.id
self.address = address
db.session.add(self)
class Feature(db.Model, GenericMixin, NameMixin):
'''Features table'''
__tablename__ = 'features'
props = ['users']
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
content = db.Column(db.String(3000), nullable=True)
created = db.Column(db.DateTime(), index=True, default=datetime.utcnow)
def add_users(self, ids):
''' Add users to feature by ids.
Parameters
----------
ids : list
list of users ids.
'''
users = []
for uid in ids:
user = User.get(uid)
if not user:
raise AttributeError("Wrong user's id entered.")
users += [user]
self.users = users
def __init__(self, name, users=[], content=None):
''' Add a new feature.
Parameters
----------
name : str
new feature's name.
users : list
new feature's user ids list.
content : str
new feature's content.
'''
self.name = name
self.content = content
self.add_users(users)
db.session.add(self)
class Token(db.Model, GenericMixin):
__tablename__ = 'tokens'
id = db.Column(db.Integer, primary_key=True)
token = db.Column(db.String(200), unique=True)
def __init__(self, token):
self.token = token | requester/database/models.py | from datetime import datetime
from . import db
from .relations import UserFeatures
from .mixins import GenericMixin, NameMixin
class Role(db.Model, GenericMixin, NameMixin):
'''User roles table'''
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), unique=True)
def __init__(self, name):
''' Add a new role.
Parameters
----------
name : str
new role's name.
'''
self.name = name
db.session.add(self)
class User(db.Model, GenericMixin, NameMixin):
'''Users table'''
__tablename__ = 'users'
props = ['role']
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
address = db.Column(db.String(200), nullable=True)
created = db.Column(db.DateTime(), default=datetime.utcnow)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
features = db.relationship('Feature',
secondary=UserFeatures,
lazy='dynamic',
backref=db.backref('users', lazy='dynamic'))
@property
def role(self):
role = Role.get(self.role_id)
if role:
return role.name
def __init__(self, name, role, address=None):
''' Add a new user.
Parameters
----------
name : str
new user's name.
role : str
new user's role.
address : str
new user's address.
'''
roleRecord = Role.get_by_name(role)
if not roleRecord:
raise AttributeError('Users role not found')
self.name = name
self.role_id = roleRecord.id
self.address = address
db.session.add(self)
class Feature(db.Model, GenericMixin, NameMixin):
'''Features table'''
__tablename__ = 'features'
props = ['users']
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
content = db.Column(db.String(3000), nullable=True)
created = db.Column(db.DateTime(), index=True, default=datetime.utcnow)
def add_users(self, ids):
''' Add users to feature by ids.
Parameters
----------
ids : list
list of users ids.
'''
users = []
for uid in ids:
user = User.get(uid)
if not user:
raise AttributeError("Wrong user's id entered.")
users += [user]
self.users = users
def __init__(self, name, users=[], content=None):
''' Add a new feature.
Parameters
----------
name : str
new feature's name.
users : list
new feature's user ids list.
content : str
new feature's content.
'''
self.name = name
self.content = content
self.add_users(users)
db.session.add(self)
class Token(db.Model, GenericMixin):
    """Tokens table."""

    __tablename__ = 'tokens'

    id = db.Column(db.Integer, primary_key=True)
    token = db.Column(db.String(200), unique=True)

    def __init__(self, token):
        # Dataset-dump residue ("| 0.770896 | 0.058696 |") was fused onto
        # this assignment in the extracted source, making it a syntax
        # error; the residue is removed here.
        self.token = token
import numpy
from tomo2D import phantom2D
from tomo2D import ellipse
import random

# Short module-level aliases for the numpy functions/constants used by the
# phantom builders below.
pi=numpy.pi
sin=numpy.sin
cos=numpy.cos
sqrt=numpy.sqrt
def bar_object():
    """Phantom made of one long bar plus three small elliptical features.

    Returns
    -------
    phantom2D
        The assembled phantom.
    """
    specs = (
        (0., 0., 1.0, 5.0, 0.5, 0.),
        (0.1, 0., .5, .05, 0.1, 0.4),
        (-0.5, -0.1, -0.2, .1, 0.2, 0.),
        (0.5, 0.2, .2, 0.14, 0.2, -1.),
    )
    ph = phantom2D()
    for spec in specs:
        ph.add_component(ellipse(*spec))
    return ph
def generate_random_spots(my_seed=0.3,
                          num_ellipse=30,
                          x0_range=(0.2, 0.6),
                          y0_range=(-0.3, 0.3),
                          half_axis_range=(0.01, 0.04),
                          atten_range=(0.9, 1.1),
                          angle_range=(0., pi / 2.)):
    """Build a phantom of three fixed ellipses plus random small spots.

    The list defaults were replaced with tuples to avoid the
    mutable-default-argument pitfall; callers may still pass lists.

    Parameters
    ----------
    my_seed : hashable
        Seed for ``random.seed`` so the layout is reproducible.
        NOTE: reseeds the module-global RNG as a side effect.
    num_ellipse : int
        Number of random spots to add.
    x0_range, y0_range : sequence of 2 floats
        (low, high) bounds for the spot centre coordinates.
    half_axis_range : sequence of 2 floats
        Bounds used for both half axes of each spot.
    atten_range : sequence of 2 floats
        Bounds for the spot attenuation value.
    angle_range : sequence of 2 floats
        Bounds (radians) for the spot rotation angle.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    # Fixed background structures.
    ph.add_component(ellipse(0.1, 0., 1.0, 0.8, 0.6, 0.))
    ph.add_component(ellipse(0.4, 0., -1.0, 0.3, 0.4, 0.))
    ph.add_component(ellipse(-0.4, 0.1, 0.05, 0.25, 0.15, 0.))
    random.seed(my_seed)
    for _ in range(num_ellipse):
        # Draw order (x0, y0, atten, ax, ay, angle) is preserved so a given
        # seed yields exactly the same phantom as before.
        x0 = random.uniform(x0_range[0], x0_range[1])
        y0 = random.uniform(y0_range[0], y0_range[1])
        atten = random.uniform(atten_range[0], atten_range[1])
        ax = random.uniform(half_axis_range[0], half_axis_range[1])
        ay = random.uniform(half_axis_range[0], half_axis_range[1])
        angle = random.uniform(angle_range[0], angle_range[1])
        ph.add_component(ellipse(x0, y0, atten, ax, ay, angle))
    return ph
def generate_random_ellipses(my_seed=0.77,
                             num_ellipse=10,
                             center_range=(0.3, 0.6),
                             half_axis_range=(0.02, 0.3),
                             atten_range=(0.01, 0.1),
                             angle_range=(0., pi / 2.)):
    """Build a phantom of one enclosing disc plus random interior ellipses.

    The list defaults were replaced with tuples to avoid the
    mutable-default-argument pitfall; callers may still pass lists.

    Parameters
    ----------
    my_seed : hashable
        Seed for ``random.seed`` so the layout is reproducible.
        NOTE: reseeds the module-global RNG as a side effect.
    num_ellipse : int
        Number of random ellipses to add.
    center_range : sequence of 2 floats
        Bounds for the radial distance of each ellipse centre.
    half_axis_range : sequence of 2 floats
        Bounds used for both half axes.
    atten_range : sequence of 2 floats
        Bounds for the attenuation value.
    angle_range : sequence of 2 floats
        Bounds (radians) for the rotation angle.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    # Enclosing disc of unit attenuation.
    ph.add_component(ellipse(0, 0, 1.0, 0.9, 0.9, 0.))
    random.seed(my_seed)
    for _ in range(num_ellipse):
        # Centre drawn in polar coordinates (radius, then angle); the draw
        # order is preserved so a given seed yields the same phantom.
        r0 = random.uniform(center_range[0], center_range[1])
        a0 = random.uniform(0., 2. * pi)
        x0 = r0 * cos(a0)
        y0 = r0 * sin(a0)
        atten = random.uniform(atten_range[0], atten_range[1])
        ax = random.uniform(half_axis_range[0], half_axis_range[1])
        ay = random.uniform(half_axis_range[0], half_axis_range[1])
        angle = random.uniform(angle_range[0], angle_range[1])
        ph.add_component(ellipse(x0, y0, atten, ax, ay, angle))
    return ph
def generate_PET_discs():
    """Phantom of three disc pairs centred at x = -75, 0 and +75.

    Each pair is a 25-unit disc of unit attenuation with a hot
    (attenuation 10) 5-unit core at the same centre.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    for cx in (-75., 0., 75.):
        ph.add_component(ellipse(cx, 0., 1.0, 25., 25., 0.))
        ph.add_component(ellipse(cx, 0., 10., 5., 5., 0.))
    return ph
def generate_test_ellipse():
    """Phantom of a single off-centre circle of radius sqrt(0.5).

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    radius = sqrt(0.5)
    ph.add_component(ellipse(-0.1, 0.0, 1.0, radius, radius, 0.))
    return ph
def generate_shepp_logan():
    """Shepp-Logan head phantom (low-contrast attenuation values).

    Each spec is (x0, y0, atten, half_axis_x, half_axis_y, angle);
    components are added in the same order as the classic definition.

    Returns
    -------
    phantom2D
    """
    specs = (
        (0., 0., 2.0, 0.92, 0.69, pi / 2.),
        (0, -0.0184, -0.98, 0.874, 0.6624, pi / 2.),
        (0.22, 0., -0.02, 0.31, 0.11, pi * 72. / 180.),
        (-0.22, 0., -0.02, 0.41, 0.16, pi * 108. / 180.),
        (0., 0.35, 0.01, 0.25, 0.21, pi / 2.),
        (0., 0.1, 0.01, 0.046, 0.046, 0.),
        (0., -0.1, 0.01, 0.046, 0.046, 0.),
        (-0.08, -0.605, 0.01, 0.046, 0.023, 0.),
        (0.0, -0.605, 0.01, 0.023, 0.023, 0.),
        (0.06, -0.605, 0.01, 0.046, 0.023, pi / 2.),
    )
    ph = phantom2D()
    for spec in specs:
        ph.add_component(ellipse(*spec))
    return ph
def generate_shepp_logan_HC():
    """High-contrast Shepp-Logan head phantom.

    Same geometry as generate_shepp_logan() but with the interior
    attenuation differences boosted (-0.08 / 0.04 instead of
    -0.02 / 0.01) so the internal structures are easier to see.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    for spec in (
        (0., 0., 2.0, 0.92, 0.69, pi / 2.),
        (0, -0.0184, -0.98, 0.874, 0.6624, pi / 2.),
        (0.22, 0., -0.08, 0.31, 0.11, pi * 72. / 180.),
        (-0.22, 0., -0.08, 0.41, 0.16, pi * 108. / 180.),
        (0., 0.35, 0.04, 0.25, 0.21, pi / 2.),
        (0., 0.1, 0.04, 0.046, 0.046, 0.),
        (0., -0.1, 0.04, 0.046, 0.046, 0.),
        (-0.08, -0.605, 0.04, 0.046, 0.023, 0.),
        (0.0, -0.605, 0.04, 0.023, 0.023, 0.),
        (0.06, -0.605, 0.04, 0.046, 0.023, pi / 2.),
    ):
        ph.add_component(ellipse(*spec))
    return ph

# NOTE(review): the original last line of this function was fused with
# dataset-dump residue and the first import of the duplicated module that
# follows ("return ph | largescale_code/phantoms_tomo2D.py | import numpy").
# The residue is dropped; the import is restored for the chunk below.
import numpy
# NOTE(review): everything from here on duplicates the module above (it is
# the dump's parsed_code column); the chunk's leading "import numpy" was
# fused into the previous line by the extraction.
from tomo2D import phantom2D
from tomo2D import ellipse
import random

# Short module-level aliases for the numpy functions/constants used by the
# phantom builders below.
pi=numpy.pi
sin=numpy.sin
cos=numpy.cos
sqrt=numpy.sqrt
def bar_object():
    """Phantom made of one long bar plus three small elliptical features.

    Returns
    -------
    phantom2D
        The assembled phantom.
    """
    specs = (
        (0., 0., 1.0, 5.0, 0.5, 0.),
        (0.1, 0., .5, .05, 0.1, 0.4),
        (-0.5, -0.1, -0.2, .1, 0.2, 0.),
        (0.5, 0.2, .2, 0.14, 0.2, -1.),
    )
    ph = phantom2D()
    for spec in specs:
        ph.add_component(ellipse(*spec))
    return ph
def generate_random_spots(my_seed=0.3,
                          num_ellipse=30,
                          x0_range=(0.2, 0.6),
                          y0_range=(-0.3, 0.3),
                          half_axis_range=(0.01, 0.04),
                          atten_range=(0.9, 1.1),
                          angle_range=(0., pi / 2.)):
    """Build a phantom of three fixed ellipses plus random small spots.

    The list defaults were replaced with tuples to avoid the
    mutable-default-argument pitfall; callers may still pass lists.

    Parameters
    ----------
    my_seed : hashable
        Seed for ``random.seed`` so the layout is reproducible.
        NOTE: reseeds the module-global RNG as a side effect.
    num_ellipse : int
        Number of random spots to add.
    x0_range, y0_range : sequence of 2 floats
        (low, high) bounds for the spot centre coordinates.
    half_axis_range : sequence of 2 floats
        Bounds used for both half axes of each spot.
    atten_range : sequence of 2 floats
        Bounds for the spot attenuation value.
    angle_range : sequence of 2 floats
        Bounds (radians) for the spot rotation angle.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    # Fixed background structures.
    ph.add_component(ellipse(0.1, 0., 1.0, 0.8, 0.6, 0.))
    ph.add_component(ellipse(0.4, 0., -1.0, 0.3, 0.4, 0.))
    ph.add_component(ellipse(-0.4, 0.1, 0.05, 0.25, 0.15, 0.))
    random.seed(my_seed)
    for _ in range(num_ellipse):
        # Draw order (x0, y0, atten, ax, ay, angle) is preserved so a given
        # seed yields exactly the same phantom as before.
        x0 = random.uniform(x0_range[0], x0_range[1])
        y0 = random.uniform(y0_range[0], y0_range[1])
        atten = random.uniform(atten_range[0], atten_range[1])
        ax = random.uniform(half_axis_range[0], half_axis_range[1])
        ay = random.uniform(half_axis_range[0], half_axis_range[1])
        angle = random.uniform(angle_range[0], angle_range[1])
        ph.add_component(ellipse(x0, y0, atten, ax, ay, angle))
    return ph
def generate_random_ellipses(my_seed=0.77,
                             num_ellipse=10,
                             center_range=(0.3, 0.6),
                             half_axis_range=(0.02, 0.3),
                             atten_range=(0.01, 0.1),
                             angle_range=(0., pi / 2.)):
    """Build a phantom of one enclosing disc plus random interior ellipses.

    The list defaults were replaced with tuples to avoid the
    mutable-default-argument pitfall; callers may still pass lists.

    Parameters
    ----------
    my_seed : hashable
        Seed for ``random.seed`` so the layout is reproducible.
        NOTE: reseeds the module-global RNG as a side effect.
    num_ellipse : int
        Number of random ellipses to add.
    center_range : sequence of 2 floats
        Bounds for the radial distance of each ellipse centre.
    half_axis_range : sequence of 2 floats
        Bounds used for both half axes.
    atten_range : sequence of 2 floats
        Bounds for the attenuation value.
    angle_range : sequence of 2 floats
        Bounds (radians) for the rotation angle.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    # Enclosing disc of unit attenuation.
    ph.add_component(ellipse(0, 0, 1.0, 0.9, 0.9, 0.))
    random.seed(my_seed)
    for _ in range(num_ellipse):
        # Centre drawn in polar coordinates (radius, then angle); the draw
        # order is preserved so a given seed yields the same phantom.
        r0 = random.uniform(center_range[0], center_range[1])
        a0 = random.uniform(0., 2. * pi)
        x0 = r0 * cos(a0)
        y0 = r0 * sin(a0)
        atten = random.uniform(atten_range[0], atten_range[1])
        ax = random.uniform(half_axis_range[0], half_axis_range[1])
        ay = random.uniform(half_axis_range[0], half_axis_range[1])
        angle = random.uniform(angle_range[0], angle_range[1])
        ph.add_component(ellipse(x0, y0, atten, ax, ay, angle))
    return ph
def generate_PET_discs():
    """Phantom of three disc pairs centred at x = -75, 0 and +75.

    Each pair is a 25-unit disc of unit attenuation with a hot
    (attenuation 10) 5-unit core at the same centre.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    for cx in (-75., 0., 75.):
        ph.add_component(ellipse(cx, 0., 1.0, 25., 25., 0.))
        ph.add_component(ellipse(cx, 0., 10., 5., 5., 0.))
    return ph
def generate_test_ellipse():
    """Phantom of a single off-centre circle of radius sqrt(0.5).

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    radius = sqrt(0.5)
    ph.add_component(ellipse(-0.1, 0.0, 1.0, radius, radius, 0.))
    return ph
def generate_shepp_logan():
    """Shepp-Logan head phantom (low-contrast attenuation values).

    Each spec is (x0, y0, atten, half_axis_x, half_axis_y, angle);
    components are added in the same order as the classic definition.

    Returns
    -------
    phantom2D
    """
    specs = (
        (0., 0., 2.0, 0.92, 0.69, pi / 2.),
        (0, -0.0184, -0.98, 0.874, 0.6624, pi / 2.),
        (0.22, 0., -0.02, 0.31, 0.11, pi * 72. / 180.),
        (-0.22, 0., -0.02, 0.41, 0.16, pi * 108. / 180.),
        (0., 0.35, 0.01, 0.25, 0.21, pi / 2.),
        (0., 0.1, 0.01, 0.046, 0.046, 0.),
        (0., -0.1, 0.01, 0.046, 0.046, 0.),
        (-0.08, -0.605, 0.01, 0.046, 0.023, 0.),
        (0.0, -0.605, 0.01, 0.023, 0.023, 0.),
        (0.06, -0.605, 0.01, 0.046, 0.023, pi / 2.),
    )
    ph = phantom2D()
    for spec in specs:
        ph.add_component(ellipse(*spec))
    return ph
def generate_shepp_logan_HC():
    """High-contrast Shepp-Logan head phantom.

    Same geometry as generate_shepp_logan() but with the interior
    attenuation differences boosted (-0.08 / 0.04 instead of
    -0.02 / 0.01) so the internal structures are easier to see.

    The dataset-dump residue ("| 0.299515 | 0.331877 |") that was fused
    onto the original return statement has been removed.

    Returns
    -------
    phantom2D
    """
    ph = phantom2D()
    for spec in (
        (0., 0., 2.0, 0.92, 0.69, pi / 2.),
        (0, -0.0184, -0.98, 0.874, 0.6624, pi / 2.),
        (0.22, 0., -0.08, 0.31, 0.11, pi * 72. / 180.),
        (-0.22, 0., -0.08, 0.41, 0.16, pi * 108. / 180.),
        (0., 0.35, 0.04, 0.25, 0.21, pi / 2.),
        (0., 0.1, 0.04, 0.046, 0.046, 0.),
        (0., -0.1, 0.04, 0.046, 0.046, 0.),
        (-0.08, -0.605, 0.04, 0.046, 0.023, 0.),
        (0.0, -0.605, 0.04, 0.023, 0.023, 0.),
        (0.06, -0.605, 0.04, 0.046, 0.023, pi / 2.),
    ):
        ph.add_component(ellipse(*spec))
    return ph