seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22353446265 | from typing import List, Tuple, Union
import lightgbm as lgb
import numpy as np
import pandas as pd
import mlrun.errors
from .._ml_common import AlgorithmFunctionality, MLTypes, MLUtils
class LGBMTypes(MLTypes):
    """
    Typing hints for the LightGBM framework.

    Extends the common ML typing hints with the LightGBM-specific model,
    dataset, evaluation-result and callback-environment types.
    """
    # A union of all LightGBM model base classes (covers both the SciKit-Learn API and the Training API):
    ModelType = Union[lgb.LGBMModel, lgb.Booster]
    # A type for all the supported dataset types (the common ML dataset types plus lgb.Dataset):
    DatasetType = Union[MLTypes.DatasetType, lgb.Dataset]
    # An evaluation result as packaged by the training in LightGBM:
    EvaluationResultType = Union[
        Tuple[str, str, float, bool],  # As packaged in `lightgbm.train`
        Tuple[str, str, float, bool, float],  # As packaged in `lightgbm.cv` (extra stdv field)
    ]
    # Detailed type for the named tuple `CallbackEnv` passed during LightGBM's training for the callbacks:
    # (model, params, iteration, begin_iteration, end_iteration, evaluation_result_list)
    CallbackEnvType = Tuple[
        lgb.Booster, dict, int, int, int, List[EvaluationResultType]
    ]
class LGBMUtils(MLUtils):
    """
    Utilities functions for the LightGBM framework.
    """
    @staticmethod
    def to_array(dataset: LGBMTypes.DatasetType) -> np.ndarray:
        """
        Convert the given dataset to np.ndarray.

        :param dataset: The dataset to convert. Must be one of {lgb.Dataset, pd.DataFrame, pd.Series,
                        scipy.sparse.base.spmatrix, list, tuple, dict}.

        :return: The dataset as a ndarray.

        :raise MLRunInvalidArgumentError: If the dataset type is not supported.
        """
        if isinstance(dataset, lgb.Dataset):
            # Unwrap the lgb.Dataset: convert its data (and, when present, its label)
            # and concatenate them into a single array.
            x = LGBMUtils.to_array(dataset=dataset.data)
            if dataset.label is None:
                return x
            y = LGBMUtils.to_array(dataset=dataset.label)
            return LGBMUtils.to_array(LGBMUtils.concatenate_x_y(x=x, y=y)[0])
        try:
            return MLUtils.to_array(dataset=dataset)
        except mlrun.errors.MLRunInvalidArgumentError:
            # Re-raise with a LightGBM-specific message listing the supported types:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Could not convert the given dataset into a numpy ndarray. Supporting conversion from: "
                f"{LGBMUtils.get_union_typehint_string(LGBMTypes.DatasetType)}. The given dataset was of type: "
                f"'{type(dataset)}'"
            )

    @staticmethod
    def to_dataframe(dataset: LGBMTypes.DatasetType) -> pd.DataFrame:
        """
        Convert the given dataset to pd.DataFrame.

        :param dataset: The dataset to convert. Must be one of {lgb.Dataset, np.ndarray, pd.Series,
                        scipy.sparse.base.spmatrix, list, tuple, dict}.

        :return: The dataset as a DataFrame.

        :raise MLRunInvalidArgumentError: If the dataset type is not supported.
        """
        if isinstance(dataset, lgb.Dataset):
            # Unwrap the lgb.Dataset the same way `to_array` does:
            x = LGBMUtils.to_dataframe(dataset=dataset.data)
            if dataset.label is None:
                return x
            y = LGBMUtils.to_dataframe(dataset=dataset.label)
            return LGBMUtils.concatenate_x_y(x=x, y=y)[0]
        try:
            return MLUtils.to_dataframe(dataset=dataset)
        except mlrun.errors.MLRunInvalidArgumentError:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Could not convert the given dataset into a pandas DataFrame. Supporting conversion from: "
                f"{LGBMUtils.get_union_typehint_string(LGBMTypes.DatasetType)}. The given dataset was of type: "
                f"'{type(dataset)}'"
            )

    @staticmethod
    def get_algorithm_functionality(
        model: MLTypes.ModelType = None,
        y: MLTypes.DatasetType = None,
        objective: str = None,
    ) -> AlgorithmFunctionality:
        """
        Get the algorithm functionality of the LightGBM model. If SciKit-Learn API is used, pass the LGBMModel and a y
        sample. Otherwise, training API is used, so pass the objective of the params dictionary.

        The objectives here are taken from the official docs of LightGBM at:
        https://lightgbm.readthedocs.io/en/latest/Parameters.html#core-parameters

        :param model:     The model to check if its a regression model or a classification model (SciKit-Learn API).
        :param y:         The ground truth values to check if its multiclass and / or multi output (SciKit-Learn API).
        :param objective: The objective string (Training API).

        :return: The objective's algorithm functionality.
        """
        # Check if LightGBM is being used with SciKit-Learn API:
        if objective is None:
            # Bug fix: zero-argument `super()` is not usable inside a `@staticmethod`
            # (there is no bound instance or class argument to resolve it against),
            # so delegate to the parent class explicitly instead.
            return MLUtils.get_algorithm_functionality(model=model, y=y)

        # Declare the conversion map according to the LightGBM docs:
        objective_to_algorithm_functionality_map = {
            # regression application:
            "regression": AlgorithmFunctionality.REGRESSION,
            "regression_l2": AlgorithmFunctionality.REGRESSION,
            "l2": AlgorithmFunctionality.REGRESSION,
            "mean_squared_error": AlgorithmFunctionality.REGRESSION,
            "mse": AlgorithmFunctionality.REGRESSION,
            "l2_root": AlgorithmFunctionality.REGRESSION,
            "root_mean_squared_error": AlgorithmFunctionality.REGRESSION,
            "rmse": AlgorithmFunctionality.REGRESSION,
            "regression_l1": AlgorithmFunctionality.REGRESSION,
            "l1": AlgorithmFunctionality.REGRESSION,
            "mean_absolute_error": AlgorithmFunctionality.REGRESSION,
            "mae": AlgorithmFunctionality.REGRESSION,
            "huber": AlgorithmFunctionality.REGRESSION,
            "fair": AlgorithmFunctionality.REGRESSION,
            "poisson": AlgorithmFunctionality.REGRESSION,
            "quantile": AlgorithmFunctionality.REGRESSION,
            "mape": AlgorithmFunctionality.REGRESSION,
            "mean_absolute_percentage_error": AlgorithmFunctionality.REGRESSION,
            "gamma": AlgorithmFunctionality.REGRESSION,
            "tweedie": AlgorithmFunctionality.REGRESSION,
            # binary classification application:
            "binary": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            # multi-class classification application:
            "multiclass": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "softmax": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "multiclassova": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "multiclass_ova": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "ova": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "ovr": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            # cross-entropy application
            "cross_entropy": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            "xentropy": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            "cross_entropy_lambda": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            "xentlambda": AlgorithmFunctionality.BINARY_CLASSIFICATION,
            # ranking application
            "lambdarank": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "rank_xendcg": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xendcg": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xe_ndcg": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xe_ndcg_mart": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
            "xendcg_mart": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,
        }

        # Return unknown if the objective is not in the map and otherwise return its functionality:
        if objective not in objective_to_algorithm_functionality_map:
            # Bug fix: the original `raise`d the enum member, which is itself a
            # TypeError at runtime (only exceptions can be raised); the comment
            # above and the return annotation both show it must be returned.
            return AlgorithmFunctionality.UNKNOWN
        return objective_to_algorithm_functionality_map[objective]
| mlrun/mlrun | mlrun/frameworks/lgbm/utils.py | utils.py | py | 7,707 | python | en | code | 1,129 | github-code | 36 | [
{
"api_name": "_ml_common.MLTypes",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "lightgbm.LGBMModel",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "lightgbm.Boos... |
36081264181 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
a = 2
#b = 1
def plotter(b):
    """Plot the linear term a - b*t against exp(-t), shade the area between
    them, overlay their difference, and save the figure for this value of b."""
    def gap(u):
        return a - b * u - np.exp(-u)

    # Root of a - b*u = exp(-u), seeded with the b -> 0 limit a/b; used as
    # the right edge of the plotting window.
    root = fsolve(gap, a / b)
    t = np.arange(0.0, root * 1.05, 0.01)
    linear = a - b * t
    decay = np.exp(-t)

    plt.figure()
    plt.fill_between(t, linear, decay)
    plt.plot(t, linear - decay, color="yellow", label="difference")
    plt.plot(t, 0 * t, "--b")
    plt.title('b = ' + str(b))
    plt.legend(loc="best")
    plt.xlabel('time')
    plt.ylabel('P(t)')
    plt.savefig("fig/SIR_b_" + str(b) + ".pdf")
def stability_plotter(b):
    """Add the stability curve du/dtau = a - b*u - exp(-u) for this b to the
    current figure (no plt.figure() call, so curves accumulate)."""
    func = lambda x : a - b*x - np.exp(-x)
    # Seed the root finder with the b -> 0 limit a/b of the equilibrium.
    guess = a/b
    # NOTE(review): fsolve returns a 1-element ndarray; np.arange accepts it
    # as a stop value but this usage is deprecated - confirm on newer numpy.
    max_x = fsolve(func, guess)
    # Plot slightly past the equilibrium so the zero crossing is visible.
    x = np.arange(0.0, max_x*1.2, 0.01)
    y = a - b*x - np.exp(-x)
    null = 0*x
    # plt.figure()
    # plt.fill_between(x, y1, y2)
    plt.plot(x, y, label='b = ' + str(b))
    plt.plot(x, null, "--b")
b_list = [0.2, 0.5, 0.8, 1, 1.2, 1.5, 2] #, 5, 10]
# One P(t) figure per value of b:
for b in b_list:
    plotter(b)
# Combined stability figure: all du/dtau curves on one plot.
plt.figure()
for b in b_list:
    stability_plotter(b)
plt.title("Stability")
plt.legend(loc="best")
plt.xlabel('$u(\\tau)$')
plt.ylabel('$du/d\\tau$')
# plt.xscale("log")
plt.savefig("fig/SIR_stability.pdf")
# plot u* as func of b
u_list = []
b_max = 2
b_list = np.arange(0.01, b_max, 0.01)
for b in b_list:
    # Equilibrium u*: root of a - b*u - exp(-u) = 0, seeded at a/b.
    func = lambda x : a - b*x - np.exp(-x)
    guess = a/b
    u_eq = fsolve(func, guess)
    u_list.append(u_eq)
plt.figure()
plt.plot(b_list, u_list)
plt.title("equilibrium of u, parametrized by 'b'")
plt.legend(loc="best")
plt.xlabel('$b$')
plt.ylabel('$u*$')
# plt.xscale("log")
plt.savefig("fig/SIR_equilibrium_u_b.pdf")
| chrberrig/SEIR_age_diff | programming/sir_dynsys.py | sir_dynsys.py | py | 1,555 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.exp",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.fsolve",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_num... |
28299898397 | from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os
import random
import matplotlib.pyplot as plt
from PIL import Image
import torch
from torchvision.models import resnet18, ResNet18_Weights
import torch.nn as nn
import numpy as np
class Mydata(Dataset):
    """Lazy image dataset: keeps file paths and labels, loads and transforms
    each image only when it is indexed."""

    def __init__(self, data_path, transform, label):
        super().__init__()
        self.data_path = data_path
        self.transform = transform
        self.label = label

    def __len__(self):
        """Number of samples equals the number of stored file paths."""
        return len(self.data_path)

    def __getitem__(self, idx):
        """Read the idx-th image from disk, apply the transform, and return
        it together with its label."""
        image = Image.fromarray(plt.imread(self.data_path[idx]))
        return self.transform(image), self.label[idx]
def visualize(labels, data_path):
    """Show a 4x4 grid of randomly sampled images titled with Cat/Dog labels.

    :param labels:    integer labels (0 = cat, 1 = dog) aligned with data_path
    :param data_path: list of image file paths to sample from
    """
    fig = plt.figure(figsize=(9, 9))
    rows, cols = 4, 4
    # Integer label -> human-readable class name:
    d_label = {
        0: 'Cat',
        1: 'Dog',
    }
    # Subplot indices are 1-based, hence the shifted range.
    for i in range(1, rows * cols + 1):
        random_idx = random.randint(0, len(data_path) - 1)
        img = plt.imread(data_path[random_idx])
        label = d_label[labels[random_idx]]
        fig.add_subplot(rows, cols, i)
        plt.imshow(img)
        plt.title(label)
        plt.axis(False)
    plt.show()
def get_model():
    """Build a transfer-learning classifier: ImageNet-pretrained ResNet-18
    with a frozen backbone and a trainable single-logit sigmoid head."""
    backbone = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
    backbone.fc = nn.Sequential(
        nn.Linear(512, 1),
        nn.Sigmoid(),
    )
    # Only the new `fc` head stays trainable; every backbone weight is frozen.
    for param_name, param in backbone.named_parameters():
        param.requires_grad = 'fc' in param_name
    return backbone
def get_data():
    """Split the images under ./train into shuffled 80/20 train/val path
    lists with matching binary labels (0 = cat, 1 = dog)."""
    all_paths = [os.path.join("train", fname) for fname in os.listdir("train")]
    cat_paths = [p for p in all_paths if 'cat' in p]
    dog_paths = [p for p in all_paths if 'dog' in p]

    # 80% of each class goes to training so the split stays class-balanced.
    n_cat = int(len(cat_paths) * 0.8)
    n_dog = int(len(dog_paths) * 0.8)
    train_data_path = cat_paths[:n_cat] + dog_paths[:n_dog]
    val_data_path = cat_paths[n_cat:] + dog_paths[n_dog:]
    random.shuffle(train_data_path)
    random.shuffle(val_data_path)

    # Labels are derived from the (already shuffled) file names.
    labels_train = [0 if 'cat' in p else 1 for p in train_data_path]
    labels_val = [0 if 'cat' in p else 1 for p in val_data_path]
    return train_data_path, val_data_path, labels_train, labels_val
def get_dataloader():
    """Build the train and validation DataLoaders (batch 16, 2 workers) with
    ResNet-style preprocessing applied to every image."""
    train_data_path, val_data_path, labels_train, labels_val = get_data()
    # Resize to the ResNet input size and normalize with ImageNet statistics:
    transform = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    train_data = Mydata(train_data_path, transform=transform, label=labels_train)
    val_data = Mydata(val_data_path, transform=transform, label=labels_val)
    train_loader = DataLoader(train_data, shuffle=True, batch_size=16, num_workers=2)
    val_loader = DataLoader(val_data, shuffle=True, batch_size=16, num_workers=2)
    return train_loader, val_loader
def train_step(model,
               dataloader,
               loss_fn,
               optimizer,
               device):
    """Run one training epoch; returns (mean batch loss, mean batch accuracy)."""
    model.train()
    train_loss, train_acc = 0, 0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        y_pred = model(X)
        # Model output is (batch, 1); drop the trailing dim to match y for BCE.
        y_pred = y_pred.squeeze(-1)
        loss = loss_fn(y_pred, y.float())
        train_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Threshold the sigmoid outputs at 0.5 to get hard class predictions:
        y_pred_class = torch.tensor([0 if x < 0.5 else 1 for x in y_pred]).to(device)
        train_acc += (y_pred_class == y).sum().item() / len(y_pred)
        # Periodic progress logging every 100 batches:
        if batch % 100 == 0:
            print(y_pred.squeeze(-1), y)
            print()
            print(
                "Batch {} Loss {:.4f} Train Acc {}".format(batch, loss, (y_pred_class == y).sum().item() / len(y_pred)))
    # Average the accumulated per-batch metrics over the epoch:
    train_loss = train_loss / len(dataloader)
    train_acc = train_acc / len(dataloader)
    return train_loss, train_acc
def val_step(model,
             dataloader,
             loss_fn: torch.nn.Module,
             device: torch.device):
    """Evaluate the model over the whole dataloader without gradients.

    Returns (mean batch loss, mean batch accuracy), each averaged over the
    number of batches.
    """
    model.eval()
    total_loss = 0
    total_acc = 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            # Output is (batch, 1); flatten to (batch,) to match the targets.
            logits = model(X).squeeze(-1)
            total_loss += loss_fn(logits, y.float()).item()
            # Hard predictions: threshold the sigmoid outputs at 0.5.
            predicted = torch.tensor([0 if p < 0.5 else 1 for p in logits]).to(device)
            total_acc += (predicted == y).sum().item() / len(predicted)
    batch_count = len(dataloader)
    return total_loss / batch_count, total_acc / batch_count
if __name__ == '__main__':
    # Config
    model = get_model()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    num_epoches = 10
    # Only the unfrozen head parameters receive meaningful updates.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=3e-5)
    loss = nn.BCELoss()
    model = model.to(device)
    # Data
    train_loader, val_loader = get_dataloader()
    train_losses = []
    train_accs = []
    test_losses = []
    test_accs = []
    # Train and evaluate once per epoch, recording the metric history:
    for epoch in range(num_epoches):
        train_loss, train_acc = train_step(model, train_loader, loss, optimizer, device)
        print("Epoch {} Train_loss {} Train_acc {}".format(epoch, train_loss, train_acc))
        train_losses.append(train_loss)
        train_accs.append(train_acc)
        test_loss, test_acc = val_step(model, val_loader, loss, device)
        print("Epoch {} Test_loss {} Test_acc {}".format(epoch, test_loss, test_acc))
        test_losses.append(test_loss)
        test_accs.append(test_acc)
        print("Done Epoch {}".format(epoch))
        # print("________________________________________________________________")
    # Persist the trained weights and the metric curves for later plotting:
    torch.save(model.state_dict(), "Weight/model.pt")
    np.save('Accuracy/train_losses.npy', np.array(train_losses))
    np.save('Accuracy/train_accs.npy', np.array(train_accs))
    np.save('Accuracy/test_losses.npy', np.array(test_losses))
    np.save('Accuracy/test_accs.npy', np.array(test_accs))
| kienptitit/Dog_Cat_Classification | train.py | train.py | py | 6,096 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyp... |
3763224892 | import cv2 as cv
import pandas as pd
df = pd.read_csv("Set03_video01.csv")
cam = cv.VideoCapture("Set03_video01.h264")
frame = 0
# initBB = (int(df.iloc[0]['x']),int(df.iloc[0]['y']),int(df.iloc[0]['w']),int(df.iloc[0]['h']))
# tracker = cv.TrackerCSRT_create()
# tracking = False
# while True:
# _ret, img = cam.read()
# if(_ret == False):
# break
# if(frame == df.iloc[0]['frame_start']):
# tracker.init(img, initBB)
# tracking = True
# if tracking:
# (success, box) = tracker.update(img)
# if success:
# (x, y, w, h) = [int(v) for v in box]
# img = cv.rectangle(img, (x, y), (x + w, y + h),(0, 255, 0), 2)
# cv.imshow("f",img)
# cv.waitKey(5)
# frame+=1
# if(frame>100):
# break
# Multi-object tracking driven by the CSV annotations: a CSRT tracker is
# spawned for each object at its `frame_start` and dropped once its
# `frame_end` is passed or it has failed to update more than 4 times.
trackers = []   # each entry: [tracker, expiry_frame, speed, failure_count]
to_delete = []  # indices of trackers scheduled for removal this frame
boxes = []      # output rows: [frame, x, y, w, h, speed]
while True:
    _ret, img = cam.read()
    if not _ret:
        break
    # Annotations whose tracking window starts at the current frame:
    temp_df = df[(df.frame_start == frame)]
    # (`df[...]` never returns None; check for emptiness instead.)
    if not temp_df.empty:
        for index, row in temp_df.iterrows():
            x = int(row['x'])
            if x > 200:
                x = int(row['x']) - 200
            y = int(row['y']) - 400
            w = 3 * int(row['w'])
            h = 6 * int(row['h'])
            print(x, y + 400)
            # The CSV box is only a hint - the operator selects the actual ROI:
            bbox = cv.selectROI(img, False)
            print("selected")
            initBB = (x, y, w, h)
            speed = float(row['speed'])
            tracker = cv.TrackerCSRT_create()
            tracker.init(img, bbox)
            failure_rate = 0
            trackers.append([tracker, int(row['frame_end']) - 10, speed, failure_rate])
    # Collect expired or repeatedly-failing trackers:
    if len(trackers) > 0:
        for idx, item in enumerate(trackers):
            if frame > item[1] or item[3] > 4:
                to_delete.append(idx)
    if len(to_delete) > 0:
        # Bug fix: pop from the highest index down. Popping in ascending order
        # shifts the remaining elements, so the wrong trackers were removed
        # whenever more than one was scheduled in the same frame.
        for item in sorted(to_delete, reverse=True):
            trackers.pop(item)
        to_delete = []
    # Update every live tracker and record its box for this frame:
    for tracker_ in trackers:
        (success, box) = tracker_[0].update(img)
        if success:
            (x, y, w, h) = [int(v) for v in box]
            speed = tracker_[2]
            boxes.append([frame, x, y, w, h, speed])
        else:
            tracker_[3] += 1
    frame += 1
    if frame % 200 == 0:
        print(frame)
# Dump all collected boxes to CSV:
df = pd.DataFrame(boxes)
df.to_csv("boxes_data_3.csv", index=False, header=['frame', 'x', 'y', 'w', 'h', 'speed'])
{
"api_name": "pandas.read_csv",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.selectROI",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.TrackerCSRT_create",... |
36391585173 | from .method import get_json_ret, json_response_zh
class AuthMiddleware(object):
    """Django middleware that parses JSON request bodies, enforces the
    negotiated Diffie-Hellman session key, extracts the `data` payload and
    resolves the session user onto the request."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request, *args, **kwargs):
        # Bug fix: headers.get("Content-Type") returns None when the header is
        # absent, so `"application/json" in None` raised TypeError. Supplying a
        # default empty string makes header-less requests fall back to POST.
        if "application/json" in request.headers.get("Content-Type", ""):
            import json
            request.json = json.loads(request.body)
        else:
            request.json = request.POST
        # Every endpoint except key negotiation requires an established shared
        # secret; derive the 16-byte AES-sized key from it.
        if not request.path.startswith('/negotiate_key'):
            shared_secret = request.session.get("shared_secret")
            if shared_secret is None:
                return json_response_zh(get_json_ret(42, msg="请先协商密钥"))
            from Crypto.Util.number import long_to_bytes
            request.DH_key = long_to_bytes(shared_secret)[:16].ljust(16, b'\x00')
        # Most endpoints additionally require an encrypted `data` payload:
        if not request.path.startswith('/negotiate_key') and not request.path == '/dynamicauth_api3/':
            request.data = request.json.get("data")
            if request.data is None:
                return json_response_zh(get_json_ret(40, msg="请传递 data 参数"))
        # Resolve the logged-in user from the session, if any:
        user_name = request.session.get("user_name")
        if user_name:
            from UserModel.models import UserModel
            # NOTE(review): QuerySet.get raises DoesNotExist rather than
            # returning None - confirm whether the None check below is reachable.
            request.user = UserModel.objects.get(user_name=user_name)
            if request.user is None:
                request.session["user_name"] = None
        response = self.get_response(request)
        return response
| CryptoCompetition2019-RNG/AuthServer | AuthServer/middleware.py | middleware.py | py | 1,435 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "method.json_response_zh",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "method.get_json_ret",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Crypto.Util.nu... |
32751323841 | from raspberry_pi.adapter import Adapter
from raspberry_pi.human_sensor import HumanSensor
from raspberry_pi.humidity_sensor import HumiditySensor
from raspberry_pi.target import Target
from raspberry_pi.temperature_sensor import TemperatureSensor
import json
# 根据不同的key,获取需要被适配的类
# 根据不同的key,获取需要被适配的类
def get_adaptee_class(key):
    """Return an adaptee instance for the given sensor key, or None for an
    unknown key.

    Improvement: the original built all four adaptee objects on every call
    just to return one of them; mapping to the classes and instantiating only
    the selected one avoids constructing the three unused sensor objects.
    """
    adaptee_classes = {
        'target': Target,
        'temperature_sensor': TemperatureSensor,
        'humidity_sensor': HumiditySensor,
        'human_sensor': HumanSensor,
    }
    adaptee_cls = adaptee_classes.get(key)
    return adaptee_cls() if adaptee_cls is not None else None
if __name__ == "__main__":
# 选择传感器类型
adaptee_type = 'human_sensor'
# 适配器进行适配
adapter = Adapter(get_adaptee_class(adaptee_type))
# 适配后的采集数据操作
data = adapter.request()
# 格式化为json格式
json_data = json.dumps(data)
print(json_data)
# 传输到kafka | qihonggang/leetcode | python_code/raspberry_pi/main.py | main.py | py | 902 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "raspberry_pi.target.Target",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "raspberry_pi.temperature_sensor.TemperatureSensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "raspberry_pi.humidity_sensor.HumiditySensor",
"line_number": 13,... |
26610403483 | import pandas as pd
from simulation_model.create_params_grid import default_params, path_experiment_table
from utilities.small_functions import mkDir_if_not
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from simulation_model.gather_results import bilinear_name
def get_results_experiment_name_path(experiment_name):
    """Build the path of the pickled cross-validation results for the given
    experiment, creating the results directory if it does not exist yet."""
    results_dir = mkDir_if_not("simulation_model/results")
    results_filename = "{}_results_cv.pkl".format(experiment_name)
    return os.path.join(results_dir, results_filename)
def min_of_gt_0(x):
    """Return the smallest strictly positive entry of the array `x`."""
    positives = x[x > 0]
    return np.min(positives)
# Display acronyms for the classifier names used throughout the plots.
acronyms_dict = {"optirank": "optirank",
                 "logistic_regression_on_ranks": "rank-lr",
                 "logistic_regression": "lr"}


def acronym(serie):
    """Map a pandas Series of classifier names to their display acronyms.

    Raises KeyError for any name missing from `acronyms_dict`.
    """
    return serie.apply(lambda classifier_name: acronyms_dict[classifier_name])
dict_param_name_to_latex = {"d": "$d$", # correspondences to the paper
"n_perturbing": "$d_{P}$",
"n_samples": "$n_{samples}$",
"tau": "$\\tau$",
"sigma": "$\sigma$"}
if __name__ == "__main__":
experiment_table = pd.read_pickle(path_experiment_table)
sns.set(font_scale=1.5) # was 1.5
sns.set_style("whitegrid")
for i_row, row in experiment_table.iterrows():
experiment_name = row["experiment_name"]
n_params = row["n_params"]
params_grid = list(row["param_grid"])
results = pd.read_pickle(get_results_experiment_name_path(experiment_name))
results["classifier"] = acronym(results["classifier_name"])
for with_legend in [True, False]:
output_dir = "simulation_model/results"
if with_legend:
legend = "full"
location = "upper left"
output_dir_plots = mkDir_if_not(os.path.join(output_dir, "plots", "legend_on"))
else:
legend = False
location = "best" # should have no effect
output_dir_plots = mkDir_if_not(os.path.join(output_dir, "plots", "legend_off"))
plt.figure()
p = sns.lineplot(data=results.reset_index(), x="param_value", y="test_balanced_accuracy", hue="classifier",
legend=legend)
lgd = plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=False, ncol=3)
p.axes.set_xlabel(dict_param_name_to_latex[pd.unique(results["param_name"]).item()], )
p.axes.set_ylabel("test balanced accuracy (%)")
# if pd.unique(results["param_name"]).item() == "sigma_m":
# p.axes.set_xscale("symlog", linthresh=min_of_gt_0(results[["param_value"]].values))
figpath = os.path.join(output_dir_plots, "results_{}.pdf".format(experiment_name))
plt.tight_layout()
if with_legend:
lgd.set_visible(True)
else:
lgd.set_visible(False)
plt.savefig(figpath)
plt.close()
# overlap figure
plt.figure()
results_of_interest = results.loc[results.classifier_name == bilinear_name]
p = sns.lineplot(data=results_of_interest.reset_index(), x="param_value", y="overlap", legend=legend)
lgd = plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=False, ncol=3)
p.axes.set_xlabel(dict_param_name_to_latex[pd.unique(results["param_name"]).item()], )
p.axes.set_ylabel("overlap")
p.axes.set_ylim([0, 1])
# if pd.unique(results["param_name"]).item() == "sigma_m":
# p.axes.set_xscale("symlog", linthresh=min_of_gt_0(results[["param_value"]].values))
figpath = os.path.join(output_dir_plots, "results_overlap_{}.pdf".format(
experiment_name))
plt.tight_layout()
if with_legend:
lgd.set_visible(True)
else:
lgd.set_visible(False)
plt.savefig(figpath)
plt.close()
# results for default parameters (taken from the experiment on d for instance)
experiment_name = "different_d"
results = pd.read_pickle(get_results_experiment_name_path(experiment_name))
results_per_classifier = results.loc[
results.param_value == default_params["d"], ["test_balanced_accuracy", "classifier_name", "overlap"]].groupby(
["classifier_name"]).agg(['mean', 'sem'])
results_per_classifier.to_csv("simulation_model/results/results_default_parameters.csv")
#output file for markdown
outfile_md = "simulation_model/results/results_default_parameters.md"
out_md = (100*results_per_classifier[("test_balanced_accuracy", "mean")]).map('{:,.0f}'.format) + " ± " + (100*results_per_classifier[("test_balanced_accuracy", "sem")]).map('{:,.0f}'.format)
out_md = out_md.to_frame(name="test balanced accuracy")
markdown_table = out_md.to_markdown()
f = open(outfile_md, "w")
f.write(markdown_table)
f.close()
| paolamalsot/optirank | simulation_model/plot_results.py | plot_results.py | py | 5,116 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utilities.small_functions.mkDir_if_not",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "nump... |
18914948323 | import pytest
from src.guess_number_higher_or_lower import Solution
@pytest.mark.parametrize(
    "n,pick,expected",
    [
        (10, 6, 6),
        (1, 1, 1),
        (2, 1, 1),
    ],
)
def test_solution(n, pick, expected, monkeypatch):
    """guessNumber(n) must find the picked number, which the solution reads
    from the SECRET environment variable injected via monkeypatch."""
    monkeypatch.setenv("SECRET", str(pick))
    assert Solution().guessNumber(n) == expected
| lancelote/leetcode | tests/test_guess_number_higher_or_lower.py | test_guess_number_higher_or_lower.py | py | 337 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "src.guess_number_higher_or_lower.Solution",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
}
] |
4654671965 | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
from utilities import functional_annotation
from utilities import toGFF3
from utilities import clustering
from utilities import mapping
from utilities import mergeAll_to_gff
def main():
    """Run the functional-annotation pipeline end to end: annotate all genomes
    with each tool, convert every tool's output to GFF3, then merge the GFFs.
    Each stage is skipped when its output folder already exists."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--faa", required = True, help="The location of the directory containing all .faa files")
    parser.add_argument("-n", "--fna", required = True, help = "The location of the directory containing all .fna files")
    parser.add_argument("-a", "--asem", required = True, help="The location of the directory containing all assembly files")
    parser.add_argument("-oa", "--annotationFolder",required = True, help="name of output containing annotation results")
    parser.add_argument("-og", "--gffFolder",required = True, help="name of output containing gff files of each annotation tool")
    parser.add_argument("-om", "--mergeFolder",required = True, help="name of output containing all merged gff files")
    args = parser.parse_args()
    """ In order to run DOOR2 and VFDB, we need a database, so have to check if the database is provided in current working folder """
    operonDB="operonDB.fasta"
    vfdbDB="VFDBdb.fasta"
    if not os.path.exists(operonDB) or not os.path.exists(vfdbDB):
        raise SystemExit("missing database for DOOR2 or VFDB, cannot running DOOR2/VFDB.Exit")
    """ if there is no missing required files, then run annotation """
    faa_dir=args.faa
    """ cluster for eggNOG """
    mergeFile_faa="merge.txt" #file that merge all 50 genomes
    clusterFolder="cluster_CDHIT"
    clusterFile="clusterFile.txt"
    # CD-HIT clustering at 95% identity; skipped when the folder already exists.
    if not os.path.exists(clusterFolder):
        clustering.main(faa_dir,mergeFile_faa,clusterFolder,clusterFile,0.95)
    """ name the output folder for each tool """
    outputFolder=args.annotationFolder
    tmhmm_out="{}/tmhmm_result".format(outputFolder)
    singalP_out="{}/signalP_result".format(outputFolder)
    door2_out="{}/door2_result".format(outputFolder)
    vfdb_out ="{}/VFDB_result".format(outputFolder)
    card_out="{}/card_result".format(outputFolder)
    piler_seq="{}/pilercr_seq".format(outputFolder)
    piler_out="{}/pilercr_result".format(outputFolder)
    eggNOG_out="{}/eggnog_result_oneFile".format(outputFolder) #contain 1 output , from annotated 1 cluster with eggnog
    eggNOG_map_out="{}/eggnog_result_allFiles".format(outputFolder)
    if not outputFolder in os.listdir():
        subprocess.call(["mkdir",outputFolder])
    # Run each annotation tool, skipping any whose result folder already exists.
    try:
        if not door2_out.split("/")[1] in os.listdir(outputFolder):
            #functional_annotation.door2blast_local_all50(faa_dir,operonDB,"blast_operon_db",door2_out)
            functional_annotation.door2blast_server_all50(faa_dir,operonDB,"blast_operon_db",door2_out)
        if not vfdb_out.split("/")[1] in os.listdir(outputFolder):
            #functional_annotation.vfdbblast_local_all50(args.fna,vfdbDB,"blast_vfdb_db",vfdb_out)
            functional_annotation.vfdbblast_server_all50(args.fna,vfdbDB,"blast_vfdb_db",vfdb_out)
        if not card_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.rgi_all50(faa_dir,card_out)
        if not tmhmm_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.tmhmm_all50(faa_dir,tmhmm_out)
        if not singalP_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.signalP_all50(faa_dir,singalP_out)
        if not piler_out.split("/")[1] in os.listdir(outputFolder):
            functional_annotation.piler_all50(args.asem,piler_seq,piler_out)
        if not eggNOG_out.split("/")[1] in os.listdir(outputFolder):
            # eggNOG helper is a Python 2 script, hence the subprocess call.
            subprocess.call(["python2","utilities/eggnog.py",eggNOG_out])
    except Exception as e:
        print(e)
        raise SystemExit("please fix the error. Pipeline is terminated")
    """ EGGNOG mapping: from one output of EGGNOG, map to 50 output files for 50 faa files"""
    try:
        if not eggNOG_map_out.split("/")[1] in os.listdir(outputFolder):
            names_file="allfiles.txt" #contains 50 id number of each genome
            id_file_faa="title.txt" # contains header line in merge file that concatenate all 50 genomes faa/fna
            mapping.main(faa_dir,names_file,id_file_faa,mergeFile_faa,eggNOG_out,clusterFolder,eggNOG_map_out)
    except Exception as e:
        print(e)
        sys.exit(-1)
    """ after annotation all 50 files, then convert to gff for each annotation tool"""
    try:
        gff_out_folder=args.gffFolder #folder containing gff files for each annotation tool
        if not gff_out_folder in os.listdir():
            subprocess.call(["mkdir",gff_out_folder])
        tmhmm_gff,signalP_gff,piler_gff,card_gff,door2_gff,vfdb_gff,eggnog_gff="{}/tmhmm_to_gff".format(gff_out_folder),"{}/signalP_to_gff".format(gff_out_folder)\
        ,"{}/pilercf_to_gff".format(gff_out_folder),"{}/card_to_gff".format(gff_out_folder),\
        "{}/door2_to_gff".format(gff_out_folder),"{}/vfdb_to_gff".format(gff_out_folder),"{}/eggnog_to_gff".format(gff_out_folder)
        toGFF3.main(tmhmm_out,tmhmm_gff,singalP_out,signalP_gff,door2_out,door2_gff,vfdb_out,vfdb_gff,card_out,card_gff,piler_out,piler_gff,eggNOG_map_out,eggnog_gff,outputFolder,gff_out_folder)
    except Exception as e:
        print(e)
        sys.exit(-1)
    #if there is exist all gff folders for all tool, we can begin to merge
    try:
        all_gff_faa=args.mergeFolder
        if all_gff_faa not in os.listdir():
            mergeAll_to_gff.main(tmhmm_gff,signalP_gff,piler_gff,card_gff,door2_gff,vfdb_gff,eggnog_gff,all_gff_faa)
    except Exception as e:
        print(e)
        sys.exit(-1)
if __name__ == "__main__":
    main()
| compgenomics2019/Team1-FunctionalAnnotation | FA_pipeline_final.py | FA_pipeline_final.py | py | 5,822 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
... |
72692461223 | #!/usr/bin/env python3
import asyncio
import unittest
from click.testing import CliRunner
from base_cli import _handle_debug, async_main, main
class TestCLI(unittest.TestCase):
    """Unit tests for the base CLI module."""
    def test_async_main(self) -> None:
        # async_main should report success (exit status 0) when debug is on.
        self.assertEqual(0, asyncio.run(async_main(True)))
    def test_debug_output(self) -> None:
        # The click callback should echo back the debug flag it received.
        self.assertTrue(_handle_debug(None, None, True))
    def test_help(self) -> None:
        # `--help` must render without errors and exit cleanly.
        runner = CliRunner()
        result = runner.invoke(main, ["--help"])
        assert result.exit_code == 0
| cooperlees/base_clis | py/tests.py | tests.py | py | 597 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "asyncio.run",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "base_cli.async_main",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "base_cli._hand... |
24682883982 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common import NoSuchElementException
from pages.utils import write_file
class BasePage:
    """Common Selenium page-object helpers: explicit waits, visibility checks,
    form filling, clicking, and mouse movement."""
    def __init__(self, driver):
        # driver: the Selenium WebDriver; waiter: a 5-second explicit-wait helper.
        self.driver = driver
        self.waiter = WebDriverWait(driver=driver, timeout=5)
    def wait_until_displayed(self, by, xpath):
        """Waits until element displayed and return it, else raise an exception"""
        return self.waiter.until(
            method=expected_conditions.visibility_of_element_located(
                (by, xpath)
            )
        )
    def wait_until_clickable(self, by, xpath):
        """Waits until element clickable and return it, else raise an exception"""
        return self.waiter.until(
            method=expected_conditions.element_to_be_clickable((by, xpath)))
    def is_element_exist(self, xpath):
        """Return True if the element is present in the DOM right now (no wait)."""
        try:
            self.driver.find_element(by=By.XPATH, value=xpath)
            return True
        except (TimeoutError, NoSuchElementException):
            return False
    def is_element_visible(self, xpath):
        """Return True if the element becomes visible within the wait timeout."""
        # NOTE(review): Selenium waits raise TimeoutException, not the builtin
        # TimeoutError caught here - verify wait timeouts are actually handled.
        try:
            self.wait_until_displayed(by=By.XPATH, xpath=xpath)
            return True
        except (TimeoutError, NoSuchElementException):
            return False
    def fill_field(self, xpath, value):
        """Fill field using provided value"""
        element = self.wait_until_clickable(by=By.XPATH, xpath=xpath)
        element.clear()
        element.send_keys(value)
    def fill_field_with_submit(self, xpath, value):
        """Fill the field with the value, then submit the enclosing form."""
        element = self.wait_until_clickable(by=By.XPATH, xpath=xpath)
        element.clear()
        element.send_keys(value)
        element.submit()
    def click(self, xpath):
        """Find and click on the element by providing xpath"""
        # self.wait_until_displayed(by=By.XPATH, xpath=xpath).click()
        self.driver.find_element(by=By.XPATH, value=xpath).click()
    def move_mouse_on_element(self, xpath):
        """Moves mouse on provided element"""
        try:
            action = webdriver.ActionChains(self.driver)
            element = self.driver.find_element(by=By.XPATH, value=xpath)
            action.move_to_element(element)
            action.perform()
        except (BaseException, Exception) as ex:
            # Failures are only logged to file, not re-raised.
            write_file('move_mouse_on_element() Exception = ', ex)
def switch_to_alert(self, alert_accept_dismiss):
"""Moves focus to Alert window"""
self.waiter.until(
method=expected_conditions.alert_is_present()
)
if alert_accept_dismiss:
self.driver.switch_to.alert.accept()
else:
self.driver.switch_to.alert.dismiss()
def get_element_value(self, xpath):
"""Get element attribute value"""
if self.is_element_exist(xpath=xpath):
element = self.driver.find_element(By.XPATH, xpath).get_attribute('value')
return element
def compare_element_text(self, text, xpath):
"""Compare element's text with provided text """
element = self.wait_until_displayed(by=By.XPATH, xpath=xpath)
return element.text == text
| Flibustyer/TicketsBoard | pages/base.py | base.py | py | 3,434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.sup... |
43763780283 | # -*-coding:utf8-*-
################################################################################
#
#
#
################################################################################
"""
模块用法说明:达人推荐详情页
Authors: Turinblueice
Date: 2016/9/10
"""
from base import base_frame_view
from util import log
from gui_widgets.basic_widgets import frame_layout
from gui_widgets.basic_widgets import image_view
from gui_widgets.basic_widgets import text_view
from gui_widgets.basic_widgets import linear_layout
from gui_widgets.basic_widgets import recycler_view
from gui_widgets.basic_widgets import radio_button
from appium.webdriver import WebElement
from appium.webdriver.common import touch_action
from selenium.webdriver.common.touch_actions import TouchActions
from activities import activities
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time
class TalentRecommendActivity(base_frame_view.BaseFrameView):
    """
    Summary:
        Talent-recommendation page ("达人推荐") page object.
    Attributes:
    """
    name = '.module.square.activity.EretarActivity'  # Android activity name of this page

    def __init__(self, parent):
        super(TalentRecommendActivity, self).__init__(parent)
        # Wait for the page title to appear before touching any widgets.
        self.wait_for_element_present(self.base_parent, id='com.jiuyan.infashion:id/login_tv_title')
        self._scroll_view = recycler_view.RecyclerView(self.parent, id='com.jiuyan.infashion:id/square_rv_tag')

    @property
    def talent_recommend(self):
        """
        Summary:
            "Talent recommendation" title view.
        """
        id_ = 'com.jiuyan.infashion:id/login_tv_title'
        return text_view.TextView(self.parent, id=id_)

    @property
    def talent_apply(self):
        """
        Summary:
            "Apply for talent" button (title-bar right).
        """
        id_ = 'com.jiuyan.infashion:id/login_tv_title_right'
        return text_view.TextView(self.parent, id=id_)

    @property
    def back_button(self):
        """
        Summary:
            Back button (title-bar left).
        """
        id_ = 'com.jiuyan.infashion:id/login_tv_title_left'
        return text_view.TextView(self.parent, id=id_)

    @property
    def talent_list(self):
        """
        Summary:
            List of recommended talents currently shown.
        :return: list of TalentContainer wrappers, or None when empty.
        """
        xpath_ = '//android.support.v7.widget.RecyclerView[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/' \
                 'android.widget.LinearLayout'
        return TalentContainerList(self.base_parent, xpath=xpath_).item_list

    @property
    def category_list(self):
        """
        Summary:
            Talent category list (left-hand menu).
        :return: list of TalentCategory wrappers, or None when empty.
        """
        xpath_ = '//android.widget.ListView[@resource-id="com.jiuyan.infashion:id/square_rv_menu"]/' \
                 'android.widget.LinearLayout'
        return TalentCategoryList(self.base_parent, xpath=xpath_).item_list

    # ************************** actions *****************************
    def wait_for_talent(self, timeout=10):
        """
        Summary:
            Explicitly wait until the talent list has loaded.
        :return: True when at least one list row is present, else False.
        """
        xpath_ = '//android.support.v7.widget.RecyclerView[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/' \
                 'android.widget.LinearLayout'
        if self.wait_for_element_present(self.base_parent, timeout=timeout, xpath=xpath_):
            log.logger.info("达人列表已记载")
            return True
        log.logger.error("达人列表加载失败")
        return False

    def tap_back_button(self):
        """
        Summary:
            Tap the back button and verify we return to the topic-category page.
        :return: True on successful navigation, else False.
        """
        log.logger.info("开始点击返回按钮")
        self.back_button.tap()
        log.logger.info("完成返回按钮点击")
        if self.wait_activity(activities.ActivityNames.SQUARE_CATEGORY, 10):
            log.logger.info("成功返回到话题分类页面")
            return True
        log.logger.error("返回失败")
        return False
class TalentContainer(base_frame_view.BaseFrameView):
    """
    Summary:
        One recommended-talent row in the talent list.
    """
    def __init__(self, parent, item=None, index=None, **kwargs):
        super(TalentContainer, self).__init__(parent)
        # Reuse an already-located WebElement when given, else locate via kwargs.
        self._layout_view = item if isinstance(item, WebElement) else self.find_element(**kwargs)
        self._index = index
        # NOTE(review): if index is None (kwargs-based construction) the
        # `self._index+1` below raises TypeError — confirm callers always pass index.
        self._xpath = '//android.support.v7.widget.RecyclerView[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/' \
                      'android.widget.LinearLayout[{}]'.format(self._index+1)

    @property
    def talent_name(self):
        """
        Summary:
            Talent display name (text, not a widget).
        """
        id_ = 'com.jiuyan.infashion:id/square_tv_name'
        return text_view.TextView(self._layout_view, id=id_).text

    @property
    def talent_avatar(self):
        """
        Summary:
            Talent avatar image widget.
        """
        id_ = 'com.jiuyan.infashion:id/transition_avatar_id'
        return image_view.ImageView(self._layout_view, id=id_)

    @property
    def follow_button(self):
        """
        Summary:
            Follow ("关注") toggle button.
        """
        id_ = 'com.jiuyan.infashion:id/square_tv_attention'
        return radio_button.RadioButton(self._layout_view, id=id_)

    @property
    def image_list(self):
        """
        Summary:
            Preview photos shown in this talent row.
        """
        return image_view.ImageViewList(self._layout_view, id='com.jiuyan.infashion:id/login_iv_pic').image_list

    # ******************** actions *************************
    def tap_avatar(self):
        """
        Summary:
            Tap the avatar and verify the friend-diary page opens.
        :return: True on successful navigation, else False.
        """
        curr_name = self.talent_name
        log.logger.info("开始点击\"{}\"的头像".format(curr_name))
        self.talent_avatar.tap()
        log.logger.info("点击完毕")
        if self.base_parent.wait_activity(activities.ActivityNames.DIARY_INFO, 10):
            log.logger.info("成功进入好友in记页面")
            return True
        log.logger.error("进入好友in记页面失败")
        return False

    def tap_image(self, index):
        """
        Tap one preview photo and verify the photo-detail page opens.
        Args:
            index:
                zero-based photo index within this row
        Returns:
            True on successful navigation, else False.
        """
        log.logger.info("点击第{}张".format(index+1))
        self.image_list[index].tap()
        log.logger.info("完成点击")
        if self.base_parent.wait_activity(activities.ActivityNames.FRIEND_PHOTO_DETAIL, 10):
            log.logger.info("成功进入好友照片页面")
            return True
        log.logger.error("进入好友照片页面失败")
        return False

    def tap_follow_button(self):
        """
        Summary:
            Tap Follow and wait for the button to enter the selected state.
        :return: True once selected within 10s, else False.
        """
        log.logger.info("开始点击关注")
        self.follow_button.tap()
        time.sleep(2)
        log.logger.info("点击完毕")
        try:
            WebDriverWait(self.base_parent, 10).until(
                EC.element_located_to_be_selected(
                    (MobileBy.XPATH, self._xpath+'/android.widget.RelativeLayout[1]/android.widget.RadioButton[1]')
                )
            )
            return True
        # NOTE(review): bare except also swallows KeyboardInterrupt; prefer
        # catching selenium TimeoutException here.
        except:
            return False
class TalentContainerList(base_frame_view.BaseFrameView):
    """Locates all talent rows and wraps each in a TalentContainer."""

    def __init__(self, parent, **kwargs):
        super(TalentContainerList, self).__init__(parent)
        self.__elements = self.find_elements(**kwargs)

    @property
    def item_list(self):
        """Wrapped rows in on-screen order, or None when nothing matched."""
        if not self.__elements:
            return None
        wrappers = []
        for position, element in enumerate(self.__elements):
            wrappers.append(TalentContainer(element.parent, element, position))
        return wrappers
class TalentCategory(base_frame_view.BaseFrameView):
    """
    Summary:
        One talent-category entry in the left-hand menu.
    """
    def __init__(self, parent, item=None, **kwargs):
        super(TalentCategory, self).__init__(parent)
        # Reuse an already-located WebElement when given, else locate via kwargs.
        self._layout_view = item if isinstance(item, WebElement) else self.find_element(**kwargs)

    @property
    def title(self):
        """
        Summary:
            Category display name (text, not a widget).
        """
        id_ = 'com.jiuyan.infashion:id/square_tv_tag_menu'
        return text_view.TextView(self._layout_view, id=id_).text

    # ******************** actions *************************
    def tap(self):
        """
        Summary:
            Tap this category and wait for its talent list to load.
        :return: True once the list is present, else False.
        """
        title_ = self.title
        log.logger.info("开始点击\"{}\"".format(title_))
        self._layout_view.click()
        xpath_ = '//android.support.v7.widget.RecyclerView[@resource-id="com.jiuyan.infashion:id/square_rv_tag"]/' \
                 'android.widget.LinearLayout'
        if self.wait_for_element_present(self.base_parent, xpath=xpath_):
            # After switching category on the left, wait for the right-hand
            # talent list to (re)initialize.
            log.logger.info("\"{}\"的达人已加载成功".format(title_))
            return True
        log.logger.error("达人列表初始化失败")
        return False
class TalentCategoryList(base_frame_view.BaseFrameView):
    """Locates all category menu entries and wraps each in a TalentCategory."""

    def __init__(self, parent, **kwargs):
        super(TalentCategoryList, self).__init__(parent)
        self.__elements = self.find_elements(**kwargs)

    @property
    def item_list(self):
        """Wrapped categories in menu order, or None when nothing matched."""
        if not self.__elements:
            return None
        return [TalentCategory(element.parent, element) for element in self.__elements]
| turinblueice/androidUIAutoTest | activities/discover_details_activities/talent_recommend_activity.py | talent_recommend_activity.py | py | 9,563 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "base.base_frame_view.BaseFrameView",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "base.base_frame_view",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "gui_widgets.basic_widgets.recycler_view.RecyclerView",
"line_number": 54,
"u... |
11687518350 | import numpy as np
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error
from skimage.transform import rescale, resize, downscale_local_mean
from skimage import io
# Compare several occupancy-grid map images (PGM) for quality using MSE
# and SSIM, both pairwise and against a reference "full" map.
im1 = io.imread('frac3_run2_sp5_map16.pgm')
im2 = io.imread('frac2_run4_sp5_map2.pgm')
im3 = io.imread('frac1A_run2_map5.pgm')
badim = io.imread('frac9A_run1_map2.pgm')
imfull = io.imread('frac3A_run2_map8.pgm')

plt.imshow(im1, plt.cm.gray)
plt.savefig("mygraph.png")  # NOTE(review): overwritten again below by imgFull

rows, cols = im1.shape
print("Size of im1: ",rows,cols)
print("Size of im2: ", im2.shape)
print("Size of im3: ", im3.shape)

# Resize every map to a common 520x500 grid so the metrics are comparable.
img1 = resize(im1, (520, 500) )
img2 = resize(im2, (520, 500) )
img3 = resize(im3, (520, 500) )
imgFull = resize(imfull, (520, 500) )
badimg = resize(badim, (520, 500) )

plt.imshow(img1, plt.cm.gray)
plt.savefig("mygraph1.png")
plt.imshow(img2, plt.cm.gray)
plt.savefig("mygraph2.png")
plt.imshow(img3, plt.cm.gray)
plt.savefig("mygraph3.png")
plt.imshow(imgFull, plt.cm.gray)
plt.savefig("mygraph.png")

# Pairwise mean-squared error (identical pairs serve as a sanity check: 0).
mse_11 = mean_squared_error(img1,img1)
mse_12 = mean_squared_error(img1,img2)
mse_22 = mean_squared_error(img2,img2)
mse_23 = mean_squared_error(img2, img3)
mse_13 = mean_squared_error(img1, img3)
print("MSE Error 11: %f, 12: %f, 22: %f, 23: %f, 13: %f"%(mse_11, mse_12, mse_22, mse_23, mse_13))

# Pairwise structural similarity (1.0 for identical pairs).
# NOTE(review): ssim23 uses img1's data_range rather than img2/img3's —
# looks like copy-paste; confirm the intended range.
ssim11 = ssim(img1, img1, data_range=img1.max()-img1.min())
ssim22 = ssim(img2, img2, data_range=img2.max()-img2.min())
ssim12 = ssim(img1, img2, data_range=img1.max()-img1.min())
ssim23 = ssim(img2, img3, data_range=img1.max()-img1.min())
ssim13 = ssim(img1, img3, data_range=img1.max()-img1.min())
print("SSIM 12: ", ssim12, "ssim11 %f, ssim22 %f, ssim23 %f, ssim13: %f"%(ssim11, ssim22, ssim23, ssim13) )

# Compare every map (including a known-bad one) against the full reference map.
print("Comparing with FULL map:")
print("MSE 1full: %f, 2full: %f, 3full: %f, badimg-full: %f"%(mean_squared_error(img1, imgFull) ,mean_squared_error(img2, imgFull) ,mean_squared_error(img3, imgFull), mean_squared_error(badimg, imgFull) ) )
print("SSIM with full: 1f: %f, 2f: %f, 3f: %f, badimg-full %f"%( ssim(img1, imgFull, data_range=imgFull.max()-imgFull.min()), ssim(img2, imgFull, data_range=imgFull.max()-imgFull.min()), ssim(img3, imgFull, data_range=imgFull.max()-imgFull.min()), ssim(badimg, imgFull, data_range=imgFull.max()-imgFull.min()) ) )
| aditi741997/robotics_project | plot_nav2d_mapQuality.py | plot_nav2d_mapQuality.py | py | 2,399 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "skimage.io.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "skimage.io.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line... |
9480946225 | import random
from collections import defaultdict
import torch
from ltp import LTP
from .base_func import BaseFunc
class NerFunc(BaseFunc):
    """Text augmentation by named-entity replacement: detected person (Nh),
    place (Ns) and organization (Ni) entities are swapped with random names
    loaded from dictionary files."""

    def __init__(self, config):
        super(NerFunc, self).__init__(config)
        # Number of augmented variants to attempt per input sentence.
        self.augment_num = config.ner_func.augment_num
        # entity tag -> set of replacement candidates.
        self.combine_dict = self.load_ner_files()
        self.model = LTP(config.ner_func.ner_tool_name)
        if torch.cuda.is_available():
            self.model.to("cuda")

    @staticmethod
    def load_ner_files():
        """Load replacement candidates per entity type from files/ner/*.txt.

        NOTE(review): the file handles opened here are never closed; wrap
        the opens in `with` blocks (or rely on GC in CPython).
        """
        combine_dict = defaultdict(set)
        # Nh = person names
        for line in open("files/ner/people_name.txt", "r", encoding="utf-8"):
            combine_dict["Nh"].add(line.strip())
        # Ns = place names
        for line in open("files/ner/place_name.txt", "r", encoding="utf-8"):
            combine_dict["Ns"].add(line.strip())
        # Ni = organization names
        for line in open("files/ner/company_name.txt", "r", encoding="utf-8"):
            combine_dict["Ni"].add(line.strip())
        return combine_dict

    def process(self, sentence):
        """Return up to ``augment_num`` augmented variants of *sentence*.

        Empty when no named entity is detected.  NOTE(review): seg_list is
        mutated in place, so later replacements stack on earlier ones, and
        ``seg_list.index(word)`` replaces only the first occurrence —
        confirm both behaviors are intended.
        """
        final_augment_sentence = []
        seg_list = self.cut_words(sentence)
        # Assumes result.ner yields (entity_type, word) pairs — TODO confirm
        # against the pinned LTP version.
        result = self.model.pipeline(seg_list, tasks=["ner"])
        if len(result.ner) == 0:
            return final_augment_sentence
        for _ in range(self.augment_num):
            n, word = random.choice(result.ner)
            if n in self.combine_dict.keys():
                new_word = random.choice(list(self.combine_dict[n]))
                old_index = seg_list.index(word)
                seg_list[old_index] = new_word
                new_sentence = ''.join(seg_list)
                final_augment_sentence.append(new_sentence)
        return final_augment_sentence
| shawn0wang/Text_Augment | function/ner_func.py | ner_func.py | py | 1,703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "base_func.BaseFunc",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "ltp.LTP",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"... |
10392845560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import os
import copy
import time
import pickle
import numpy as np
from tqdm import tqdm
import yaml
import argparse
import torch
from tensorboardX import SummaryWriter
# from src.options import args_parser
from update import LocalUpdate
from utils import get_dataset, average_weights, exp_details
from deepctr_torch.inputs import get_feature_names
from sklearn.metrics import log_loss, roc_auc_score
from deepctr_torch.models import DeepFM
if __name__ == '__main__':
    # Federated-learning driver: trains a DeepFM model by averaging local
    # client updates (FedAvg) and tracks the best test AUC/log-loss.
    start_time = time.time()

    # define paths
    path_project = os.path.abspath('..')
    logger = SummaryWriter('../logs')

    parser = argparse.ArgumentParser(description='PPDL')
    parser.add_argument('--params', dest='params', default='utils/params.yaml')
    params = parser.parse_args()
    with open(f'../{params.params}', 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe and
        # fails on PyYAML >= 6; prefer yaml.safe_load(f).
        args = yaml.load(f)
    exp_details(args)
    # NOTE(review): this exit(0) makes EVERYTHING below unreachable — it
    # looks like a leftover debug statement; confirm and remove.
    exit(0)
    if args['gpu']:
        torch.cuda.set_device(args['gpu'])
    device = 'cuda' if args['gpu'] else 'cpu'

    # load dataset and user groups # prepare feature for model
    (train_dataset, test_dataset, user_groups),fixlen_feature_columns = get_dataset(args)
    # count #unique features for each sparse field,and record dense feature field name
    dnn_feature_columns = fixlen_feature_columns
    linear_feature_columns = fixlen_feature_columns
    feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)

    # BUILD MODEL
    if args['model'].lower() == 'deepfm':
        # 4.Define Model,train,predict and evaluate
        global_model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')
        global_model.compile("adam", "binary_crossentropy",
                             metrics=['binary_crossentropy'], )
    else:
        exit('Error: unrecognized model')

    # # Set the model to train and send it to device.
    # global_model.to(device)
    # global_model.train() # torch claim
    # print(global_model)

    # copy weights
    global_weights = global_model.state_dict()
    # print(global_weights.keys())

    # Training
    # train_loss, train_accuracy = [], []
    # val_acc_list, net_list = [], []
    # cv_loss, cv_acc = [], []
    # print_every = 2
    # val_loss_pre, counter = 0, 0

    # temp test data
    test_model_input = {name: test_dataset[name] for name in feature_names}
    # for comparison
    # best_model = copy.deepcopy(global_model)
    min_loss = 1000.0
    max_auc = -1.0
    for epoch in tqdm(range(args['epochs'])):
        local_weights= [] #, local_losses , []
        print(f'\n | Global Training Round : {epoch+1} |\n')
        # frac default 0.1; num_users default 100
        m = max(int(args['frac'] * args['num_users']), 1)
        # 100 randomly select 10 as training client
        idxs_users = np.random.choice(range(args['num_users']), m, replace=False)
        for idx in idxs_users: # 10 random users
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx], logger=logger)
            w = local_model.update_weights(
                model=copy.deepcopy(global_model), features=feature_names)
            local_weights.append(copy.deepcopy(w))
            # local_losses.append(copy.deepcopy(loss))
        # update global weights (FedAvg: element-wise mean of client weights)
        global_weights = average_weights(local_weights)
        global_model.load_state_dict(global_weights)
        # temp test
        pred_ans = global_model.predict(test_model_input, batch_size=256)
        logloss = log_loss(test_dataset['label'].values, pred_ans)
        aucscore = roc_auc_score(test_dataset['label'].values, pred_ans)
        print("test LogLoss", round(logloss, 4))
        print("test AUC", round(aucscore, 4))
        if aucscore > max_auc:
            # best_model = copy.deepcopy(global_model)
            min_loss = logloss
            max_auc = aucscore
    print("|---- Min log loss: {:.4f}%".format(min_loss))
    print("|---- Best AUC: {:.4f}%".format(max_auc))
    print("test done")
| gongzhimin/Trojan-Attack-Against-Structural-Data-in-Federated-Learning | src/federated_main_nonattack.py | federated_main_nonattack.py | py | 4,103 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "tensorboardX.SummaryWriter",
... |
12210284781 | import openpyxl
from openpyxl.utils import cell
def read(config):
    """Read all configured source tabs from the workbook.

    :param config: object with ``source_path``, ``source_tabs`` and the
        range attributes used by :func:`extract_dataset`.
    :return: dict mapping tab name -> extracted cell range.
    """
    # BUGFIX: open the workbook *before* the try/finally.  Previously, if
    # load_workbook raised, ``workbook`` was unbound and the finally clause
    # raised UnboundLocalError, masking the real error.
    workbook = openpyxl.load_workbook(config.source_path, data_only=True)
    try:
        datasets = {}
        for source_tab_name in config.source_tabs:
            datasets[source_tab_name] = extract_dataset(workbook, source_tab_name, config)
        return datasets
    finally:
        workbook.close()
def extract_dataset(workbook, source_tab_name, config):
    """Return the populated cell range of a single worksheet tab."""
    worksheet = workbook.get_sheet_by_name(source_tab_name)
    last_row = find_last_data_row(worksheet, config.start_search_row, config.column_range_start)
    first_cell = "{}{}".format(config.column_range_start, config.start_search_row)
    last_cell = "{}{}".format(config.column_range_stop, last_row)
    return worksheet[first_cell:last_cell]
def find_last_data_row(worksheet, start_row, column_range_start):
    """Return the last row (>= ``start_row``) that holds data in the given column.

    Scans downward from ``start_row``; the row before the first empty cell is
    the last data row.  Falls back to ``worksheet.max_row`` when no empty cell
    is found.
    """
    column_index = cell.column_index_from_string(column_range_start)
    for current_row in range(start_row, worksheet.max_row):
        # BUGFIX: the cell was previously read twice per row (once into an
        # unused local); one lookup suffices.  Also use `is None`, not `== None`.
        if worksheet.cell(row=current_row, column=column_index).value is None:
            return current_row - 1
    return worksheet.max_row
{
"api_name": "openpyxl.load_workbook",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "openpyxl.utils.cell.column_index_from_string",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "openpyxl.utils.cell",
"line_number": 22,
"usage_type": "name"
}
] |
3184473921 | # -*- coding: utf-8 -*-
import yaml
from . import instructions
from . import value_containers
from .exceptions import ParserException
from .method import Method
def _process_func(method, func):
    """Populate *method* from the ``func`` section of the parsed YAML."""
    if not (func and isinstance(func, dict)):
        raise ParserException('"func" not defined')
    method.function_name = func['name']
    return_type_name = func['type'].lower()
    method.return_type = value_containers.types[return_type_name]()
    _process_vars(method, func['args'], inc_arg_count=True)
def _add_label(method, label, index):
    """Register *label* -> *index* in ``method.labels`` after validation.

    Raises ParserException for a missing, non-string or duplicate label.
    """
    # BUGFIX: the None check was unreachable before — None is not a str, so
    # the isinstance check raised first with the wrong message.  Test for
    # the missing-label case explicitly.
    if label is None:
        raise ParserException('every local variable needs a label')
    if not isinstance(label, str):
        raise ParserException('label %s needs to be a string' % label)
    if label in method.labels:
        raise ParserException('labels has to be unique: duplicate %s' % label)
    method.labels[label] = index
def _process_vars(method, args, inc_arg_count=False):
    """Append a typed value container per arg and register its label."""
    for arg in args:
        type_name = arg['type'].lower()
        if inc_arg_count:
            method.argument_count += 1
        method.variables.append(value_containers.types[type_name]())
        label = arg.get('label', None)
        _add_label(method, label, len(method.variables) - 1)
def _process_ins(method, ins):
    """Translate the YAML ``ins`` list into instruction objects on ``method.code``.

    Two passes: pass 1 maps labels to instruction offsets (label entries do
    not consume an offset); pass 2 instantiates instructions, resolving
    label arguments through ``method.labels``.
    """
    # pass1: collect labels
    offset = 0
    label = None
    label_current = None
    for i in ins:
        if isinstance(i, dict):
            # Each dict entry carries exactly one key: either a label marker
            # or a single-argument instruction.
            if not len(i) == 1:
                raise ParserException('bad syntax for data %s' % i)
            label_current = i.get('label', None)
            if label and label_current:
                raise ParserException('label cannot follow label: %s, %s' % (label, label_current))
            label = label_current
        else:
            label = None
            label_current = None
        if label:
            # A label marks the offset of the NEXT real instruction and does
            # not advance the offset itself.
            _add_label(method, label, offset)
        else:
            offset += 1
    else:
        # for/else: runs after the loop completes — reject a trailing label
        # that has no instruction to attach to.
        if label:
            raise ParserException('label cannot be as last instruction %s' % i)
    # pass2: use labels and collect instructions
    offset = 0
    for i in ins:
        # Skip label markers; only real instructions advance the offset.
        if isinstance(i, dict) and i.get('label', None):
            continue
        else:
            offset += 1
        if isinstance(i, str):
            # Bare keyword: must be a no-argument instruction.
            inst = instructions.keywords[i.lower()]
            if not issubclass(inst, instructions.InsNoArgument):
                raise ParserException('instruction %s requires argument' % i)
            method.code.append(inst())
        elif isinstance(i, dict):
            # NOTE(review): popitem() mutates the caller's parsed structure.
            kw, value = i.popitem()
            inst = instructions.keywords[kw.lower()]
            if issubclass(inst, instructions.InsNoArgument):
                raise ParserException('instruction %s takes no argument' % i)
            if issubclass(inst, instructions.InsArgILabel):
                # Label argument: resolve the symbolic label to its offset.
                if isinstance(value, str):
                    try:
                        value = method.labels[value]
                    except KeyError:
                        raise ParserException('label %s is not defined' % value)
                else:
                    raise ParserException('instruction %s requires label as argument' % i)
            elif issubclass(inst, instructions.InsArgInteger):
                if value != int(value):
                    raise ParserException('instruction %s requires integer argument' % i)
            elif issubclass(inst, instructions.InsArgFloat):
                if value != float(value):
                    raise ParserException('instruction %s requires float argument' % i)
            method.code.append(inst(instructions.contain_value(inst, value)))
        else:
            raise ParserException('unknown instruction format %s' % i)
def process_yaml(structure):
    """Build a Method object from an already-parsed YAML structure."""
    method = Method()
    _process_func(method, structure.get('func', {}))
    _process_vars(method, structure.get('lvars', []))
    _process_ins(method, structure.get('ins', []))
    return method
def parse_string(data):
    """Parse YAML *data* (string or stream) and return the resulting Method."""
    return process_yaml(yaml.safe_load(data))
def parse_file(fname):
    """Open *fname* and parse its YAML contents into a Method."""
    with open(fname, 'r') as stream:
        return parse_string(stream)
| lukleh/Tiny-Stackbased-Virtual-Machine-in-Python | TSBVMIP/code_parser.py | code_parser.py | py | 4,024 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "exceptions.ParserException",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "method.function_name",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "method.return_type",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_... |
73192051625 | import argparse
import os
from distutils.util import strtobool
import random
import time
import numpy as np
import torch
import gym
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from stable_baselines3.common.buffers import ReplayBuffer
def parse_args():
parser = argparse.ArgumentParser()
# Experiment settings
parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
help="the name of this experiment")
parser.add_argument("--seed", type=int, default=1,
help="seed of the experiment")
parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
help="if toggled, `torch.backends.cudnn.deterministic=False`")
parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
help="if toggled, cuda will be enabled by default")
parser.add_argument("--track", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
help="if toggled, this experiment will be tracked with Weights and Biases")
parser.add_argument("--wandb-project-name", type=str, default="cfrl",
help="the wandb's project name")
parser.add_argument("--wandb-entity", type=str, default=None,
help="the entity (team) of wandb's project")
parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)), default=False, nargs="?", const=True,
help="whether to capture videos of the agent performances (check out `videos` folder)")
parser.add_argument("--env-id", type=str, default="CartPole-v1",
help="the id of the environment")
# Hyperparameters
parser.add_argument("--v-max", type=float, default=100,
help="the number of atoms")
parser.add_argument("--v-min", type=float, default=-100,
help="the number of atoms")
parser.add_argument("--n-atoms", type=int, default=101,
help="the number of atoms")
parser.add_argument("--replay-memory-size", type=int, default=1000000,
help="SGD updates are sampled from this number of most recent frames")
parser.add_argument("--agent-history-length", type=int, default=4,
help="The number of most recent frames experienced by the agent that are given as input to the Q network")
parser.add_argument("--action-repeat", type=int, default=4,
help="repeate each action selected by the agent this many times")
parser.add_argument("--minibatch-size", type=int, default=128,
help="the batch size of sample from the reply memory")
parser.add_argument("--update-frequency", type=int, default=4,
help="the number of actions selected by the agent between successive SGD updates")
parser.add_argument("--gamma", type=float, default=0.99,
help="the discount factor gamma")
parser.add_argument("--learning-rate", type=float, default=2.5e-4,
help="the learning rate of the optimizer")
parser.add_argument("--target-network-frequency", type=int, default=10000,
help="the timesteps it takes to update the target network")
parser.add_argument("--initial-exploration", type=float, default=1,
help="the starting epsilon for exploration")
parser.add_argument("--final-exploration", type=float, default=0.1,
help="the ending epsilon for exploration")
parser.add_argument("--final-exploration-frame", type=int, default=1000000,
help="the number of frames over which the initial value of epsilon is linearly annealed to its final value")
parser.add_argument("--replay-start-size", type=int, default=50000,
help="A uniform random policy is run for this number of frames before learning starts and the resulting experience is used to populate the replay memory")
parser.add_argument("--noop-max", type=int, default=30,
help="maximum number of doing nothing action to be performed by the agent at the start of an episode")
parser.add_argument("--total-timesteps", type=int, default=10000000,
help="total timesteps of the experiments")
args = parser.parse_args()
# fmt: on
return args
def make_env(env_id, seed, idx, capture_video, run_name):
    """Return a zero-argument factory that builds and seeds one monitored env.

    Only the first vector slot (idx == 0) records video when enabled.
    """
    def _factory():
        env = gym.make(env_id)
        env = gym.wrappers.RecordEpisodeStatistics(env)
        if capture_video and idx == 0:
            env = gym.wrappers.RecordVideo(env, f"videos/{run_name}")
        env.seed(seed)
        env.action_space.seed(seed)
        env.observation_space.seed(seed)
        return env

    return _factory
class QNetwork(nn.Module):
    """Distributional Q-network: for every discrete action it outputs a
    probability mass function over ``n_atoms`` fixed return values."""

    def __init__(self, env, n_atoms=101, v_min=-100, v_max=100):
        super().__init__()
        self.env = env
        self.n_atoms = n_atoms
        # Fixed support of the return distribution; a buffer so it moves
        # with the model (device/state_dict) without being trained.
        self.register_buffer("atoms", torch.linspace(
            v_min, v_max, steps=n_atoms))
        self.n = env.single_action_space.n
        obs_dim = np.array(env.single_observation_space.shape).prod()
        layers = [
            nn.Linear(obs_dim, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, self.n * n_atoms),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Return raw logits of shape (batch, n_actions * n_atoms)."""
        return self.network(x)

    def get_action(self, x, action=None):
        """Pick greedy actions (unless given) and return their atom pmfs."""
        batch = len(x)
        logits = self.network(x).view(batch, self.n, self.n_atoms)
        pmfs = torch.softmax(logits, dim=2)      # per-action distribution over atoms
        q_values = (pmfs * self.atoms).sum(2)    # expected return per action
        if action is None:
            action = torch.argmax(q_values, 1)
        return action, pmfs[torch.arange(batch), action]
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
    """Linearly anneal from ``start_e`` to ``end_e`` over ``duration`` steps,
    then hold at ``end_e`` (assumes a decaying schedule: end_e < start_e)."""
    fraction = t / duration
    value = start_e + fraction * (end_e - start_e)
    return max(value, end_e)
if __name__ == "__main__":
args = parse_args()
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
if args.track:
import wandb
wandb.init(
project=args.wandb_project_name,
entity=args.wandb_entity,
sync_tensorboard=True,
config=vars(args),
name=run_name,
monitor_gym=True,
save_code=True,
)
writer = SummaryWriter(f"runs/{run_name}")
writer.add_text(
"hyperparameters",
"|param|value|\n|-|-|\n%s" % (
"\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
)
# TRY NOT TO MODIFY: seeding
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
device = torch.device(
"cuda" if torch.cuda.is_available() and args.cuda else "cpu")
# env setup
envs = gym.vector.SyncVectorEnv(
[make_env(args.env_id, args.seed, 0, args.capture_video, run_name)])
assert isinstance(envs.single_action_space,
gym.spaces.Discrete), "only discrete action space is supported"
q_network = QNetwork(envs, n_atoms=args.n_atoms,
v_min=args.v_min, v_max=args.v_max).to(device)
optimizer = optim.Adam(q_network.parameters(
), lr=args.learning_rate, eps=0.01 / args.minibatch_size)
target_network = QNetwork(envs).to(device)
target_network.load_state_dict(q_network.state_dict())
rb = ReplayBuffer(
args.replay_memory_size,
envs.single_observation_space,
envs.single_action_space,
device,
optimize_memory_usage=True,
handle_timeout_termination=True,
)
start_time = time.time()
# TRY NOT TO MODIFY: start the game
obs = envs.reset()
for global_step in range(args.total_timesteps):
# ALGO LOGIC: put action logic here
epsilon = linear_schedule(
args.initial_exploration, args.final_exploration, args.final_exploration_frame, global_step)
if random.random() < epsilon:
actions = np.array([envs.single_action_space.sample()
for _ in range(envs.num_envs)])
else:
actions, pmf = q_network.get_action(torch.Tensor(obs).to(device))
actions = actions.cpu().numpy()
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, rewards, dones, infos = envs.step(actions)
# TRY NOT TO MODIFY: record rewards for plotting purposes
for info in infos:
if "episode" in info.keys():
print(
f"global_step={global_step}, episodic_return={info['episode']['r']}")
writer.add_scalar("charts/episodic_return",
info["episode"]["r"], global_step)
writer.add_scalar("charts/episodic_length",
info["episode"]["l"], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
break
# TRY NOT TO MODIFY: save data to reply buffer; handle `terminal_observation`
real_next_obs = next_obs.copy()
for idx, d in enumerate(dones):
if d:
real_next_obs[idx] = infos[idx]["terminal_observation"]
rb.add(obs, real_next_obs, actions, rewards, dones, infos)
# TRY NOT TO MODIFY: CRUCIAL step easy to overlook
obs = next_obs
# ALGO LOGIC: training.
if global_step > args.replay_start_size and global_step % args.update_frequency == 0:
data = rb.sample(args.minibatch_size)
with torch.no_grad():
_, next_pmfs = target_network.get_action(
data.next_observations)
next_atoms = data.rewards + args.gamma * \
target_network.atoms * (1 - data.dones)
# projection
delta_z = target_network.atoms[1] - target_network.atoms[0]
tz = next_atoms.clamp(args.v_min, args.v_max)
b = (tz - args.v_min) / delta_z
l = b.floor().clamp(0, args.n_atoms - 1)
u = b.ceil().clamp(0, args.n_atoms - 1)
# (l == u).float() handles the case where bj is exactly an integer
# example bj = 1, then the upper ceiling should be uj= 2, and lj= 1
d_m_l = (u + (l == u).float() - b) * next_pmfs
d_m_u = (b - l) * next_pmfs
target_pmfs = torch.zeros_like(next_pmfs)
for i in range(target_pmfs.size(0)):
target_pmfs[i].index_add_(0, l[i].long(), d_m_l[i])
target_pmfs[i].index_add_(0, u[i].long(), d_m_u[i])
_, old_pmfs = q_network.get_action(
data.observations, data.actions.flatten())
loss = (-(target_pmfs * old_pmfs.clamp(min=1e-5,
max=1 - 1e-5).log()).sum(-1)).mean()
if global_step % 100 == 0:
writer.add_scalar("losses/td_loss", loss, global_step)
old_val = (old_pmfs * q_network.atoms).sum(1)
writer.add_scalar("losses/q_values",
old_val.mean().item(), global_step)
print("SPS:", int(global_step / (time.time() - start_time)))
writer.add_scalar(
"charts/SPS", int(global_step / (time.time() - start_time)), global_step)
# optimize the model
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update the target network
if global_step % args.target_network_frequency == 0:
target_network.load_state_dict(q_network.state_dict())
envs.close()
writer.close()
| ChufanSuki/cfrl | examples/c51.py | c51.py | py | 12,157 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "distutils.util.... |
417669106 | from array import *
import os
from PIL import Image
Buttons=[0x300fd40bf,0x300fdc03f,0x300fd20df,0x300fda05f,0x300fd609f,0x300fde01f,0x300fd10ef,0x300fd906f,0x300fd50af,0x300fd30cf,0x300fdb24d,0x300fd728d,0x300fdf20d,0x300fd8877,0x300fd48b7]
ButtonsNames=["One","Two","Three","Four","Five","Six","Seven","Eight","Nine","Zero","MENU","TITLE","L/R","Left_key","Right_key"]
distantToFolderMappings = {
12: "C:/Users/rakib/OneDrive/Desktop/Optoshi/12 Feet",
10: "C:/Users/rakib/OneDrive/Desktop/Optoshi/10 Feet",
8: "C:/Users/rakib/OneDrive/Desktop/Optoshi/8 Feet"
}
menuMapping = {
"One": "e",
"Two": "pediatric"
}
distanceMapping = {
"Eight": 8,
"Ten": 10
}
def convertHex(binaryValue):
tmpB2 =int(str(binaryValue),2)
return hex(tmpB2)
class Folder:
menuId = ""
images = []
name = ""
currentIndex = 0
def __init__(self, distance, menuId):
self.name = menuMapping[menuId]
distancePath = distantToFolderMappings[distance]
actualFolderPath = distancePath+"/"+self.name
folderImages = self.get_files_in_directory(actualFolderPath)
self.images = [file for file in folderImages if file.endswith(".png") or file.endswith(".jpg")]
self.currentIndex = 0
self.menuId = menuId
self.openImage()
#print(self.images)
def openImage(self):
print("Opening from Folder "+self.name+" with index "+str(self.currentIndex))
img = Image.open(self.images[self.currentIndex])
self.currentImage = img
img.show()
def moveIndexToLeft(self):
if self.currentIndex-1<0:
self.currentIndex = len(self.images)-1
else:
self.currentIndex = self.currentIndex-1
print("Processing done")
def moveIndexToRight(self):
if self.currentIndex+1<len(self.images):
self.currentIndex = self.currentIndex + 1
else:
self.currentIndex = 0
def processIrSignal(self, signal):
status = 1
if signal == "Left":
self.moveIndexToLeft()
print(signal + " Pressed on " + self.name)
elif signal == "Right":
self.moveIndexToRight()
print(signal + " Pressed on " + self.name)
elif signal == self.findKey(menuMapping, self.name):
print("Menu button pressed. Opening Next image")
self.moveIndexToRight()
else:
status = -1
print("Command not found inside class and status is -1")
if status == 1:
self.openImage()
return status
def getImagePath(self, distance):
return distantToFolderMappings[distance]+self.name
def get_files_in_directory(self, directory):
return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
def findKey(self, dictionary, value):
for dict_key, dict_value in dictionary.items():
if dict_value == value:
return dict_key
return None
currentFolder = None
def createFolder(distance, value):
if value in menuMapping:
return Folder(distance, value)
else:
return None
def process_signal(distance, value):
selButton = value
global currentFolder
if currentFolder is not None:
status = currentFolder.processIrSignal(selButton)
if status == -1:
print("Status found -1 from processSignal")
newFolder = createFolder(distance, value)
if newFolder is not None:
currentFolder = newFolder
else:
currentFolder = createFolder(distance, value)
if currentFolder is not None:
print(menuMapping[value]+" Menu Selected")
else:
print("Command not found")
while True:
distance = 10
while True:
inData = input("Enter a string: ")
if inData == "Exit" and currentFolder is None:
print("Exiting to select distance")
currentFolder = None
break
elif inData == "Exit" and currentFolder is not None:
print("Exiting to Select folder")
currentFolder = None
elif inData in distanceMapping:
distance = distanceMapping[inData]
print(str(distance) + " Selected")
currentFolder = None
else:
process_signal(distance, inData)
print("Action taken with "+str(distance)+" feet distance")
| Rakibuz/Robotics_HCI | Raspberry Pi/hypothetical_final_0.1.py | hypothetical_final_0.1.py | py | 4,522 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78... |
9416483162 | # Cemantix game solver
import logging
import os
import yaml
from src import *
os.chdir(os.path.abspath(os.path.dirname(__file__)))
with open("config.yaml", "r") as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader)
logging.basicConfig(filename=f"./logs/cemantix_{dt.datetime.now().strftime(format='%Y-%m-%d_%Hh%M')}.log", level=logging.INFO)
def main():
model = load_model(embedding=config['word2vec'])
game = CemantixGame(executable_path="./artifacts/chromedriver.exe",game_url=config["game_url"])
if config["agent_type"] == "bandit":
agent = CemantixBandit(model=model, **config["agent_params"])
elif config["agent_type"] == "gangster":
agent = CemantixGangster(model=model, **config["agent_params"])
else:
raise ValueError("Unknown agent_type")
game.play_strategy(agent, max_iter=config["max_iter"])
game.save_result()
game.end()
main()
| CorentinMary/cemantix | main.py | main.py | py | 922 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number... |
12530669262 | import torch
from torch import nn
from .strategy import Strategy
from .utils import ner_predict, re_predict
class EntropySampling(Strategy):
def __init__(self, annotator_config_name, pool_size, setting: str='knn', engine: str='gpt-35-turbo-0301',
reduction: str='mean'):
super().__init__(annotator_config_name, pool_size, setting, engine)
assert reduction in ['mean', 'sum', 'max']
self.reduction = reduction
def query(self, args, k: int, model: nn.Module, features):
pool_indices = self._get_pool_indices()
pool_features = [features[i] for i in pool_indices]
if self.task_type == 'ner':
pred_logits = ner_predict(args, pool_features, model)
uncertainties = []
for logit in pred_logits:
prob = torch.softmax(logit, dim=-1)
entropy = torch.special.entr(prob).sum(dim=-1) # entropy over each token
if self.reduction == 'mean':
uncertainties.append(entropy.mean())
elif self.reduction == 'sum':
uncertainties.append(entropy.sum())
elif self.reduction == 'max':
uncertainties.append(entropy.max())
uncertainties = torch.stack(uncertainties)
elif self.task_type == 're':
pred_logits = re_predict(args, pool_features, model)
prob = torch.softmax(pred_logits, dim=-1)
entropy = torch.special.entr(prob).sum(dim=-1)
uncertainties = entropy
else:
raise ValueError('tbd.')
lab_indices = torch.topk(uncertainties, k=k)[1]
lab_indices = [pool_indices[i] for i in lab_indices]
return lab_indices | ridiculouz/LLMaAA | src/active_learning/entropy_sampling.py | entropy_sampling.py | py | 1,745 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "strategy.Strategy",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "utils.ner_predict",
... |
23971763808 | from replit import db
from util import str_to_arr
from person import Person
def matches_to_string(matches):
string = "List of matches:\n"
for match in matches:
string += match + "\n"
return string
async def make_connections(message, person_calling):
matches = []
person_1 = Person.str_to_person(db[f"{person_calling}"])
person_1_interests = str_to_arr(person_1.interests)
person_1_courses = str_to_arr(person_1.courses)
for person in db.keys():
similarities = 0
if str(person) == str(person_calling):
continue
person_2 = Person.str_to_person(db[f"{person}"])
person_2_interests = str_to_arr(person_2.interests)
person_2_courses = str_to_arr(person_2.courses)
if person_1.year == person_2.year:
similarities += 1
for interest in person_1_interests:
if interest in person_2_interests:
similarities += 1
for course in person_1_courses:
if course in person_2_courses:
similarities += 1
if int(similarities/2) >= person_1.fit:
matches.append(person_2.user)
await message.author.send(matches_to_string(matches))
return
| Sharjeeliv/monty-bot | connect.py | connect.py | py | 1,083 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "person.Person.str_to_person",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "person.Person",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "replit.db",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "util.str_to_arr",... |
40222438483 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
'''
Ideje kako poboljšati kod:
1. Kreirati novu klasu satelit koja je pod gravitacijskim utjecajem ostalih planeta ali ona ne utječe na njih
2. Ta klasa ima metodu boost koja ju odjednom ubrza
3. Pogledati i probati dolje zakomentiranu metodu za optimizaciju
4. Dodati tragove orbita
'''
class Body(object):
def __init__(self, name, colour, mass, init_position, init_velocity):
self.name = name
self.colour = colour
self.mass = mass
self.position = init_position
self.velocity = init_velocity
self.current_acceleration = 0 # TODO: think if better initial acceleration can be found
self.previous_acceleration = 0
def update_position(self, timestep):
"""Updates the position of the body"""
new_position = self.position + self.velocity * timestep + 1 / 6 * (4 * self.current_acceleration
- self.previous_acceleration) * timestep ** 2
self.position = new_position
def update_velocity(self, new_acceleration, timestep):
"""New acceleration is the acceleration in the next timestep. Updates the body velocity"""
new_velocity = self.velocity + 1 / 6 * (2 * new_acceleration + 5 * self.current_acceleration
- self.previous_acceleration) * timestep
self.velocity = new_velocity
def calc_KE(self):
"""Returns the kinetic energy of the body"""
KE = 1 / 2 * self.mass * np.linalg.norm(self.velocity) ** 2
return KE
class SmallBody(Body):
def __init__(self, name, colour, mass, init_position, init_velocity, bodyOfInterest, bodyOfInterestPosition):
super().__init__(name, colour, mass, init_position, init_velocity)
self.closestDistance = np.linalg.norm(bodyOfInterestPosition - init_position)
self.timeToBodyOfInterest = 0
self.bodyOfInterest = bodyOfInterest
def distance_from_body(body1, body2):
distancevec = body1.position - body2.position
distance = np.linalg.norm(distancevec)
return distance
class Simulation(object):
def __init__(self, timestep, num_iterations):
self.timestep = timestep
self.num_iterations = num_iterations
self.patches = []
self.timeElapsed = 0
def read_input_data(self, file):
"""Opens the specific file and reads the input data. File has to be in a specific format"""
df = open(file, 'r')
data = df.readlines()
data.pop(0) # pop the first two lines of the file, they describe how the file is to be formated
data.pop(0)
bodies = []
smallBodies = []
for line in data:
line = line.strip()
line = line.split(',')
line[3] = line[3].split(';')
line[4] = line[4].split(';')
name, color, mass, init_position, init_velocity = line[0].strip(), line[1].strip(), float(line[2]), \
np.array([float(line[3][0].strip()),
float(line[3][1].strip())]), \
np.array([float(line[4][0].strip()),
float(line[4][1].strip())])
if line[-1].strip() == 'Body':
bodies.append(Body(name, color, mass, init_position, init_velocity))
elif line[-1].strip() == 'SmallBody':
bodyOfInterest = line[-2].strip()
for body in bodies:
if body.name == bodyOfInterest:
bodyOfInterestPosition = body.position
smallBodies.append(SmallBody(name, color, mass, init_position, init_velocity, bodyOfInterest,
bodyOfInterestPosition))
self.body_list = bodies
self.smallBodies = smallBodies
for body in self.body_list:
# create patches for each body of the system
xpos = body.position[0]
ypos = body.position[1]
if body.name == 'Sun':
self.patches.append(plt.Circle((xpos, ypos), radius=10000000000, color=body.colour, animated=True))
else:
for i in range(10):
self.patches.append(
plt.Circle((xpos, ypos), radius=(5000000000 / (10 - i)), color=body.colour, animated=True))
for smallBody in self.smallBodies:
xpos = smallBody.position[0]
ypos = smallBody.position[1]
for i in range(10):
self.patches.append(
plt.Circle((xpos, ypos), radius=(5000000000 / (10 - i)), color=smallBody.colour, animated=True)
)
xmax = 0
for body in self.body_list:
# find the axes range
if body.position[0] > xmax:
xmax = body.position[0] * 1.5
if body.position[1] > xmax:
xmax = body.position[1] * 1.5
self.xmax = xmax
def run_simulation(self):
# running the simulation for the inputed number of iterations
for i in range(self.num_iterations):
self.step_forward()
def step_forward(self):
# Move the bodies one timestep
# New positions of all the bodies are calculated first
self.timeElapsed += self.timestep
for body in self.body_list:
body.update_position(self.timestep)
for smallBody in self.smallBodies:
smallBody.update_position(self.timestep)
for body in self.body_list:
new_acceleration = self.calc_acceleration(body)
body.update_velocity(new_acceleration, self.timestep)
body.previous_acceleration = body.current_acceleration
body.current_acceleration = new_acceleration
for smallBody in self.smallBodies:
new_acceleration = self.calc_acceleration(smallBody)
smallBody.update_velocity(new_acceleration, self.timestep)
smallBody.previous_acceleration = smallBody.current_acceleration
smallBody.current_acceleration = new_acceleration
for body in self.body_list:
if smallBody.bodyOfInterest == body.name:
distance = distance_from_body(smallBody, body)
if distance < smallBody.closestDistance:
smallBody.closestDistance = distance
smallBody.timeToBodyOfInterest = self.timeElapsed
def calc_acceleration(self, body):
# find the acceleration on a single body. Returns a np array of acceleration
forceOnBody = np.array([0.0, 0.0])
for secondBody in self.body_list:
if secondBody.name != body.name:
displacementVec = secondBody.position - body.position
distance = np.linalg.norm(displacementVec)
displacementVec = displacementVec / distance
magnitude = G * body.mass * secondBody.mass / (distance ** 2)
force = magnitude * displacementVec
forceOnBody += force
acceleration = forceOnBody / body.mass
return acceleration
def update_display(self, i):
# single timestep change in display
self.step_forward()
j = 0
for body in self.body_list:
if body.name == 'Sun':
self.patches[j].center = (body.position[0], body.position[1])
else:
for i in range(1, 10):
self.patches[(j - 1) * 10 + i].center = self.patches[(j - 1) * 10 + i + 1].center
self.patches[j * 10].center = (body.position[0], body.position[1])
j += 1
for smallBody in self.smallBodies:
for i in range(1, 10):
self.patches[(j - 1) * 10 + i].center = self.patches[(j - 1) * 10 + i + 1].center
self.patches[j * 10].center = (smallBody.position[0], smallBody.position[1])
j += 1
return self.patches
def animate(self):
# animate the bodies for the duration of the simulation
plt.style.use('dark_background')
fig = plt.figure(1)
ax = plt.axes()
for patch in self.patches:
ax.add_patch(patch)
ax.axis('scaled')
ax.set_xlim(-self.xmax, self.xmax)
ax.set_ylim(-self.xmax, self.xmax)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.axis('off')
anim = FuncAnimation(fig, self.update_display, frames=self.num_iterations, repeat=False, interval=50, blit=True)
plt.show()
def calc_PE(self):
# Calculates the total potential energy. Returns a float of the energy
PE = 0
for body in self.body_list:
for secondBody in self.body_list:
if body.name != secondBody.name:
displacementVec = secondBody.position - body.position
distance = np.linalg.norm(displacementVec)
PE += -1 / 2 * G * body.mass * secondBody.mass / distance
return PE
def calc_tot_energy(self):
# Calculates the total energy. Returns a float
PE = self.calc_PE
KE = 0
for body in self.body_list:
KE += body.calc_KE()
return KE + PE
def check_orbital_period(self, body):
# Finds the orbital period of a given body using trigonometric functions. Returns a float
orbital_period = 0
while not 0 > np.arctan2(body.position[1], body.position[0]) > -0.01:
self.step_forward()
orbital_period += self.timestep
orbital_period = orbital_period / 86400
return orbital_period
def launch_sattelite(self, name, colour, mass, launchBodyName, radius, initVelocity, launchOrientation,
interestBody):
"""This is a function that launches a satellite from a given body. Input parameters are name, colour, mass,
name of the body from which the satellite is to be launched, distance from the center of the body where the
satellite is launched, initial velocity, orientation of the launch - inner if launching from the side facing the
Sun, outer otherwise, and name of the body the satellite is trying to reach"""
for body in self.body_list:
if interestBody == body.name:
interestBodyName = body.name
interestBodyPosition = body.position
if launchBodyName == body.name:
launchBody = body
xBodyPos = launchBody.position[0]
yBodyPos = launchBody.position[1]
angle = np.arctan2(yBodyPos, xBodyPos)
if launchOrientation == 'inner':
xOffset = -1 * np.tan(angle) * radius
yOffset = -1 * np.tan(angle) * radius
launchPosition = np.array([xBodyPos + xOffset, yBodyPos + yOffset])
self.smallBodies.append(SmallBody(name, colour, mass, launchPosition, initVelocity, interestBodyName,
interestBodyPosition))
for i in range(10):
self.patches.append(
plt.Circle((launchPosition[0], launchPosition[1]), radius=(2000000000 / (10 - i)),
color=colour, animated=True))
elif launchOrientation == 'outer':
xOffset = np.tan(angle) * radius
yOffset = np.tan(angle) * radius
launchPosition = np.array([xBodyPos + xOffset, yBodyPos + yOffset])
self.smallBodies.append(SmallBody(name, colour, mass, launchPosition, initVelocity, interestBodyName,
interestBodyPosition))
for i in range(10):
self.patches.append(
plt.Circle((launchPosition[0], launchPosition[1]), radius=(2000000000 / (10 - i)),
color=colour, animated=True))
G = 6.67408e-11
Universe = Simulation(200000, 2000)
Universe.read_input_data('Parameters.txt')
Universe.animate()
# print('Orbital period of the Earth is: ' + str(Universe.check_orbital_period(Universe.body_list[3])) + ' days')
# print(str(Universe.calc_tot_energy()))
# Universe.run_simulation()
| MatejVe/Solar-System-Simulation | Solar system for testing new code.py | Solar system for testing new code.py | py | 12,798 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linalg.norm",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
... |
43552704116 | import os
from bot.misc.util import download, calculate_hash
from bot.functions import lessonsToday
from bot.database.main import filesDB
import hashlib
import datetime
files = filesDB()
def filesCheck(urls) -> list:
done = []
filesHash = []
for name, url in urls.items():
h = files.get(name)
if h == -1:
download(url, name)
rez = calculate_hash(f'{name}')
filesHash.append({'name': name, 'hash': rez, 'date': datetime.datetime.now()})
else:
done.append({'file_id': h['file_id'],
'url': url})
return done, filesHash
| i3sey/EljurTelegramBot | bot/functions/files.py | files.py | py | 651 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "bot.database.main.filesDB",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bot.misc.util.download",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bot.misc.util.calculate_hash",
"line_number": 19,
"usage_type": "call"
},
{
"api_... |
43372121821 | import cv2
import numpy as np
video_path = '/Users/bigphess/Desktop/omnidirection/res/rabbit_250fps.mp4'
cap = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(video_path)
while True:
ret, frame = cap2.read()
# image = cv2.imread('/Users/bigphess/Downloads/IMG_6453.JPG')
debug = frame
if cv2.waitKey(100) & 0xFF == ord('q'):
cv2.imwrite('/Users/bigphess/Desktop/xu.jpg',debug)
print("successful write")
break
# h,w,ch = debug.shape
print('the size of the frame is {} and {}'.format(debug.shape[1],debug.shape[0]))
cv2.imshow('??', frame)
cv2.waitKey(0)
break
# debug = cv2.logPolar(frame,(debug.shape[0]/2,debug.shape[1]/2),100,cv2.WARP_FILL_OUTLIERS)
# cv2.waitKey(1)
| Bigphess/Notes | OpenCV/polor.py | polor.py | py | 703 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_n... |
2532407157 | import re
import sys
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
class PreprocessDataframe(TransformerMixin):
def fit(self, df, _):
# Drop random/singular columns
# self.mask = [bool(pd.Series(col).dropna().size) for col, s in X.T]
self.drop_cols = df.columns[df.isna().all(axis=0)]
print('Drop cols', self.drop_cols, file=sys.stderr)
return self
def transform(self, df: pd.DataFrame):
assert (df.iloc[:, 0].str.count(' ') == 2).all()
df = df.drop(columns=df.columns[0]) # process pid, container id/image
return df.drop(columns=self.drop_cols)
class PreprocessText(TransformerMixin):
fit = lambda x, *_: x
def _fix(self, s):
s = re.sub(r'(?<![.:\d])[a-f\d\[\]]+(:+[a-f\d\[\]]+)+|\d+([.:-]+\d+){2,4}', ' ', s) # IP:port
return s
assert not _fix(None, '''
123.123.123.123:1234
123:[fab:12e:123:123]:1234
''').strip()
def transform(self, X):
Xt = [self._fix(s) for s in X]
return Xt
class Transformer(TransformerMixin):
CATEGORICAL_WEIGHT = 30
def fit(self, df, _):
transformers = []
for i, (_, s) in enumerate(df.items()):
n_words = s.fillna('').astype(str).str.count(r'\b\w+')
assert n_words.mean() > 0
if n_words.mean() <= 1:
print(i, n_words.mean(), np.unique(n_words), sep='\t')
transformers.append((
str(i),
OneHotEncoder(handle_unknown='ignore', dtype=np.int8),
[i] * self.CATEGORICAL_WEIGHT,
))
else:
n_features = round(max(5, min(50, n_words.median() * 5)))
print(i, f'{n_words.mean():.2f}', n_features, sep='\t')
transformers.append((
str(i),
make_pipeline(
PreprocessText(),
HashingVectorizer(
n_features=n_features,
token_pattern=r'[>\w-]+',
ngram_range=(1, 2),
lowercase=False,
alternate_sign=False,
norm=None,
dtype=np.uint8,
),
),
i
))
self.transformer = ColumnTransformer(
transformers=transformers,
# n_jobs=-1,
)
X = df.fillna('').astype(str).values
self.transformer.fit(X)
return self
def transform(self, df):
X = df.fillna('').astype(str).values
Xt = self.transformer.transform(X)
assert Xt.size, Xt
return Xt
| kernc/Containersec | lib.py | lib.py | py | 3,012 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "s... |
32409336010 | import time
from torch import optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from ..models import *
from ..utils import AverageMeter, calculate_accuracy, Logger, MyDataset
from visdom import Visdom
DatasetsList = ['CIFAR10', 'CIFAR100']
ModelList = {'AlexNet': AlexNet, 'alexnet': alexnet,
'DenseNet': DenseNet, 'densenet121': densenet121, 'densenet169': densenet169,
'densenet201': densenet201, 'densenet161': densenet161,
'Inception3': Inception3, 'inception_v3': inception_v3,
'ResNet': ResNet, 'resnet18': resnet18, 'resnet34': resnet34,
'resnet50': resnet50, 'resnet101': resnet101, 'resnet152': resnet152,
'SqueezeNet': SqueezeNet, 'squeezenet1_0': squeezenet1_0, 'squeezenet1_1': squeezenet1_1,
'VGG': VGG, 'vgg11': vgg11, 'vgg11_bn': vgg11_bn, 'vgg13': vgg13, 'vgg13_bn': vgg13_bn,
'vgg16': vgg16, 'vgg16_bn': vgg16_bn, 'vgg19_bn': vgg19_bn, 'vgg19': vgg19,
'se_resnet18': se_resnet18, 'se_resnet34': se_resnet34, 'se_resnet50': se_resnet50,
'se_resnet101': se_resnet101, 'se_resnet152': se_resnet152,
'hr18_net': hr18_net,
'mobilenetv3': mobilenetv3,
'shufflenetv2': shufflenetv2}
class TrainPipline(object):
def __init__(self, opt):
self.root_path = opt['path']['root_path']
self.result_path = os.path.join(self.root_path, opt['path']['result_path'])
self.datasets_path = os.path.join(self.root_path, opt['path']['datasest_path'])
self.n_classes = opt['model']['n_classes']
self.momentum = opt['model']['momentum']
self.weight_decay = opt['model']['weight_decay']
self.nesterov = opt['model']['nesterov']
self.n_epochs = opt['train']['n_epochs']
self.batch_size = opt['train']['batch_size']
self.learning_rate = opt['train']['learning_rate']
self.n_threads = opt['train']['n_threads']
self.checkpoint = opt['train']['checkpoint']
self.no_cuda = opt['cuda']['no_cuda']
self.model_name = ''
self.model_ft = ''
self.visdom_log_file = os.path.join(self.result_path, 'log_files', 'visdom.log')
self.vis = Visdom(port=8097,
log_to_filename=self.visdom_log_file,
env='myTest_1')
self.vis_loss_opts = {'xlabel': 'epoch',
'ylabel': 'loss',
'title': 'losses',
'legend': ['train_loss', 'val_loss']}
self.vis_tpr_opts = {'xlabel': 'epoch',
'ylabel': 'tpr',
'title': 'val_tpr',
'legend': ['tpr@fpr10-2', 'tpr@fpr10-3', 'tpr@fpr10-4']}
self.vis_epochloss_opts = {'xlabel': 'epoch',
'ylabel': 'loss',
'title': 'epoch_losses',
'legend': ['train_loss', 'val_loss']}
def datasets(self, data_name=None):
assert data_name in DatasetsList
if data_name == 'CIFAR10':
training_data = datasets.CIFAR10(root='./modelzoo/datasets/', train=True, download=False,
transform=transforms.Compose([
# transforms.RandomResizedCrop(224),
transforms.Pad(96),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
val_data = datasets.CIFAR10(root='./modelzoo/datasets/', train=False, download=False,
transform=transforms.Compose([
# transforms.RandomResizedCrop(224),
transforms.Pad(96),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
elif data_name == 'CIFAR100':
training_data = datasets.CIFAR100(root='./modelzoo/datasets/', train=True, download=True,
transform=transforms.Compose([
# transforms.RandomResizedCrop(224),
transforms.Pad(96),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
val_data = datasets.CIFAR100(root='./modelzoo/datasets/', train=False, download=True,
transform=transforms.Compose([
# transforms.RandomResizedCrop(224),
transforms.Pad(96),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
else:
train_txt_path = os.path.join(self.datasets_path, 'train.txt')
val_txt_path = os.path.join(self.datasets_path, 'val.txt')
my_transform = transforms.Compose([transforms.Resize(224),
transforms.ToTensor()])
training_data = MyDataset(train_txt_path, transform=my_transform)
val_data = MyDataset(val_txt_path, transform=my_transform)
return training_data, val_data
def model(self, model_name='resnet18', model_path=None):
assert model_name in ModelList
self.model_name = model_name
# model_ft = resnet18(pretrained=True)
# num_ftrs = model_ft.fc.in_features
# model_ft.fc = nn.Linear(num_ftrs, 10)
self.model_ft = ModelList[self.model_name](num_classes=self.n_classes)
if model_path is not None:
self.model_ft.load_state_dict(model_path)
else:
self.model_ft.apply(weights_init)
return self.model_ft
def train(self, training_data, val_data, model):
# data init
train_loader = DataLoader(training_data,
batch_size=self.batch_size,
shuffle=True,
# num_workers=self.n_threads,
pin_memory=True)
# result writer
train_logger = Logger(os.path.join(self.result_path, self.model_name + '_train.log'),
['epoch', 'loss', 'acc', 'lr'])
train_batch_logger = Logger(os.path.join(self.result_path, self.model_name + '_train_batch.log'),
['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])
val_logger = Logger(os.path.join(self.result_path, self.model_name + '_test.log'),
['time', 'loss', 'acc'])
# optimizer init
optimizer = optim.SGD(model.parameters(),
lr=self.learning_rate,
momentum=self.momentum,
weight_decay=self.weight_decay,
nesterov=self.nesterov)
# loss init
criterion = nn.CrossEntropyLoss()
print(model)
if not self.no_cuda:
model = nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
# start train
for i in range(0, self.n_epochs + 1):
self.train_epoch(i, train_loader, model, criterion, optimizer,
train_logger, train_batch_logger)
self.validation(val_data, model, criterion, val_logger)
def train_epoch(self, epoch, data_loader, model, criterion, optimizer,
epoch_logger, batch_logger):
print('train at epoch {}'.format(epoch))
# set model to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accuracies = AverageMeter()
end_time = time.time()
for i, (inputs, targets) in enumerate(data_loader):
data_time.update(time.time() - end_time)
if not self.no_cuda:
model = model.cuda()
inputs = inputs.cuda()
targets = targets.cuda()
# inputs = Variable(inputs)
# targets = Variable(targets)
outputs = model(inputs)
loss = criterion(outputs, targets)
acc = calculate_accuracy(outputs, targets)
losses.update(loss.data, inputs.size(0))
accuracies.update(acc, inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end_time)
end_time = time.time()
batch_logger.log({
'epoch': epoch,
'batch': i + 1,
'iter': (epoch - 1) * len(data_loader) + (i + 1),
'loss': losses.val,
'acc': accuracies.val,
'lr': optimizer.param_groups[0]['lr']
})
self.vislog_batch(i, losses.val)
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch,
i + 1,
len(data_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accuracies))
epoch_logger.log({
'epoch': epoch,
'loss': losses.avg,
'acc': accuracies.avg,
'lr': optimizer.param_groups[0]['lr']
})
if epoch % self.checkpoint == 0:
save_file_path = os.path.join(self.result_path, self.model_name+'save_{}.pth'.format(epoch))
states = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(states, save_file_path)
def validation(self, val_data, model, criterion, val_logger):
val_loader = torch.utils.data.DataLoader(
val_data,
batch_size=self.batch_size,
shuffle=False,
# num_workers=self.n_threads,
pin_memory=True)
model.eval()
losses = AverageMeter()
accuracies = AverageMeter()
end_time = time.time()
for i, (inputs, targets) in enumerate(val_loader):
if not self.no_cuda:
inputs = inputs.cuda()
targets = targets.cuda()
outputs = model(inputs)
loss = criterion(outputs, targets)
acc = calculate_accuracy(outputs, targets)
losses.update(loss.data, inputs.size(0))
accuracies.update(acc, inputs.size(0))
test_time = time.time() - end_time
val_logger.log({'time': test_time,
'loss': losses.avg,
'acc': accuracies.avg})
print('TestTime {test_time:.3f}\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
test_time=test_time,
loss=losses,
acc=accuracies))
def vislog_batch(self, batch_idx,loss):
x_value = batch_idx
y_value = loss
self.vis.line([y_value], [x_value],
name='train_loss',
win='losses',
update='append')
self.vis.line([2], [x_value],
name='test_loss',
win='losses',
update='append')
self.vis.update_window_opts(win='losses', opts=self.vis_loss_opts)
def weights_init(m):
    """Initialize a module's parameters by layer type.

    Conv2d: Kaiming-normal weights (fan_out), zero bias.
    BatchNorm2d: unit weights, zero bias.
    Linear: N(0, 0.01) weights, zero bias.
    Other module types are left untouched.
    """
    if isinstance(m, nn.BatchNorm2d):
        nn.init.ones_(m.weight)
        nn.init.zeros_(m.bias)
        return
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out')
    elif isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0, 0.01)
    else:
        return
    if m.bias is not None:
        nn.init.zeros_(m.bias)
| jimmy0087/model_zoo_torch | modelzoo/libs/train/train.py | train.py | py | 13,106 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "visdom.Visdom",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch... |
957425679 | from flask import render_template, flash, redirect, url_for, request, current_app
from flask_login import login_required, current_user
from apps.app import db
from apps.model import Task, Kind
from apps.todolist import todolist
from apps.todolist.course import AddCategory, AddToDoList, ChangeToDoList
# User View Task List
@todolist.route('/list/', methods=['GET', 'POST'])
@login_required
def list_1():
    """Render the current user's paginated task list with an add-task form."""
    form = AddToDoList()
    page = int(request.args.get('page', 1))  # validates the ?page arg (not used below)
    tasks = Task.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['PER_PAGE'], error_out=False)
    return render_template('todolist/list.html', todoObj=tasks, form=form)
@todolist.route('/add/', methods=['POST', 'GET'])
@login_required
def add_list():
    """Create a new task for the current user from the submitted form.

    Fix: the new task is now committed — the original only called
    db.session.add(), so the row was lost when the session closed.
    """
    form = AddToDoList()
    todolistPage = Task.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['PER_PAGE'], error_out=False)
    if form.validate_on_submit():
        # Collect the user's submission.
        Title = form.Title.data
        content = form.content.data
        kind_id = form.category.data
        urgent = form.urgent.data
        deadline = form.deadline.data
        # Add to database (new tasks start as not done).
        add = Task(task_Title=Title, task_content=content,
                   category_id=kind_id,
                   task_urgent=urgent, task_deadline=deadline,
                   user_id=current_user.id, task_status=False)
        db.session.add(add)
        db.session.commit()  # persist the new task
        flash('Add task successfully!', category='success')
        return redirect(url_for('todolist.add_list'))
    else:
        # NOTE(review): this also flashes on a plain GET, not just on a
        # failed POST — confirm intended.
        flash('Cannot add this task!', category='error')
    return render_template('todolist/list.html', todoObj=todolistPage, form=form)
@todolist.route('/change/<int:id>', methods=['GET', 'POST'])
@login_required  # consistency/security: every other mutating route requires login
def change(id):
    """Edit an existing task: pre-fill the form, then apply the POSTed values.

    Fixes: `task.task_moduleTitle` read a nonexistent attribute (the Task
    model uses `task_Title`, see add_list); `task.urgent = ...` wrote a
    nonexistent attribute instead of `task_urgent`; the edit is now committed.
    """
    form = ChangeToDoList()
    todolistPage = Task.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['PER_PAGE'], error_out=False)
    task = Task.query.filter_by(id=id).first()
    # Pre-fill the form with the task's current values.
    form.content.data = task.task_content
    form.Title.data = task.task_Title  # was task.task_moduleTitle (no such column)
    form.urgent.data = task.task_urgent
    form.category.data = task.category_id
    if form.validate_on_submit():
        # Read the submitted values straight from request.form, since the
        # pre-fill above overwrote the bound form data.
        task.task_content = request.form.get('content')
        task.task_Title = request.form.get('Title')
        task.category_id = request.form.get('category')
        task.task_urgent = request.form.get('urgent')  # was task.urgent (wrong attr)
        db.session.add(task)
        db.session.commit()  # persist the edit
        flash('Task has been changed', category='success')
        return redirect(url_for('todolist.list_1'))
    else:
        flash('Changed failed', category='error')
    return render_template('todolist/change.html', form=form, todoObj=todolistPage)
@todolist.route('/delete/<int:id>/')
@login_required
def delete(id):
    """Delete task <id> and return to the task list.

    Fix: the deletion is now committed (it was previously lost at session end).
    """
    task = Task.query.filter_by(id=id).first()
    db.session.delete(task)
    db.session.commit()
    flash("Task has been deleted successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/deletec/<int:id>/')
@login_required
def deletec(id):
    """Delete category <id> and re-render the category page.

    Fix: the deletion is now committed.
    """
    kind = Kind.query.filter_by(id=id).first()
    db.session.delete(kind)
    db.session.commit()
    flash("Category has been deleted successfully.", category='success')
    category_list = AddCategory()
    KindPage = Kind.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['PER_PAGE'], error_out=False)
    return render_template('todolist/category.html', kindObj=KindPage, form=category_list)
@todolist.route('/hasdone/<int:id>/')
@login_required
def hasdone(id):
    """Mark task <id> as done.

    Fix: the status change is now committed.
    """
    task = Task.query.filter_by(id=id).first()
    task.task_status = True
    db.session.add(task)
    db.session.commit()
    flash("Task status has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/hasnotdone/<int:id>/')
@login_required
def hasnotdone(id):
    """Mark task <id> as not done.

    Fix: the status change is now committed.
    """
    task = Task.query.filter_by(id=id).first()
    task.task_status = False
    db.session.add(task)
    db.session.commit()
    flash("Task status has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/urgent/<int:id>/')
@login_required
def urgent(id):
    """Set task <id> to priority 1 (urgent).

    Fix: the change is now committed.
    """
    task = Task.query.filter_by(id=id).first()
    task.task_urgent = 1
    db.session.add(task)
    db.session.commit()
    flash("Task priority has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/noturgent/<int:id>/')
@login_required
def noturgent(id):
    """Set task <id> to priority 2 (not urgent).

    Fix: the change is now committed.
    """
    task = Task.query.filter_by(id=id).first()
    task.task_urgent = 2
    db.session.add(task)
    db.session.commit()
    flash("Task priority has been changed successfully.", category='success')
    return redirect(url_for('todolist.list_1'))
@todolist.route('/search/<int:id>/', methods=['POST', 'GET'])
@login_required
def search(id):
    """Show every task whose primary key equals <id>."""
    matches = Task.query.filter_by(id=id).all()
    return render_template('todolist/search.html', todoObj=matches)
@todolist.route('/searchTitle/<int:id>/', methods=['POST', 'GET'])
@login_required
def search_Title(id):
    """Search user <id>'s tasks by exact title taken from the posted form."""
    wanted = request.form.get('Title')
    matches = Task.query.filter_by(user_id=id, task_Title=wanted).paginate(
        per_page=current_app.config['C_PAGE'], error_out=False)
    return render_template('todolist/search.html', todoObj=matches)
@todolist.route('/searchContent/<int:id>/', methods=['POST', 'GET'])
@login_required
def search_Content(id):
    """Search user <id>'s tasks by exact content taken from the posted form."""
    wanted = request.form.get('content')
    matches = Task.query.filter_by(user_id=id, task_content=wanted).paginate(
        per_page=current_app.config['C_PAGE'], error_out=False)
    return render_template('todolist/search.html', todoObj=matches)
@todolist.route('/searchPriority/<int:id>/', methods=['POST', 'GET'])
@login_required
def search_Urgent(id):
    """Search user <id>'s tasks by priority taken from the posted form."""
    wanted = request.form.get('priority')
    matches = Task.query.filter_by(user_id=id, task_urgent=wanted).paginate(
        per_page=current_app.config['C_PAGE'], error_out=False)
    return render_template('todolist/search.html', todoObj=matches)
@todolist.route('/category/', methods=['POST', 'GET'])
@login_required
def category():
    """Render the current user's paginated category list with an add form."""
    form = AddCategory()
    kinds = Kind.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['PER_PAGE'], error_out=False)
    return render_template('todolist/category.html', kindObj=kinds, form=form)
@todolist.route('/addcategory/', methods=['POST', 'GET'])
@login_required
def add_category():
    """Create a new category for the current user from the submitted form.

    Fix: the new category is now committed (previously only added to the
    session and lost when the session closed).
    """
    form = AddCategory()
    KindPage = Kind.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['PER_PAGE'], error_out=False)
    if form.validate_on_submit():
        new_kind = Kind(name=form.category.data, user_id=current_user.id)
        db.session.add(new_kind)
        db.session.commit()
        flash('Add category successfully!', category='success')
        return redirect(url_for('todolist.category'))
    else:
        flash('Cannot add this category', category='error')
    return render_template('todolist/category.html', kindObj=KindPage, form=form)
@todolist.route('/complete/<int:id>/', methods=['POST', 'GET'])
@login_required
def complete(id):
    """Render the 'completed tasks' view for the current user.

    NOTE(review): no task_status filter is applied here — presumably the
    template does the filtering; confirm.
    """
    tasks = Task.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['C_PAGE'], error_out=False)
    return render_template('todolist/complete.html', todoObj=tasks)
@todolist.route('/uncomplete/<int:id>/', methods=['POST', 'GET'])
@login_required
def uncomplete(id):
    """Render the 'uncompleted tasks' view for the current user.

    NOTE(review): no task_status filter is applied here — presumably the
    template does the filtering; confirm.
    """
    tasks = Task.query.filter_by(user_id=current_user.id).paginate(
        per_page=current_app.config['C_PAGE'], error_out=False)
    return render_template('todolist/uncomplete.html', todoObj=tasks)
| INversionNan/Flask | apps/todolist/base.py | base.py | py | 8,802 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "apps.todolist.course.AddToDoList",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 15,
"usage_type": "attribute"
},
{
"a... |
20822639893 | import json
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import euclidean_distances
import numpy as np
from sklearn.linear_model import LogisticRegression
def load():
    """Read the generated test cases (a JSON list) from the fixed file path."""
    path = ('C:/Users/Administrator/Desktop/backend-interview-1/samples/'
            'generated_test_cases.txt')
    with open(path, 'r', encoding='utf-8') as json_file:
        return json.load(json_file)
def get_word_embeddings():
    """Load ConceptNet Numberbatch embeddings into a {word: np.ndarray} dict.

    Fix: vector components are parsed as floats — the original stored
    string-dtype arrays (including a trailing newline on the last component),
    which downstream distance computations cannot use directly.
    """
    embeddings = {}
    with open('C:/Users/Administrator/Desktop/numberbatch-en.txt', encoding='utf-8') as em:
        for embed in em:
            em_line = embed.split(' ')
            if len(em_line) > 2:
                word = em_line[0]
                # was np.array(em_line[1:]): string dtype, with '\n' on the tail
                embeddings[word] = np.array(em_line[1:], dtype=np.float64)
    print('Word embeddings:', len(embeddings))
    return embeddings
def get_min(target_list):
    """Return the smallest element of *target_list* (builtin min, not a hand-rolled loop)."""
    return min(target_list)
def get_min_index(target_list):
    """Return the index of the first occurrence of the minimum element."""
    # min over indices keyed by value returns the first minimal index,
    # matching the original list.index(min_value) behavior.
    return min(range(len(target_list)), key=target_list.__getitem__)
def find_match_document_distance(query, candidates, W):
    """Return the smallest pairwise embedding distance between any query
    feature and any candidate feature.

    Returns an error string when an argument is empty, -1 when either side
    yields no vocabulary features, otherwise the minimum euclidean distance.
    Raises KeyError (caught by callers) if a feature is missing from W.
    """
    if len(query) == 0:
        return "The value of query is null."
    if len(candidates) == 0:
        return "The value of candidates is null."
    q_words = CountVectorizer(stop_words="english").fit([str(query)]).get_feature_names()
    c_words = CountVectorizer(stop_words="english").fit([str(candidates)]).get_feature_names()
    if len(q_words) == 0 or len(c_words) == 0:
        return -1
    q_embeds = [W[w] for w in q_words]
    c_embeds = [W[w] for w in c_words]
    distances = [float(euclidean_distances([qe], [ce])[0][0])
                 for qe in q_embeds for ce in c_embeds]
    return get_min(distances)
def get_model(W):
    """Fit a logistic regression that maps the min query/candidate embedding
    distance to a "match exists" (+1) / "no match" (-1) label, using the
    generated test cases as training data.

    :param W: word -> embedding-vector dict (see get_word_embeddings)
    :return: a fitted sklearn LogisticRegression
    """
    json_data = load()
    print(len(json_data))
    list_X = []  # feature: min embedding distance per test case
    list_Y = []  # label: "1" if a correct candidate exists, else "-1"
    try:
        for i in range(0, len(json_data)):
            aaa = find_match_document_distance(str(json_data[i]['query']), str(json_data[i]['candidates']), W)
            list_X.append(aaa)
            if json_data[i]['correct_index'] >= 0:
                list_Y.append("1")
            else:
                list_Y.append("-1")
    except Exception as e:
        # NOTE(review): one missing word aborts the entire loop and the
        # partially-filled lists are still used below — consider catching
        # per item instead. (Message says the word is absent from W.)
        print(i)
        print("word_embeddings中未含有该词")
    print(len(list_X))
    print(len(list_Y))
    x = np.array(list_X).reshape(-1, 1)  # single-feature column vector
    y = np.array(list_Y)
    lr = LogisticRegression(C=1000.0, random_state=0)
    lr.fit(x, y)
    # print(lr.predict(x))
    from sklearn.model_selection import cross_val_score
    acy = cross_val_score(lr, x, y)
    print(acy.mean())
    return lr
def find_best_match_with_threshold(query, candidates, lr, W):
    """Return the index of the best-matching candidate, or -1 when the
    classifier decides the closest distance is not a real match.

    Fix: lr.predict() is given a 2-D array ([[distance]]) as scikit-learn
    requires — the original passed a bare scalar, which fails at runtime.
    """
    if len(query) == 0:
        return "The value of query is null."
    if len(candidates) == 0:
        return "The value of candidates is null."
    vec1 = CountVectorizer(stop_words="english").fit([query])
    vec2 = CountVectorizer(stop_words="english").fit([str(candidates)])
    if len(vec1.get_feature_names()) == 0 or len(vec2.get_feature_names()) == 0:
        return -1
    print("Features:", ", ".join(vec1.get_feature_names()))
    print("Features:", ", ".join(vec2.get_feature_names()))
    W1 = [W[w] for w in vec1.get_feature_names()]
    W2 = [W[w] for w in vec2.get_feature_names()]
    # All pairwise (query feature, candidate feature) distances.
    result = []
    for i in range(0, len(W1)):
        for j in range(0, len(W2)):
            result.append({'q': vec1.get_feature_names()[i],
                           'c': vec2.get_feature_names()[j],
                           'r': float(euclidean_distances([W1[i]], [W2[j]])[0][0])})
    t_list = [entry['r'] for entry in result]
    # was lr.predict(scalar): sklearn needs shape (n_samples, n_features)
    if lr.predict([[get_min(t_list)]])[0] == "-1":
        return -1
    # NOTE(review): result[...]['c'] is a tokenized feature, not necessarily
    # an exact element of `candidates`; candidates.index() may raise
    # ValueError for multi-word candidates — confirm.
    return candidates.index(result[get_min_index(t_list)]['c'])
# Script entry: build the embedding table, train the distance classifier,
# then run one sample query against four candidate intents.
W = get_word_embeddings()
lr = get_model(W)
query = "i am really hungry"
candidates = ['story', 'song', 'wake up', 'restart']
print(find_best_match_with_threshold(query, candidates, lr, W))
| Qt7mira/LenovoIVProblem | mira/part3.py | part3.py | py | 4,656 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": ... |
import requests
from bs4 import BeautifulSoup

# Prompt for a URL and fetch the page.
url = input("Entrer l'URL du site : ")
response = requests.get(url)
if response.status_code == 200:
    html_content = response.content
else:
    # Fix: the original only printed the message and then crashed with a
    # NameError because html_content was never assigned; stop cleanly instead.
    print("Erreur lors de la récupération de la page.")
    raise SystemExit(1)

soup = BeautifulSoup(html_content, "html.parser")

# Extract and show the page title.
title = soup.title.text
print("Titre de la page :", title)

# Ask which tag to extract and print every matching element.
element = input("Entrez l'élément a extraire du site : ")
elements = soup.find_all(element)  # f"{element}" was a pointless format
for item in elements:
    print(item)

# Example: extract every link on the page:
#   links = soup.find_all("a")
#   for link in links:
#       print(link.get("href"))
| Lenked/ScrappingApp | main.py | main.py | py | 690 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
}
] |
899076815 | import sqlite3
import os
import bamnostic_mod as bn
import argparse
import bisect
import time
#Downloads\Lung\47b982b3-c7ce-4ca7-8c86-c71c15979620\G28588.NCI-H1915.1.bam
#Downloads\Lung\98a0206b-29f5-42d3-957b-6480e2fde185\G20483.HCC-15.2.bam
#Downloads\Lung\18004fb1-89a2-4ba1-a321-a0aa854e98c3\G25210.NCI-H510.1.bam
#Downloads\Lung\47030e40-acbd-4905-939c-d57441d7042e\G25222.NCI-H2171.1.bam
#Downloads\Lung\1357785f-f84b-4688-9b4c-0c2b5472ef51\G27281.RERF-LC-MS.1.bam
#Downloads\Lung\e48ea2ee-1dda-4061-a199-6e22fd2df382\G25212.NCI-H661.1.bam
#Downloads\Lung\f03dbfee-a523-438f-8459-f47f2ff1880f\G25224.NCI-H2066.1.bam
#Downloads\HeadAndNeck\0e67231f-97be-447c-b3b0-a656fc30a62d\G27454.PE_CA-PJ15.2.bam
#Downloads\HeadAndNeck\1acf65a0-0268-4288-9904-33bff618a31d\G27515.PE_CA-PJ41__clone_D2_.2.bam
#Downloads\HeadAndNeck\1f290458-df28-4c78-b73d-0202fb53bb0e\G27220.SCC-4.1.bam
#Downloads\HeadAndNeck\2b507086-977b-4cb7-abd9-83ee4ce9a893\G27489.PE_CA-PJ34__clone_C12_.2.bam
#Downloads\HeadAndNeck\7ed3e895-6826-430d-a39d-338111f16083\G27512.SNU-1214.2.bam
#Downloads\HeadAndNeck\c11aa745-72ea-44ca-b70d-7811c2f244b7\G27533.SNU-1066.2.bam
#Downloads\HeadAndNeck\dc8393c0-7d9e-4040-a91a-5783544cac35\G28853.HSC-4.3.bam
# Command-line interface: database dir, input BAM, sample name, and a flag
# controlling novel-splice discovery.
parser = argparse.ArgumentParser(description='takes the given .bam file and looks through all the reads to construct a count of all exons and splices in the reference splice graphs', usage='splicerSampleProcessor database_directory bam_file sample_name novelSplicesToggle(True|False)')
parser.add_argument("Database", help='The path to where you want to store the database file.')
parser.add_argument("Bam", help='The .bam file to count the reads from')
parser.add_argument("sampleName", help='Name for current sample in the sample table')
parser.add_argument("novelSplices", choices=['True', 'False'], help='Controls whether the program tries to find new splices')
args = parser.parse_args()
# NOTE(review): hard-coded Windows drive; makes relative paths resolve
# against D:\ — confirm this is intended for the deployment machine.
os.chdir("D:\\")
# Autocommit connection (isolation_level=None); explicit begin/commit is
# issued around the bulk inserts at the end of the script.
conn = sqlite3.connect(args.Database + os.path.sep + 'splice.sqlite', isolation_level=None)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS Sample
(Sample_ID INTEGER PRIMARY KEY NOT NULL DEFAULT NULL,
Sample_Name varchar(40) NOT NULL DEFAULT NULL,
Total_Reads INTEGER NOT NULL DEFAULT NULL,
Transcript_Reads INTEGER NOT NULL DEFAULT NULL);''')
c.execute("CREATE INDEX IF NOT EXISTS idx_sample_name ON Sample(sample_name);")
c.execute('''CREATE TABLE IF NOT EXISTS Sample_Attribs
(Sample_ID INTEGER NOT NULL DEFAULT NULL,
Attribute varchar(255) NOT NULL DEFAULT NULL,
Value varchar(255) NOT NULL DEFAULT NULL);''')
c.execute('''CREATE TABLE IF NOT EXISTS Exon_Counts
(Sample_ID INTEGER NOT NULL DEFAULT NULL,
SG_Exon_ID INTEGER NOT NULL DEFAULT NULL,
Count INTEGER NOT NULL DEFAULT NULL);''')
c.execute("CREATE INDEX IF NOT EXISTS idx_ec_sample_id ON Exon_Counts(sample_id);")
c.execute("CREATE INDEX IF NOT EXISTS idx_ec_exon_id ON Exon_Counts(SG_Exon_ID);")
c.execute('''CREATE TABLE IF NOT EXISTS Splice_Counts
(Sample_ID INTEGER NOT NULL DEFAULT NULL,
SG_Splice_ID INTEGER NOT NULL DEFAULT NULL,
Count INTEGER NOT NULL DEFAULT NULL);''')
c.execute("CREATE INDEX IF NOT EXISTS idx_sc_sample_id ON Splice_Counts(sample_id);")
c.execute("CREATE INDEX IF NOT EXISTS idx_sc_splice_id ON Splice_Counts(SG_Splice_ID);")
#Find out the next assignable ID for this sample
c.execute("Select MAX(Sample_ID) FROM Sample")
ret = c.fetchone()
prevId = ret[0]
if prevId:
Sample_Id = int(prevId)+1
else:
Sample_Id = 1
novelSplices = args.novelSplices
#initialize the splice dictionary
sDict = {}
eDict = {}
epDict = {}
ecDict = {}
scDict = {}
discoverySplices = {}
start_time = time.time()
c.execute("SELECT SG_Splice_ID, Start_Position, Stop_Position, Chromosome FROM SG_Splice")
ret = c.fetchall()
# Load the splice dictionary keyed on "start-stop" with the SG id as value;
# chromosome names are normalized to the BAM's "chrN" convention.
for y in range(len(ret)):
    key = str(ret[y][1])+'-'+str(ret[y][2])
    chrom = ret[y][3]
    if not chrom.startswith("chr"):
        chrom = "chr"+chrom
    if chrom == "chrMT":
        chrom = "chrM"  # mitochondrial naming difference
    if chrom not in sDict:
        sDict[chrom] = {}
        if novelSplices == 'True':
            discoverySplices[chrom] = {}
    sDict[chrom][key] = ret[y][0]
c.execute("SELECT SG_Exon_ID, Start_Position, Stop_Position, Chromosome FROM SG_Exon")
ret = c.fetchall()
# Load the exon dictionary keyed on start and stop positions with the SG id
# as the value, plus a per-chromosome sorted (start, stop) tuple list.
for y in range(len(ret)):
    chrom = ret[y][3]
    if not chrom.startswith("chr"):
        chrom = "chr"+chrom
    if chrom == "chrMT":
        chrom = "chrM"
    if chrom not in eDict:
        eDict[chrom] = {}
        epDict[chrom] = []
    # Both ends map to the exon ID.
    eDict[chrom][ret[y][1]] = ret[y][0]
    eDict[chrom][ret[y][2]] = ret[y][0]
    # Flip start and stop to correct for the negative strand.
    if ret[y][1] < ret[y][2]:
        epDict[chrom].append((ret[y][1], ret[y][2]))
    else:
        epDict[chrom].append((ret[y][2], ret[y][1]))
# Sorted list of all exon (start, stop) tuples, keyed on chromosome,
# so exonIncrement can bisect into it.
for key in epDict:
    epDict[key] = sorted(epDict[key])
samfile = bn.AlignmentFile(args.Bam, "rb")
def exonIncrement(start, stop, chro):
    """Return the SG_Exon_IDs of known exons covered by the read segment
    [start, stop] on chromosome *chro*, or [] when the segment runs off the
    edge of any known exon (the read is then discarded by the caller).
    """
    exonIds = []
    try:
        pList = epDict[chro]  # sorted (start, stop) exon tuples for this chrom
        # Flip start and stop to correct for the negative strand.
        if start > stop:
            temp = start
            start = stop
            stop = temp
        # Index where (start, stop) would be inserted in the sorted exon list.
        idx = bisect.bisect(pList, (start, stop))
        i = idx
        if i == len(pList):
            return([])
        # Walk left, collecting exons whose stop still reaches the segment start.
        while (i > -1 and start <= pList[i][1]):
            exonIds.append(eDict[chro][pList[i][0]])
            i -= 1
        # NOTE(review): original comment flagged "ISSUE IF NEVER LOOP" — if the
        # while above never ran, i+1 == idx and this test may misfire; confirm.
        # If the segment starts before any covering exon, scrap the read.
        if start < pList[i+1][0]:
            return([])
        i = idx
        looped = False
        # Walk right, collecting exons whose start is still within the segment.
        while (i < len(pList) and i > -1 and stop >= pList[i][0]):
            exonIds.append(eDict[chro][pList[i][1]])
            i += 1
            looped = True
        # If the segment runs past the end of the last covering exon, scrap it.
        if looped and stop > pList[i-1][1]:
            return([])
    except Exception:
        # NOTE(review): broad swallow — e.g. an unknown chromosome (KeyError)
        # falls through and returns whatever was collected so far; confirm.
        pass
    return(exonIds)
fns = open('novelSplices.txt', 'w')  # report file for candidate novel splices
# Per-run counters (total vs "transcript" = reads that hit a known exon/splice).
i = 0
totalCount = 0
missingAttrCount = 0   # reads with no usable alignment
tranCount = 0
totalDupeCount = 0     # exact duplicates of the previous read
tranDupeCount = 0
totalJRcount = 0       # junction reads (cigar contains 'N')
tranJRcount = 0
totalNJRcount = 0      # non-junction reads
tranNJRcount = 0
exceptionCount = 0
prevRead = ""          # fingerprint of the previous read for duplicate detection
prevExons = ""
prevSplices = ""
for read in samfile:
    # Skip reads without a complete alignment record.
    if (
        not hasattr(read, 'reference_name') or read.reference_name == None or
        not hasattr(read, 'reference_start') or read.reference_start == None or
        not hasattr(read, 'reference_end') or read.reference_end == None or
        not hasattr(read, 'cigarstring') or read.cigarstring == None or
        not hasattr(read, 'cigar') or read.cigar == None
    ):
        missingAttrCount += 1
        continue
    dupeTag = False
    exonSet = set()
    spliceSet = set()
    readR_S = read.reference_start
    readR_E = read.reference_end
    i += 1
    totalCount += 1
    if totalCount % 1000000 == 0:
        print(f"{totalCount:,d}")
        # NOTE(review): this break stops processing at 1M reads — looks like a
        # debugging leftover; confirm before production runs.
        break
    tranBool = False
    cigarString = read.cigarstring
    cigar = read.cigar
    chro = read.reference_name
    if not chro.startswith("chr"):
        chro = "chr"+chro
    if str(readR_S)+"-"+str(readR_E)+"-"+cigarString+"-"+chro == prevRead:
        # Exact duplicate of the previous read: replay its exon/splice counts.
        dupeTag = True
        totalDupeCount += 1
        for exon in prevExons:
            tranBool = True
            ecDict[exon] += 1
        for splice in prevSplices:
            tranBool = True
            scDict[splice] += 1
        if tranBool == True:
            tranDupeCount += 1
    elif "N" in cigarString:
        # Junction read: 'N' cigar ops are the spliced-out introns.
        totalJRcount += 1
        # Initialize the start and stop based on the first junction.
        start = readR_S+cigar[0][1]
        stop = start+cigar[1][1]+1
        # Exon check from the start of the read to the start of the first splice.
        exonSet.update(exonIncrement(readR_S+1, start, chro))
        for x in range(int(len(cigar)/2)):
            # For each subsequent junction, advance start/stop and check the
            # exon span between the previous splice and this one.
            if x != 0:
                start = stop+cigar[x*2][1]-1
                exonSet.update(exonIncrement(stop, start, chro))
            stop = start+cigar[x*2+1][1]+1
            # Count the splice if it is known; otherwise optionally record it
            # as a discovery candidate when both ends land on known exon edges.
            try:
                if str(start)+"-"+str(stop) in sDict[chro] or str(stop)+"-"+str(start) in sDict[chro]:
                    spliceID = sDict[chro][str(start)+"-"+str(stop)]
                    spliceSet.add(spliceID)
                    tranBool = True
                    if spliceID in scDict:
                        scDict[spliceID] += 1
                    else:
                        scDict[spliceID] = 1
                elif novelSplices == 'True':
                    if start in eDict[chro] and stop in eDict[chro]:
                        if str(start)+"-"+str(stop) in discoverySplices[chro]:
                            discoverySplices[chro][str(start)+"-"+str(stop)] += 1
                        else:
                            discoverySplices[chro][str(start)+"-"+str(stop)] = 1
                        experiSplicect = 1
            except Exception as e:
                # NOTE(review): silently counts and continues (e.g. unknown
                # chromosome KeyError) — confirm intended.
                exceptionCount += 1
        exonID = ""
        # Exon check from the end of the last splice to the end of the read.
        exonSet.update(exonIncrement(stop, readR_E, chro))
        if (tranBool or len(exonSet) != 0):
            tranJRcount += 1
    else:
        # Non-junction read: a single contiguous aligned span.
        totalNJRcount += 1
        start = readR_S+1
        stop = start+cigar[0][1]
        exonSet.update(exonIncrement(start, stop, chro))
        if (len(exonSet) != 0):
            tranNJRcount += 1
        else:
            print("Missing: " + chro + ' ' + str(start) + ' ' + str(stop))
    # Fold the collected exon hits into the per-sample counts.
    for exon in exonSet:
        tranBool = True
        if exon in ecDict:
            ecDict[exon] += 1
        else:
            ecDict[exon] = 1
    if tranBool == True:
        tranCount += 1
    # Remember this read so an identical successor can be fast-pathed.
    if dupeTag == False:
        prevRead = str(readR_S)+"-"+str(readR_E)+"-"+cigarString+"-"+chro
        prevExons = exonSet
        prevSplices = spliceSet
    #if i == 5000000:
    #    break
# Flush the per-sample counts to SQLite inside one explicit transaction.
# Fix: the original built every INSERT by string concatenation; in particular
# args.sampleName was interpolated unquoted, which is a SQL syntax error for
# any non-numeric name (and an injection risk). Parameterized queries fix both.
c.execute('begin')
c.executemany("INSERT INTO Splice_Counts VALUES(?, ?, ?)",
              [(Sample_Id, key, scDict[key]) for key in scDict])
c.executemany("INSERT INTO Exon_Counts VALUES(?, ?, ?)",
              [(Sample_Id, key, ecDict[key]) for key in ecDict])
# Add this sample to the sample table.
c.execute("INSERT INTO Sample VALUES(?, ?, ?, ?)",
          (Sample_Id, args.sampleName, totalCount, tranCount))
c.execute('commit')
# Write out the candidate novel splices (empty unless discovery was enabled).
for chromkey in discoverySplices:
    for skey in discoverySplices[chromkey]:
        fns.write(skey + ", Count: " + str(discoverySplices[chromkey][skey]) + '\n')
fns.close()
# Run summary.
print("missing attribute reads: " + str(missingAttrCount))
print("transcript junction reads: "+str(tranJRcount))
print("total junction reads: "+str(totalJRcount))
print("transcript duplicate reads: "+str(tranDupeCount))
print("total duplicate reads: "+str(totalDupeCount))
print("transcript Non junction reads: "+str(tranNJRcount))
print("total Non junction reads: "+str(totalNJRcount))
print("number of exceptions caught: "+ str(exceptionCount))
print("--- %.2f seconds---" % (time.time() - start_time))
print('Done')
| InSilicoSolutions/Splicer | Splicer/splicerSampleProcessor.py | splicerSampleProcessor.py | py | 12,134 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
import mtcnn
from mtcnn.mtcnn import MTCNN
import cv2

detector = MTCNN()  # MTCNN is a CNN-based face detector
video = cv2.VideoCapture(0)
video.set(3, 2000)  # property 3 = frame width
video.set(4, 3000)  # property 4 = frame height

while True:
    ret, frame = video.read()
    if not ret:  # camera read failed or stream ended
        break
    # Detect faces frame by frame; iterating an empty list makes the
    # original len() > 0 guard unnecessary.
    for face in detector.detect_faces(frame):
        x, y, width, height = face['box']
        x2, y2 = x + width, y + height
        cv2.rectangle(frame, (x, y), (x2, y2), (0, 0, 255), 4)  # bounding box
    # Show each frame, updating the same "Output" window.
    cv2.imshow("Output", frame)
    if cv2.waitKey(1) & 0xFF == ord(' '):  # space bar quits
        break

video.release()          # release the camera port
cv2.destroyAllWindows()  # destroy all windows
| Sagar-Khode/Face-Detection | MTCNN.py | MTCNN.py | py | 1,089 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mtcnn.mtcnn.MTCNN",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line... |
71656882025 | # Based on https://github.com/NATSpeech/NATSpeech
import utils.commons.single_thread_env # NOQA
import json
import numpy as np
import os
import random
import traceback
from functools import partial
from resemblyzer import VoiceEncoder
from tqdm import tqdm
from utils.audio.align import get_mel2note
from utils.audio.mel_processing import torch_wav2spec
from utils.audio.pitch.utils import f0_to_coarse
from utils.audio.pitch_extractors import extract_pitch_simple
from utils.commons.hparams import hparams
from utils.commons.indexed_datasets import IndexedDatasetBuilder
from utils.commons.multiprocess_utils import multiprocess_run_tqdm
from utils.os_utils import remove_file, copy_file
# Silence numpy divide-by-zero / invalid-value warnings (pitch processing
# intentionally divides by zero-valued frames).
np.seterr(divide="ignore", invalid="ignore")


class BinarizationError(Exception):
    """Raised when an item cannot be binarized; the item is skipped."""
    pass
class BaseBinarizer:
def __init__(self, processed_data_dir=None):
    """Create a binarizer rooted at *processed_data_dir*.

    :param processed_data_dir: directory with metadata.json and shared maps;
        defaults to hparams["processed_data_dir"].
    """
    if processed_data_dir is None:
        processed_data_dir = hparams["processed_data_dir"]
    self.processed_data_dir = processed_data_dir
    self.hparams = hparams
    self.binary_data_dir = hparams["binary_data_dir"]
    self.preprocess_args = hparams["preprocess_args"]
    self.binarization_args = hparams["binarization_args"]
    self.items = {}       # item_name -> metadata dict (filled by load_meta_data)
    self.item_names = []  # item names in load order (shuffled if configured)
    if self.binarization_args["with_spk_f0_norm"]:
        self.spk_pitch_map = {}  # per-speaker pitch stats gathered during training pass
def load_meta_data(self):
    """Populate self.items / self.item_names from <processed_data_dir>/metadata.json."""
    meta_path = f"{self.processed_data_dir}/metadata.json"
    for record in tqdm(json.load(open(meta_path)), desc="Loading meta data."):
        name = record["item_name"]
        self.items[name] = record
        self.item_names.append(name)
    if self.binarization_args["shuffle"]:
        # Fixed seed keeps the train/valid/test split reproducible across runs.
        random.seed(1234)
        random.shuffle(self.item_names)
@property
def train_index_item_names(self):
    """Item names selected by the positional binarization_args['train_range'] (stop -1 = end)."""
    range_ = self._convert_range(self.binarization_args["train_range"])
    return self.item_names[range_[0]:range_[1]]
@property
def valid_index_item_names(self):
    """Item names selected by the positional binarization_args['valid_range'] (stop -1 = end)."""
    range_ = self._convert_range(self.binarization_args["valid_range"])
    return self.item_names[range_[0]:range_[1]]
@property
def test_index_item_names(self) -> list:
    """Item names selected by the positional binarization_args['test_range'] (stop -1 = end)."""
    range_ = self._convert_range(self.binarization_args["test_range"])
    return self.item_names[range_[0]:range_[1]]
def _convert_range(self, range_: list):
    """Resolve a [start, stop] range; stop == -1 means "to the end of item_names".

    Fix: no longer mutates the caller's list in place — the original wrote
    the resolved stop back into the hparams-owned config list. A resolved
    copy is returned instead; callers only index the return value.
    """
    start, stop = range_
    if stop == -1:
        stop = len(self.item_names)
    return [start, stop]
@property
def train_title_item_names(self):
    """Item names containing any title substring listed in binarization_args['train_range']."""
    return [item_name for item_name in self.item_names
            if any(title in item_name for title in self.binarization_args["train_range"])]
@property
def valid_title_item_names(self):
    """Item names containing any title substring listed in binarization_args['valid_range']."""
    return [item_name for item_name in self.item_names
            if any(title in item_name for title in self.binarization_args["valid_range"])]
@property
def test_title_item_names(self):
    """Item names containing any title substring listed in binarization_args['test_range']."""
    return [item_name for item_name in self.item_names
            if any(title in item_name for title in self.binarization_args["test_range"])]
def meta_data(self, prefix: str, dataset_range):
    """Yield item metadata dicts for one dataset split.

    :param prefix: one of ["train", "valid", "test"]; anything else falls
        back to "train" (preserving the original behavior).
    :param dataset_range: "index" for positional ranges, "title" for
        substring matching on item names.
    :raises ValueError: for an unknown dataset_range (the original fell
        through to an UnboundLocalError on item_names).
    """
    if dataset_range not in ("index", "title"):
        raise ValueError(f"Unknown dataset_range: {dataset_range}")
    if prefix == "valid":
        item_names = (self.valid_index_item_names if dataset_range == "index"
                      else self.valid_title_item_names)
    elif prefix == "test":
        item_names = (self.test_index_item_names if dataset_range == "index"
                      else self.test_title_item_names)
    else:
        item_names = (self.train_index_item_names if dataset_range == "index"
                      else self.train_title_item_names)
    for item_name in item_names:
        yield self.items[item_name]
def process(self):
    """Top-level entry point: load metadata, refresh shared maps in the
    binary dir, build the note maps, then binarize all three splits."""
    self.load_meta_data()
    os.makedirs(hparams["binary_data_dir"], exist_ok=True)
    # Copy the phone/speaker maps produced by the preprocessing stage.
    for fn in ["phone_set.json", "spk_map.json"]:
        remove_file(f"{hparams['binary_data_dir']}/{fn}")
        copy_file(f"{hparams['processed_data_dir']}/{fn}", f"{hparams['binary_data_dir']}/{fn}")
    # NOTE(review): build_*_map are not defined in this base class —
    # presumably subclass hooks; confirm.
    self.note_pitch_map = self.build_pitch_map()
    self.note_dur_map = self.build_dur_map()
    self.note_tempo_map = self.build_tempo_map()
    self.process_data("valid")
    self.process_data("test")
    self.process_data("train")
def process_data(self, prefix: str):
    """Binarize one split into an IndexedDataset plus length sidecar files.

    :param prefix: one of ["train", "valid", "test"]
    """
    data_dir = hparams["binary_data_dir"]
    meta_data = list(self.meta_data(prefix, self.binarization_args["dataset_range"]))
    process_item = partial(self.process_item, preprocess_args=self.preprocess_args,
                           binarization_args=self.binarization_args)
    builder = IndexedDatasetBuilder(f"{data_dir}/{prefix}")
    ph_lengths = []
    mel_lengths = []
    total_sec = 0
    max_sec = 0
    total_file = 0
    items = []
    args = [{"item": item, "note_pitch_map": self.note_pitch_map, "note_dur_map": self.note_dur_map,
             "note_tempo_map": self.note_tempo_map} for item in meta_data[:len(meta_data)]]
    # Get information from audio and transcript (parallel workers);
    # process_item returns None for items that fail binarization.
    for item_id, item in multiprocess_run_tqdm(process_item, args, desc="Processing data"):
        if item is not None:
            items.append(item)
            if self.binarization_args["with_spk_f0_norm"] and prefix == "train":
                self.calculate_spk_stats(item["f0"], item["spk_id"])
    # Use pre-trained speaker embeddings (resemblyzer VoiceEncoder on GPU).
    if self.binarization_args["with_spk_embed"]:
        args = [{"wav": item["wav"]} for item in items]
        for item_id, spk_embed in multiprocess_run_tqdm(
                self.get_spk_embed, args,
                init_ctx_func=lambda wid: {"voice_encoder": VoiceEncoder().cuda()}, num_workers=4,
                desc="Extracting spk embed"):
            items[item_id]["spk_embed"] = spk_embed
    for item in items:
        # Drop heavy fields the config does not ask to keep.
        if not self.binarization_args["with_wav"] and "wav" in item:
            del item["wav"]
        mel_lengths.append(item["len"])
        assert item["len"] > 0, (item["item_name"], item["text"], item["mel2ph"])
        if "ph_len" in item:
            ph_lengths.append(item["ph_len"])
        if max_sec < item["sec"]:
            max_sec = item["sec"]
        total_sec += item["sec"]
        if "midi_info" in item:
            del item["midi_info"]
        del item["sec"]
        del item["others"]
        if not self.binarization_args["with_mel"] and "mel" in item:
            del item["mel"]
        builder.add_item(item)
    total_file += len(items)
    builder.finalize()
    # Append to existing length sidecars so repeated runs accumulate.
    if os.path.exists(f"{data_dir}/{prefix}_lengths.npy"):
        mel_lengths_ = np.load(f"{data_dir}/{prefix}_lengths.npy").tolist()
        mel_lengths_.extend(mel_lengths)
        mel_lengths = mel_lengths_
    np.save(f"{data_dir}/{prefix}_lengths.npy", mel_lengths)
    if len(ph_lengths) > 0:
        # NOTE(review): "ph_lenghts" is misspelled but must stay consistent
        # with whatever reads this file.
        if os.path.exists(f"{data_dir}/{prefix}_ph_lenghts.npy"):
            ph_lengths_ = np.load(f"{data_dir}/{prefix}_ph_lenghts.npy").tolist()
            ph_lengths.extend(ph_lengths_)
        np.save(f"{data_dir}/{prefix}_ph_lenghts.npy", ph_lengths)
    if self.binarization_args["with_spk_f0_norm"] and prefix == "train":
        self.build_spk_pitch_map()
    print(f"| {prefix} total files: {total_file}, total duration: {total_sec:.3f}s, max duration: {max_sec:.3f}s")
@classmethod
def process_item(cls, item: dict, note_pitch_map, note_dur_map, note_tempo_map, preprocess_args, binarization_args: dict):
    """Build one binarized training example from a raw metadata ``item``.

    Loads the waveform/mel, optionally aligns MIDI with text, maps note
    attributes through the given lookup tables, optionally trims BOS/EOS
    frames and extracts f0. Returns the enriched ``item`` dict, or ``None``
    when the item is skipped due to an alignment/processing error.
    """
    item["ph_len"] = len(item["ph_token"])
    item_name = item["item_name"]
    wav_fn = item["wav_fn"]
    # Get Waveform and Mel-spectrogram information
    wav, mel = cls.process_audio(wav_fn, item, binarization_args)
    # Items with a near-empty mel are returned untrimmed and unprocessed.
    if mel.shape[0] > 2:
        try:
            n_bos_frames, n_eos_frames = 0, 0
            if preprocess_args["use_text"] and preprocess_args["use_midi"]:
                if binarization_args["with_midi_align"]:
                    # Align text information
                    cls.process_midi_align(item)
                # Mapping pitch and dur map of note
                cls.process_note(item, note_pitch_map, note_dur_map, note_tempo_map, preprocess_args, binarization_args)
            if binarization_args["trim_eos_bos"]:
                # First/last token durations are treated as BOS/EOS frame counts.
                n_bos_frames = item["duration"][0] if preprocess_args["use_midi"] else 0
                # NOTE(review): without MIDI this sets n_eos_frames to the full
                # mel length, which would trim the mel to empty — confirm that
                # `0` was not intended here.
                n_eos_frames = item["duration"][-1] if preprocess_args["use_midi"] else mel.shape[0]
                T = len(mel)
                item["mel"] = mel[n_bos_frames:T - n_eos_frames]
                item["mel2ph"] = item["mel2ph"][n_bos_frames:T - n_eos_frames]
                item["duration"] = item["duration"][1:-1]
                item["duration_midi"] = item["duration_midi"][1:-1]
                item["len"] = item["mel"].shape[0]
                # Trim the waveform consistently with the mel (frames -> samples).
                item["wav"] = wav[n_bos_frames * hparams["hop_size"]:len(wav) - n_eos_frames * hparams["hop_size"]]
            if binarization_args["with_f0"]:
                # Get pitch information
                cls.process_pitch(item, n_bos_frames, n_eos_frames)
        except BinarizationError as e:
            print(f"| Skip item ({e}). item_name: {item_name}, wav_fm: {wav_fn}")
            return None
        except Exception as e:
            traceback.print_exc()
            print(f"| Skip item. item_name: {item_name}, wav_fm: {wav_fn}")
            return None
    return item
@classmethod
def process_audio(cls, wav_fn: str, res: dict, binarization_args: dict):
    """Compute waveform/mel features for ``wav_fn`` and record them in ``res``."""
    # Get Mel-spectrogram (and related features) from the audio file.
    spec = torch_wav2spec(wav_fn,
                          fft_size=hparams["fft_size"],
                          hop_size=hparams["hop_size"],
                          win_length=hparams["win_size"],
                          num_mels=hparams["num_mel_bins"],
                          fmin=hparams["fmin"],
                          fmax=hparams["fmax"],
                          sample_rate=hparams["sample_rate"])
    mel = spec["mel"]
    wav = spec["wav"].astype(np.float16)
    # Optionally keep the linear spectrogram as well.
    if binarization_args["with_linear"]:
        res["linear"] = spec["linear"]
    if "wav_norm" in spec:
        res["wav_norm"] = spec["wav_norm"]
    res.update({"mel": mel, "wav": wav, "sec": len(wav) / hparams["sample_rate"], "len": mel.shape[0]})
    return wav, mel
@staticmethod
def process_midi_align(item: dict):
    """Align mel frames with phoneme/note sequences derived from MIDI.

    Populates ``item`` with ``ph_token``, ``text``, ``mel2ph``, ``mel2note``
    and ``duration``; raises :class:`BinarizationError` when the text is too
    short or the alignment is inconsistent.
    """
    mel = item["mel"]
    midi_info = item["midi_info"]
    # Get align information and duration
    mel2phone, mel2note, duration, ph_token, ph_list, _, item["midi_info"] = get_mel2note(midi_info, mel, hparams["hop_size"],
                                                                                         hparams["sample_rate"], item["silence"])
    item["ph_token"] = ph_token
    item["text"] = ph_list
    # Bug fix: check for None *before* calling len(); the original order
    # raised TypeError on a None ph_list instead of BinarizationError.
    if ph_list is None or len(ph_list) < hparams["binarization_args"]["min_text"]:
        raise BinarizationError(
            f"| Less than min text sequence: {len(item['ph_token'])}")
    if np.array(mel2phone).max() - 1 >= len(ph_token):
        raise BinarizationError(
            f"Align does not match: mel2ph.max - 1 != {np.array(mel2phone).max() - 1}, len(ph_token): {len(ph_token)}")
    item["mel2ph"] = mel2phone
    item["mel2note"] = mel2note
    item["duration"] = duration
    # Get phoneme to word information.
    # Bug fix: the assert message was a plain string, so the {…} placeholders
    # never interpolated; it is now an f-string.
    assert len(ph_token) == len(duration), f"| phoneme : {len(ph_token)}, ph_duration : {len(duration)}"
@staticmethod
def process_note(item, note_pitch_map, note_dur_map, note_tempo_map, preprocess_args, binarization_args):
    """Map raw MIDI note attributes onto vocabulary ids stored in ``item``."""
    # Build the duration encoder/decoder tables used to quantize durations.
    dur_enc, dur_dec = [], []
    for exponent in range(binarization_args["max_durations"]):
        for _ in range(binarization_args["pos_resolution"]):
            dur_dec.append(len(dur_enc))
            for _ in range(2 ** exponent):
                dur_enc.append(len(dur_dec) - 1)

    def d2e(x):
        # Clamp out-of-range durations to the last encodable bucket.
        return dur_enc[x] if x < len(dur_enc) else dur_enc[-1]

    if not preprocess_args["use_midi"]:
        # Without MIDI every note attribute degenerates to a single padding id.
        item["note_duration"] = [0]
        item["note_pitch"] = [0]
        item["note_tempo"] = [0]
        return
    item["note_duration"] = [note_dur_map[str(d2e(note[3]))] for note in item["midi_info"]]
    item["note_pitch"] = [note_pitch_map[str(note[2])] for note in item["midi_info"]]
    item["note_tempo"] = [note_tempo_map[str(note[6])] for note in item["midi_info"]]
@staticmethod
def process_pitch(item: dict, n_bos_frames: int, n_eos_frames: int):
    """Extract f0 from the item's waveform and store (quantized) pitch features."""
    wav, mel = item["wav"], item["mel"]
    # Get f0 from waveform.
    f0 = extract_pitch_simple(wav)
    if sum(f0) == 0:
        raise BinarizationError("Empty f0")
    assert len(mel) == len(f0) // hparams["f0_resolution"], (len(mel), len(f0))
    # Quantize f0 values into coarse pitch bins.
    item["f0"] = f0
    item["pitch"] = f0_to_coarse(f0)
    if hparams["binarization_args"]["with_f0cwt"]:
        # Continuous-wavelet features of the normalized log-f0 contour.
        _, cont_logf0 = get_cont_logf0(f0)
        logf0s_mean, logf0s_std = np.mean(cont_logf0), np.std(cont_logf0)
        cwt_spec, _ = get_logf0_cwt((cont_logf0 - logf0s_mean) / logf0s_std)
        item["cwt_spec"] = cwt_spec
        item["cwt_mean"] = logf0s_mean
        item["cwt_std"] = logf0s_std
def build_pitch_map(self):
    """ Using 0 to 128 notes for MIDI. """
    low, high = self.hparams["note_range"][0], self.hparams["note_range"][1]
    # Id 0 is reserved for "no pitch"; real pitches start at 1.
    pitch_map = {"0": 0}
    for offset, midi_pitch in enumerate(range(low, high)):
        pitch_map[str(midi_pitch)] = offset + 1
    # Persist the mapping next to the binarized dataset.
    json.dump(pitch_map, open(f"{self.binary_data_dir}/pitch_map.json", "w"), ensure_ascii=False)
    return pitch_map
def build_dur_map(self):
    """ Using max duration for MIDI. """
    dur_map = {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2}
    # Duration buckets 0..127 are shifted by 4, leaving room for specials.
    dur_map.update({str(bucket): bucket + 4 for bucket in range(0, 128)})
    json.dump(dur_map, open(f"{self.binary_data_dir}/dur_map.json", "w"), ensure_ascii=False)
    return dur_map
def build_tempo_map(self):
    """Build the tempo-id vocabulary over the configured (inclusive) tempo range."""
    tempo_map = {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2}
    tempo_range = self.binarization_args["tempo_range"]
    # Real tempo ids start at 4, leaving room for the special tokens.
    for index, tempo in enumerate(range(tempo_range[0], tempo_range[1] + 1)):
        tempo_map[str(tempo)] = index + 4
    json.dump(tempo_map, open(f"{self.binary_data_dir}/tempo_map.json", "w"), ensure_ascii=False)
    return tempo_map
def calculate_spk_stats(self, f0, spk_id):
    """Track the per-speaker min/max of voiced f0 values in ``self.spk_pitch_map``."""
    # min over voiced frames only (zeros mean unvoiced); max over everything.
    voiced_min = f0[np.nonzero(f0)].min()
    voiced_max = f0.max()
    key = str(spk_id)
    stats = self.spk_pitch_map.get(key)
    if stats is None:
        # First time we see this speaker: seed the stats dict.
        self.spk_pitch_map[key] = {"max": voiced_max, "min": voiced_min}
        return
    if stats["min"] > voiced_min:
        stats["min"] = voiced_min
    if stats["max"] < voiced_max:
        stats["max"] = voiced_max
def build_spk_pitch_map(self):
    """Merge in-memory speaker pitch stats with any previously saved map and persist."""
    stat_map_dir = f"{self.binary_data_dir}/spk_pitch_map.json"
    merged = {}
    if os.path.exists(stat_map_dir):
        merged = json.load(open(stat_map_dir, "r"))
    merged.update(self.spk_pitch_map)
    # Keep speakers sorted numerically by id for a stable on-disk layout.
    spk_pitch_map = dict(sorted(merged.items(), key=lambda entry: int(entry[0])))
    print("| Statistics of speaker's pitch is saved.")
    json.dump(spk_pitch_map, open(stat_map_dir, "w"), ensure_ascii=False)
@staticmethod
def get_spk_embed(wav, ctx):
    """Embed an utterance with the worker-local voice encoder stored in ``ctx``."""
    encoder = ctx["voice_encoder"]
    return encoder.embed_utterance(wav.astype(float))
@property
def num_workers(self):
    """Worker count: N_PROC env var first, then hparams, then the CPU count."""
    fallback = hparams.get("N_PROC", os.cpu_count())
    return int(os.getenv("N_PROC", fallback))
| jisang93/VISinger | preprocessor/base_binarizer.py | base_binarizer.py | py | 16,910 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "numpy.seterr",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "utils.commons.hparams.hparams",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "utils.commons.hparams.hparams",
"line_number": 34,
"usage_type": "name"
},
{
"api_name... |
6800272551 | import yaml
from populate.populator.common.errors import ConfigurationError
from .projects_manager import ProjectsManager
def project_constructor(loader, node):
    """YAML constructor turning a scalar or mapping node into a project object.

    :param loader: the active yaml loader
    :param node: the yaml node being constructed
    :raises ConfigurationError: when the node cannot be interpreted as a project
    """
    if isinstance(node, yaml.ScalarNode):
        item = loader.construct_scalar(node)
        if not isinstance(item, str) or not item:
            raise ConfigurationError(
                'value {} cannot be interpreted as project'.format(item))
    elif isinstance(node, yaml.MappingNode):
        item = loader.construct_mapping(node)
        if not isinstance(item, dict) or not item:
            raise ConfigurationError(
                'value {} cannot be interpreted as project'.format(item))
    else:
        # Bug fix: an unsupported node kind (e.g. a SequenceNode) previously
        # fell through with `item` unbound and raised NameError below.
        raise ConfigurationError(
            'value {} cannot be interpreted as project'.format(node))
    return ProjectsManager().get_object(item)
| tomasgarzon/exo-services | service-exo-projects/populator/projects/project_loader.py | project_loader.py | py | 715 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yaml.ScalarNode",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "populate.populator.common.errors.ConfigurationError",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "yaml.MappingNode",
"line_number": 15,
"usage_type": "attribute"
... |
36837923429 | from __future__ import annotations
from dataclasses import dataclass
import bson.json_util as json
__all__ = ['Node', 'build_execution_tree']
@dataclass
class Node:
    """Represent one node of an SBE execution tree."""

    stage: str
    plan_node_id: int
    total_execution_time: int
    n_returned: int
    n_processed: int
    children: list[Node]

    def get_execution_time(self):
        """Execution time of this SBE node excluding its children's time."""
        children_time = sum(child.total_execution_time for child in self.children)
        return self.total_execution_time - children_time

    def print(self, level=0):
        """Pretty print of the SBE tree."""
        indent = "| " * level
        print(
            f'{indent}{self.stage}, plaNodeId: {self.plan_node_id}, totalExecutionTime: {self.total_execution_time:,}, nReturned: {self.n_returned}, nProcessed: {self.n_processed}'
        )
        for subtree in self.children:
            subtree.print(level + 1)
def build_execution_tree(execution_stats: dict[str, any]) -> Node:
    """Build the SBE execution tree from the 'executionStats' field of a query explain."""
    assert execution_stats['executionSuccess']
    root_stage = execution_stats['executionStages']
    return process_stage(root_stage)
def process_stage(stage: dict[str, any]) -> Node:
    """Parse the given SBE stage by dispatching on its 'stage' name."""
    dispatch_table = {
        'filter': process_filter,
        'cfilter': process_filter,
        'traverse': process_traverse,
        'project': process_inner_node,
        'limit': process_inner_node,
        'scan': process_seek,
        'coscan': process_leaf_node,
        'nlj': process_nlj,
        'hj': process_hash_join_node,
        'mj': process_hash_join_node,
        'seek': process_seek,
        'ixseek': process_seek,
        'limitskip': process_inner_node,
        'group': process_inner_node,
        'union': process_union_node,
        'unique': process_unique_node,
        'unwind': process_unwind_node,
    }
    stage_name = stage['stage']
    if stage_name not in dispatch_table:
        # Dump the unknown stage to aid debugging before failing.
        print(json.dumps(stage, indent=4))
        raise ValueError(f'Unknown stage: {stage}')
    return dispatch_table[stage_name](stage)
def process_filter(stage: dict[str, any]) -> Node:
    """Process a filter/cfilter stage; rows processed is the number tested."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=stage['numTested'], children=[child])
def process_traverse(stage: dict[str, any]) -> Node:
    """Process a traverse stage (not used by Bonsai)."""
    branches = [process_stage(stage['outerStage']), process_stage(stage['innerStage'])]
    return Node(**get_common_fields(stage), n_processed=stage['nReturned'],
                children=branches)
def process_hash_join_node(stage: dict[str, any]) -> Node:
    """Process an hj/mj stage; rows processed is the sum of both inputs' outputs."""
    outer = process_stage(stage['outerStage'])
    inner = process_stage(stage['innerStage'])
    return Node(**get_common_fields(stage), n_processed=outer.n_returned + inner.n_returned,
                children=[outer, inner])
def process_nlj(stage: dict[str, any]) -> Node:
    """Process an nlj stage; rows processed is the number of docs examined."""
    branches = [process_stage(stage['outerStage']), process_stage(stage['innerStage'])]
    return Node(**get_common_fields(stage), n_processed=stage['totalDocsExamined'],
                children=branches)
def process_inner_node(stage: dict[str, any]) -> Node:
    """Process an SBE stage with exactly one input stage."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=child.n_returned,
                children=[child])
def process_leaf_node(stage: dict[str, any]) -> Node:
    """Process an SBE stage that has no input stages."""
    common = get_common_fields(stage)
    return Node(**common, n_processed=stage['nReturned'], children=[])
def process_seek(stage: dict[str, any]) -> Node:
    """Process a scan/seek/ixseek stage; rows processed is the number of reads."""
    common = get_common_fields(stage)
    return Node(**common, n_processed=stage['numReads'], children=[])
def process_union_node(stage: dict[str, any]) -> Node:
    """Process a union stage over an arbitrary number of input stages."""
    branches = [process_stage(sub) for sub in stage['inputStages']]
    return Node(**get_common_fields(stage), n_processed=stage['nReturned'], children=branches)
def process_unwind_node(stage: dict[str, any]) -> Node:
    """Process an unwind stage."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=child.n_returned,
                children=[child])
def process_unique_node(stage: dict[str, any]) -> Node:
    """Process a unique stage; rows processed is the number of duplicate tests."""
    child = process_stage(stage['inputStage'])
    return Node(**get_common_fields(stage), n_processed=stage['dupsTested'], children=[child])
def get_common_fields(json_stage: dict[str, any]) -> dict[str, any]:
    """Extract the Node constructor fields shared by every SBE stage."""
    return dict(
        stage=json_stage['stage'],
        plan_node_id=json_stage['planNodeId'],
        total_execution_time=json_stage['executionTimeNanos'],
        n_returned=json_stage['nReturned'],
    )
| mongodb/mongo | buildscripts/cost_model/execution_tree.py | execution_tree.py | py | 5,185 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "bson.json_util.dumps",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "bson.json_util",
"line_number": 62,
"usage_type": "name"
}
] |
830016519 | #!/usr/bin/env python
import gzip
import sys
import tarfile
import threading
import urllib.request
import zipfile
import lib.download.task as task
import lib.independence.fs as fs
import lib.ui.color as printer
# The greater purpose of (functions in) this file is
# to download a list of DownloadTasks
class Downloader(object):
    '''Object to facilitate downloading (and extracting) a list of DownloadTasks.'''

    def __init__(self, tasklist):
        # tasklist is the list of DownloadTasks which should be downloaded.
        self.tasklist = tasklist

    # Returns True if the name suggests it is a tar archive, otherwise False
    def is_tar(self, name):
        return name.endswith('.tar') or name.endswith('tar.xz')\
            or name.endswith('tar.gz') or name.endswith('.tgz')

    # Returns True if the name suggests it is a zip archive, otherwise False
    def is_zip(self, name):
        return name.endswith('.zip')

    # Returns True if the name suggests it is a gzip archive, otherwise False
    def is_gzip(self, name):
        return name.endswith('.gz')

    # Extract output of downloaded file, if it has a compatible format.
    # task is the task for which the file is downloaded.
    def extract(self, task):
        target = fs.join(task.directory, task.name)
        basicstring = printer.format('extracter', printer.Color.CAN)
        extractstring = printer.format('extracting', printer.Color.YEL)
        print('[{0}] {1} {2}'.format(basicstring, extractstring, task.name))
        if self.is_tar(task.url):
            # Bug fix: close the archive deterministically (it was left open).
            with tarfile.open(target, 'r') as ttar:
                ttar.extractall(path=task.directory)
        elif self.is_zip(task.url):
            with zipfile.ZipFile(target, 'r') as tzip:
                tzip.extractall(task.directory)
        elif self.is_gzip(task.url):
            # Bug fix: the gunzipped data was written to `task.directory`
            # itself (a directory path), which fails with IsADirectoryError.
            # Write to a file inside the directory, dropping the .gz suffix.
            out_name = task.name[:-3] if task.name.endswith('.gz') else task.name + '.out'
            with gzip.open(target, 'rb') as f_in:
                with open(fs.join(task.directory, out_name), 'wb') as f_out:
                    # NOTE(review): assumes fs.cp copies between open file
                    # objects (shutil.copyfileobj-style) — confirm.
                    fs.cp(f_in, f_out)
        else:
            return
        finishedstring = printer.format('extracted', printer.Color.GRN)
        print('[{0}] {1} {2}'.format(basicstring, finishedstring, task.name))
        # Remove the downloaded archive once its contents are extracted.
        fs.rm(fs.join(task.directory, task.name))
        # If the archive contained a single top-level directory, flatten it.
        dircontents = fs.ls(task.directory)
        if len(dircontents) == 1 and fs.isdir(task.directory, dircontents[0]):
            subdircontents = fs.ls(task.directory, dircontents[0])
            for file in subdircontents:
                path = fs.join(task.directory, dircontents[0])
                fs.mv(fs.join(path, file), task.directory)
            fs.rm(task.directory, dircontents[0], ignore_errors=True)

    # Downloads a DownloadTask and prints some user information.
    # task is the downloadtask which contains the download information.
    def download(self, task):
        basicstring = printer.format('downloader', printer.Color.CAN)
        downloadstring = printer.format('downloading', printer.Color.YEL)
        print('[{0}] {1} {2}'.format(basicstring, downloadstring, task.name))
        u = urllib.request.urlopen(task.url)
        with open(fs.join(task.directory, task.name), 'wb') as out_file:
            out_file.write(u.read())
        finishedstring = printer.format('downloaded', printer.Color.GRN)
        print('[{0}] {1} {2}'.format(basicstring, finishedstring, task.name))

    # Parallel function, which is called to download (and maybe extract) one task.
    def parallel_exec(self, task):
        self.download(task)
        if self.is_tar(task.url)\
                or self.is_zip(task.url)\
                or self.is_gzip(task.url):
            self.extract(task)

    # Main function to call. Each DownloadTask will be performed in its own thread.
    def download_all(self):
        threads = [threading.Thread(target=self.parallel_exec, args=(task,))
                   for task in self.tasklist]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
{
"api_name": "lib.independence.fs.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "lib.independence.fs",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "lib.download.task.directory",
"line_number": 41,
"usage_type": "attribute"
},
{
"api... |
10496867860 | import tensorflow as tf
from model.model_builder import ModelBuilder
from utils.model_post_processing import merge_post_process
from tensorflow.keras.models import Model
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from utils.priors import *
import argparse
# Command-line arguments for converting a trained detector to a frozen graph.
parser = argparse.ArgumentParser()
# Backbone selection; must match the architecture of the loaded weights.
parser.add_argument("--backbone_name", type=str, help="Pretrained backbone name\
| model_name : description | \
[ 1. mobilenetv2 : MobileNetV2 ]\
[ 2. mobilenetv3s : MobileNetV3-Small ] \
[ 3. mobilenetv3l : MobileNetV3-Large ] \
[ 4. efficient_lite_v0 : EfficientNet-Lite-B0 ]\
[ 5. efficientnetv2b0 : EfficientNet-V2-B0 ]\
[ 6. efficientnetv2b3 : EfficientNet-V2-B3 ]",
                    default='efficient_lite_v0')
parser.add_argument("--checkpoint_dir", type=str, help="Set the model storage directory",
                    default='./checkpoints/')
parser.add_argument("--model_weights", type=str, help="Saved model weights directory",
                    default='0906/_0906_efficient_lite_v0_display-detection_e200_lr0.001_b32_without-norm-small_prior-adam_best_loss.h5')
parser.add_argument("--num_classes", type=int, help="Set num classes for model and post-processing",
                    default=4)
# NOTE(review): type=tuple would split a CLI string into single characters;
# only the default (300, 300) is usable as-is — confirm intended.
parser.add_argument("--image_size", type=tuple, help="Set image size for priors and post-processing",
                    default=(300, 300))
parser.add_argument("--gpu_num", type=int, help="Set GPU number to use(When without distribute training)",
                    default=0)
parser.add_argument("--frozen_dir", type=str, help="Path to save frozen graph transformation result",
                    default='./checkpoints/converted_frozen_graph/')
parser.add_argument("--frozen_name", type=str, help="Frozen graph file name to save",
                    default='frozen_graph')
# NOTE(review): help text below looks copy-pasted from --frozen_name — confirm.
parser.add_argument("--include_postprocess", help="Frozen graph file name to save",
                    action='store_true')
parser.add_argument("--load_keras_model", help="Load model from Saved format(.pb) or Keras(.h5)",
                    action='store_true')

# Parsed once at import time; the __main__ block below consumes `args`.
args = parser.parse_args()
if __name__ == '__main__':
    # Allow TF to fall back to CPU for unsupported ops; run functions eagerly.
    tf.config.set_soft_device_placement(True)
    tf.config.run_functions_eagerly(True)
    gpu_number = '/device:GPU:' + str(args.gpu_num)
    with tf.device(gpu_number):
        # Build the anchor (prior) boxes matching the detector's output layout.
        spec_list = convert_spec_list()
        priors = create_priors_boxes(specs=spec_list, image_size=args.image_size[0], clamp=True)
        target_transform = MatchingPriors(priors, center_variance, size_variance, iou_threshold)

        if args.load_keras_model:
            model = tf.keras.models.load_model('./checkpoints/pruning', compile=False)
        else:
            # NOTE(review): `include_preprocessing` is fed from the
            # `--include_postprocess` flag — confirm this is intentional.
            model = ModelBuilder(image_size=args.image_size,
                                 num_classes=args.num_classes,
                                 include_preprocessing=args.include_postprocess).build_model(args.backbone_name)
            model.load_weights(args.checkpoint_dir + args.model_weights, by_name=True)
            model.summary()

        if args.include_postprocess:
            # Append decoding/NMS so the frozen graph emits final detections.
            detection_output = merge_post_process(detections=model.output,
                                                  target_transform=target_transform,
                                                  confidence_threshold=0.5,
                                                  classes=args.num_classes)
            model = Model(inputs=model.input, outputs=detection_output)
            # model.summary()

        # path of the directory where you want to save your model
        frozen_out_path = args.frozen_dir
        # name of the .pb file
        frozen_graph_filename = args.frozen_name
        # Convert Keras model to ConcreteFunction
        full_model = tf.function(lambda x: model(x))
        full_model = full_model.get_concrete_function(
            tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
        # Get frozen ConcreteFunction
        frozen_func = convert_variables_to_constants_v2(full_model)
        frozen_func.graph.as_graph_def()
        layers = [op.name for op in frozen_func.graph.get_operations()]
        print("Frozen model layers: ")
        # for layer in layers:
        #     print(layer)
        print("Frozen model inputs: {0}".format(frozen_func.inputs))
        print("Frozen model outputs: {0}".format(frozen_func.outputs))
        # Save frozen graph to disk: binary .pb plus human-readable .pbtxt.
        tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                          logdir=frozen_out_path,
                          name=f"{frozen_graph_filename}.pb",
                          as_text=False)
        tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                          logdir=frozen_out_path,
                          name=f"{frozen_graph_filename}.pbtxt",
                          as_text=True)
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.config.set_soft_device_placement",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 41,
"usage_type": "attribute"
},
... |
23716751711 | import json
from PIL import Image
import os
def main(json_file_path):
    """Crop detection boxes (score > 0.0) out of the first image listed in a
    detections JSON file.

    :param json_file_path: path to a JSON file holding a list of entries, each
        with an ``image_path`` and ``rects`` (x1/y1/x2/y2 plus a score).
    :return: list of cropped PIL images, padded toward square with a margin.
    """
    # Bug fix: the file handle was never closed; use a context manager and
    # let json read the file object directly.
    with open(json_file_path) as json_file:
        json_data = json.load(json_file)

    # Only the first entry is processed (the loop over all entries is kept
    # disabled, matching the original behavior).
    image = json_data[0]
    # for image in json_data:
    image_to_crop = Image.open(image['image_path'])

    cropped_images = []
    for r in image['rects']:
        if r['score'] <= 0.0:
            continue
        diff_x = r['x2'] - r['x1']
        diff_y = r['y2'] - r['y1']
        # Pad the shorter side so the crop is (roughly) square, plus a
        # 10-pixel margin split across both sides of each axis.
        if diff_x < diff_y:
            wider_x = diff_y - diff_x + 10
            wider_y = 10
        else:
            wider_y = diff_x - diff_y + 10
            wider_x = 10
        new_image = image_to_crop.crop((r['x1'] - wider_x / 2, r['y1'] - wider_y / 2,
                                        r['x2'] + wider_x / 2, r['y2'] + wider_y / 2))
        # new_image = transform_image_to_square(new_image)  # transform the image to square
        cropped_images.append(new_image)
    return cropped_images
def transform_image_to_square(img):
    """Crop ``img`` with symmetric negative padding so the result is square."""
    side = max(img.size)
    pad_x = (side - img.size[0]) / 2
    pad_y = (side - img.size[1]) / 2
    # Negative offsets extend the canvas; PIL fills the border area.
    box = (-pad_x, -pad_y, img.size[0] + pad_x, img.size[1] + pad_y)
    return img.crop(box)
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 14,
"usage_type": "name"
}
] |
22347170498 | import os
from logging import (
CRITICAL,
DEBUG,
ERROR,
getLogger,
INFO,
Logger,
WARNING,
)
from pathlib import Path
from rich.logging import RichHandler
from rich.highlighter import NullHighlighter
from .config import BodyworkConfig
from .constants import (
DEFAULT_LOG_LEVEL,
DEFAULT_LOG_LEVEL_ENV_VAR,
DEFAULT_PROJECT_DIR,
LOG_TIME_FORMAT,
PROJECT_CONFIG_FILENAME,
)
from .cli.terminal import console
from .exceptions import BodyworkConfigError
def bodywork_log_factory(
    log_level: str = None,
    config_file_path: Path = DEFAULT_PROJECT_DIR / PROJECT_CONFIG_FILENAME,
) -> Logger:
    """Create a standardised Bodywork logger.

    Log-level precedence (highest first): the ``log_level`` argument, the
    level set in the project config file, the BODYWORK_LOG_LEVEL environment
    variable, and finally the built-in default (INFO).

    :param log_level: The minimum severity level of messages to log,
        defaults to None.
    :param config_file_path: Path to project config file, defaults
        DEFAULT_PROJECT_DIR/PROJECT_CONFIG_FILENAME.
    """
    levels = {
        "DEBUG": DEBUG,
        "INFO": INFO,
        "WARNING": WARNING,
        "ERROR": ERROR,
        "CRITICAL": CRITICAL,
    }
    log = getLogger("bodywork")
    if log_level is not None:
        log.setLevel(levels[log_level])
    else:
        try:
            log.setLevel(BodyworkConfig(config_file_path).logging.log_level)
        except BodyworkConfigError:
            try:
                log.setLevel(levels[os.environ[DEFAULT_LOG_LEVEL_ENV_VAR]])
            except KeyError:
                log.setLevel(levels[DEFAULT_LOG_LEVEL])
    if not log.hasHandlers():
        # Attach the rich console handler only once per process.
        log.addHandler(
            RichHandler(
                console=console,
                show_path=False,
                highlighter=NullHighlighter(),
                omit_repeated_times=False,
                rich_tracebacks=True,
                log_time_format=LOG_TIME_FORMAT,
            )
        )
    return log
| bodywork-ml/bodywork-core | src/bodywork/logs.py | logs.py | py | 2,348 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "constants.DEFAULT_PROJECT_DIR",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "constants.PROJECT_CONFIG_FILENAME",
"line_number": 30,
"usage_type": "name"
},
{
"api_... |
18932441390 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author : MG
@Time : 19-4-3 下午5:28
@File : __init__.py.py
@contact : mmmaaaggg@163.com
@desc :
"""
import logging
from logging.config import dictConfig
# log settings
# Log settings: rotate logger.log at 10 MiB (5 backups) and echo to console.
logging_config = {
    "version": 1,
    "formatters": {
        "simple": {
            "format": '%(asctime)s %(levelname)s %(name)s %(filename)s.%(funcName)s:%(lineno)d|%(message)s'
        }
    },
    "handlers": {
        "file_handler": {
            "class": 'logging.handlers.RotatingFileHandler',
            "filename": 'logger.log',
            "maxBytes": 1024 * 1024 * 10,
            "backupCount": 5,
            "level": 'DEBUG',
            "formatter": 'simple',
            "encoding": 'utf8',
        },
        "console_handler": {
            "class": 'logging.StreamHandler',
            "level": 'DEBUG',
            "formatter": 'simple',
        },
    },
    "root": {
        "handlers": ['console_handler', 'file_handler'],
        "level": logging.DEBUG,
    },
}
# logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN)
# logging.getLogger('urllib3.connectionpool').setLevel(logging.INFO)
dictConfig(logging_config)

if __name__ == "__main__":
    pass
{
"api_name": "logging.DEBUG",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "logging.config.dictConfig",
"line_number": 46,
"usage_type": "call"
}
] |
4833395496 | import os
import torch
import genova
from torch import nn, optim
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from .optimal_path_inference import optimal_path_infer
from .seq_generation_inference import seq_generation_infer
class Task:
def __init__(self, cfg, serialized_model_path, distributed=True):
    """Record config/paths and choose the compute device (NCCL group when
    training distributed; cfg.infer.device otherwise)."""
    self.cfg = cfg
    self.distributed = distributed
    self.serialized_model_path = serialized_model_path
    if cfg.mode == 'train':
        if not self.distributed:
            self.device = torch.device('cuda')
        else:
            # One process per GPU; rank comes from the torchrun launcher.
            dist.init_process_group(backend='nccl')
            self.local_rank = int(os.environ["LOCAL_RANK"])
            self.device = torch.device("cuda", self.local_rank)
            torch.cuda.set_device(self.local_rank)
    elif isinstance(cfg.infer.device, int):
        torch.cuda.set_device(cfg.infer.device)
        self.device = torch.device('cuda:' + str(cfg.infer.device))
    else:
        self.device = torch.device('cpu')
def initialize(self, *, train_spec_header, train_dataset_dir, val_spec_header, val_dataset_dir):
    """Build the model, task-specific losses, optimizer, AMP scaler and
    train/eval dataloaders; resume from a checkpoint when one exists.

    Keyword-only args: spec headers and dataset dirs for train and validation.
    """
    self.model = genova.models.Genova(self.cfg).to(self.device)
    # Train losses average over the batch; eval losses sum so callers can
    # normalize by total sequence length themselves.
    if self.cfg.task == 'optimal_path':
        self.train_loss_fn = nn.KLDivLoss(reduction='batchmean')
        self.eval_loss_fn = nn.KLDivLoss(reduction='sum')
    elif self.cfg.task == 'node_classification':
        self.train_loss_fn = nn.BCEWithLogitsLoss()
        self.eval_loss_fn = nn.BCEWithLogitsLoss(reduction='sum')
    elif self.cfg.task == 'sequence_generation':
        self.train_loss_fn = nn.CrossEntropyLoss()
        self.eval_loss_fn = nn.CrossEntropyLoss(reduction='sum')
    else:
        raise NotImplementedError
    # Sanity check: the process-group state must match the constructor flag.
    assert self.distributed == dist.is_initialized()
    if self.distributed: self.model = DDP(self.model, device_ids=[self.local_rank], output_device=self.local_rank)
    self.optimizer = optim.AdamW(self.model.parameters(), lr=self.cfg.train.lr)
    self.scaler = GradScaler()
    # Checkpoint name is derived from the wandb project/run names.
    self.persistent_file_name = os.path.join(self.serialized_model_path, self.cfg.wandb.project + '_' + self.cfg.wandb.name + '.pt')
    if os.path.exists(self.persistent_file_name):
        checkpoint = torch.load(self.persistent_file_name)
        # DDP wraps the model, so resume through .module when distributed.
        if self.distributed: self.model.module.load_state_dict(checkpoint['model_state_dict'])
        else: self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    self.train_dl = self.train_loader(train_spec_header, train_dataset_dir)
    self.eval_dl = self.eval_loader(val_spec_header, val_dataset_dir)
def test_initialize(self, *, test_spec_header=None, test_dataset_dir=None):
    """Load the trained model for inference/testing and build the test dataloader."""
    assert not self.distributed
    self.model = genova.models.Genova(self.cfg).to(self.device)
    self.persistent_file_name = os.path.join(self.serialized_model_path, self.cfg.wandb.project + '_' + self.cfg.wandb.name + '.pt')
    print('checkpoint: ', self.persistent_file_name)
    assert os.path.exists(self.persistent_file_name)
    # Load directly on GPU when a device index is configured, else to CPU.
    on_gpu = isinstance(self.cfg.infer.device, int)
    if on_gpu:
        checkpoint = torch.load(self.persistent_file_name)
    else:
        checkpoint = torch.load(self.persistent_file_name, map_location='cpu')
    self.model.load_state_dict(checkpoint['model_state_dict'])
    self.model.eval()
    self.test_dl = self.test_loader(test_spec_header, test_dataset_dir)
    self.test_spec_header = test_spec_header
def train_loader(self, train_spec_header, train_dataset_dir):
    """Build the (prefetching) training dataloader."""
    dataset = genova.data.GenovaDataset(self.cfg, spec_header=train_spec_header, dataset_dir_path=train_dataset_dir)
    batch_sampler = genova.data.GenovaBatchSampler(self.cfg, self.device, 0.95, train_spec_header, [0, 128, 256, 512], self.model)
    collator = genova.data.GenovaCollator(self.cfg)
    # Extra loader workers only make sense in the multi-process setup.
    if self.distributed:
        loader = DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=collator, pin_memory=True, num_workers=10)
    else:
        loader = DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=collator, pin_memory=True)
    return genova.data.DataPrefetcher(loader, self.device)
def eval_loader(self, val_spec_header, val_dataset_dir):
    """Build the (prefetching) validation dataloader."""
    dataset = genova.data.GenovaDataset(self.cfg, spec_header=val_spec_header, dataset_dir_path=val_dataset_dir)
    batch_sampler = genova.data.GenovaBatchSampler(self.cfg, self.device, 2, val_spec_header, [0, 128, 256, 512], self.model)
    collator = genova.data.GenovaCollator(self.cfg)
    # Extra loader workers only make sense in the multi-process setup.
    if self.distributed:
        loader = DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=collator, pin_memory=True, num_workers=5)
    else:
        loader = DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=collator, pin_memory=True)
    return genova.data.DataPrefetcher(loader, self.device)
def test_loader(self, test_spec_header, test_dataset_dir):
    """Build the sequential test dataloader (prefetching only when on GPU)."""
    dataset = genova.data.GenovaDataset(self.cfg, spec_header=test_spec_header, dataset_dir_path=test_dataset_dir)
    batch_sampler = genova.data.GenovaSequentialSampler(test_spec_header)
    collator = genova.data.GenovaCollator(self.cfg)
    loader = DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=collator, pin_memory=True)
    if isinstance(self.cfg.infer.device, int):
        loader = genova.data.DataPrefetcher(loader, self.device)
    return loader
def model_save(self):
    """Persist model and optimizer state (unwrapping DDP when distributed)."""
    module = self.model.module if self.distributed else self.model
    torch.save({'model_state_dict': module.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict()}, self.persistent_file_name)
def train(self):
total_step = 0
loss_cum = 0
if self.cfg.task =='node_classification':
for epoch in range(0, self.cfg.train.total_epoch):
for encoder_input, label, label_mask in self.train_dl:
total_step += 1
if total_step%self.cfg.train.detect_period == 1: loss_cum = 0
self.optimizer.zero_grad()
with autocast():
output = self.model(encoder_input=encoder_input).squeeze(-1)
loss = self.train_loss_fn(output[label_mask],label[label_mask])
loss_cum += loss.item()
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
if total_step%self.cfg.train.detect_period == 0: yield loss_cum/self.cfg.train.detect_period, total_step, epoch
else:
for epoch in range(0, self.cfg.train.total_epoch):
for encoder_input, decoder_input, tgt, label, label_mask, _ in self.train_dl:
total_step += 1
if total_step%self.cfg.train.detect_period == 1: loss_cum = 0
self.optimizer.zero_grad()
with autocast():
output = self.model(encoder_input=encoder_input, decoder_input=decoder_input, tgt=tgt)
if self.cfg.task == 'optimal_path': output = output.log_softmax(-1)
loss = self.train_loss_fn(output[label_mask],label[label_mask])
loss_cum += loss.item()
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
if total_step%self.cfg.train.detect_period == 0: yield loss_cum/self.cfg.train.detect_period, total_step, epoch
def eval(self) -> float:
loss_cum = torch.Tensor([0]).to(self.device)
total_seq_len = torch.Tensor([0]).to(self.device)
if self.cfg.task =='node_classification':
total_match = torch.Tensor([0]).to(self.device)
true_positive = torch.Tensor([0]).to(self.device)
total_positive = torch.Tensor([0]).to(self.device)
total_true = torch.Tensor([0]).to(self.device)
for encoder_input, label, label_mask in self.eval_dl:
with torch.no_grad():
with autocast():
output = self.model(encoder_input=encoder_input)
output = output[label_mask].squeeze(-1)
label = label[label_mask]
loss = self.eval_loss_fn(output,label)
output = (output>0.5).float()
loss_cum += loss
total_seq_len += label_mask.sum()
total_match += (output == label).sum()
true_positive += ((output == label)[label == 1]).sum()
total_positive += (label == 1).sum()
total_true += (output == 1).sum()
if self.distributed:
dist.barrier()
dist.all_reduce(loss_cum)
dist.all_reduce(total_seq_len)
dist.all_reduce(total_match)
dist.all_reduce(true_positive)
dist.all_reduce(total_positive)
dist.all_reduce(total_true)
return (loss_cum/total_seq_len).item(), \
(total_match/total_seq_len).item(), \
(true_positive/total_positive).item(), \
(true_positive/total_true).item()
else:
for encoder_input, decoder_input, tgt, label, label_mask, _ in self.eval_dl:
with torch.no_grad():
with autocast():
output = self.model(encoder_input=encoder_input, decoder_input=decoder_input, tgt=tgt)
if self.cfg.task == 'optimal_path': output = output.log_softmax(-1)
loss = self.eval_loss_fn(output[label_mask],label[label_mask])
loss_cum += loss
total_seq_len += label_mask.sum()
if self.distributed:
dist.barrier()
dist.all_reduce(loss_cum)
dist.all_reduce(total_seq_len)
return (loss_cum/total_seq_len).item()
def inference(self) -> float:
if self.cfg.task == 'optimal_path':
optimal_path_infer(self.cfg, self.test_spec_header, self.test_dl, self.model, self.device)
elif self.cfg.task == 'sequence_generation':
seq_generation_infer(self.cfg, self.test_spec_header, self.test_dl, self.model, self.device)
| AmadeusloveIris/GraphNovo | genova/task/task.py | task.py | py | 10,916 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "torch.distributed.init_process_group",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name":... |
71300190183 | import time
import tqdm
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from utils import *
def prepare_sequence(seq, word2idx):
idxs = [word2idx[w] for w in seq]
return torch.tensor(idxs, dtype=torch.long)
class BiLSTM_CRF_S(nn.Module):
def __init__(self, vocab_size, label2idx, embedding_dim, hidden_size, num_layers, dropout_ratio=0.3):
super(BiLSTM_CRF_S, self).__init__()
self.vocab_size = vocab_size
self.labelset_size = len(label2idx)
self.embedding_dim = embedding_dim
self.label2idx = label2idx
self.hidden_size = hidden_size
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
# 非BatchFirst在实际其实更方便...
self.LSTM = nn.LSTM(
input_size=embedding_dim,
hidden_size=hidden_size // 2,
num_layers=num_layers,
bidirectional=True,
# batch_first=True
)
# 把output转化为label
self.output2label = nn.Linear(hidden_size, self.labelset_size)
# 标签的转移得分
# transitons[i, j] 表示 从 i 转移到 j 的得分
self.transitions = nn.Parameter(
torch.randn(self.labelset_size, self.labelset_size, requires_grad=True))
# 不可能从STOP转移到其他标签,也不可能从其他标签转移到START
# 必须要加detach
self.transitions.detach()[label2idx[STOP_label], :] = -10000
self.transitions.detach()[:, label2idx[START_label]] = -10000
self.dropout1 = nn.Dropout(p=dropout_ratio)
self.dropout2 = nn.Dropout(p=dropout_ratio)
self.hidden = None
self.seq_length = None
self.batch_size = None
def init_uniform(self):
for ind in range(0, self.LSTM.num_layers):
weight = eval('self.LSTM.weight_ih_l' + str(ind))
bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
nn.init.uniform_(weight, -bias, bias)
weight = eval('self.LSTM.weight_hh_l' + str(ind))
bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
nn.init.uniform_(weight, -bias, bias)
if self.LSTM.bias:
for ind in range(0, self.LSTM.num_layers):
weight = eval('self.LSTM.bias_ih_l' + str(ind))
weight.data.zero_()
weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
weight = eval('self.LSTM.bias_hh_l' + str(ind))
weight.data.zero_()
weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
bias = np.sqrt(3.0 / self.embedding_dim)
nn.init.uniform_(self.embeddings.weight, -bias, bias)
def init_hidden(self):
r"""
初始化隐藏层参数
:param batch_size: batch_size
:return:
"""
return (torch.randn(2, self.batch_size, self.hidden_size // 2),
torch.randn(2, self.batch_size, self.hidden_size // 2))
def _get_scores(self, sentences):
'''
得到序列的特征
:param sentences: tensor [length, batch_size]
:return: feats tensor [length, batch_size, labelset_size]
'''
self.hidden = self.init_hidden()
# [length, batch_size] -> [length, batch_size, dim]
sentences_embeddings = self.embeddings(sentences)
sentences_embeddings = self.dropout1(sentences_embeddings)
# outputs [length, batch_size, hidden_size]
outputs, self.hidden = self.LSTM(sentences_embeddings, self.hidden)
outputs = self.dropout2(outputs)
# [length, batch_size, labelset_size]
feats = self.output2label(outputs)
return feats
def _forward_all_logsumexp(self, scores, masks):
r"""
计算所有可能路径的log_sum_exp
:param scores: tensor [length, batch_size, labelset_size]
LSTM传过来的emit score
:param masks: tensor [length, batch_size]
:return: terminal_score: tensor [batch_size]
"""
# 到当前单词,且状态为i的所有路径的log_sum_exp
dp = torch.full((self.labelset_size, self.batch_size), -10000.)
dp[self.label2idx[START_label]] = 0.
for i in range(self.seq_length):
# [batch_size, labelset_size]
score = scores[i]
# [batch_size] -> [batch_size, 1] -> [batch_size, labelset_size]
mask = masks[i].unsqueeze(dim=1).expand(self.batch_size, self.labelset_size)
# [labelset_size_from, batch_size, labelset_size_to]
tmp = dp.transpose(0, 1).unsqueeze(dim=2).expand(self.labelset_size, self.batch_size, self.labelset_size) + \
score.unsqueeze(dim=0).expand(self.labelset_size, self.batch_size, self.labelset_size) + \
self.transitions.unsqueeze(dim=1).expand(self.labelset_size, self.batch_size, self.labelset_size)
# [labelset_size_from, batch_size, labelset_size_to] -> [batch_size, labelset_size_to]
tmp = log_sum_exp(tmp, dim=0)
# mask为1的值更新,为0的不再更新
dp.masked_scatter_(mask, tmp.masked_select(mask))
# dp = dp + self.transitions[self.label2idx[STOP_label]]
dp = log_sum_exp(dp, dim=1)
return dp
def _get_gold_score(self, scores: torch.tensor, labels, masks):
'''
计算出所提供的正确路径得分数
:param scores: tensor [length, batch_size, labelset_size]
LSTM传过来的emit score
:param labels: tensor [length, batch_size]
每个序列正确的路径, 已经加了start
:param masks: tensor [length, batch_size]
:return:
scores: tensor [batch_size]
'''
dp = torch.zeros(self.batch_size)
st = torch.full([1, self.batch_size], self.label2idx[START_label], dtype=torch.long)
# [length + 1, batch_size]
labels = torch.cat(
[st, labels], dim=0
)
for i in range(self.seq_length):
# [batch_size, labelset_size]
score = scores[i]
# [batch_size]
mask = masks[i]
tmp = dp + self.transitions[labels[i], labels[i + 1]] + score[
range(self.batch_size), labels[i + 1]]
# mask为1的值更新为新的tmp值,为0的不再更新
dp.masked_scatter_(mask, tmp.masked_select(mask))
# label最后一个永远是pad....
# dp = dp + self.transitions[labels[-1], self.label2idx[STOP_label]]
# print(time.time() - st)
return dp
def neg_log_likelihood(self, sentences, labels, masks):
r"""
计算正确路径的负对数似然概率
:param sentences: tensor [length, batch_size]
:param labels: tensor [length, batch_size]
正确的label序列
:param masks:tensor [length, batch_size]
:return: FloatTensor
"""
self.set_batch_seq_size(sentences)
# [length, batch_size, labelset_size]
feats = self._get_scores(sentences)
forward_score = self._forward_all_logsumexp(feats, masks)
gold_score = self._get_gold_score(feats, labels, masks)
# print('forward_score: ', forward_score)
# print('gold_score :', gold_score)
return (forward_score - gold_score).sum() / self.batch_size
def _viterbi_decode(self, feats, masks):
r'''
使用维特比算法进行解码,找到最可能的序列结果
:param feats: tensor [length, batch_size, labelset_size]
LSTM传过来的特征向量
:param masks: tensor [length, batch_size]
:return: best_scores tensor [batch_size]
best_paths tensor [length, batch_size]
'''
# 记录每个节点由哪个父节点转移过来
parents = []
# 到当前单词,且状态为i的所有路径中log_sum_exp最大的值
dp = torch.full((self.labelset_size, self.batch_size), -10000.)
# START_label has all of the score.
dp[self.label2idx[START_label]] = 0.
for i in range(feats.shape[1]):
# [batch_size, labelset_size]
feat = feats[i]
# [batch_size] -> [batch_size, 1]
mask = masks[i].unsqueeze(dim=1)
# [labelset_size, batch_size, labelset_size]
# TODO: 搞清楚这些维数!!
tmp = dp.transpose(0, 1).unsqueeze(dim=2) + \
feat.unsqueeze(dim=0) + \
self.transitions.unsqueeze(dim=1)
max_scores, best_choose = tmp.max(dim=0)
# 添加路径信息,[batch_size, labelset_size]
parents.append(best_choose)
# 由于只保留了一条路径,可以省去log_sum_exp过程
dp.masked_scatter_(mask, max_scores.masked_select(mask))
# [batch_size, labelset_size]
# terminal_var = dp + self.transitions[:, self.label2idx[STOP_label]]
terminal_var = dp
# [batch_size]
best_scores, best_path_labels = terminal_var.max(dim=1)
best_paths = [best_path_labels]
for parent in reversed(parents):
best_path_labels = parent[range(parent.shape[0]), best_paths[-1]]
best_paths.append(best_path_labels)
best_paths.pop()
best_paths.reverse()
# 转化为 [batch_size, length]
best_paths = torch.stack(best_paths).transpose(0, 1)
return best_scores, best_paths
def set_batch_seq_size(self, sentence):
"""
set batch size and sequence length
"""
tmp = sentence.size()
self.batch_size = tmp[1]
self.seq_length = tmp[0]
def predict(self, sentences, masks):
r"""
预测数据的最可能序列以及得分
:param sentences: tensor [length, batch_size]
:return:
scores: tensor [batch_size]
paths: list [tensor: [batch_size]....] with length=length
"""
self.set_batch_seq_size(sentences)
# Get the emission scores from the BiLSTM
feats = self._get_scores(sentences)
# Find the best path, given the features.
scores, paths = self._viterbi_decode(feats, masks)
return scores, paths
class BiLSTM_CRF_L(nn.Module):
r"""
Large LSTM,直接使用nn.Linear(hidden_dim, self.labelset_size * self.labelset_size)
代替了转移矩阵,并且在制作数据集的时候采用label_i * labelset_size + label_(i + 1)
的方法,可以一次计算出gold score,在之后也不用每次加trans,大大提高了运行速度,但是
内存占用更大
"""
def __init__(self, vocab_size, label2idx, embedding_dim, hidden_dim, num_layers, dropout_ratio):
super(BiLSTM_CRF_L, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.LSTM = nn.LSTM(embedding_dim, hidden_dim // 2,
num_layers=num_layers, bidirectional=True)
self.num_layers = num_layers
self.dropout1 = nn.Dropout(p=dropout_ratio)
self.dropout2 = nn.Dropout(p=dropout_ratio)
self.labelset_size = len(label2idx)
self.label2idx = label2idx
self.start_tag = label2idx[START_label]
self.end_tag = label2idx[PAD_label]
self.batch_size = 1
self.seq_length = 1
self.hidden2tag = nn.Linear(hidden_dim, self.labelset_size * self.labelset_size)
def init_uniform(self):
# LSTM
r"""
线性初始化网络
:return:
"""
for ind in range(0, self.LSTM.num_layers):
weight = eval('self.LSTM.weight_ih_l' + str(ind))
bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
nn.init.uniform_(weight, -bias, bias)
weight = eval('self.LSTM.weight_hh_l' + str(ind))
bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
nn.init.uniform_(weight, -bias, bias)
if self.LSTM.bias:
for ind in range(0, self.LSTM.num_layers):
weight = eval('self.LSTM.bias_ih_l' + str(ind))
weight.data.zero_()
weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
weight = eval('self.LSTM.bias_hh_l' + str(ind))
weight.data.zero_()
weight.data[self.LSTM.hidden_size: 2 * self.LSTM.hidden_size] = 1
# embedding
# nn.Embeddig.weight默认初始化方式就是N(0, 1)分布
bias = np.sqrt(3.0 / self.embedding_dim)
nn.init.uniform_(self.embeddings.weight, -bias, bias)
# Linear
bias = np.sqrt(6.0 / (self.hidden2tag.weight.size(0) +
self.hidden2tag.weight.size(1)))
nn.init.uniform_(self.hidden2tag.weight, -bias, bias)
def rand_init_hidden(self):
"""
随机初始化hidden
"""
return torch.Tensor(
torch.randn(2 * self.num_layers, self.batch_size, self.hidden_dim // 2)), torch.Tensor(
torch.randn(2 * self.num_layers, self.batch_size, self.hidden_dim // 2))
def set_batch_seq_size(self, sentence):
"""
:param sentence [length, batch_size]
设置batch_size,seq_length
"""
tmp = sentence.size()
self.seq_length = tmp[0]
self.batch_size = tmp[1]
def load_pretrained_embedding(self, pre_embeddings):
"""
加载预训练embedding
"""
assert (pre_embeddings.size()[1] == self.embedding_dim)
self.embeddings.weight = nn.Parameter(pre_embeddings)
def _get_gold_score(self, scores, targets, masks):
r"""
计算正确路径得分
:param scores: [length, batch_size, labelset_size, labelset_size]
:param targets: [length, batch_size]
:param masks: [length, batch_size]
:return: gold_score tensor
"""
# [length, batch_size] -> [length, batch_size, 1]
targets = targets.unsqueeze(dim=2)
gold_score = torch.gather(scores.view(
self.seq_length, self.batch_size, -1), 2, targets).view(self.seq_length,
self.batch_size) # seq_len * batch_size
gold_score = gold_score.masked_select(masks).sum()
return gold_score
def _get_all_logsumexp(self, scores, masks):
r"""
计算所有路径的得分之和
:param scores: [length, batch_size, labelset_size, labelset_size]
:param masks: [length, batch_size]
:return:
"""
seq_iter = enumerate(scores)
# [batch_size, labelset_size_from, labelset_size_to]
_, inivalues = seq_iter.__next__()
# [batch_size, labelset_size_to], ps: 不加clone会报错
# 到当前单词,且状态为i的所有路径的log_sum_exp
dp = inivalues[:, self.start_tag, :].clone()
# 从正式的第一个label开始迭代
for idx, cur_values in seq_iter:
# [batch_size] -> [batch_size, labelset_size]
mask = masks[idx].view(self.batch_size, 1).expand(self.batch_size, self.labelset_size)
# cur_values: [batch_size, labelset_size_from, labelset_size_to]
cur_values = cur_values + dp.contiguous().view(self.batch_size, self.labelset_size,
1).expand(self.batch_size, self.labelset_size,
self.labelset_size)
# [batch_size, from_target, to_target] -> [batch_size, to_target]
tmp = log_sum_exp(cur_values, dim=1)
# 0保留自身值,1采用新的source值
dp.masked_scatter_(mask, tmp.masked_select(mask))
dp = dp[:, self.end_tag].sum()
return dp
def neg_log_likelihood(self, sentences, targets, masks, hidden=None):
r"""
计算损失函数
:param sentences: [length, batch_size]
:param targets: [length, batch_size]
:param masks: [length, batch_size]
:param hidden:
:return:
"""
# [length, batch_size, labelset_size, labelset_size]
crf_scores = self.forward(sentences)
gold_score = self._get_gold_score(crf_scores, targets, masks)
forward_score = self._get_all_logsumexp(crf_scores, masks)
loss = (forward_score - gold_score) / self.batch_size
# print(loss)
return loss
def _viterbi_decode(self, crf_scores, masks):
r'''
使用维特比算法进行解码,找到最可能的序列结果
:param crf_scores: tensor [length, batch_size, labelset_size, labelset_size]
LSTM传过来的emit score + trans score
:param masks: tensor [length, batch_size]
:return: scores tensor [batch_size]
paths [batch_size, seq_length - 1]
'''
# 方便后面的mask fill
masks = ~masks
path = torch.LongTensor(self.seq_length - 1, self.batch_size)
seq_iter = enumerate(crf_scores)
# [batch_size, from_labelset_size, to_labelset_size]
_, inivalues = seq_iter.__next__()
# 只保留start的初始得分, [batch_size, to_labelset_size]
forscores = inivalues[:, self.start_tag, :].clone()
parents = []
# 从正式的第一个label开始迭代
for idx, cur_values in seq_iter:
# [batch_size] -> [batch_size, labelset_size]
mask = masks[idx].view(self.batch_size, 1).expand(self.batch_size, self.labelset_size)
# cur_values: [batch_size, from_target, to_target]
cur_values = cur_values + forscores.contiguous().view(self.batch_size, self.labelset_size,
1).expand(self.batch_size, self.labelset_size,
self.labelset_size)
forscores, cur_parent = torch.max(cur_values, 1)
# [batch_size, to_target], mask是1是直接pad
cur_parent.masked_fill_(mask, self.end_tag)
parents.append(cur_parent)
pointer = parents[-1][:, self.end_tag]
path[-1] = pointer
for idx in range(len(parents) - 2, -1, -1):
back_point = parents[idx]
index = pointer.contiguous().view(-1, 1)
pointer = torch.gather(back_point, 1, index).view(-1)
path[idx] = pointer
return forscores, path.transpose(0, 1)
def predict(self, sentences, masks, hidden=None):
r"""
进行预测,计算得分和最优路径
:param sentences: [length, batch_size]
:param masks: [length, batch_size]
:return:
"""
self.eval()
crf_scores = self.forward(sentences)
scores, path = self._viterbi_decode(crf_scores, masks)
return scores, path
def forward(self, sentences, hidden=None):
r"""
计算crf_scores
:param sentences: [length, batch_size]
:param hidden: LSTM的初始隐藏层
:return: crf_scores [length, batch_size, labelset_size_from, labelset_size_to]
crf_scores[0, 0, 1, 10]: 第一个句的第一个单词 从label_1 -> label_10的emit_score + trans_score
"""
self.set_batch_seq_size(sentences)
embeds = self.embeddings(sentences)
d_embeds = self.dropout1(embeds)
# [length, batch_size, hidden_size]
lstm_out, hidden = self.LSTM(d_embeds, hidden)
lstm_out = lstm_out.view(-1, self.hidden_dim)
d_lstm_out = self.dropout2(lstm_out)
crf_scores = self.hidden2tag(d_lstm_out).view(-1, self.labelset_size, self.labelset_size)
crf_scores = crf_scores.view(self.seq_length, self.batch_size, self.labelset_size, self.labelset_size)
return crf_scores
| YaooXu/Chinese_seg_ner_pos | BiLSTM_CRF.py | BiLSTM_CRF.py | py | 20,207 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.tensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"lin... |
8755135805 | # -*- coding: utf-8 -*-
import logging
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class DeliveryCarrier(models.Model):
_inherit = 'delivery.carrier'
of_use_sale = fields.Boolean(string=u"Utilisable à la vente")
of_use_purchase = fields.Boolean(string=u"Utilisable à l'achat")
of_supplier_ids = fields.Many2many(
comodel_name='res.partner', string="Fournisseurs", domain="[('supplier','=',True)]")
@api.one
def get_price(self):
super(DeliveryCarrier, self).get_price()
if not self.price:
PurchaseOrder = self.env['purchase.order']
purchase_id = self.env.context.get('purchase_id', False)
if purchase_id:
purchase = PurchaseOrder.browse(purchase_id)
if self.delivery_type not in ['fixed', 'base_on_rule']:
computed_price = 0.0
else:
carrier = self.verify_carrier(purchase.partner_id)
if carrier:
try:
computed_price = carrier.get_price_available_purchase(purchase)
self.available = True
except UserError as e:
# No suitable delivery method found, probably configuration error
_logger.info("Carrier %s: %s", carrier.name, e.name)
computed_price = 0.0
else:
computed_price = 0.0
self.price = computed_price * (1.0 + (float(self.margin) / 100.0))
@api.multi
def get_price_available_purchase(self, purchase):
self.ensure_one()
weight = volume = quantity = 0
total_delivery = 0.0
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.of_is_delivery:
total_delivery += line.price_total
if not line.product_id or line.of_is_delivery:
continue
qty = line.product_uom._compute_quantity(line.product_qty, line.product_id.uom_id)
weight += (line.product_id.weight or 0.0) * qty
volume += (line.product_id.volume or 0.0) * qty
quantity += qty
total = (purchase.amount_total or 0.0) - total_delivery
total = purchase.currency_id.with_context(date=purchase.date_order).compute(total, purchase.company_id.currency_id)
return self.get_price_from_picking(total, weight, volume, quantity)
def get_difference_from_picking(self, total, weight, volume, quantity):
self.ensure_one()
price = 0.0
price_dict = {'price': total, 'volume': volume, 'weight': weight, 'wv': volume * weight, 'quantity': quantity}
for line in self.price_rule_ids:
test = safe_eval(line.variable + line.operator + str(line.max_value), price_dict)
if test:
price = line.list_base_price + line.list_price * price_dict[line.variable_factor]
break
return price
| odof/openfire | of_delivery/models/delivery_carrier.py | delivery_carrier.py | py | 3,214 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Boole... |
4023780816 | from bs4 import BeautifulSoup
import urllib.request
import os
class Sachalayatan:
sachDS = {}
def __init__(self, BASE_URL):
self.sachDS['BASE_URL'] = BASE_URL
def getHtml(self, url=''):
if len(url) > 0:
source = urllib.request.urlopen(url).read()
soup = BeautifulSoup(source,'lxml')
return soup
else:
source = urllib.request.urlopen(self.sachDS['BASE_URL']).read()
soup = BeautifulSoup(source, 'lxml')
return soup
def getMainNavURL(self, html):
urls = html.select("ul#subnavlist > li > a")
urlList = []
for url in urls:
urlList.append(url.get('href'))
self.sachDS['main_nav'] = urlList
self.writeListInFile('main_nav.txt', 'a', urlList)
return urlList
def getPaginationFromMainURL(self):
fileName = 'navigationlink.txt'
mainNavList = [line.rstrip('\n') for line in open('./main_nav.txt')]
if os.path.isfile(fileName) and os.access(fileName, os.R_OK):
open(fileName, "w").close()
for nav in mainNavList:
print('working with: ', nav)
print(nav)
self.writeLineInFile('navigationlink.txt', 'a', nav)
html = self.getHtml(nav)
urls = html.select('ul.pager > li.pager-item > a')
for url in urls:
print(url)
self.writeLineInFile('navigationlink.txt', 'a', url.get('href'))
self.writeLineInFile('navigationlink.txt', 'a', '')
def writeListInFile(self, fileName, mode, writeList):
# print(type(writeList))
txtFile = open(fileName, mode, encoding="utf-8")
for line in writeList:
txtFile.write(line + "\n")
txtFile.write("\n")
txtFile.close()
def writeLineInFile(self, fileName, mode, line):
# print(type(writeList))
txtFile = open(fileName, mode, encoding="utf-8")
txtFile.write(line + "\n")
txtFile.close()
def showList(self, itemList):
for iList in itemList:
print(iList)
BASE_URL = 'http://www.sachalayatan.com/'
sachObj = Sachalayatan(BASE_URL=BASE_URL)
# html = sachObj.getHtml()
# sachObj.getMainNavURL(html)
sachObj.getPaginationFromMainURL() | kakanghosh/sachalayatan | scrapping.py | scrapping.py | py | 2,300 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 12,
"usage_type": "name"
},
{
"api_nam... |
14451113965 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 15:27:09 2017
@author: Administrator
"""
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
model = Sequential() #模型 初始化
model.add( Dense( 20, 64)) #添加 输入 层( 20 节点)、 第一 隐藏 层( 64 节点) 的 连接
model.add( Activation(' tanh')) #第一 隐藏 层 用 tanh 作为 激活 函数
model.add( Dropout( 0.5)) #使用 Dropout 防止 过 拟 合
model.add( Dense( 64, 64)) #添加 第一 隐藏 层( 64 节点)、 第二 隐藏 层( 64 节点) 的 连接
model.add( Activation(' tanh')) #第二 隐藏 层 用 tanh 作为 激活 函数
model.add( Dropout( 0.5)) #使用 Dropout 防止 过 拟 合
model.add( Dense( 64, 1)) #添加 第二 隐藏 层( 64 节点)、 输出 层( 1 节点) 的 连接
model.add( Activation(' sigmoid')) #输出 层 用 sigmoid 作为 激活 函数
sgd = SGD( lr= 0.1, decay= 1e-6, momentum= 0.9, nesterov= True) #定义 求解 算法
model.compile( loss=' mean_ squared_ error', optimizer= sgd) #编译 生成 模型, 损失 函数 为 平均 误差 平方 和
model.fit( X_train, y_train, nb_epoch= 20, batch_size= 16) #训练 模型
score = model.evaluate( X_test, y_test, batch_size= 16) #测试 模型
| golfbears/gameofclassname | keras_sample.py | keras_sample.py | py | 1,334 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "keras.models.Sequential",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "keras.layers.core.Dense",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "keras.layers.core.Activation",
"line_number": 14,
"usage_type": "call"
},
{
"api_... |
29426080346 | import requests
from bs4 import BeautifulSoup
import csv
url = "https://www.gov.uk/search/news-and-communications"
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
titres_bs = soup.find_all('a')
titres = []
for titre in titres_bs:
titres.append(titre.string)
print(titres)
en_tete = ["lien"]
with open('data.csv', 'w') as fichier_csv:
write = csv.writer(fichier_csv, delimiter=',')
write.writerow(en_tete)
for liens in titres:
write.writerow([liens]) | Lemak243/python_ | ecrire.py | ecrire.py | py | 512 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 21,
"usage_type": "call"
}
] |
12958605056 | import sys
from requests import get
from core.colors import bad, info, red, green, end
def honeypot(inp):
honey = 'https://api.shodan.io/labs/honeyscore/%s?key=C23OXE0bVMrul2YeqcL7zxb6jZ4pj2by' % inp
try:
result = get(honey).text
except:
result = None
sys.stdout.write('%s No information available' % bad + '\n')
if result:
if float(result) < 0.5:
color = green
else:
color = red
probability = str(float(result) * 10)
sys.stdout.write('%s Honeypot Probabilty: %s%s%%%s' %
(info, color, probability, end) + '\n')
| s0md3v/ReconDog | plugins/honeypot.py | honeypot.py | py | 635 | python | en | code | 1,623 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "core.colors.bad",
"l... |
70388179303 | from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from foodgram_backend.settings import STANDARTLENGTH
User = get_user_model()
class Tag(models.Model):
name = models.CharField(
max_length=STANDARTLENGTH,
unique=True,
blank=False,
verbose_name='Название тега')
color = models.CharField(
max_length=7,
unique=True,
blank=False,
verbose_name='Цвет')
slug = models.SlugField(
max_length=STANDARTLENGTH,
unique=True,
blank=False,
verbose_name='Слаг')
def __str__(self):
return self.name
class Ingredient(models.Model):
name = models.CharField(
max_length=STANDARTLENGTH,
verbose_name='Название ингредиента',
blank=False,
db_index=True)
measurement_unit = models.CharField(
max_length=STANDARTLENGTH,
verbose_name='Единицы измерения',
blank=False)
def __str__(self):
return self.name
class Recipe(models.Model):
author = models.ForeignKey(
User,
related_name='recipes',
on_delete=models.CASCADE,
blank=False,
verbose_name='Автор')
name = models.CharField(
max_length=STANDARTLENGTH,
verbose_name='Название рецепта',
blank=False,)
image = models.ImageField(
upload_to='recipes/images/',
blank=False)
text = models.TextField(
blank=False,
verbose_name='Описание')
ingredients = models.ManyToManyField(
Ingredient,
through='RecipeIngredient',
blank=False,)
tags = models.ManyToManyField(
Tag,
through='RecipeTag',
blank=False,
verbose_name='Теги')
cooking_time = models.PositiveSmallIntegerField(
validators=[MinValueValidator(1),
MaxValueValidator(1000)],
blank=False,
verbose_name='Время приготовления',
help_text='в минутах')
pub_date = models.DateTimeField(
auto_now_add=True,
editable=False,
verbose_name='Дата создания')
class Meta:
constraints = [models.UniqueConstraint(
fields=['name', 'author'],
name='unique_name_author')]
def __str__(self):
return f'{self.name} {self.author}'
class RecipeIngredient(models.Model):
ingredient = models.ForeignKey(
Ingredient,
on_delete=models.CASCADE,
verbose_name='Ингредиент')
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Рецепт')
amount = models.PositiveSmallIntegerField(
validators=[MinValueValidator(1),
MaxValueValidator(1000)],
verbose_name='Количество')
class Meta:
constraints = [models.UniqueConstraint(
fields=['recipe', 'ingredient'],
name='unique_recipe_ingredient')]
def __str__(self):
return f'{self.recipe} {self.ingredient}'
class RecipeTag(models.Model):
tag = models.ForeignKey(
Tag,
on_delete=models.CASCADE,
verbose_name='Тег')
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Рецепт')
class Meta:
constraints = [models.UniqueConstraint(
fields=['recipe', 'tag'],
name='unique_recipe_tag')]
def __str__(self):
return f'{self.recipe} {self.tag}'
class ShoppingCart(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Пользователь',
related_name='shopping')
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Рецепт продуктовой корзины',
related_name='shopping')
class Meta:
constraints = [models.UniqueConstraint(
fields=['recipe', 'user'],
name='unique_favorite_recipe')]
def __str__(self):
return f'{self.recipe} {self.user}'
class Favourite(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Пользователь')
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Избранный рецепт',
related_name='favorite')
class Meta:
constraints = [models.UniqueConstraint(
fields=['recipe', 'user'],
name='unique_recipe_in_shopping_cart')]
def __str__(self):
return f'{self.recipe} {self.user}'
| Gustcat/foodgram-project-react | backend/recipes/models.py | models.py | py | 4,848 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"ap... |
41133436478 | from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import *
# Demo: two stacked push buttons using the Fusion style, a stylesheet for the
# button geometry/background, and a palette for the button text colour.
app = QApplication([])
app.setStyle('Fusion')

# Palette paints button text blue; the stylesheet handles margins/background.
button_palette = QPalette()
button_palette.setColor(QPalette.ButtonText, Qt.blue)
app.setStyleSheet("QPushButton { margin: 10ex; background-color: #4747D2 }")
app.setPalette(button_palette)

# Build the window: a vertical column holding the two buttons.
window = QWidget()
column = QVBoxLayout()
for caption in ('Top', 'Bottom'):
    column.addWidget(QPushButton(caption))
window.setLayout(column)

window.show()
app.exec_()
{
"api_name": "PyQt5.QtGui.QPalette",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QPalette.ButtonText",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtGui.QPalette",
"line_number": 9,
"usage_type": "name"
},
{
"api_n... |
16571409721 | import os
import io
import hashlib
from base64 import standard_b64encode
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import HTTPError
from infi.pyutils.contexts import contextmanager
from infi.pypi_manager import PyPI, DistributionNotFound
from logging import getLogger
logger = getLogger()
def send_setuptools_request(repository, username, password, data):
    """POST a distutils-style ``multipart/form-data`` upload to *repository*.

    Code adapted from distribute 40.9.0 (setuptools/command/upload.py) with
    changed logging and a boolean return value.
    TODO use code from twine?

    :param repository: upload URL of the package index.
    :param username: basic-auth user name (PyPI only accepts ascii).
    :param password: basic-auth password (PyPI only accepts ascii).
    :param data: mapping of form fields; a value may be a str, a list of
        values, or a ``(filename, bytes)`` tuple for file uploads.
    :returns: ``True`` on HTTP 200, ``False`` otherwise.
    """
    # set up the authentication
    # The exact encoding of the authentication string is debated.
    # Anyway PyPI only accepts ascii for both username or password.
    user_pass = (username + ":" + password).encode('ascii')
    auth = "Basic " + standard_b64encode(user_pass).decode('ascii')

    # Build up the MIME payload for the POST data
    boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
    sep_boundary = b'\r\n--' + boundary.encode('ascii')
    end_boundary = sep_boundary + b'--\r\n'
    body = io.BytesIO()
    for key, value in data.items():
        # handle multiple entries for the same name
        values = value if isinstance(value, list) else [value]
        for item in values:
            # BUGFIX: build the part header per entry; the original mutated
            # ``title`` inside this loop, so a second tuple under the same
            # key would inherit the previous entry's filename suffix.
            title = '\r\nContent-Disposition: form-data; name="%s"' % key
            if type(item) is tuple:
                title += '; filename="%s"' % item[0]
                payload = item[1]
            else:
                payload = str(item).encode('utf-8')
            body.write(sep_boundary)
            body.write(title.encode('utf-8'))
            body.write(b"\r\n\r\n")
            body.write(payload)
    body.write(end_boundary)
    body = body.getvalue()

    logger.info("Submitting %s to %s" % (data['content'][0], repository))

    # build the Request
    headers = {
        'Content-type': 'multipart/form-data; boundary=%s' % boundary,
        'Content-length': str(len(body)),
        'Authorization': auth,
    }
    request = Request(repository, data=body,
                      headers=headers)

    # send the data
    try:
        result = urlopen(request)
        status = result.getcode()
        reason = result.msg
    except HTTPError as e:
        # an HTTP error still carries a status we can report on
        status = e.code
        reason = e.msg
    except OSError:
        logger.exception("")
        raise
    if status == 200:
        return True
    logger.error('Upload failed (%s): %s' % (status, reason))
    return False
def mirror_file(repository_config, filename, package_name, package_version, metadata):
    """Upload a single distribution file to the configured repository.

    Merges *metadata* with the constant fields setuptools sends plus data
    about the file itself, then calls the function that actually sends the
    post request.
    """
    # ``with`` guarantees the handle is closed even if the read fails
    # (the original used open/read/close and could leak on error).
    with open(filename, 'rb') as f:
        content = f.read()
    basename = os.path.basename(filename)
    data = {
        ':action': 'file_upload',
        'protocol_version': '1',
        'metadata_version': '2.1',
        'content': (basename, content),
        'md5_digest': hashlib.md5(content).hexdigest(),
        'name': package_name,
        'version': package_version,
    }
    data.update(metadata)
    repository = repository_config["repository"]
    username = repository_config.get("username", "")
    password = repository_config.get("password", "")
    send_setuptools_request(repository, username, password, data)
@contextmanager
def temp_urlretrieve(url, localpath):
    """Context manager: download *url* to *localpath*, deleting it on exit.

    :raises requests.HTTPError: if the server answers with an error status,
        so that a 404/500 error page is never saved and uploaded as if it
        were a distribution file (the original skipped this check).
    """
    import requests
    logger.info("Retrieving {}".format(url))
    req = requests.get(url)
    req.raise_for_status()
    with open(localpath, 'wb') as fd:
        fd.write(req.content)
    try:
        yield
    finally:
        os.remove(localpath)
def mirror_release(repository_config, package_name, version, version_data, release_data):
    """ mirror a release (e.g. one sdist/bdist_egg etc.) based on data retrieved from
    pypi about the package version and the release itself. """
    # Assemble the metadata to post, download the file, then hand off to
    # mirror_file which finalizes the payload and posts it to the server.
    metadata = {
        'filetype': release_data['packagetype'],
        'pyversion': '' if release_data['python_version'] == 'source' else release_data['python_version'],
        'comment': release_data['comment_text'],
    }
    # Version-level fields copied through verbatim when PyPI provides them.
    metadata_keys = ('platform', 'supported_platform', 'summary', 'description',
                     'keywords', 'home_page', 'download_url', 'author', 'author_email',
                     'maintainer', 'maintainer_email', 'license', 'classifier', 'classifiers',
                     'requires_dist', 'provides_dist', 'obsoletes_dist',
                     'requires_python', 'requires_external', 'project_urls',
                     'provides_extras', 'description_content_type')
    metadata.update({key: version_data[key]
                     for key in metadata_keys if key in version_data})
    # Different index implementations use singular vs. plural; keep both set.
    if "classifier" in metadata:
        metadata["classifiers"] = metadata["classifier"]
    if "classifiers" in metadata:
        metadata["classifier"] = metadata["classifiers"]
    with temp_urlretrieve(release_data['url'], release_data['filename']):
        return mirror_file(repository_config, release_data['filename'], package_name, version, metadata)
def get_repository_config(server_name):
    """Resolve a pypirc repository alias to its repository/username/password.

    distutils normally performs this translation inside a command run, so we
    have to fool it a little by instantiating the command object directly.
    NOTE(review): relies on the private ``_read_pypirc`` API — confirm on
    distutils upgrades.
    """
    from distutils.config import PyPIRCCommand
    from distutils.dist import Distribution

    command = PyPIRCCommand(Distribution())
    command.repository = server_name
    return command._read_pypirc()
def mirror_package(server_name, package_name, version=None, pypi=None):
    """Mirror all release files of *package_name* at *version* to *server_name*.

    :param version: release to mirror; defaults to the latest on PyPI.
    :param pypi: optional pre-built PyPI client.
    :returns: ``True`` iff every distribution file uploaded successfully.
    :raises DistributionNotFound: when the version has no distributions.
    """
    client = PyPI() if pypi is None else pypi
    wanted_version = version or client.get_latest_version(package_name)
    version_data = client.get_release_data(package_name, wanted_version)
    release_dataset = client.get_releases_for_version(package_name, wanted_version)
    repository_config = get_repository_config(server_name)
    assert repository_config, "No repository config found for '{}'".format(server_name)
    if not release_dataset:
        msg = "No distributions found for {} {} (maybe you should try to build from download url?)"
        raise DistributionNotFound(msg.format(package_name, wanted_version))
    # Best-effort: a failed file is logged and counted, not fatal.
    final_result = True
    for release_data in release_dataset:
        try:
            result = mirror_release(repository_config, package_name,
                                    wanted_version, version_data, release_data)
        except Exception:
            logger.exception("Failed to upload {}".format(release_data))
            result = False
        final_result = final_result and result
    return final_result
| Infinidat/infi.pypi_manager | src/infi/pypi_manager/mirror/mirror_all.py | mirror_all.py | py | 6,623 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "base64.standard_b64encode",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "six.moves.urll... |
13847534454 | from collections import namedtuple
# Define a namedtuple to represent search results
# (name / location / job title / profile link of one person found by search).
SearchResult = namedtuple(
    "SearchResult", ["name", "location", "job_title", "profile_url"]
)

# Dummy data for testing: ten fake people with example.com profile links.
dummy_data = [
    SearchResult(
        name="John Smith",
        location="New York, NY",
        job_title="Software Engineer",
        profile_url="https://example.com/johnsmith",
    ),
    SearchResult(
        name="Jane Doe",
        location="San Francisco, CA",
        job_title="Product Manager",
        profile_url="https://example.com/janedoe",
    ),
    SearchResult(
        name="David Lee",
        location="Seattle, WA",
        job_title="Data Analyst",
        profile_url="https://example.com/davidlee",
    ),
    SearchResult(
        name="Amy Chen",
        location="Boston, MA",
        job_title="Marketing Specialist",
        profile_url="https://example.com/amychen",
    ),
    SearchResult(
        name="Mark Johnson",
        location="Chicago, IL",
        job_title="Sales Manager",
        profile_url="https://example.com/markjohnson",
    ),
    SearchResult(
        name="Karen Kim",
        location="Los Angeles, CA",
        job_title="Graphic Designer",
        profile_url="https://example.com/karenkim",
    ),
    SearchResult(
        name="Chris Taylor",
        location="Austin, TX",
        job_title="Software Developer",
        profile_url="https://example.com/christaylor",
    ),
    SearchResult(
        name="Julia Rodriguez",
        location="Miami, FL",
        job_title="Project Manager",
        profile_url="https://example.com/juliarodriguez",
    ),
    SearchResult(
        name="Michael Brown",
        location="Denver, CO",
        job_title="Business Analyst",
        profile_url="https://example.com/michaelbrown",
    ),
    SearchResult(
        name="Lisa Nguyen",
        location="Portland, OR",
        job_title="UX Designer",
        profile_url="https://example.com/lisanguyen",
    ),
]

# Grade-level lookup: numeric level -> display name.
grades = {
    1: "Nursery 1",
    2: "Nursery 2",
    3: "Kindergarten 1",
    4: "Kindergarten 2",
    5: "Primary 1",
    6: "Primary 2",
}

# Sample curriculum tree: strand -> substrands -> topics (each with content).
strands = [
    {
        "id": "strand-1",
        "name": "Strand 1",
        "substrands": [
            {
                "id": "substrand-1",
                "name": "Substrand 1",
                "topics": [
                    {
                        "id": "topic-1",
                        "name": "Topic 1",
                        "content": "This is the content for Topic 1",
                    },
                    {
                        "id": "topic-2",
                        "name": "Topic 2",
                        "content": "This is the content for Topic 2",
                    },
                ],
            },
            {
                "id": "substrand-2",
                "name": "Substrand 2",
                "topics": [
                    {
                        "id": "topic-3",
                        "name": "Topic 3",
                        "content": "This is the content for Topic 3",
                    },
                    {
                        "id": "topic-4",
                        "name": "Topic 4",
                        "content": "This is the content for Topic 4",
                    },
                ],
            },
        ],
    },
    {
        "id": "strand-2",
        "name": "Strand 2",
        "substrands": [
            {
                "id": "substrand-3",
                "name": "Substrand 3",
                "topics": [
                    {
                        "id": "topic-5",
                        "name": "Topic 5",
                        "content": "This is the content for Topic 5",
                    },
                    {
                        "id": "topic-6",
                        "name": "Topic 6",
                        "content": "This is the content for Topic 6",
                    },
                ],
            },
            {
                "id": "substrand-4",
                "name": "Substrand 4",
                "topics": [
                    {
                        "id": "topic-7",
                        "name": "Topic 7",
                        "content": "This is the content for Topic 7",
                    },
                    {
                        "id": "topic-8",
                        "name": "Topic 8",
                        "content": "This is the content for Topic 8",
                    },
                ],
            },
        ],
    },
]

# Subject/grade pairs available as curricula.
curriculums = [
    {
        "subject": "Mathematics",
        "grade": 1,
    },
    {
        "subject": "English",
        "grade": 2,
    },
    {
        "subject": "Science",
        "grade": 1,
    },
    {
        "subject": "Social Studies",
        "grade": 3,
    },
    {
        "subject": "Art",
        "grade": 2,
    },
    {
        "subject": "History",
        "grade": 3,
    },
    {
        "subject": "Geography",
        "grade": 1,
    },
    {
        "subject": "Physical Education",
        "grade": 2,
    },
    {
        "subject": "Music",
        "grade": 1,
    },
    {
        "subject": "Foreign Language",
        "grade": 2,
    },
    {
        "subject": "Computer Science",
        "grade": 3,
    },
    {
        "subject": "Writing",
        "grade": 1,
    },
    {
        "subject": "Reading",
        "grade": 2,
    },
    {
        "subject": "Drama",
        "grade": 3,
    },
    {
        "subject": "Business",
        "grade": 1,
    },
    {
        "subject": "Engineering",
        "grade": 2,
    },
    {
        "subject": "Psychology",
        "grade": 3,
    },
    {
        "subject": "Philosophy",
        "grade": 1,
    },
    {
        "subject": "Marketing",
        "grade": 2,
    },
]

# One fully-populated user record mirroring the API's serialized shape.
user = {
    "id": 2,
    "first_name": "Adwoa",
    "middle_name": "Yaa",
    "last_name": "Appiah",
    "nickname": "Adyaa",
    "full_name": "Adwoa Yaa Appiah",
    "email": "adwoa.appiah@gmail.com",
    "phone_number": "0241234567",
    "birthdate": "1995-06-15",
    "gender": "F",
    "bio": "Software developer",
    "friendship_status": None,
    "profile_picture": "http://127.0.0.1:8000/media/users/IMG_20210920_100458_312.jpg",
    "cover_picture": "http://127.0.0.1:8000/media/users/IMG_20210920_100458_312.jpg",
    "school": "University of Ghana",
    "education_history": ["St. Monica's Senior High School"],
    "subjects": ["Computer Science", "Mathematics"],
    "level": "Undergraduate",
    "points": 200,
    "url": "http://127.0.0.1:8000/api/users/2/",
    "date_joined": "2023-03-25T09:13:36.104947Z",
    "is_active": True,
    "last_login": "2023-03-27T06:56:39.442993Z",
}

# Friend list entries; note the first three lack the optional "status" key.
friends = [
    {
        "friend": {
            "username": "johndoe",
            "email": "johndoe@example.com",
            "profile_picture": "/media/profile_pictures/johndoe.jpg",
        }
    },
    {
        "friend": {
            "username": "janedoe",
            "email": "janedoe@example.com",
            "profile_picture": "/media/profile_pictures/janedoe.jpg",
        }
    },
    {
        "friend": {
            "username": "bobsmith",
            "email": "bobsmith@example.com",
            "profile_picture": "/media/profile_pictures/bobsmith.jpg",
        }
    },
    {
        "friend": {
            "username": "kwame",
            "email": "kwame@example.com",
            "profile_picture": "/media/profile_pictures/kwame.jpg",
            "status": "online",
        }
    },
    {
        "friend": {
            "username": "ama",
            "email": "ama@example.com",
            "profile_picture": "/media/profile_pictures/ama.jpg",
            "status": "offline",
        }
    },
    {
        "friend": {
            "username": "yaw",
            "email": "yaw@example.com",
            "profile_picture": "/media/profile_pictures/yaw.jpg",
            "status": "online",
        }
    },
    {
        "friend": {
            "username": "akosua",
            "email": "akosua@example.com",
            "profile_picture": "/media/profile_pictures/akosua.jpg",
            "status": "offline",
        }
    },
]

# Media album: placeholder photos plus sample MP4s with their MIME types.
album = {
    "user_photos": [
        {
            "name": "Photo 1",
            "url": "https://dummyimage.com/600x400/000/fff&text=Photo+1",
        },
        {
            "name": "Photo 2",
            "url": "https://dummyimage.com/600x400/000/fff&text=Photo+2",
        },
        {
            "name": "Photo 3",
            "url": "https://dummyimage.com/600x400/000/fff&text=Photo+3",
        },
    ],
    "user_videos": [
        {
            "name": "Video 1",
            "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4",
            "mime_type": "video/mp4",
        },
        {
            "name": "Video 2",
            "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_2mb.mp4",
            "mime_type": "video/mp4",
        },
        {
            "name": "Video 3",
            "url": "https://sample-videos.com/video123/mp4/720/big_buck_bunny_720p_5mb.mp4",
            "mime_type": "video/mp4",
        },
    ],
}
| Kwekuasiedu315/PROJECTS | askademy/aska/web/dummy.py | dummy.py | py | 9,177 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 5,
"usage_type": "call"
}
] |
20149744558 | # from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers, status
from holdmycomicsapi.models import User
class UserView(ViewSet):
    """HMC Users View."""

    def create(self, request):
        """CREATE User.

        Expects ``userName``, ``storeName``, ``email`` and ``uid`` in the
        request body; answers 201 with the serialized user.
        """
        user = User.objects.create(
            user_name=request.data["userName"],
            store_name=request.data["storeName"],
            email=request.data["email"],
            uid=request.data["uid"],
        )
        serializer = UserSerializer(user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def retrieve(self, request, pk):
        """GET Single User, or 404 when the pk is unknown."""
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        serializer = UserSerializer(user)
        return Response(serializer.data)

    def destroy(self, request, pk):
        """DELETE User.

        BUGFIX: the original let ``User.DoesNotExist`` escape as a 500;
        answer 404 for an unknown pk, consistent with ``retrieve``.
        """
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        user.delete()
        return Response('User Deleted', status=status.HTTP_204_NO_CONTENT)
class UserSerializer(serializers.ModelSerializer):
    """JSON Serializer for Users (id plus the profile fields set at signup)."""

    class Meta:
        model = User
        fields = ('id', 'user_name', 'store_name', 'email', 'uid')
| SeaForeEx/HoldMyComics-Server | holdmycomicsapi/views/user.py | user.py | py | 1,451 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rest_framework.viewsets.ViewSet",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "holdmycomicsapi.models.User.objects.create",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "holdmycomicsapi.models.User.objects",
"line_number": 13,
"usage... |
21050437162 | import unittest
import json
from app import create_app, bad_request, forbidden, not_found, unauthorized, internal_error
class APITestCase(unittest.TestCase):
    """Integration tests for the health/info endpoints and error helpers."""

    def setUp(self):
        # Fresh app + pushed context per test so test_client works.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()

    def tearDown(self):
        self.app_context.pop()

    def test_health_check(self):
        """GET /health answers 200 with status OK and empty data."""
        response = self.client.get(
            '/health',
            headers={})
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertTrue('OK' == json_response.get('status'))
        self.assertTrue({} == json_response.get('data'))

    def test_app_info(self):
        """GET / reports status OK and the configured version."""
        response = self.client.get(
            '/',
            headers={})
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertTrue('OK' == json_response.get('status'))
        self.assertEqual('test', json_response.get('version'))

    def test_unknown_route_returns_404(self):
        """An unknown path answers the JSON not-found payload.

        BUGFIX: this method was also named ``test_app_info``, silently
        shadowing the version test above so it never ran; renamed so both
        tests execute.
        """
        response = self.client.get(
            '/not_found',
            headers={})
        self.assertEqual(response.status_code, 404)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(404, json_response.get('error'))
        self.assertEqual('not found', json_response.get('message'))

    def test_bad_request(self):
        """bad_request() builds a 400 JSON response with message and code."""
        msg = 'test_bad_request'
        code = 1234
        res = bad_request(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(400, res.status_code)

    def test_forbidden(self):
        """forbidden() builds a 403 JSON response with message and code."""
        msg = 'test_forbidden'
        code = 12345
        res = forbidden(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(403, res.status_code)

    def test_not_found(self):
        """not_found() passes arbitrary (even non-int) codes through."""
        msg = 'test_not_found'
        code = 'âsav'
        res = not_found(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(404, res.status_code)

    def test_unauthorized(self):
        """unauthorized() builds a 401 JSON response with message and code."""
        msg = 'test_unauthorized'
        code = 12345
        res = unauthorized(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(401, res.status_code)

    def test_internal_error(self):
        """internal_error() builds a 500 JSON response with message and code."""
        msg = 'test_internal_error'
        code = 12345
        res = internal_error(msg, code)
        self.assertEqual(msg, res.json['message'])
        self.assertEqual(code, res.json['error'])
        self.assertEqual(500, res.status_code)
| hungvm90/flask_tdd | tests/test_health_check_api.py | test_health_check_api.py | py | 2,834 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "app.create_app",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line... |
9580739784 | #!/usr/bin/python3
'''
File Storage
'''
import os
import json
import models
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
# Registry mapping class names (as stored in the JSON file) back to model
# classes; FileStorage.reload() uses it to rebuild objects from their dicts.
classes = {
    'BaseModel': BaseModel,
    'User': User,
    'Place': Place,
    'State': State,
    'City': City,
    'Amenity': Amenity,
    'Review': Review
}
class FileStorage:
    '''
    Serializes instances to a JSON file
    and deserializes JSON file to instances
    '''

    __file_path = 'file.json'   # JSON file backing the store
    __objects = {}              # in-memory cache: "<ClassName>.<id>" -> obj

    def all(self):
        ''' Returns the dictionary __objects '''
        return self.__objects

    def new(self, obj):
        ''' sets in __objects the obj with key <obj class name>.id '''
        self.__objects['{}.{}'.format(type(obj).__name__, obj.id)] = obj

    def save(self):
        ''' serializes __objects to the JSON file (path: __file_path) '''
        serialized = {key: item.to_dict()
                      for key, item in self.__objects.items()}
        with open(self.__file_path, 'w') as f:
            json.dump(serialized, f)

    def reload(self):
        '''
        deserializes the JSON file to __objects
        (only if the JSON file (__file_path) exists
        '''
        try:
            if os.path.exists(self.__file_path):
                with open(self.__file_path) as f:
                    stored = json.load(f)
                for key, attributes in stored.items():
                    cls_name = key.split('.')[0]
                    self.__objects[key] = classes[cls_name](**attributes)
        except Exception:
            # deliberately best-effort: a missing/corrupt file is ignored
            pass
| Davidbukz4/AirBnB_clone | models/engine/file_storage.py | file_storage.py | py | 1,815 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.base_model.BaseModel",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models.user.User",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.place.Place",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "model... |
42242684970 | import math
import numpy as np
import matplotlib.pyplot as plt
# Gap sizes [m] and Yukawa length scales [m] for which numeric data exists.
gap_list = [5.0e-6, 7.5e-6, 10e-6]
lam_list = np.logspace(-1.0, 2.0, 20)*1e-6
print(lam_list)

# Numerically derived alpha sensitivity for each (lambda, gap) pair,
# loaded from pre-computed .npy files (rows 0..3 of lam_list have no data).
sens_vals_num = np.zeros((len(lam_list), len(gap_list)))
for i in range(len(gap_list)):
    for j in range(4, len(lam_list)):
        gap = gap_list[i]
        lam = lam_list[j]
        fname = 'data/lam_arr_cyl_%.3f_%.3f.npy' % (gap*1e6, lam*1e6)
        cval = np.load(fname)
        sigf = 1.9e-16  # force sensitivity [N]
        sens_vals_num[j, i] = sigf/cval[0]

# Harmonic trap for a bead of mass m [kg] at resonance f0 [Hz].
f0 = 1e3
m = 1e-13
# BUGFIX: np.linspace requires an integer sample count; the original passed
# 1e3 (a float), which raises TypeError on NumPy >= 1.18.
xvals = np.linspace(-2e-6, 2e-6, 1000)
harm_pot = 0.5*m*(2*math.pi*f0)**2 * xvals**2

## now assume point mass at distance d from origin
d = 10e-6
Ma = 10e-13
alpha = 1.0e16
lam = 10e-6
G = 6.67e-11
# Yukawa-modified potential from attractors at +/-d (constant offset removed)
# plus its quadratic small-displacement approximation.
grav_pot = alpha*G*m*Ma * (2.0*np.exp(-(d/lam))/d - np.exp(-np.abs(d-xvals)/lam)/np.abs(d-xvals) - np.exp(-np.abs(-d-xvals)/lam)/np.abs(-d-xvals))
grav_pot_approx = -2*alpha*G*m*Ma/d**3*np.exp(-d/lam)*(1 + d/lam + 0.5*(d/lam)**2)*xvals**2
print((1 + d/lam + 0.5*(d/lam)**2))

## now fit to a quadratic term near the minimum
fit_win = [400, 600]
p1 = np.polyfit(xvals[fit_win[0]:fit_win[1]], harm_pot[fit_win[0]:fit_win[1]], 2)
print(p1)
tot_pot = harm_pot + grav_pot
p2 = np.polyfit(xvals[fit_win[0]:fit_win[1]], tot_pot[fit_win[0]:fit_win[1]], 2)
print(p2)

# Potentials and their quadratic fits over the fit window.
fig = plt.figure(33)
plt.plot(xvals, harm_pot)
plt.plot(xvals, harm_pot + grav_pot, 'r')
xx = xvals[fit_win[0]:fit_win[1]]
plt.plot(xx, np.polyval(p1, xx), 'c')
plt.plot(xx, np.polyval(p2, xx), 'm')

# Exact Yukawa potential vs. its quadratic approximation.
fig2 = plt.figure(34)
plt.plot(xvals, grav_pot)
plt.plot(xvals, grav_pot_approx, 'r')

plt.show()
| charlesblakemore/opt_lev_analysis | casimir/force_calc/plot_point_pot.py | plot_point_pot.py | py | 1,570 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.logspace",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_numbe... |
28240417211 | import requests
import argparse
import json
import pandas as pd
import streamlit as st
# Local inference endpoint exposed by the summarization service.
APP_URL = "http://127.0.0.1:8000/predict"

# Adding arguments to customize CLI
argparser = argparse.ArgumentParser(description='Process hyper-parameters')
argparser.add_argument('--movie_title', type=str, default='', help='movie title')
argparser.add_argument('--scraping_limit', type=int, default=10, help='scraping limit')
# BUGFIX: help text read 'reviwer type'
argparser.add_argument('--reviewer', type=str, default='user', help='reviewer type')
argparser.add_argument('--char_limit', type=int, default=30000, help='char limit summary input')
argparser.add_argument('--max_length', type=int, default=100, help='char limit summary output')
args = argparser.parse_args()

print('\n ---------------------')
print('Scraping Details: ')
print(f'Movie title: {args.movie_title}')
print(f'Number of total reviews attempted to scrape: {args.scraping_limit}')
print(f'Reviews from: {args.reviewer}')
print(f'Character limit for summary text: {args.char_limit}')

payload = {
    'movie_title': args.movie_title,
    'scraping_limit': args.scraping_limit,
    'reviewer': args.reviewer,
    'char_limit': args.char_limit,
    'max_length': args.max_length
}

response = requests.post(APP_URL, json=payload)
# Fail loudly on HTTP errors instead of json-decoding an error page.
response.raise_for_status()
# response.json() replaces the manual decode + json.loads round trip.
output = response.json()
print(output)
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 38,
"usage_type": "call"
}
] |
22918946072 | import unittest
import os
import lsst.utils.tests
import pandas as pd
import numpy as np
import asyncio
import matplotlib.pyplot as plt
from astropy.time import TimeDelta
from lsst.utils import getPackageDir
from lsst.summit.utils.enums import PowerState
from lsst.summit.utils.efdUtils import makeEfdClient, getDayObsStartTime, calcNextDay
from lsst.summit.utils.tmaUtils import (
getSlewsFromEventList,
getTracksFromEventList,
getAzimuthElevationDataForEvent,
plotEvent,
getCommandsDuringEvent,
TMAStateMachine,
TMAEvent,
TMAEventMaker,
TMAState,
AxisMotionState,
getAxisAndType,
_initializeTma,
)
from utils import getVcr
__all__ = [
'writeNewTmaEventTestTruthValues',
]
vcr = getVcr()
def getTmaEventTestTruthValues():
    """Get the current truth values for the TMA event test cases.

    Returns
    -------
    seqNums : `np.array` of `int`
        The sequence numbers of the events.
    startRows : `np.array` of `int`
        The _startRow numbers of the events.
    endRows : `np.array` of `int`
        The _endRow numbers of the events.
    types : `np.array` of `str`
        The event types, as a string, i.e. the ``TMAEvent.name`` of the event's
        ``event.type``.
    endReasons : `np.array` of `str`
        The event end reasons, as a string, i.e. the ``TMAEvent.name`` of the
        event's ``event.endReason``.
    """
    truthFile = os.path.join(getPackageDir("summit_utils"),
                             "tests", "data", "tmaEventData.txt")
    # One row per event; comma-separated columns with a header line.
    seqNums, startRows, endRows, types, endReasons = np.genfromtxt(
        truthFile,
        delimiter=',',
        dtype=None,
        names=True,
        encoding='utf-8',
        unpack=True,
    )
    return seqNums, startRows, endRows, types, endReasons
def writeNewTmaEventTestTruthValues():
    """This function is used to write out the truth values for the test cases.

    If the internal event creation logic changes, these values can change, and
    will need to be updated. Run this function, and check the new values into
    git.

    Note: if you have cause to update values with this function, make sure to
    update the version number on the TMAEvent class.
    """
    dayObs = 20230531  # obviously must match the day in the test class
    events = TMAEventMaker().getEvents(dayObs)

    outFile = os.path.join(getPackageDir("summit_utils"),
                           "tests", "data", "tmaEventData.txt")
    # Header line plus one CSV row per event, newline-terminated.
    lines = ["seqNum,startRow,endRow,type,endReason"]
    lines.extend(
        f"{event.seqNum},{event._startRow},{event._endRow},{event.type.name},"
        f"{event.endReason.name}"
        for event in events
    )
    with open(outFile, 'w') as f:
        f.write('\n'.join(lines) + '\n')
def makeValid(tma):
    """Helper function to turn a TMA into a valid state.

    Any component still holding the machine's uninitialized sentinel is
    overwritten with the arbitrary-but-valid value 1.
    """
    sentinel = tma._UNINITIALIZED_VALUE
    for key in tma._parts:
        if tma._parts[key] == sentinel:
            tma._parts[key] = 1
def _turnOn(tma):
    """Helper function to turn TMA axes on for testing.

    Do not call directly in normal usage or code, as this just arbitrarily
    sets values to turn the axes on.

    Parameters
    ----------
    tma : `lsst.summit.utils.tmaUtils.TMAStateMachine`
        The TMA state machine model to initialize.
    """
    # Power up both axes' system-state components.
    for axis in ('azimuth', 'elevation'):
        tma._parts[f'{axis}SystemState'] = PowerState.ON
class TmaUtilsTestCase(lsst.utils.tests.TestCase):
    """Unit tests for the TMAStateMachine, run without any EFD data."""

    def test_tmaInit(self):
        """A fresh machine is invalid until all six components are set."""
        tma = TMAStateMachine()
        self.assertFalse(tma._isValid)

        # setting one axis should not make things valid
        tma._parts['azimuthMotionState'] = 1
        self.assertFalse(tma._isValid)

        # setting all the other components should make things valid
        tma._parts['azimuthInPosition'] = 1
        tma._parts['azimuthSystemState'] = 1
        tma._parts['elevationInPosition'] = 1
        tma._parts['elevationMotionState'] = 1
        tma._parts['elevationSystemState'] = 1
        self.assertTrue(tma._isValid)

    def test_tmaReferences(self):
        """Check the linkage between the component lists and the _parts dict.
        """
        tma = TMAStateMachine()
        # both motion components start at the uninitialized sentinel
        self.assertEqual(tma._parts['azimuthMotionState'], tma._UNINITIALIZED_VALUE)
        self.assertEqual(tma._parts['elevationMotionState'], tma._UNINITIALIZED_VALUE)
        # writes through tma.motion must be visible in tma._parts
        tma.motion[0] = AxisMotionState.TRACKING  # set azimuth
        tma.motion[1] = AxisMotionState.TRACKING  # set elevation
        self.assertEqual(tma._parts['azimuthMotionState'], AxisMotionState.TRACKING)
        self.assertEqual(tma._parts['elevationMotionState'], AxisMotionState.TRACKING)

    def test_getAxisAndType(self):
        """getAxisAndType splits topic names into (axis, component type)."""
        # check both the long and short form names work
        for s in ['azimuthMotionState', 'lsst.sal.MTMount.logevent_azimuthMotionState']:
            self.assertEqual(getAxisAndType(s), ('azimuth', 'MotionState'))

        # check in position, and use elevation instead of azimuth to test that
        for s in ['elevationInPosition', 'lsst.sal.MTMount.logevent_elevationInPosition']:
            self.assertEqual(getAxisAndType(s), ('elevation', 'InPosition'))

        for s in ['azimuthSystemState', 'lsst.sal.MTMount.logevent_azimuthSystemState']:
            self.assertEqual(getAxisAndType(s), ('azimuth', 'SystemState'))

    def test_initStateLogic(self):
        """State predicates through uninitialized -> initialized -> powered."""
        tma = TMAStateMachine()
        self.assertFalse(tma._isValid)
        self.assertFalse(tma.isMoving)
        self.assertFalse(tma.canMove)
        self.assertFalse(tma.isTracking)
        self.assertFalse(tma.isSlewing)
        self.assertEqual(tma.state, TMAState.UNINITIALIZED)

        _initializeTma(tma)  # we're valid, but still aren't moving and can't
        self.assertTrue(tma._isValid)
        self.assertNotEqual(tma.state, TMAState.UNINITIALIZED)
        self.assertTrue(tma.canMove)
        self.assertTrue(tma.isNotMoving)
        self.assertFalse(tma.isMoving)
        self.assertFalse(tma.isTracking)
        self.assertFalse(tma.isSlewing)

        _turnOn(tma)  # can now move, still valid, but not in motion
        self.assertTrue(tma._isValid)
        self.assertTrue(tma.canMove)
        self.assertTrue(tma.isNotMoving)
        self.assertFalse(tma.isMoving)
        self.assertFalse(tma.isTracking)
        self.assertFalse(tma.isSlewing)

        # consider manipulating the axes by hand here and testing these?
        # it's likely not worth it, given how much this exercised elsewhere,
        # but these are the only functions not yet being directly tested
        # tma._axesInFault()
        # tma._axesOff()
        # tma._axesOn()
        # tma._axesInMotion()
        # tma._axesTRACKING()
        # tma._axesInPosition()
@vcr.use_cassette()
class TMAEventMakerTestCase(lsst.utils.tests.TestCase):
@classmethod
@vcr.use_cassette()
def setUpClass(cls):
try:
cls.client = makeEfdClient(testing=True)
except RuntimeError:
raise unittest.SkipTest("Could not instantiate an EFD client")
cls.dayObs = 20230531
# get a sample expRecord here to test expRecordToTimespan
cls.tmaEventMaker = TMAEventMaker(cls.client)
cls.events = cls.tmaEventMaker.getEvents(cls.dayObs) # does the fetch
cls.sampleData = cls.tmaEventMaker._data[cls.dayObs] # pull the data from the object and test length
@vcr.use_cassette()
def tearDown(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.client.influx_client.close())
@vcr.use_cassette()
def test_events(self):
data = self.sampleData
self.assertIsInstance(data, pd.DataFrame)
self.assertEqual(len(data), 993)
@vcr.use_cassette()
def test_rowDataForValues(self):
rowsFor = set(self.sampleData['rowFor'])
self.assertEqual(len(rowsFor), 6)
# hard coding these ensures that you can't extend the axes/model
# without being explicit about it here.
correct = {'azimuthInPosition',
'azimuthMotionState',
'azimuthSystemState',
'elevationInPosition',
'elevationMotionState',
'elevationSystemState'}
self.assertSetEqual(rowsFor, correct)
@vcr.use_cassette()
def test_monotonicTimeInDataframe(self):
# ensure that each row is later than the previous
times = self.sampleData['private_efdStamp']
self.assertTrue(np.all(np.diff(times) > 0))
@vcr.use_cassette()
def test_monotonicTimeApplicationOfRows(self):
# ensure you can apply rows in the correct order
tma = TMAStateMachine()
row1 = self.sampleData.iloc[0]
row2 = self.sampleData.iloc[1]
# just running this check it is OK
tma.apply(row1)
tma.apply(row2)
# and that if you apply them in reverse order then things will raise
tma = TMAStateMachine()
with self.assertRaises(ValueError):
tma.apply(row2)
tma.apply(row1)
@vcr.use_cassette()
def test_fullDaySequence(self):
# make sure we can apply all the data from the day without falling
# through the logic sieve
for engineering in (True, False):
tma = TMAStateMachine(engineeringMode=engineering)
_initializeTma(tma)
for rowNum, row in self.sampleData.iterrows():
tma.apply(row)
@vcr.use_cassette()
def test_endToEnd(self):
eventMaker = self.tmaEventMaker
events = eventMaker.getEvents(self.dayObs)
self.assertIsInstance(events, list)
self.assertEqual(len(events), 200)
self.assertIsInstance(events[0], TMAEvent)
slews = [e for e in events if e.type == TMAState.SLEWING]
tracks = [e for e in events if e.type == TMAState.TRACKING]
self.assertEqual(len(slews), 157)
self.assertEqual(len(tracks), 43)
seqNums, startRows, endRows, types, endReasons = getTmaEventTestTruthValues()
for eventNum, event in enumerate(events):
self.assertEqual(event.seqNum, seqNums[eventNum])
self.assertEqual(event._startRow, startRows[eventNum])
self.assertEqual(event._endRow, endRows[eventNum])
self.assertEqual(event.type.name, types[eventNum])
self.assertEqual(event.endReason.name, endReasons[eventNum])
@vcr.use_cassette()
def test_noDataBehaviour(self):
    """Check a day with no EFD data returns an empty list and logs a warning."""
    eventMaker = self.tmaEventMaker
    noDataDayObs = 19500101  # do not use 19700101 - there is data for that day!
    with self.assertLogs(level='WARNING') as cm:
        correctMsg = f"No EFD data found for dayObs={noDataDayObs}"
        events = eventMaker.getEvents(noDataDayObs)
        self.assertIsInstance(events, list)
        self.assertEqual(len(events), 0)
        msg = cm.output[0]
        self.assertIn(correctMsg, msg)
@vcr.use_cassette()
def test_helperFunctions(self):
    """Check getSlewsFromEventList/getTracksFromEventList match manual filtering."""
    eventMaker = self.tmaEventMaker
    events = eventMaker.getEvents(self.dayObs)
    slews = [e for e in events if e.type == TMAState.SLEWING]
    tracks = [e for e in events if e.type == TMAState.TRACKING]
    foundSlews = getSlewsFromEventList(events)
    foundTracks = getTracksFromEventList(events)
    self.assertEqual(slews, foundSlews)
    self.assertEqual(tracks, foundTracks)
@vcr.use_cassette()
def test_getEvent(self):
    # test the singular event getter, and what happens if the event doesn't
    # exist for the day
    eventMaker = self.tmaEventMaker
    events = eventMaker.getEvents(self.dayObs)
    nEvents = len(events)
    event = eventMaker.getEvent(self.dayObs, 0)
    self.assertIsInstance(event, TMAEvent)
    self.assertEqual(event, events[0])
    # spot-check an event from the middle of the day too
    event = eventMaker.getEvent(self.dayObs, 100)
    self.assertIsInstance(event, TMAEvent)
    self.assertEqual(event, events[100])
    # asking for a seqNum beyond the last event should log a warning
    with self.assertLogs(level='WARNING') as cm:
        correctMsg = f"Event {nEvents+1} not found for {self.dayObs}"
        event = eventMaker.getEvent(self.dayObs, nEvents+1)
        msg = cm.output[0]
        self.assertIn(correctMsg, msg)
@vcr.use_cassette()
def test_printing(self):
    """Smoke-test the printing/str/repr helpers: they must not raise."""
    eventMaker = self.tmaEventMaker
    events = eventMaker.getEvents(self.dayObs)
    # test str(), repr(), and _ipython_display_() for an event
    print(str(events[0]))
    print(repr(events[0]))
    print(events[0]._ipython_display_())
    # spot-check both a slew and a track to print
    slews = [e for e in events if e.type == TMAState.SLEWING]
    tracks = [e for e in events if e.type == TMAState.TRACKING]
    eventMaker.printEventDetails(slews[0])
    eventMaker.printEventDetails(tracks[0])
    eventMaker.printEventDetails(events[-1])
    # check the full day trick works
    eventMaker.printFullDayStateEvolution(self.dayObs)
    tma = TMAStateMachine()
    _initializeTma(tma)  # the uninitialized state contains wrong types for printing
    eventMaker.printTmaDetailedState(tma)
@vcr.use_cassette()
def test_getAxisData(self):
    """Check azimuth/elevation data retrieval for an event, with padding."""
    eventMaker = self.tmaEventMaker
    events = eventMaker.getEvents(self.dayObs)
    azData, elData = getAzimuthElevationDataForEvent(self.client, events[0])
    self.assertIsInstance(azData, pd.DataFrame)
    self.assertIsInstance(elData, pd.DataFrame)
    # padding widens the queried window so more rows come back
    # (units presumably seconds - TODO confirm against the function's API)
    paddedAzData, paddedElData = getAzimuthElevationDataForEvent(self.client,
                                                                 events[0],
                                                                 prePadding=2,
                                                                 postPadding=1)
    self.assertGreater(len(paddedAzData), len(azData))
    self.assertGreater(len(paddedElData), len(elData))
    # just check this doesn't raise when called, and check we can pass the
    # data in
    plotEvent(self.client, events[0], azimuthData=azData, elevationData=elData)
@vcr.use_cassette()
def test_plottingAndCommands(self):
    """Smoke-test event plotting with/without a figure and with commands."""
    eventMaker = self.tmaEventMaker
    events = eventMaker.getEvents(self.dayObs)
    event = events[28]  # this one has commands, and we'll check that later
    # check we _can_ plot without a figure, and then stop doing that
    plotEvent(self.client, event)
    fig = plt.figure(figsize=(10, 8))
    # just check this doesn't raise when called
    plotEvent(self.client, event, fig=fig)
    plt.close(fig)
    commandsToPlot = ['raDecTarget', 'moveToTarget', 'startTracking', 'stopTracking']
    commands = getCommandsDuringEvent(self.client, event, commandsToPlot, doLog=False)
    self.assertTrue(not all([time is None for time in commands.values()]))  # at least one command
    # NOTE(review): fig was closed above but is reused here - confirm
    # plotEvent tolerates a closed figure
    plotEvent(self.client, event, fig=fig, commands=commands)
    del fig
@vcr.use_cassette()
def test_findEvent(self):
    """Check findEvent's boundary behavior for times inside, between and
    outside events (events behave as half-open intervals)."""
    eventMaker = self.tmaEventMaker
    events = eventMaker.getEvents(self.dayObs)
    event = events[28]  # this one has a contiguous event before it
    time = event.begin
    found = eventMaker.findEvent(time)
    self.assertEqual(found, event)
    dt = TimeDelta(0.01, format='sec')
    # must be just inside to get the same event back, because if a moment
    # is shared it gives the one which starts with the moment (whilst
    # logging info messages about it)
    time = event.end - dt
    found = eventMaker.findEvent(time)
    self.assertEqual(found, event)
    # now check that if we're a hair after, we don't get the same event
    time = event.end + dt
    found = eventMaker.findEvent(time)
    self.assertNotEqual(found, event)
    # Now check the cases which don't find an event at all. It would be
    # nice to check the log messages here, but it seems too fragile to be
    # worth it
    dt = TimeDelta(1, format='sec')
    tooEarlyOnDay = getDayObsStartTime(self.dayObs) + dt  # 1 second after start of day
    found = eventMaker.findEvent(tooEarlyOnDay)
    self.assertIsNone(found)
    # 1 second before end of day and this day does not end with an open
    # event
    tooLateOnDay = getDayObsStartTime(calcNextDay(self.dayObs)) - dt
    found = eventMaker.findEvent(tooLateOnDay)
    self.assertIsNone(found)
    # going just inside the last event of the day should be fine
    lastEvent = events[-1]
    found = eventMaker.findEvent(lastEvent.end - dt)
    self.assertEqual(found, lastEvent)
    # going at the very end of the last event of the day should actually
    # find nothing, because the last moment of an event isn't actually in
    # the event itself, because of how contiguous events are defined to
    # behave (being half-open intervals)
    found = eventMaker.findEvent(lastEvent.end)
    # NOTE(review): assertIsNone's second positional argument is the failure
    # message; passing `lastEvent` here looks like a leftover - confirm
    self.assertIsNone(found, lastEvent)
class TestMemory(lsst.utils.tests.MemoryTestCase):
    """Standard LSST file-leak/memory test case; all behavior is inherited."""
    pass
def setup_module(module):
    """Pytest hook: initialize the LSST test framework for this module."""
    lsst.utils.tests.init()
if __name__ == "__main__":
    # allow running this test file directly as a script
    lsst.utils.tests.init()
    unittest.main()
| lsst-sitcom/summit_utils | tests/test_tmaUtils.py | test_tmaUtils.py | py | 17,835 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "utils.getVcr",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lsst.utils.getPackageDir",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
#!/usr/bin/python3
"""Fetch the URL given as the first CLI argument and print the body,
or an error message when the HTTP status code is 400 or above."""
import requests
import sys


if __name__ == '__main__':
    response = requests.get(sys.argv[1])
    status = response.status_code
    if status < 400:
        print(response.text)
    else:
        print('Error code: {}'.format(status))
| jinDeHao/alx-higher_level_programming | 0x11-python-network_1/7-error_code.py | 7-error_code.py | py | 267 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
}
] |
#!/usr/bin/env python
# coding:utf-8
# NOTE: shebang fixed - it was missing the leading slash ("#!usr/bin/env"),
# which would fail when executing the script directly.
__author__ = 'sunyaxiong'

# Python 2 only: reset the default string encoding so implicit
# str/unicode conversions of UTF-8 data don't raise.
import sys
reload(sys)
sys.setdefaultencoding('utf8')

import os
# make the Django project importable and boot Django outside a web server
sys.path.append('E:/GitWorkspace/enndc_management/enndc_management')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import django
django.setup()

from django.db.models import F, Count
from vmserver.models import List
from openpyxl import load_workbook
from enndc_management.settings import *
import pprint
import json
import logging

# loggers configured by the Django settings
LOG = logging.getLogger('vmserver')
BUG = logging.getLogger('request')
def get_excel_info():
    """Read VM records from the 'info' sheet of the source workbook.

    :return: a list of dicts, one per data row, keyed by the header row's
             cell values. Empty list if the sheet has no data rows.
    """
    wb = load_workbook(filename=BASE_DIR + '/vmserver/pyvmomi_api/info-lzc.xlsx')
    ws = wb['info']
    # Materialize first: in modern openpyxl ws.rows is a generator, so it
    # cannot be indexed (rows[0]) while also being iterated.
    rows = list(ws.rows)
    if not rows:
        return []
    # header row supplies the dict keys; read it once instead of per row
    header = [cell.value for cell in rows[0]]
    info_list = []
    for row in rows[1:]:
        info_list.append({header[i]: row[i].value for i in range(len(row))})
    # print len(info_list)
    # json.dump(info_list, open('info.json', 'w'), indent=4, ensure_ascii=False)
    return info_list
def compare_update():
    """Update DB rows from the spreadsheet data, logging hits and misses."""
    updated_count = 0
    missing_count = 0
    for record in get_excel_info():
        name = record['list_name']
        matches = List.objects.filter(list_name=name)
        if len(matches) == 0:
            # no matching row in the DB; log and move on
            LOG.info('{0} not found'.format(name))
            missing_count += 1
            continue
        matches.update(**record)
        LOG.info('vm {0} has updated'.format(name))
        updated_count += 1
    LOG.info('there has {0} vm info update and {1} vm info failed'.format(updated_count, missing_count))
if __name__ == '__main__':
    # NOTE(review): only reads the spreadsheet; compare_update() is never
    # called here - confirm whether the DB update is meant to run
    get_excel_info()
| willsion/enndc_management | vmserver/pyvmomi_api/appinfo_excel_to_db.py | appinfo_excel_to_db.py | py | 1,630 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.setdef... |
8724066404 | """
Example of pi-IW: guided Rollout-IW, interleaving planning and learning.
"""
import numpy as np
import tensorflow as tf
from planning_step import gridenvs_BASIC_features, features_to_atoms
from online_planning import softmax_Q_tree_policy
# Function that will be executed at each interaction with the environment
def observe_pi_iw_dynamic(env, node):
    """Observe fn for TreeActor: run the network on the node's observation and
    store the policy probabilities plus the binarized feature layer in the
    node's data. Reads the module-level `model` (set in __main__).
    """
    x = tf.constant(np.expand_dims(node.data["obs"], axis=0).astype(np.float32))
    logits, features = model(x, output_features=True)
    node.data["probs"] = tf.nn.softmax(logits).numpy().ravel()
    # `np.bool` was a deprecated alias removed in NumPy 1.24; the builtin
    # `bool` gives the identical dtype for this discretization -> bool step
    node.data["features"] = features_to_atoms(features.numpy().ravel().astype(bool))
def observe_pi_iw_BASIC(env, node):
    """Observe fn for TreeActor: store the network's policy probabilities and
    gridenvs BASIC features in the node's data. Reads the module-level
    `model` (set in __main__).
    """
    obs_batch = np.expand_dims(node.data["obs"], axis=0).astype(np.float32)
    logits = model(tf.constant(obs_batch))
    node.data["probs"] = tf.nn.softmax(logits).numpy().ravel()
    gridenvs_BASIC_features(env, node)  # compute BASIC features
def planning_step(actor, planner, dataset, policy_fn, tree_budget, cache_subtree, discount_factor):
    """Grow the tree by tree_budget nodes, act on the resulting tree policy,
    record the (observation, target policy) pair, and return (reward, done)
    for the executed action.
    """
    initial_size = len(actor.tree)

    def budget_exhausted():
        # stop once exactly tree_budget new nodes have been generated
        return len(actor.tree) - initial_size == tree_budget

    planner.plan(tree=actor.tree,
                 successor_fn=actor.generate_successor,
                 stop_condition_fn=budget_exhausted,
                 policy_fn=policy_fn)
    target_policy = softmax_Q_tree_policy(actor.tree, actor.tree.branching_factor,
                                          discount_factor, temp=0)
    action = sample_pmf(target_policy)
    prev_root, new_root = actor.step(action, cache_subtree=cache_subtree)
    dataset.append({"observations": prev_root["obs"],
                    "target_policy": target_policy})
    return new_root["r"], new_root["done"]
if __name__ == "__main__":
    import gym
    from rollout_iw import RolloutIW
    from tree import TreeActor
    from supervised_policy import SupervisedPolicy, Mnih2013
    from utils import sample_pmf
    from experience_replay import ExperienceReplay
    import gridenvs.examples  # load simple envs

    # HYPERPARAMETERS
    seed = 0
    env_id = "GE_PathKeyDoor-v0"
    use_dynamic_feats = False  # otherwise BASIC features will be used
    n_episodes = 5
    tree_budget = 20
    discount_factor = 0.99
    cache_subtree = True
    batch_size = 32
    learning_rate = 0.0007
    replay_capacity = 1000
    regularization_factor = 0.001
    # NOTE(review): clip_grad_norm is defined but never passed to the
    # learner/optimizer below - confirm whether clipping should be wired in
    clip_grad_norm = 40
    rmsprop_decay = 0.99
    rmsprop_epsilon = 0.1

    # Set random seed
    np.random.seed(seed)
    tf.random.set_seed(seed)

    # Instead of env.step() and env.reset(), we'll use TreeActor helper class, which creates a tree and adds nodes to it
    env = gym.make(env_id)
    observe_fn = observe_pi_iw_dynamic if use_dynamic_feats else observe_pi_iw_BASIC
    actor = TreeActor(env, observe_fn=observe_fn)
    planner = RolloutIW(branching_factor=env.action_space.n, ignore_cached_nodes=True)

    # `model` is read as a module-level global by the observe fns above
    model = Mnih2013(num_logits=env.action_space.n, add_value=False)
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate,
                                            rho=rmsprop_decay,
                                            epsilon=rmsprop_epsilon)
    learner = SupervisedPolicy(model, optimizer, regularization_factor=regularization_factor, use_graph=True)
    experience_replay = ExperienceReplay(capacity=replay_capacity)

    def network_policy(node, branching_factor):
        # guide the planner with the probabilities cached by the observe fn
        return node.data["probs"]

    # Initialize experience replay: run some steps until we have enough examples to form one batch
    print("Initializing experience replay", flush=True)
    actor.reset()
    while len(experience_replay) < batch_size:
        r, episode_done = planning_step(actor=actor,
                                        planner=planner,
                                        dataset=experience_replay,
                                        policy_fn=network_policy,
                                        tree_budget=tree_budget,
                                        cache_subtree=cache_subtree,
                                        discount_factor=discount_factor)
        if episode_done: actor.reset()

    # Interleave planning and learning steps
    print("\nInterleaving planning and learning steps.", flush=True)
    actor.reset()
    steps_cnt = 0
    episode_steps = 0
    episodes_cnt = 0
    while episodes_cnt < n_episodes:
        r, episode_done = planning_step(actor=actor,
                                        planner=planner,
                                        dataset=experience_replay,
                                        policy_fn=network_policy,
                                        tree_budget=tree_budget,
                                        cache_subtree=cache_subtree,
                                        discount_factor=discount_factor)

        # Learning step: train on a batch sampled from the replay buffer
        batch = experience_replay.sample(batch_size)
        loss, _ = learner.train_step(tf.constant(batch["observations"], dtype=tf.float32),
                                     tf.constant(batch["target_policy"], dtype=tf.float32))
        steps_cnt += 1
        episode_steps += 1

        print("\n".join([" ".join(row) for row in env.unwrapped.get_char_matrix(actor.tree.root.data["s"])]),
              "Reward: ", r, "Simulator steps:", actor.nodes_generated,
              "Planning steps:", steps_cnt, "Loss:", loss.numpy(), "\n")

        if episode_done:
            print("Problem solved in %i steps (min 13 steps)." % episode_steps)
            actor.reset()
            episodes_cnt += 1
            episode_steps = 0
            if episodes_cnt < n_episodes: print("\n------- New episode -------")
{
"api_name": "tensorflow.constant",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.n... |
import requests
import json

# Winning-battle history; currently unused by the active code below but
# referenced by the commented-out exploration code.
battles_win_history = []

# Request player battle history from the Splinterlands API
resp = requests.get('https://api.splinterlands.io/battle/history?player=kingsgambit0615').json()
battles = resp['battles']

# Distinct mana caps seen in the history, in ascending order.
# set() removes duplicates and sorted() orders them - this replaces the
# original O(n^2) "dedupe into a list, then sort" loop with the same result.
output = sorted({battle['mana_cap'] for battle in battles})
print(output)

# print('Loop:' + str(temp))
# temp = temp + 1
# # Loads Battle Details
# fight_details = json.loads(battle['details'])
# # Get player winning deck summoners and monsters
# if fight_details['winner'] == "kingsgambit0615":
#     # Get Mana Cap Of the Match
#     mana_cap = battle['mana_cap']
#     print("mana: " + str(mana_cap))
#     # Get Ruleset Of the Match
#     ruleset = battle['ruleset']
#     print("ruleset: " + ruleset)
#     # team_one = (fight_details['team1'])
#     # print(team_one['player'])
#     try:
#         if fight_details['team1']['player'] == "kingsgambit0615":
#             summoner = fight_details['team1']['summoner']['card_detail_id']
#             print("Summoner: " + str(summoner))
#             monsters = fight_details['team1']['monsters']
#             for monster in monsters:
#                 print("monster:" + str(monster['card_detail_id']))
#         else:
#             summoner = fight_details['team2']['summoner']['card_detail_id']
#             print("Summoner: " + str(summoner))
#             monsters = fight_details['team2']['monsters']
#             for monster in monsters:
#                 print("monster:" + str(monster['card_detail_id']))
#     except:
#         pass
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
}
] |
12409403965 | import collections
import re
import furl
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from django.core.exceptions import ImproperlyConfigured
from django.http.request import QueryDict
from rest_framework import exceptions
from rest_framework import serializers as ser
from rest_framework.fields import SkipField
from rest_framework.fields import get_attribute as get_nested_attributes
from api.base import utils
from api.base.exceptions import InvalidQueryStringError
from api.base.exceptions import Conflict
from api.base.exceptions import JSONAPIException
from api.base.exceptions import TargetNotSupportedError
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.settings import BULK_SETTINGS
from api.base.utils import absolute_reverse, extend_querystring_params, get_user_auth, extend_querystring_if_key_exists
from framework.auth import core as auth_core
from website import settings
from website import util as website_utils
from website.models import Node
from website.util.sanitize import strip_html
def format_relationship_links(related_link=None, self_link=None, rel_meta=None, self_meta=None):
    """
    Build a JSON API 'links' object containing only the links provided.

    Each supplied link becomes {'href': ..., 'meta': ...}; links that were
    not supplied are omitted entirely, per the JSON API spec.
    """
    links = {}
    if related_link:
        links['related'] = {
            'href': related_link or {},
            'meta': rel_meta or {}
        }
    if self_link:
        links['self'] = {
            'href': self_link or {},
            'meta': self_meta or {}
        }
    return {'links': links}
def is_anonymized(request):
    """Return whether the request's view-only link key is an anonymized one."""
    view_only_key = request.query_params.get('view_only', None)
    return website_utils.check_private_key_for_anonymized_link(view_only_key)
class HideIfRegistration(ser.Field):
    """
    Wraps another serializer field; if the node is a registration the field
    is hidden: RelationshipFields are skipped entirely and attribute fields
    serialize as None.
    """

    def __init__(self, field, **kwargs):
        super(HideIfRegistration, self).__init__(**kwargs)
        self.field = field
        # mirror the wrapped field's configuration so DRF treats this
        # wrapper exactly like the field it decorates
        self.source = field.source
        self.required = field.required
        self.read_only = field.read_only

    def get_attribute(self, instance):
        if instance.is_registration:
            if isinstance(self.field, RelationshipField):
                # omit the relationship from the output entirely
                raise SkipField
            else:
                return None
        return self.field.get_attribute(instance)

    def bind(self, field_name, parent):
        # bind the wrapped field to this wrapper so its `parent`/`root`
        # chain goes through us
        super(HideIfRegistration, self).bind(field_name, parent)
        self.field.bind(field_name, self)

    def to_internal_value(self, data):
        # deserialization is delegated unchanged
        return self.field.to_internal_value(data)

    def to_representation(self, value):
        # when used under a ListSerializer, the wrapped field's parent must
        # be the child serializer, not the list wrapper
        if getattr(self.field.root, 'child', None):
            self.field.parent = self.field.root.child
        else:
            self.field.parent = self.field.root
        return self.field.to_representation(value)

    def to_esi_representation(self, value, envelope='data'):
        # same parent fix-up as to_representation, for ESI output
        if getattr(self.field.root, 'child', None):
            self.field.parent = self.field.root.child
        else:
            self.field.parent = self.field.root
        return self.field.to_esi_representation(value, envelope)
class HideIfDisabled(ser.Field):
    """
    If the user is disabled, returns None for attribute fields, or skips
    if a RelationshipField.
    """

    def __init__(self, field, **kwargs):
        super(HideIfDisabled, self).__init__(**kwargs)
        self.field = field
        # mirror the wrapped field's configuration so DRF treats this
        # wrapper exactly like the field it decorates
        self.source = field.source
        self.required = field.required
        self.read_only = field.read_only

    def get_attribute(self, instance):
        if instance.is_disabled:
            if isinstance(self.field, RelationshipField):
                # omit the relationship from the output entirely
                raise SkipField
            else:
                return None
        return self.field.get_attribute(instance)

    def bind(self, field_name, parent):
        # bind the wrapped field to this wrapper so its `parent`/`root`
        # chain goes through us
        super(HideIfDisabled, self).bind(field_name, parent)
        self.field.bind(field_name, self)

    def to_internal_value(self, data):
        # deserialization is delegated unchanged
        return self.field.to_internal_value(data)

    def to_representation(self, value):
        # when used under a ListSerializer, the wrapped field's parent must
        # be the child serializer, not the list wrapper
        if getattr(self.field.root, 'child', None):
            self.field.parent = self.field.root.child
        else:
            self.field.parent = self.field.root
        return self.field.to_representation(value)

    def to_esi_representation(self, value, envelope='data'):
        # same parent fix-up as to_representation, for ESI output
        if getattr(self.field.root, 'child', None):
            self.field.parent = self.field.root.child
        else:
            self.field.parent = self.field.root
        return self.field.to_esi_representation(value, envelope)
class HideIfWithdrawal(HideIfRegistration):
    """
    If registration is withdrawn, this field will return None (relationship
    fields are skipped entirely); all other wrapping behavior is inherited
    from HideIfRegistration.
    """

    def get_attribute(self, instance):
        # same hiding logic as the parent class, keyed on withdrawal
        # (`is_retracted`) instead of `is_registration`
        if instance.is_retracted:
            if isinstance(self.field, RelationshipField):
                raise SkipField
            else:
                return None
        return self.field.get_attribute(instance)
class AllowMissing(ser.Field):
    """
    Wraps another field so that a missing attribute serializes to '' instead
    of being skipped.
    """

    def __init__(self, field, **kwargs):
        super(AllowMissing, self).__init__(**kwargs)
        self.field = field

    def to_representation(self, value):
        return self.field.to_representation(value)

    def bind(self, field_name, parent):
        # bind the wrapped field to this wrapper so its `parent`/`root`
        # chain goes through us
        super(AllowMissing, self).bind(field_name, parent)
        self.field.bind(field_name, self)

    def get_attribute(self, instance):
        """
        Overwrite the error message to return a blank value if there is no existing value.
        This allows the display of keys that do not exist in the DB (gitHub on a new OSF account for example.)
        """
        try:
            return self.field.get_attribute(instance)
        except SkipField:
            return ''

    def to_internal_value(self, data):
        return self.field.to_internal_value(data)
def _url_val(val, obj, serializer, **kwargs):
    """Function applied by `HyperlinksField` to get the correct value in the
    schema.

    `val` may be a Link (resolved against obj), a string naming a serializer
    method (called with obj), or a literal value used as-is.

    :raises SkipField: if the resolved url is falsy, except the number 0,
        which is returned as a valid value.
    """
    url = None
    if isinstance(val, Link):  # If a Link is passed, get the url value
        url = val.resolve_url(obj, **kwargs)
    elif isinstance(val, basestring):  # if a string is passed, it's a method of the serializer
        if getattr(serializer, 'field', None):
            # presumably a hide/allow-missing wrapper around the real
            # serializer (those set `.field`) - step up to its parent;
            # TODO confirm against the wrapper classes' bind() chain
            serializer = serializer.parent
        url = getattr(serializer, val)(obj) if obj is not None else None
    else:
        url = val

    if not url and url != 0:
        raise SkipField
    else:
        return url
class IDField(ser.CharField):
    """
    ID field that validates that 'id' in the request body is the same as the instance 'id' for single requests.
    """

    def __init__(self, **kwargs):
        kwargs['label'] = 'ID'
        super(IDField, self).__init__(**kwargs)

    # Overrides CharField
    def to_internal_value(self, data):
        # Raise 409 Conflict when the body id doesn't match the URL id.
        # Bulk requests carry many ids, so the check only applies to
        # single-resource updates.
        request = self.context.get('request')
        if request:
            if request.method in utils.UPDATE_METHODS and not utils.is_bulk_request(request):
                id_field = self.get_id(self.root.instance)
                if id_field != data:
                    raise Conflict(detail=('The id you used in the URL, "{}", does not match the id you used in the json body\'s id field, "{}". The object "{}" exists, otherwise you\'d get a 404, so most likely you need to change the id field to match.'.format(id_field, data, id_field)))
        return super(IDField, self).to_internal_value(data)

    def get_id(self, obj):
        # NOTE(review): the third getattr argument is a default, so this
        # returns the literal string '_id' when the source attribute is
        # missing - confirm that is intended (vs. looking up obj._id)
        return getattr(obj, self.source, '_id')
class TypeField(ser.CharField):
    """
    Type field that validates that 'type' in the request body is the same as the Meta type.

    Also ensures that type is write-only and required.
    """

    def __init__(self, **kwargs):
        kwargs['write_only'] = True
        kwargs['required'] = True
        super(TypeField, self).__init__(**kwargs)

    # Overrides CharField
    def to_internal_value(self, data):
        # a list serializer wraps the real serializer as `child`, so the
        # Meta type lives one level down in that case
        if isinstance(self.root, JSONAPIListSerializer):
            type_ = self.root.child.Meta.type_
        else:
            type_ = self.root.Meta.type_
        if type_ != data:
            # 409 Conflict per JSON API when the body type mismatches
            raise Conflict(detail=('This resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the resource\'s type.'.format(type_, data)))
        return super(TypeField, self).to_internal_value(data)
class TargetTypeField(ser.CharField):
    """
    Enforces that the related resource has the correct type.
    """

    def __init__(self, **kwargs):
        kwargs['write_only'] = True
        kwargs['required'] = True
        # the expected type is fixed at field-declaration time
        self.target_type = kwargs.pop('target_type')
        super(TargetTypeField, self).__init__(**kwargs)

    def to_internal_value(self, data):
        if self.target_type != data:
            # 409 Conflict per JSON API when the target type mismatches
            raise Conflict(detail=('The target resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the target resource\'s type.'.format(self.target_type, data)))
        return super(TargetTypeField, self).to_internal_value(data)
class JSONAPIListField(ser.ListField):
    """ListField that rejects non-list input with the standard 'not_a_list'
    error instead of attempting to coerce it."""

    def to_internal_value(self, data):
        if not isinstance(data, list):
            self.fail('not_a_list', input_type=type(data).__name__)
        return super(JSONAPIListField, self).to_internal_value(data)
class AuthorizedCharField(ser.CharField):
    """
    Passes auth of the logged-in user to the object's method
    defined as the field source.

    Example:

        content = AuthorizedCharField(source='get_content')
    """

    def __init__(self, source=None, **kwargs):
        assert source is not None, 'The `source` argument is required.'
        self.source = source
        super(AuthorizedCharField, self).__init__(source=self.source, **kwargs)

    def get_attribute(self, obj):
        # instead of reading a plain attribute, call the source method with
        # the requesting user's Auth object
        user = self.context['request'].user
        auth = auth_core.Auth(user)
        field_source_method = getattr(obj, self.source)
        return field_source_method(auth=auth)
class RelationshipField(ser.HyperlinkedIdentityField):
"""
RelationshipField that permits the return of both self and related links, along with optional
meta information. ::
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-node-children-relationship',
self_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'}
)
The lookup field must be surrounded in angular brackets to find the attribute on the target. Otherwise, the lookup
field will be returned verbatim. ::
wiki_home = RelationshipField(
related_view='addon:addon-detail',
related_view_kwargs={'node_id': '<_id>', 'provider': 'wiki'},
)
'_id' is enclosed in angular brackets, but 'wiki' is not. 'id' will be looked up on the target, but 'wiki' will not.
The serialized result would be '/nodes/abc12/addons/wiki'.
Field can handle nested attributes: ::
wiki_home = RelationshipField(
related_view='wiki:wiki-detail',
related_view_kwargs={'node_id': '<_id>', 'wiki_id': '<wiki_pages_current.home>'}
)
Field can handle a filter_key, which operates as the source field (but
is named differently to not interfere with HyperLinkedIdentifyField's source
The ``filter_key`` argument defines the Mongo key (or ODM field name) to filter on
when using the ``FilterMixin`` on a view. ::
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
Field can include optional filters:
Example:
replies = RelationshipField(
self_view='nodes:node-comments',
self_view_kwargs={'node_id': '<node._id>'},
filter={'target': '<pk>'})
)
"""
json_api_link = True # serializes to a links object
def __init__(self, related_view=None, related_view_kwargs=None, self_view=None, self_view_kwargs=None,
             self_meta=None, related_meta=None, always_embed=False, filter=None, filter_key=None, **kwargs):
    """See the class docstring for the view/kwargs/meta/filter arguments.
    Views and their kwargs may also be callables taking the related
    attribute and returning the value to use.
    """
    # NOTE(review): these two self-assignments are no-ops
    related_view = related_view
    self_view = self_view
    related_kwargs = related_view_kwargs
    self_kwargs = self_view_kwargs
    self.views = {'related': related_view, 'self': self_view}
    self.view_kwargs = {'related': related_kwargs, 'self': self_kwargs}
    self.related_meta = related_meta
    self.self_meta = self_meta
    self.always_embed = always_embed
    self.filter = filter
    self.filter_key = filter_key

    # validate the combination of views/kwargs up front
    assert (related_view is not None or self_view is not None), 'Self or related view must be specified.'
    if related_view:
        assert related_kwargs is not None, 'Must provide related view kwargs.'
        if not callable(related_kwargs):
            assert isinstance(related_kwargs,
                              dict), "Related view kwargs must have format {'lookup_url_kwarg: lookup_field}."

    if self_view:
        assert self_kwargs is not None, 'Must provide self view kwargs.'
        assert isinstance(self_kwargs, dict), "Self view kwargs must have format {'lookup_url_kwarg: lookup_field}."

    # the related view (when given) takes precedence as the base
    # HyperlinkedIdentityField's view
    view_name = related_view
    if view_name:
        lookup_kwargs = related_kwargs
    else:
        view_name = self_view
        lookup_kwargs = self_kwargs
    if kwargs.get('lookup_url_kwarg', None):
        lookup_kwargs = kwargs.pop('lookup_url_kwarg')
    super(RelationshipField, self).__init__(view_name, lookup_url_kwarg=lookup_kwargs, **kwargs)

    # Allow a RelationshipField to be modified if explicitly set so
    if kwargs.get('read_only') is not None:
        self.read_only = kwargs['read_only']
def resolve(self, resource, field_name):
    """
    Resolves the view when embedding.

    Builds the view's URL from the lookup kwargs (evaluating callables
    against the related attribute) and returns Django's resolver match
    for it.
    """
    lookup_url_kwarg = self.lookup_url_kwarg
    if callable(lookup_url_kwarg):
        lookup_url_kwarg = lookup_url_kwarg(getattr(resource, field_name))

    kwargs = {attr_name: self.lookup_attribute(resource, attr) for (attr_name, attr) in
              lookup_url_kwarg.items()}

    view = self.view_name
    if callable(self.view_name):
        view = view(getattr(resource, field_name))
    return resolve(
        reverse(
            view,
            kwargs=kwargs
        )
    )
def process_related_counts_parameters(self, params, value):
    """
    Processes related_counts parameter.

    Can either be a True/False value for fetching counts on all fields, or a comma-separated list for specifying
    individual fields. Ensures each field for which counts are requested is
    a relationship field (or is hidden, e.g. by HideIfWithdrawal).

    :raises InvalidQueryStringError: if a requested field is not countable.
    """
    if utils.is_truthy(params) or utils.is_falsy(params):
        return params

    # split(',') already yields a list; the original wrapped it in a
    # pointless identity comprehension
    field_counts_requested = params.split(',')

    # a field is countable if it (or the field it wraps) is a json-api link
    countable_fields = {name for name, field in self.parent.fields.items() if
                        getattr(field, 'json_api_link', False) or
                        getattr(getattr(field, 'field', None), 'json_api_link', None)}
    for count_field in field_counts_requested:
        # Some fields will hide relationships, e.g. HideIfWithdrawal
        # Ignore related_counts for these fields
        fetched_field = self.parent.fields.get(count_field)
        hidden = fetched_field and isinstance(fetched_field, HideIfWithdrawal) and getattr(value, 'is_retracted', False)

        if not hidden and count_field not in countable_fields:
            raise InvalidQueryStringError(
                detail="Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got '{0}'".format(
                    params),
                parameter='related_counts'
            )
    return field_counts_requested
def get_meta_information(self, meta_data, value):
    """
    For retrieving meta values, otherwise returns {}.

    Each entry of `meta_data` is resolved via `_url_val` against `value`.
    'count'/'unread' entries are gated on the related_counts query param,
    and 'projects_in_common' on authentication plus its own query param.
    """
    meta = {}
    for key in meta_data or {}:
        if key == 'count' or key == 'unread':
            show_related_counts = self.context['request'].query_params.get('related_counts', False)
            if self.context['request'].parser_context.get('kwargs'):
                # embedded serializations never show counts
                if self.context['request'].parser_context['kwargs'].get('is_embedded'):
                    show_related_counts = False
            field_counts_requested = self.process_related_counts_parameters(show_related_counts, value)

            if utils.is_truthy(show_related_counts):
                meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
            elif utils.is_falsy(show_related_counts):
                # explicitly disabled: skip this meta key
                continue
            elif self.field_name in field_counts_requested:
                # counts requested only for specific fields, this being one
                meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
            else:
                continue
        elif key == 'projects_in_common':
            # only meaningful for an authenticated user who opted in
            if not get_user_auth(self.context['request']).user:
                continue
            if not self.context['request'].query_params.get('show_projects_in_common', False):
                continue
            meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
        else:
            meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent)
    return meta
def lookup_attribute(self, obj, lookup_field):
    """
    Returns attribute from target object unless attribute surrounded in angular brackets where it returns the lookup field.

    Also handles the lookup of nested attributes (dotted paths inside the
    brackets); a missing nested attribute yields None rather than an error.
    """
    bracket_check = _tpl(lookup_field)
    if bracket_check:
        source_attrs = bracket_check.split('.')
        # If you are using a nested attribute for lookup, and you get the attribute wrong, you will not get an
        # error message, you will just not see that field. This allows us to have slightly more dynamic use of
        # nested attributes in relationship fields.
        try:
            return_val = get_nested_attributes(obj, source_attrs)
        except KeyError:
            return None
        return return_val

    # not bracketed: the lookup field itself is the literal value
    return lookup_field
def kwargs_lookup(self, obj, kwargs_dict):
    """
    For returning kwargs dictionary of format {"lookup_url_kwarg": lookup_value}.

    Returns None when any lookup value resolves to None, signalling that
    the URL cannot be built for this object.
    """
    if callable(kwargs_dict):
        kwargs_dict = kwargs_dict(obj)

    kwargs_retrieval = {}
    for lookup_url_kwarg, lookup_field in kwargs_dict.items():
        try:
            lookup_value = self.lookup_attribute(obj, lookup_field)
        except AttributeError as exc:
            # a misconfigured lookup is a programming error, not a data one
            raise AssertionError(exc)
        if lookup_value is None:
            return None
        kwargs_retrieval[lookup_url_kwarg] = lookup_value
    return kwargs_retrieval
# Overrides HyperlinkedIdentityField
def get_url(self, obj, view_name, request, format):
    """Build the {'related': url, 'self': url} dict for this relationship,
    or None when neither URL can be resolved for `obj`."""
    urls = {}
    # NOTE(review): the loop variable shadows the `view_name` parameter,
    # which is never read - confirm the parameter is only kept for the
    # base-class signature
    for view_name, view in self.views.items():
        if view is None:
            urls[view_name] = {}
        else:
            kwargs = self.kwargs_lookup(obj, self.view_kwargs[view_name])
            if kwargs is None:
                # some lookup value was missing; no URL for this view
                urls[view_name] = {}
            else:
                if callable(view):
                    view = view(getattr(obj, self.field_name))
                url = self.reverse(view, kwargs=kwargs, request=request, format=format)
                if self.filter:
                    formatted_filter = self.format_filter(obj)
                    if formatted_filter:
                        url = extend_querystring_params(url, {'filter': formatted_filter})
                    else:
                        url = None

                # propagate a view-only key from the incoming request
                url = extend_querystring_if_key_exists(url, self.context['request'], 'view_only')
                urls[view_name] = url

    if not urls['self'] and not urls['related']:
        urls = None
    return urls
def to_esi_representation(self, value, envelope='data'):
    """Render this relationship as an ``<esi:include>`` tag pointing at the
    related URL; skips the field when no related link exists."""
    links = self.to_representation(value)
    try:
        href = links['links']['related']['href']
    except KeyError:
        raise SkipField
    if not href or href == '{}':
        # No usable related link -- render nothing (None), as before.
        return None
    if self.always_embed:
        envelope = 'data'
    params = dict(format=['jsonapi'], envelope=[envelope])
    query_params = self.parent.context['request'].query_params
    if 'view_only' in query_params:
        params['view_only'] = [query_params['view_only']]
    esi_url = extend_querystring_params(href, params)
    return '<esi:include src="{}"/>'.format(esi_url)
def format_filter(self, obj):
    """Serialize ``self.filter`` into a ``[name]=value`` querystring fragment.

    Each filter source is either the name of a method on the parent
    serializer or an attribute lookup on *obj*; falsy values are dropped.
    Returns ``None`` when nothing survives.
    """
    params = QueryDict(mutable=True)
    for name, source in self.filter.items():
        try:
            # Check whether a serializer method was passed in.
            serializer_method = getattr(self.parent, source)
        except AttributeError:
            value = self.lookup_attribute(obj, source)
        else:
            value = serializer_method(obj)
        if value:
            params.update({'[{}]'.format(name): value})
    if not params:
        return None
    return params.urlencode(safe=['[', ']'])
# Overrides HyperlinkedIdentityField
def to_representation(self, value):
    """Return the JSON-API relationship links object for *value*.

    Requires a 'request' in the serializer context; raises
    ImproperlyConfigured when the relationship URL cannot be reversed and
    SkipField when get_url produced no usable URLs.
    """
    request = self.context.get('request', None)
    format = self.context.get('format', None)
    assert request is not None, (
        '`%s` requires the request in the serializer'
        " context. Add `context={'request': request}` when instantiating "
        'the serializer.' % self.__class__.__name__
    )
    # By default use whatever format is given for the current context
    # unless the target is a different type to the source.
    #
    # Eg. Consider a HyperlinkedIdentityField pointing from a json
    # representation to an html property of that representation...
    #
    # '/snippets/1/' should link to '/snippets/1/highlight/'
    # ...but...
    # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
    if format and self.format and self.format != format:
        format = self.format
    # Return the hyperlink, or error if incorrectly configured.
    try:
        url = self.get_url(value, self.view_name, request, format)
    except NoReverseMatch:
        msg = (
            'Could not resolve URL for hyperlinked relationship using '
            'view name "%s". You may have failed to include the related '
            'model in your API, or incorrectly configured the '
            '`lookup_field` attribute on this field.'
        )
        if value in ('', None):
            value_string = {'': 'the empty string', None: 'None'}[value]
            # The WARNING text is %-formatted here; the remaining '%s' in
            # `msg` is filled with the view name below.
            msg += (
                ' WARNING: The value of the field on the model instance '
                "was %s, which may be why it didn't match any "
                'entries in your URL conf.' % value_string
            )
        raise ImproperlyConfigured(msg % self.view_name)
    if url is None:
        raise SkipField
    # `url` is the dict built by get_url: {'related': ..., 'self': ...}.
    related_url = url['related']
    related_meta = self.get_meta_information(self.related_meta, value)
    self_url = url['self']
    self_meta = self.get_meta_information(self.self_meta, value)
    return format_relationship_links(related_url, self_url, related_meta, self_meta)
class FileCommentRelationshipField(RelationshipField):
    """Relationship field for file comments.

    Folders cannot carry comments, so serialization is skipped entirely
    for folder objects.
    """

    def get_url(self, obj, view_name, request, format):
        if obj.kind != 'folder':
            return super(FileCommentRelationshipField, self).get_url(obj, view_name, request, format)
        raise SkipField
class TargetField(ser.Field):
    """
    Field that returns a nested dict with the url (constructed based
    on the object's type), optional meta information, and link_type.

    Example:
        target = TargetField(link_type='related', meta={'type': 'get_target_type'})
    """
    json_api_link = True  # serializes to a links object

    # Maps the target referent's `_name` to the detail view (and its URL
    # kwarg) used for embedding/URL resolution.  A None view means the
    # target type has no embeddable detail view (e.g. wiki pages).
    view_map = {
        'node': {
            'view': 'nodes:node-detail',
            'lookup_kwarg': 'node_id'
        },
        'comment': {
            'view': 'comments:comment-detail',
            'lookup_kwarg': 'comment_id'
        },
        'nodewikipage': {
            'view': None,
            'lookup_kwarg': None
        }
    }

    def __init__(self, **kwargs):
        self.meta = kwargs.pop('meta', {})
        self.link_type = kwargs.pop('link_type', 'url')
        super(TargetField, self).__init__(read_only=True, **kwargs)

    def resolve(self, resource, field_name):
        """
        Resolves the view for target node or target comment when embedding.
        """
        view_info = self.view_map.get(resource.target.referent._name, None)
        if not view_info:
            # BUG FIX: the message previously formatted `resource.target._name`
            # even though the lookup above keys on `resource.target.referent._name`;
            # report the same name that failed the lookup.
            raise TargetNotSupportedError('{} is not a supported target type'.format(
                resource.target.referent._name
            ))
        if not view_info['view']:
            # No detail view for this target type -- nothing to resolve.
            return None, None, None
        embed_value = resource.target._id
        kwargs = {view_info['lookup_kwarg']: embed_value}
        return resolve(
            reverse(
                view_info['view'],
                kwargs=kwargs
            )
        )

    def to_esi_representation(self, value, envelope='data'):
        """Render as an ``<esi:include>`` tag; falls back to the plain
        representation when the target has no absolute URL."""
        href = value.get_absolute_url()
        if href:
            esi_url = extend_querystring_params(href, dict(envelope=[envelope, ], format=['jsonapi', ]))
            return '<esi:include src="{}"/>'.format(esi_url)
        return self.to_representation(value)

    def to_representation(self, value):
        """
        Returns nested dictionary in format {'links': {'self.link_type': ... }

        If no meta information, self.link_type is equal to a string containing link's URL. Otherwise,
        the link is represented as a links object with 'href' and 'meta' members.
        """
        meta = website_utils.rapply(self.meta, _url_val, obj=value, serializer=self.parent)
        return {'links': {self.link_type: {'href': value.referent.get_absolute_url(), 'meta': meta}}}
class LinksField(ser.Field):
    """Links field that resolves to a links object. Used in conjunction with `Link`.
    If the object to be serialized implements `get_absolute_url`, then the return value
    of that method is used for the `self` link.

    Example: ::

        links = LinksField({
            'html': 'absolute_url',
            'children': {
                'related': Link('nodes:node-children', node_id='<pk>'),
                'count': 'get_node_count'
            },
            'contributors': {
                'related': Link('nodes:node-contributors', node_id='<pk>'),
                'count': 'get_contrib_count'
            },
            'registrations': {
                'related': Link('nodes:node-registrations', node_id='<pk>'),
                'count': 'get_registration_count'
            },
        })
    """

    def __init__(self, links, *args, **kwargs):
        ser.Field.__init__(self, read_only=True, *args, **kwargs)
        self.links = links

    def get_attribute(self, obj):
        # We pass the object instance onto `to_representation`,
        # not just the field attribute.
        return obj

    def extend_absolute_url(self, obj):
        """Absolute URL for *obj*, preserving a view-only key if present on the request."""
        return extend_querystring_if_key_exists(obj.get_absolute_url(), self.context['request'], 'view_only')

    def to_representation(self, obj):
        """Resolve every configured link against *obj*, skipping links whose
        resolution raises SkipField."""
        ret = {}
        # FIX: `items()` instead of the Python-2-only `iteritems()`, so the
        # field also works on Python 3 (iteration semantics are identical).
        for name, value in self.links.items():
            try:
                url = _url_val(value, obj=obj, serializer=self.parent)
            except SkipField:
                continue
            else:
                ret[name] = url
        if hasattr(obj, 'get_absolute_url') and 'self' not in self.links:
            ret['self'] = self.extend_absolute_url(obj)
        return ret
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
def _tpl(val):
"""Return value within ``< >`` if possible, else return ``None``."""
match = _tpl_pattern.match(val)
if match:
return match.groups()[0]
return None
def _get_attr_from_tpl(attr_tpl, obj):
    """Resolve a ``'<dotted.path>'`` template against *obj*.

    Non-templated values are returned unchanged.  Attribute traversal is
    attempted first; when it yields nothing, mapping access
    (``obj[attr_name]``) is tried before raising AttributeError.
    """
    attr_name = _tpl(str(attr_tpl))
    if not attr_name:
        return attr_tpl
    current = obj
    for segment in attr_name.split('.'):
        current = getattr(current, segment, ser.empty)
    if current is not ser.empty:
        return current
    if attr_name in obj:
        return obj[attr_name]
    raise AttributeError(
        '{attr_name!r} is not a valid '
        'attribute of {obj!r}'.format(
            attr_name=attr_name, obj=obj,
        ))
# TODO: Make this a Field that is usable on its own?
class Link(object):
    """Link object to use in conjunction with Links field. Does reverse lookup of
    URLs given an endpoint name and attributed enclosed in `<>`. This includes
    complex key strings like 'user.id'
    """

    def __init__(self, endpoint, args=None, kwargs=None, query_kwargs=None, **kw):
        self.endpoint = endpoint
        self.kwargs = kwargs or {}
        self.args = args or tuple()
        self.reverse_kwargs = kw
        self.query_kwargs = query_kwargs or {}

    def resolve_url(self, obj):
        """Reverse the endpoint URL, resolving any `<...>` templates against *obj*."""
        resolved_kwargs = {
            key: _get_attr_from_tpl(tpl, obj) for key, tpl in self.kwargs.items()
        }
        resolved_args = [_get_attr_from_tpl(tpl, obj) for tpl in self.args]
        resolved_query = {
            key: _get_attr_from_tpl(tpl, obj) for key, tpl in self.query_kwargs.items()
        }
        # Presumably, if you are expecting a value but the value is empty,
        # then the link is invalid.
        if any(value is None for value in resolved_kwargs.values()):
            raise SkipField
        return utils.absolute_reverse(
            self.endpoint,
            args=resolved_args,
            kwargs=resolved_kwargs,
            query_kwargs=resolved_query,
            **self.reverse_kwargs
        )
class WaterbutlerLink(Link):
    """Link object to use in conjunction with Links field. Builds a Waterbutler URL for files."""

    def __init__(self, must_be_file=None, must_be_folder=None, **kwargs):
        # NOTE(review): Link.__init__ is deliberately not called; resolve_url
        # below is fully overridden and only consumes self.kwargs.
        self.kwargs = kwargs
        self.must_be_file = must_be_file
        self.must_be_folder = must_be_folder

    def resolve_url(self, obj):
        """Reverse URL lookup for WaterButler routes."""
        is_folder = obj.path.endswith('/')
        if self.must_be_folder is True and not is_folder:
            raise SkipField
        if self.must_be_file is True and is_folder:
            raise SkipField
        url = website_utils.waterbutler_api_url_for(obj.node._id, obj.provider, obj.path, **self.kwargs)
        if url:
            return url
        raise SkipField
class NodeFileHyperLinkField(RelationshipField):
    """Relationship field restricted to a particular file `kind`
    ('file'/'folder'); objects of any other kind are skipped."""

    def __init__(self, kind=None, never_embed=False, **kwargs):
        self.kind = kind
        self.never_embed = never_embed
        super(NodeFileHyperLinkField, self).__init__(**kwargs)

    def get_url(self, obj, view_name, request, format):
        if self.kind and obj.kind != self.kind:
            raise SkipField
        return super(NodeFileHyperLinkField, self).get_url(obj, view_name, request, format)
class JSONAPIListSerializer(ser.ListSerializer):
    """List serializer that renders each child un-enveloped and, for bulk
    updates, supports skipping resources the user cannot edit."""

    def to_representation(self, data):
        enable_esi = self.context.get('enable_esi', False)
        # NOTE(review): dict.update() returns None, so `envelope` is always
        # None here -- consistent with the comment below, but the assignment
        # reads as if it retrieved a value.  Presumably intentional; confirm.
        envelope = self.context.update({'envelope': None})
        # Don't envelope when serializing collection
        errors = {}
        bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
        # NOTE(review): `collections.Mapping` was removed from the
        # `collections` namespace in Python 3.10 (`collections.abc.Mapping`).
        if isinstance(data, collections.Mapping):
            errors = data.get('errors', None)
            data = data.get('data', None)
        if enable_esi:
            ret = [
                self.child.to_esi_representation(item, envelope=None) for item in data
            ]
        else:
            ret = [
                self.child.to_representation(item, envelope=envelope) for item in data
            ]
        # Surface the per-resource errors collected upstream when the
        # client asked to skip uneditable resources.
        if errors and bulk_skip_uneditable:
            ret.append({'errors': errors})
        return ret

    # Overrides ListSerializer which doesn't support multiple update by default
    def update(self, instance, validated_data):
        """Bulk-update `instance` objects from `validated_data`, matched by id."""
        # avoiding circular import
        from api.nodes.serializers import ContributorIDField
        # if PATCH request, the child serializer's partial attribute needs to be True
        if self.context['request'].method == 'PATCH':
            self.child.partial = True
        bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
        if not bulk_skip_uneditable:
            if len(instance) != len(validated_data):
                raise exceptions.ValidationError({'non_field_errors': 'Could not find all objects to update.'})
        id_lookup = self.child.fields['id'].source
        data_mapping = {item.get(id_lookup): item for item in validated_data}
        if isinstance(self.child.fields['id'], ContributorIDField):
            instance_mapping = {self.child.fields['id'].get_id(item): item for item in instance}
        else:
            instance_mapping = {getattr(item, id_lookup): item for item in instance}
        ret = {'data': []}
        for resource_id, resource in instance_mapping.items():
            # Entries left in data_mapping afterwards had no matching instance.
            data = data_mapping.pop(resource_id, None)
            ret['data'].append(self.child.update(resource, data))
        # If skip_uneditable in request, add validated_data for nodes in which the user did not have edit permissions to errors
        if data_mapping and bulk_skip_uneditable:
            ret.update({'errors': data_mapping.values()})
        return ret

    # overrides ListSerializer
    def run_validation(self, data):
        """Enforce the bulk-operation size limit before normal validation."""
        meta = getattr(self, 'Meta', None)
        bulk_limit = getattr(meta, 'bulk_limit', BULK_SETTINGS['DEFAULT_BULK_LIMIT'])
        num_items = len(data)
        if num_items > bulk_limit:
            raise JSONAPIException(source={'pointer': '/data'},
                                   detail='Bulk operation limit is {}, got {}.'.format(bulk_limit, num_items))
        return super(JSONAPIListSerializer, self).run_validation(data)

    # overrides ListSerializer: Add HTML-sanitization similar to that used by APIv1 front-end views
    def is_valid(self, clean_html=True, **kwargs):
        """
        After validation, scrub HTML from validated_data prior to saving (for create and update views)
        Exclude 'type' from validated_data.
        """
        ret = super(JSONAPIListSerializer, self).is_valid(**kwargs)
        if clean_html is True:
            self._validated_data = website_utils.rapply(self.validated_data, strip_html)
        for data in self._validated_data:
            data.pop('type', None)
        return ret
class JSONAPISerializer(ser.Serializer):
    """Base serializer. Requires that a `type_` option is set on `class Meta`. Also
    allows for enveloping of both single resources and collections. Looks to nest fields
    according to JSON API spec. Relational fields must set json_api_link=True flag.
    Self/html links must be nested under "links".
    """
    # Don't serialize relationships that use these views
    # when viewing thru an anonymous VOL
    views_to_hide_if_anonymous = {
        'users:user-detail',
        'nodes:node-registrations',
    }

    # overrides Serializer
    @classmethod
    def many_init(cls, *args, **kwargs):
        # Route `many=True` through the JSON-API-aware list serializer.
        kwargs['child'] = cls()
        return JSONAPIListSerializer(*args, **kwargs)

    def invalid_embeds(self, fields, embeds):
        """Return the requested embed names that do not correspond to an
        embeddable (json_api_link) field."""
        fields_check = fields[:]
        for index, field in enumerate(fields_check):
            # Unwrap nested field containers to reach the real field.
            if getattr(field, 'field', None):
                fields_check[index] = field.field
        invalid_embeds = set(embeds.keys()) - set(
            [f.field_name for f in fields_check if getattr(f, 'json_api_link', False)])
        return invalid_embeds

    def to_esi_representation(self, data, envelope='data'):
        """Render this resource as an ``<esi:include>`` tag pointing at its
        own URL; falls back to normal serialization when no URL exists."""
        href = None
        query_params_blacklist = ['page[size]']
        href = self.get_absolute_url(data)
        if href and href != '{}':
            # Carry over the request's query params (minus blacklisted ones)
            # and force the requested envelope.
            esi_url = furl.furl(href).add(args=dict(self.context['request'].query_params)).remove(
                args=query_params_blacklist).remove(args=['envelope']).add(args={'envelope': envelope}).url
            return '<esi:include src="{}"/>'.format(esi_url)
        # failsafe, let python do it if something bad happened in the ESI construction
        return super(JSONAPISerializer, self).to_representation(data)

    # overrides Serializer
    def to_representation(self, obj, envelope='data'):
        """Serialize to final representation.

        :param obj: Object to be serialized.
        :param envelope: Key for resource object.
        """
        ret = {}
        meta = getattr(self, 'Meta', None)
        type_ = getattr(meta, 'type_', None)
        assert type_ is not None, 'Must define Meta.type_'
        # JSON API resource-object skeleton; empty sections are pruned below.
        data = {
            'id': '',
            'type': type_,
            'attributes': {},
            'relationships': {},
            'embeds': {},
            'links': {},
        }
        embeds = self.context.get('embed', {})
        context_envelope = self.context.get('envelope', envelope)
        if context_envelope == 'None':
            context_envelope = None
        enable_esi = self.context.get('enable_esi', False)
        is_anonymous = is_anonymized(self.context['request'])
        to_be_removed = set()
        if is_anonymous and hasattr(self, 'non_anonymized_fields'):
            # Drop any fields that are not specified in the `non_anonymized_fields` variable.
            allowed = set(self.non_anonymized_fields)
            existing = set(self.fields.keys())
            to_be_removed = existing - allowed
        fields = [field for field in self.fields.values() if
                  not field.write_only and field.field_name not in to_be_removed]
        invalid_embeds = self.invalid_embeds(fields, embeds)
        # Fields removed by anonymization are not the client's fault.
        invalid_embeds = invalid_embeds - to_be_removed
        if invalid_embeds:
            raise InvalidQueryStringError(parameter='embed',
                                          detail='The following fields are not embeddable: {}'.format(
                                              ', '.join(invalid_embeds)))
        for field in fields:
            try:
                attribute = field.get_attribute(obj)
            except SkipField:
                continue
            nested_field = getattr(field, 'field', None)
            if attribute is None:
                # We skip `to_representation` for `None` values so that
                # fields do not have to explicitly deal with that case.
                data['attributes'][field.field_name] = None
            else:
                try:
                    representation = field.to_representation(attribute)
                except SkipField:
                    continue
                if getattr(field, 'json_api_link', False) or getattr(nested_field, 'json_api_link', False):
                    # If embed=field_name is appended to the query string or 'always_embed' flag is True, directly embed the
                    # results in addition to adding a relationship link
                    if embeds and (field.field_name in embeds or getattr(field, 'always_embed', None)):
                        if enable_esi:
                            try:
                                result = field.to_esi_representation(attribute, envelope=envelope)
                            except SkipField:
                                continue
                        else:
                            try:
                                # If a field has an empty representation, it should not be embedded.
                                result = self.context['embed'][field.field_name](obj)
                            except SkipField:
                                result = None
                        if result:
                            data['embeds'][field.field_name] = result
                        else:
                            data['embeds'][field.field_name] = {'error': 'This field is not embeddable.'}
                    try:
                        # Anonymous view-only links hide certain relationships entirely.
                        if not (is_anonymous and
                                hasattr(field, 'view_name') and
                                field.view_name in self.views_to_hide_if_anonymous):
                            data['relationships'][field.field_name] = representation
                    except SkipField:
                        continue
                elif field.field_name == 'id':
                    data['id'] = representation
                elif field.field_name == 'links':
                    data['links'] = representation
                else:
                    data['attributes'][field.field_name] = representation
        if not data['relationships']:
            del data['relationships']
        if not data['embeds']:
            del data['embeds']
        if context_envelope:
            ret[context_envelope] = data
            if is_anonymous:
                ret['meta'] = {'anonymous': True}
        else:
            ret = data
        return ret

    def get_absolute_url(self, obj):
        # Subclasses must provide the canonical URL for their resource.
        raise NotImplementedError()

    def get_absolute_html_url(self, obj):
        """Absolute front-end URL, preserving a view-only key if present on the request."""
        return extend_querystring_if_key_exists(obj.absolute_url, self.context['request'], 'view_only')

    # overrides Serializer: Add HTML-sanitization similar to that used by APIv1 front-end views
    def is_valid(self, clean_html=True, **kwargs):
        """
        After validation, scrub HTML from validated_data prior to saving (for create and update views)
        Exclude 'type' and '_id' from validated_data.
        """
        ret = super(JSONAPISerializer, self).is_valid(**kwargs)
        if clean_html is True:
            self._validated_data = self.sanitize_data()
        self._validated_data.pop('type', None)
        self._validated_data.pop('target_type', None)
        if self.context['request'].method in utils.UPDATE_METHODS:
            self._validated_data.pop('_id', None)
        return ret

    def sanitize_data(self):
        """Recursively strip HTML from every validated value."""
        return website_utils.rapply(self.validated_data, strip_html)
class JSONAPIRelationshipSerializer(ser.Serializer):
    """Base Relationship serializer. Requires that a `type_` option is set on `class Meta`.
    Provides a simplified serialization of the relationship, allowing for simple update request
    bodies.
    """
    id = ser.CharField(required=False, allow_null=True)
    type = TypeField(required=False, allow_null=True)

    def to_representation(self, obj):
        """Render as ``{'type': ..., 'id': ...}`` or None when the id is empty."""
        meta = getattr(self, 'Meta', None)
        type_ = getattr(meta, 'type_', None)
        assert type_ is not None, 'Must define Meta.type_'
        id_field = self.fields['id']
        relationship = id_field.to_representation(id_field.get_attribute(obj))
        if relationship:
            return {'type': type_, 'id': relationship}
        return None
def DevOnly(field):
    """Make a field only active in ``DEV_MODE``. ::

        experimental_field = DevOnly(CharField(required=False))
    """
    if settings.DEV_MODE:
        return field
    return None
class RestrictedDictSerializer(ser.Serializer):
    """Serializer that renders only readable fields to a plain dict,
    without any JSON-API nesting."""

    def to_representation(self, obj):
        data = {}
        readable_fields = [f for f in self.fields.values() if not f.write_only]
        for field in readable_fields:
            try:
                attribute = field.get_attribute(obj)
            except ser.SkipField:
                continue
            # Skip `to_representation` for None so individual fields need
            # not handle that case themselves.
            if attribute is None:
                data[field.field_name] = None
            else:
                data[field.field_name] = field.to_representation(attribute)
        return data
def relationship_diff(current_items, new_items):
    """
    To be used in POST and PUT/PATCH relationship requests, as, by JSON API specs,
    in update requests, the 'remove' items' relationships would be deleted, and the
    'add' would be added, while for create requests, only the 'add' would be added.

    :param current_items: The current items in the relationship
    :param new_items: The items passed in the request
    :return: dict with 'add' and 'remove' sub-dicts
    """
    current_keys = set(current_items)
    new_keys = set(new_items)
    return {
        'add': {key: new_items[key] for key in new_keys - current_keys},
        'remove': {key: current_items[key] for key in current_keys - new_keys},
    }
class AddonAccountSerializer(JSONAPISerializer):
    """Serializer for a user's external (addon) account."""
    id = ser.CharField(source='_id', read_only=True)
    provider = ser.CharField(read_only=True)
    profile_url = ser.CharField(required=False, read_only=True)
    display_name = ser.CharField(required=False, read_only=True)
    # FIX: was the accidental double assignment `links = links = LinksField(...)`.
    links = LinksField({
        'self': 'get_absolute_url',
    })

    class Meta:
        type_ = 'external_accounts'

    def get_absolute_url(self, obj):
        """Absolute URL of the external-account detail view for this account."""
        kwargs = self.context['request'].parser_context['kwargs']
        kwargs.update({'account_id': obj._id})
        return absolute_reverse(
            'users:user-external_account-detail',
            kwargs=kwargs
        )
        # FIX: removed the unreachable `return obj.get_absolute_url()` that
        # followed the return statement above.
class LinkedNode(JSONAPIRelationshipSerializer):
    """Relationship serializer for a single node pointer (id only)."""
    # `node._id` is the guid of the node the pointer targets.
    id = ser.CharField(source='node._id', required=False, allow_null=True)

    class Meta:
        type_ = 'linked_nodes'
class LinkedNodesRelationshipSerializer(ser.Serializer):
    """Serializer for the linked-nodes relationship endpoint: reads and
    rewrites the set of node pointers attached to a collection."""

    data = ser.ListField(child=LinkedNode())
    links = LinksField({'self': 'get_self_url',
                        'html': 'get_related_url'})

    def get_self_url(self, obj):
        return obj['self'].linked_nodes_self_url

    def get_related_url(self, obj):
        return obj['self'].linked_nodes_related_url

    class Meta:
        type_ = 'linked_nodes'

    def get_pointers_to_add_remove(self, pointers, new_pointers):
        """Diff the current pointers against the requested node ids.

        :return: (nodes_to_add, pointers_to_remove); raises NotFound for any
            requested node id that cannot be loaded.
        """
        diff = relationship_diff(
            current_items={pointer.node._id: pointer for pointer in pointers},
            new_items={val['node']['_id']: val for val in new_pointers}
        )
        nodes_to_add = []
        for node_id in diff['add']:
            node = Node.load(node_id)
            if not node:
                raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
            nodes_to_add.append(node)
        return nodes_to_add, diff['remove'].values()

    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object;
        # deleted and collection nodes are filtered out of the pointer list.
        return {'data': [
            pointer for pointer in
            obj.nodes_pointer
            if not pointer.node.is_deleted and not pointer.node.is_collection
        ], 'self': obj}

    def update(self, instance, validated_data):
        """PUT/PATCH: make the pointer set exactly match the request
        (removals happen before additions)."""
        collection = instance['self']
        auth = utils.get_user_auth(self.context['request'])
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        for pointer in remove:
            collection.rm_pointer(pointer, auth)
        for node in add:
            collection.add_pointer(node, auth)
        return self.make_instance_obj(collection)

    def create(self, validated_data):
        """POST: add new pointers only; a request that adds nothing is an error."""
        instance = self.context['view'].get_object()
        auth = utils.get_user_auth(self.context['request'])
        collection = instance['self']
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        if not len(add):
            raise RelationshipPostMakesNoChanges
        for node in add:
            collection.add_pointer(node, auth)
        return self.make_instance_obj(collection)
| karenhanson/osf.io_rmap_integration_old | api/base/serializers.py | serializers.py | py | 49,061 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "website.util.check_private_key_for_anonymized_link",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "website.util",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.Field",
"line_number": 61,
"usage_type": "attr... |
21683160230 | from flask import Flask, make_response, jsonify, request
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import requests
import numpy as np
import pandas as pd
import json
app = Flask(__name__)
# NOTE(review): this module-level list is immediately shadowed by the local
# `rows` inside emplace() and appears otherwise unused -- confirm and remove.
rows = []
@app.route('/getDB')
def emplace():
    """Recommend products similar to ``?name=<product>`` by ingredient overlap.

    Fetches the product table from the backing service, vectorizes the
    ingredient lists (1-3 grams), ranks other products by cosine similarity,
    and keeps only candidates within +/-10% of the target's price and
    calories.  Returns the surviving records as a JSON array.
    """
    prod_name = request.args.get('name')
    res = requests.get('http://43.201.114.241:8080/userinfo/product/')
    rows = res.json()
    # Map service keys -> frame columns (only two columns are renamed).
    # Building the frame in one comprehension replaces the 23 parallel
    # append-loops of the original and stops shadowing the builtin `id`.
    column_map = {
        'product_name': 'product_name',
        'ingredient': 'ingredients_list',
        'calory': 'calories',
        'price': 'price',
        'amount': 'amount',
        'carbohydrate': 'carbohydrate',
        'cholesterol': 'cholesterol',
        'company': 'company',
        'cooking_type': 'cooking_type',
        'fat': 'fat',
        'id': 'id',
        'primary_type': 'primary_type',
        'product_category': 'product_category',
        'product_image': 'product_image',
        'protein': 'protein',
        'sat_fat': 'sat_fat',
        'secondary_type': 'secondary_type',
        'serving_size': 'serving_size',
        'sodium': 'sodium',
        'specific': 'specific',
        'sugar': 'sugar',
        'trans_fat': 'trans_fat',
        'vegan_option': 'vegan_option',
    }
    data = pd.DataFrame({col: [row[key] for row in rows]
                         for key, col in column_map.items()})
    cnt_vector = CountVectorizer(ngram_range=(1, 3))
    vector_categ = cnt_vector.fit_transform(data['ingredients_list'])
    # For each product, other products ordered by decreasing similarity.
    categ_sim = cosine_similarity(
        vector_categ, vector_categ).argsort()[:, ::-1]
    target_idx = data[data['product_name'] == prod_name].index.values
    if len(target_idx) == 0:
        # FIX: an unknown product name previously crashed with an
        # IndexError (HTTP 500); report "not found" instead.
        return make_response(jsonify([]), 404)
    target_idx = target_idx[:1]
    sim_idx = categ_sim[target_idx, :].reshape(-1)
    sim_idx = sim_idx[sim_idx != target_idx]
    result = data.iloc[sim_idx].sort_values('price', ascending=False)
    target = data.iloc[target_idx[0]]
    # Keep only candidates within +/-10% of the target's price and calories.
    result = result[(result['price'] > (target.price * 0.9)) &
                    (result['price'] < (target.price * 1.1))]
    result = result[(result['calories'] > (target.calories * 0.9)) &
                    (result['calories'] < (target.calories * 1.1))]
    result = json.loads(result.to_json(orient='records', force_ascii=False))
    return make_response(jsonify(result), 200)
if __name__ == '__main__':
    # Development server, reachable from other hosts on the network.
    app.run(host='0.0.0.0')
| jane-k/RecommendationSystem | app.py | app.py | py | 4,148 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
30346539101 | import os
import psycopg2
from flask import Flask, render_template, request, url_for, redirect
from app import app
def get_db_connection():
    """Open a new connection to the local `restaurant` PostgreSQL database.

    Credentials come from the DB_USERNAME / DB_PASSWORD environment
    variables (a missing variable raises KeyError).  Callers own the
    returned connection and must close it.
    """
    conn = psycopg2.connect(host='localhost',
                            database='restaurant',
                            user=os.environ['DB_USERNAME'],
                            password=os.environ['DB_PASSWORD'])
    return conn
@app.route('/')
def index():
    """Render the landing page."""
    return render_template("index.html")
@app.route('/restaurant/')
def restaurant():
    """List every row of the `restaurants` table.

    FIX: the cursor and connection are now closed even when the query
    raises; previously both leaked on any database error.
    """
    conn = get_db_connection()
    try:
        cur = conn.cursor()
        try:
            cur.execute('SELECT * FROM restaurants;')
            restaurants = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return render_template('restaurant.html', restaurants=restaurants)
@app.route('/create/', methods=('GET', 'POST'))
def create():
    """Show the creation form (GET) or insert a new restaurant row (POST)."""
    if request.method == 'POST':
        restaurant_name = request.form['restaurant_name']
        area = request.form['area']
        category = request.form['category']
        restaurant_visited = int(request.form['restaurant_visited'])
        average_rating = request.form['average_rating']
        ratings_count = int(request.form['ratings_count'])
        conn = get_db_connection()
        try:
            cur = conn.cursor()
            try:
                # Parameterized INSERT; values are never interpolated into SQL.
                cur.execute('INSERT INTO restaurants (restaurant_name, area, category, restaurant_visited, average_rating, ratings_count)'
                            'VALUES (%s, %s, %s, %s, %s, %s)',
                            (restaurant_name, area, category, restaurant_visited, average_rating, ratings_count))
                conn.commit()
            finally:
                cur.close()
        finally:
            conn.close()
        # BUG FIX: url_for() takes the endpoint (view-function) name, not a
        # URL path -- the original url_for('/restaurant/') raised BuildError.
        return redirect(url_for('restaurant'))
    return render_template('create.html')
{
"api_name": "psycopg2.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template"... |
28078812679 | import numpy as np
from PIL import Image
# Load the source image and convert it to a numpy array (H x W x 3 for RGB).
img = Image.open("tiger.jpg")
img = np.array(img)
def rgb2gray(rgb):
    """Collapse an RGB array to grayscale using ITU-R BT.601 luma weights."""
    weights = np.array([0.299, 0.587, 0.114])
    return rgb @ weights
# Convert to grayscale and report the image dimensions.
img = rgb2gray(img)
row = img.shape[0]
col = img.shape[1]
print(row)
print(col)
# img.resize(1200,1920);
# row=img.shape[0]
# col=img.shape[1]
# print(row)
# print(col)
# NOTE(review): rgb2gray returns a float64 array; Image.fromarray on float64
# fails in some Pillow versions -- confirm, may need .astype(np.uint8).
Image.fromarray(img).show()
# --- 3x3 mean-pool downsampling --------------------------------------------
# Average each non-overlapping 3x3 tile into one output pixel, producing an
# image one third the size in each dimension.
filtered_image = []
# FIX: iterate only over complete 3x3 tiles.  The original ranged over the
# full height/width and indexed img[i+k][j+l] past the edge, raising
# IndexError whenever row or col was not a multiple of 3.  The unused
# new_row/new_col variables and the `filter` list (which shadowed the
# builtin `filter`) were removed.
for i in range(0, row - row % 3, 3):
    out_row = []
    for j in range(0, col - col % 3, 3):
        total = 0
        for k in range(3):
            for l in range(3):
                total += img[i + k][j + l]
        out_row.append(total / 9)
    filtered_image.append(out_row)
filtered_image = np.array(filtered_image)
print(filtered_image)
print(filtered_image.shape[0])
print(filtered_image.shape[1])
Image.fromarray(filtered_image).show()
{
"api_name": "PIL.Image.open",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 7,
... |
33807342463 | from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout
# Board cell encoding:
# 0 = empty
# 1 = X
# 2 = O
board = [0, 0, 0, 0, 0, 0, 0, 0, 0]

# Qt application, main window, and a 3x3 grid of buttons built from three
# horizontal rows stacked vertically; buttons[i] maps to board[i].
app = QApplication([])
window = QWidget()
layout1 = QHBoxLayout()
layout2 = QHBoxLayout()
layout3 = QHBoxLayout()
layoutMain = QVBoxLayout()
buttons = [QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' '), QPushButton(' ')]
layout1.addWidget(buttons[0])
layout1.addWidget(buttons[1])
layout1.addWidget(buttons[2])
layout2.addWidget(buttons[3])
layout2.addWidget(buttons[4])
layout2.addWidget(buttons[5])
layout3.addWidget(buttons[6])
layout3.addWidget(buttons[7])
layout3.addWidget(buttons[8])
layoutMain.addLayout(layout1)
layoutMain.addLayout(layout2)
layoutMain.addLayout(layout3)
window.setLayout(layoutMain)
# Precomputed best AI responses keyed by board tuple (filled by boardValid).
bestMoveCache = {}
# Event handlers -- one per board cell, each simply forwarding to makeMove.
# (`board` is only read and passed along here, so no `global` declaration
# is required.)
def button0_click():
    makeMove(0, board)


def button1_click():
    makeMove(1, board)


def button2_click():
    makeMove(2, board)


def button3_click():
    makeMove(3, board)


def button4_click():
    makeMove(4, board)


def button5_click():
    makeMove(5, board)


def button6_click():
    makeMove(6, board)


def button7_click():
    makeMove(7, board)


def button8_click():
    makeMove(8, board)
# Main algorithm
def minimax(p, b, depth=0):
    """Minimax search over board *b* with player *p* to move (2 = AI = maximizer).

    Returns (score, move, depth): score is +10 for an O win, -10 for an X
    win, 0 for a draw; move is the chosen cell index (-1 at terminal
    states).  NOTE(review): the third element is only meaningful at
    terminal positions -- during the search it is always returned as 0,
    exactly as in the original (`bestdepth` was never updated).
    """
    # Terminal states first.
    if boardWin(b, 1):
        return (-10, -1, depth)
    if boardWin(b, 2):
        return (10, -1, depth)
    if 0 not in b:
        return (0, -1, depth)
    empty_cells = [i for i, cell in enumerate(b) if cell == 0]
    maximizing = (p == 2)
    best_score = -10000 if maximizing else 10000
    best_move = -1
    for cell in empty_cells:
        b[cell] = p  # try the move in place...
        if maximizing:
            score, _, _ = minimax(1, b, depth + 1)
            if score > best_score:
                best_score, best_move = score, cell
        else:
            score, _, _ = minimax(2, b, depth + 1)
            if score < best_score:
                best_score, best_move = score, cell
        b[cell] = 0  # ...then undo it
    return (best_score, best_move, 0)
# Check if player p has a winning condition
def boardWin(b, p):
    """Return True if player *p* (1=X, 2=O) occupies any winning line of board *b*."""
    winning_lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (6, 4, 2),              # diagonals
    )
    return any(b[x] == p and b[y] == p and b[z] == p for x, y, z in winning_lines)
# Check if the board is in a valid state
def boardValid(b):
    """Return True iff board *b* is a reachable, in-progress position.

    Side effect: for every valid position, the best reply for the player to
    move is computed with minimax and stored in the module-level
    ``bestMoveCache`` keyed by the board tuple.  Finished boards (won or
    full) return False, so they are never cached.
    """
    global bestMoveCache
    # Tally both players' pieces; X (player 1) always moves first.
    countX = 0
    countO = 0
    for i in b:
        if i == 1:
            countX += 1
        elif i == 2:
            countO += 1
    # X either has the same number of pieces as O, or exactly one more.
    if (countX != countO) and (countX != countO + 1):
        return False
    if boardWin(b, 2):
        if boardWin(b,1):
            return False
        return countX == countO # for O win, counts need to be equal
    if boardWin(b, 1):
        if countX != countO + 1: # for X win, counts need to be unequal
            return False
    # no winner, but valid board
    print(b)
    # NOTE: valid *won* boards fall through to here and are rejected — no
    # further move is needed on them, so they are excluded from the cache.
    if boardWin(b, 1) or boardWin(b, 2):
        return False
    if 0 not in b:
        # board is not counted because we cannot make more moves
        return False
    # Calculate best moves
    if countX == countO + 1:
        # O (the AI, player 2) is to move.
        bestmove = minimax(2, b)
        print("2:", tuple(b), bestmove[1])
        bestMoveCache[tuple(b)] = bestmove[1]
    else:
        # X (player 1) is to move.
        # NOTE(review): the "2:" label below looks copy-pasted from the
        # branch above; this branch computes player 1's move.
        bestmove = minimax(1,b)
        print("2:", tuple(b), bestmove[1])
        bestMoveCache[tuple(b)] = bestmove[1]
    return True
# Generate all valid possible game states
def generateValidMoves(size):
    """Enumerate every base-3 board of *size* cells and count the valid ones.

    Each candidate board is passed to ``boardValid``, which (as a side
    effect) caches the best move for every valid in-progress position.
    """
    valid_total = 0
    for encoded in range(3 ** size):
        # Decode ``encoded`` into base-3 digits, most significant first.
        cells = []
        remaining = encoded
        for _ in range(size):
            remaining, digit = divmod(remaining, 3)
            cells.insert(0, digit)
        if boardValid(cells):
            valid_total += 1
    print(str(valid_total) + " valid boards!")
# Make a move on the board, first human then AI
def makeMove(pos, board):
    """Play the human move at *pos*, then answer with the cached AI move.

    Mutates *board* in place and updates the corresponding GUI buttons.
    Does nothing when the game is already decided, the board is full, or
    the clicked square is occupied.
    """
    #global bestMoveCache
    # Ignore clicks once the game is over or the board is full.
    if boardWin(board,1) or boardWin(board,2) or (0 not in board):
        return
    if board[pos] != 0:
        return
    else:
        # play human move
        board[pos] = 1
        buttons[pos].setText("X")
    res = boardWin(board,1)
    if res == True:
        print("Player 1 wins!")
        return
    elif (0 not in board):
        print("Tie!")
        return
    print(board)
    # play AI move: the reply is looked up in the cache precomputed by
    # generateValidMoves()/boardValid(); an uncached position means the
    # cache generation missed it.
    #print(tuple(board))
    #print(bestMoveCache)
    if tuple(board) not in bestMoveCache:
        print("AI FAIL")
        return
    else:
        aipos = bestMoveCache[tuple(board)]
    #aiscore, aipos, aidepth = minimax(2, board)
    print("AI move " + str(aipos))
    board[aipos] = 2
    buttons[aipos].setText("O")
    res = boardWin(board,2)
    if res == True:
        print("Player 2 wins!")
        return
    elif (0 not in board):
        print("Tie!")
        return
    print(board)
# Wire each grid button to its click handler.
buttons[0].clicked.connect(button0_click)
buttons[1].clicked.connect(button1_click)
buttons[2].clicked.connect(button2_click)
buttons[3].clicked.connect(button3_click)
buttons[4].clicked.connect(button4_click)
buttons[5].clicked.connect(button5_click)
buttons[6].clicked.connect(button6_click)
buttons[7].clicked.connect(button7_click)
buttons[8].clicked.connect(button8_click)
print("Go!")
# Precompute the AI's best reply for every reachable position (fills
# bestMoveCache), then run the GUI event loop until the window closes.
generateValidMoves(9)
print(minimax(2, [1, 2, 1, 2, 2, 1, 0, 0, 0]))
window.show()
app.exec_()
# After the GUI exits, dump the move cache as initialization data for an
# FPGA lookup table (iCE40 SB_RAM512X8 blocks, VHDL syntax).
buffer = ""
counter = 0
for i in bestMoveCache:
    x = list(i)
    # One-hot encode the board into two 9-bit planes: p1_bin marks player
    # 1's squares, p2_bin marks player 2's squares.
    p1_bin = []
    p2_bin = []
    for j in range(9):
        if x[j] == 1:
            p1_bin.append(1)
            p2_bin.append(0)
        elif x[j] == 2:
            p1_bin.append(0)
            p2_bin.append(1)
        else:
            p1_bin.append(0)
            p2_bin.append(0)
    # Emit a check line for this cache entry (apparently consumed by a
    # VHDL testbench -- confirm against the HDL side).
    print("sync_reset;\ncheck_mem("+str(counter)+",\""+''.join(str(e) for e in p1_bin)+"\",\""+''.join(str(e) for e in p2_bin)+"\","+str(bestMoveCache[i])+",\'1\'); -- " + str(i))
    # Pack one record: 2 padding bits + 9 bits per player + 4-bit best move.
    y = "00" + ''.join(str(e) for e in p1_bin) + ''.join(str(e) for e in p2_bin) + '{0:04b}'.format(bestMoveCache[i])
    #print(y, '{0:08x}'.format(int(y, 2)))
    buffer = y + buffer
    counter += 1
offset = len(buffer)
f = open("tictactoe.txt", "w")
done = False
# Split the packed bit string into 256-bit INIT_x generics across up to 32
# RAM block instantiations, consuming ``buffer`` from the end.
for i in range(32):
    f.write("ram512x8_inst_" + str(i) + " : SB_RAM512X8\n")
    f.write("generic map (\n")
    if done:
        break
    for j in range(16):
        if offset <= 0:
            done = True
            break
        cur = ""
        subtract = min(offset, 256)
        offset -= subtract
        # Render this (up to) 256-bit slice as a 64-hex-digit INIT constant.
        cur += '{0:064X}'.format(int(buffer[offset:offset+subtract], 2))
        print(cur)
        f.write("INIT_" + '{0:01X}'.format(j) + " => X\"" + cur + "\"")
        if j == 15:
            f.write("\n")
        else:
            f.write(",\n")
    f.write(")\n")
    f.write("port map (\nRDATA => RDATA_a("+str(i)+"),\nRADDR => RADDR_c,\nRCLK => RCLK_c,\nRCLKE => RCLKE_c("+str(i)+"),\nRE => RE_c("+str(i)+"),\nWADDR => (others => \'0\'),\nWCLK=> \'0\',\nWCLKE => \'0\',\nWDATA => (others => \'0\'),\nWE => \'0\'\n);\n")
f.close()
| j-tetteroo/tictactoe-fpga | python/tictactoe.py | tictactoe.py | py | 7,748 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 12,
"usage_type": "call"
},
{
"a... |
22501232171 | """ Module to implement Views for all API Queries"""
from rest_framework.views import APIView
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.models import Token
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework import status, generics
from django.shortcuts import get_object_or_404
from django.contrib.auth import authenticate
from django.contrib.auth.models import Group
from .serializers import UserSerializer, CustomerSerializer, ManagerSerializer, AdminSerializer, RoomSerializer, SlotSerializer, ReservationLinkSerializer, ReservationSerializer, ActiveReservationSerializer, EmployeeIDSerializer
from users.models import User, Customer, Manager, Admin, EmployeeID
from users.constants import EMPLOYEE_PREFIXES
from manager_iface.models import Room, Slot
from customer_iface.utilities import send_generated_key
from customer_iface.models import IsolatedResData, Reservation
from datetime import datetime
class GenerateAuthToken(APIView):
    """ Class based view to display how to create an API Authentication Token by GET request
    and Generate a token if user is admin by POST request
    """
    def get(self, request):
        """Describe, in the response body, how to obtain a token via POST."""
        ret = dict()
        ret['message'] = "Obtain or view your API Authentication Token if you are an admin by sending a POST request to this URL"
        ret['format'] = "Required JSON format - {'email':<your_email_id>, 'password':<your_password>}"
        ret['example'] = "{'email':'admin@gmail.com', 'password':'secret'}"
        return Response(ret, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        """Issue (or fetch) an auth token for an admin user.

        Expects ``email`` and ``password`` in the request body.  Bug fix:
        a missing field used to raise ``KeyError`` (HTTP 500); it now
        produces a clean 400 response.
        """
        email = request.data.get('email')
        password = request.data.get('password')
        if not email or not password:
            return Response({'error': "Email and password are required"},
                            status=status.HTTP_400_BAD_REQUEST)
        # Getting User
        try:
            this_user = User.objects.get(email=email)
        except User.DoesNotExist:
            return Response({'error': "User Not Found"}, status=status.HTTP_404_NOT_FOUND)
        # Verifying Password
        cnf_user = authenticate(email=this_user.email, password=password)
        if not cnf_user:
            return Response({'error': "Authentication Failed"}, status=status.HTTP_401_UNAUTHORIZED)
        # Checking if admin
        if Group.objects.get(user=this_user).name != "AdminPrivilege":
            return Response({'error': "Must be admin"}, status=status.HTTP_401_UNAUTHORIZED)
        # Generate and return authentication token (reuses an existing one).
        token, created = Token.objects.get_or_create(user=this_user)
        return Response({
            'token': token.key,
            'email': this_user.email
        }, status=status.HTTP_200_OK)
class UserHandler(APIView):  # For a list of users
    """List every registered user through GET requests."""
    serializer_class = UserSerializer
    queryset = User.objects.all()

    def get(self, request, format=None):
        """Serialize and return all User records."""
        serializer = UserSerializer(
            User.objects.all(), many=True, context={'request': request}
        )
        return Response(serializer.data)
# CANNOT CREATE USER DIRECTLY, HAS TO BE CUSTOMER, MANAGER or ADMIN
class UserDetail(generics.RetrieveAPIView):  # Read-Only for an individual user
    """Retrieve (GET) or delete (DELETE) a single User instance by id."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    lookup_field = 'id'

    def delete(self, request, id):
        """Remove the user identified by *id*; 404 when no such user exists."""
        try:
            target = User.objects.get(id=id)
        except User.DoesNotExist:
            return Response({"message": "User not found."}, status=404)
        target.delete()
        return Response({"message": "User and relevant data have been deleted."}, status=204)
class CustomerHandler(APIView):  # For a list of users
    """ Class based API View to handle listing and creation of Customers
    through GET and POST requests
    """
    serializer_class = CustomerSerializer
    queryset = Customer.objects.all()

    def get(self, request, format=None):
        """List every customer."""
        users = Customer.objects.all()
        serializer = CustomerSerializer(users, many=True, context={'request': request})  # Since there can be multiple users
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a customer together with its underlying User record.

        Bug fix: ``request.data.pop('email')`` / ``pop('name')`` raised
        ``KeyError`` (HTTP 500) when a field was missing; defaults of
        ``None`` now let serializer validation report a clean 400.
        """
        user_data = dict()
        user_data['email'] = request.data.pop('email', None)
        user_data['name'] = request.data.pop('name', None)
        user_data['password'] = request.data.pop('password', None)  # Password Required is tested here
        if not user_data['password']:
            return Response({"error": "Password is required"}, status=status.HTTP_400_BAD_REQUEST)
        user_serial = (UserHandler().serializer_class)(data=user_data, context={'request': request})
        if user_serial.is_valid():
            serializer = (self.serializer_class)(data=request.data, context={'request': request})
            if serializer.is_valid():
                # Save only after both serializers have validated.
                user_serial.save(is_staff=False, is_superuser=False)
                user = User.objects.get(email=user_data['email'])
                serializer.save(instance=user)
                cust_group = Group.objects.get(name="CustomerPrivilege")  # Adding to Customer Group
                cust_group.user_set.add(user)
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            err = serializer.errors
        else:
            err = user_serial.errors
        return Response(err, status=status.HTTP_400_BAD_REQUEST)
class CustomerDetail(generics.RetrieveDestroyAPIView): # Read-Only for an individual customer
    """ Class based API View to display and delete specific Customer user
    details through GET and DELETE requests
    """
    queryset = Customer.objects.all()
    serializer_class = CustomerSerializer
    custom_lookup_field = 'id'
    def get_object(self): # OVERRIDING the get_object method to define customised object lookup
        # Resolve the id against the User table, then follow the relation
        # to the Customer profile.
        queryset = User.objects.all()
        filter = dict()
        field = self.custom_lookup_field
        filter[field] = self.kwargs[field]
        user = get_object_or_404(queryset, **filter)
        self.check_object_permissions(self.request, user)
        return user.customer
    def delete(self, request, id):
        # Deleting the User removes the linked Customer record as well
        # (presumably via the model's on-delete cascade -- TODO confirm).
        try:
            user = User.objects.get(id=id)
        except User.DoesNotExist:
            return Response({"message": "Customer not found."}, status=404)
        else:
            user.delete()
            return Response({"message": "Customer and relevant data have been deleted."}, status=204)
class ManagerHandler(APIView): # For a list of users
    """ Class based API View to handle listing and creation of Managers
    through GET and POST requests
    """
    serializer_class = ManagerSerializer
    queryset = Manager.objects.all()
    def id_exists(self,value): # TO VERIFY IF EMP_ID IS ALREADY ASSIGNED TO SOMEONE
        # True when some Manager already holds this employee ID.
        if(Manager.objects.filter(emp_id=value).count()==0):
            return False
        else:
            return True
    def id_valid(self,value): # TO VERIFY IF EMP_ID IS VALID
        # The ID must exist in the EmployeeID table and carry the manager
        # prefix.
        # NOTE(review): "MAN" is hardcoded although EMPLOYEE_PREFIXES is
        # imported -- consider deriving the prefix from that mapping.
        existing_ids = tuple(map(str,EmployeeID.objects.all()))
        if(value in existing_ids):
            if(value[:3]=="MAN"): # Must be manager type employee ID
                return True
            else:
                return False
        else:
            return False
    def get(self, request, format=None):
        # List every manager.
        users = Manager.objects.all()
        serializer = ManagerSerializer(users, many=True, context={'request':request}) # Since there can be multiple users
        return Response(serializer.data)
    def post(self, request, format=None):
        # Create a manager plus its underlying User record.
        # NOTE(review): pop('email')/pop('name') raise KeyError (HTTP 500)
        # when the field is absent; a default of None would let serializer
        # validation return a clean 400 (see CustomerHandler for the risk).
        user_data = dict()
        user_data['email'] = request.data.pop('email')
        user_data['name'] = request.data.pop('name')
        user_data['password'] = request.data.pop('password', None) # Password Required is tested here
        if(not user_data['password']): # PASSWORD AND EMP_ID CHECKED HERE
            return Response({"error":"Password is required"}, status=status.HTTP_400_BAD_REQUEST)
        id_check = request.data.pop('emp_id', None)
        if(not id_check):
            return Response({"error":"Employee ID is required"}, status=status.HTTP_400_BAD_REQUEST)
        if((self.id_exists(id_check)) or (not self.id_valid(id_check))):
            return Response({"error":"Employee ID is invalid"}, status=status.HTTP_400_BAD_REQUEST)
        empid_inst = EmployeeID.objects.get(emp_id=id_check) # GETTING EMPLOYEE_ID RECORD OBJECT
        user_serial = (UserHandler().serializer_class)(data=user_data, context={'request':request})
        if(user_serial.is_valid()):
            serializer = (self.serializer_class)(data=request.data, context={'request':request})
            if(serializer.is_valid()):
                user_serial.save(is_staff=True, is_superuser=False)
                user = User.objects.get(email=user_data['email']) # Saving after both serializers are valid
                empid_inst.assignee = user
                empid_inst.save() # Setting assignee for employee id instance
                serializer.save(instance=user, emp_id=empid_inst) # Saving User and Employee ID instances
                manager_group = Group.objects.get(name="ManagerPrivilege") # Adding to Manager Group
                manager_group.user_set.add(user)
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            else:
                err = serializer.errors
        else:
            err = user_serial.errors
        return Response(err, status=status.HTTP_400_BAD_REQUEST)
class ManagerDetail(generics.RetrieveDestroyAPIView): # Read-Only for an individual manager
    """ Class based API View to display and delete specific Manager user
    details through GET and DELETE requests
    """
    queryset = Manager.objects.all()
    serializer_class = ManagerSerializer
    custom_lookup_field = 'id'
    def get_object(self): # OVERRIDING the get_object method to define customised object lookup
        # Resolve the id against the User table, then follow the relation
        # to the Manager profile.
        queryset = User.objects.all()
        filter = dict()
        field = self.custom_lookup_field
        filter[field] = self.kwargs[field]
        user = get_object_or_404(queryset, **filter)
        self.check_object_permissions(self.request, user)
        return user.manager
    def delete(self, request, id):
        # Deleting the User removes the linked Manager record as well
        # (presumably via the model's on-delete cascade -- TODO confirm).
        try:
            user = User.objects.get(id=id)
        except User.DoesNotExist:
            return Response({"message": "Manager not found."}, status=404)
        else:
            user.delete()
            return Response({"message": "Manager and relevant data have been deleted."}, status=204)
class AdminHandler(APIView):  # For a list of users
    """ Class based API View to handle listing and creation of Admins
    through GET and POST requests
    """
    serializer_class = AdminSerializer
    queryset = Admin.objects.all()

    def id_exists(self, value):  # TO VERIFY IF EMP_ID IS ALREADY ASSIGNED TO SOMEONE
        """Return True when *value* is already bound to an Admin profile.

        Bug fix: the original queried ``Manager`` (copy-paste from
        ManagerHandler), so an ID already taken by an admin was never
        detected as taken.
        """
        return Admin.objects.filter(emp_id=value).count() != 0

    def id_valid(self, value):  # TO VERIFY IF EMP_ID IS VALID
        """Return True when *value* is a known, admin-type ("ADM") employee ID."""
        existing_ids = tuple(map(str, EmployeeID.objects.all()))
        # Must exist in the EmployeeID table and carry the admin prefix.
        return value in existing_ids and value[:3] == "ADM"

    def get(self, request, format=None):
        """List every admin."""
        users = Admin.objects.all()
        serializer = AdminSerializer(users, many=True, context={'request': request})  # Since there can be multiple users
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create an admin together with its underlying User record.

        Requires ``email``, ``name``, ``password`` and an unassigned,
        valid admin ``emp_id``.  Missing email/name now yield a 400 via
        serializer validation instead of a KeyError-driven 500.
        """
        user_data = dict()
        user_data['email'] = request.data.pop('email', None)
        user_data['name'] = request.data.pop('name', None)
        user_data['password'] = request.data.pop('password', None)  # Password Required is tested here
        if not user_data['password']:  # PASSWORD AND EMP_ID CHECKED HERE
            return Response({"error": "Password is required"}, status=status.HTTP_400_BAD_REQUEST)
        id_check = request.data.pop('emp_id', None)
        if not id_check:
            return Response({"error": "Employee ID is required"}, status=status.HTTP_400_BAD_REQUEST)
        if self.id_exists(id_check) or (not self.id_valid(id_check)):
            return Response({"error": "Employee ID is invalid"}, status=status.HTTP_400_BAD_REQUEST)
        empid_inst = EmployeeID.objects.get(emp_id=id_check)  # GETTING EMPLOYEE_ID RECORD OBJECT
        user_serial = (UserHandler().serializer_class)(data=user_data, context={'request': request})
        if user_serial.is_valid():
            serializer = (self.serializer_class)(data=request.data, context={'request': request})
            if serializer.is_valid():
                user_serial.save(is_staff=True, is_superuser=True)
                user = User.objects.get(email=user_data['email'])  # Saving after both serializers are valid
                empid_inst.assignee = user
                empid_inst.save()  # Setting assignee for employee id instance
                serializer.save(instance=user, emp_id=empid_inst)  # Saving User and Employee ID instances
                adm_group = Group.objects.get(name="AdminPrivilege")  # Adding to Admin Group
                adm_group.user_set.add(user)
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            err = serializer.errors
        else:
            err = user_serial.errors
        return Response(err, status=status.HTTP_400_BAD_REQUEST)
class AdminDetail(generics.RetrieveDestroyAPIView): # Read-Only for an individual admin
    """ Class based API View to display and delete specific Admin user
    details through GET and DELETE requests
    """
    queryset = Admin.objects.all()
    serializer_class = AdminSerializer
    custom_lookup_field = 'id'
    def get_object(self): # OVERRIDING the get_object method to define customised object lookup
        # Resolve the id against the User table, then follow the relation
        # to the Admin profile.
        queryset = User.objects.all()
        filter = dict()
        field = self.custom_lookup_field
        filter[field] = self.kwargs[field]
        user = get_object_or_404(queryset, **filter)
        self.check_object_permissions(self.request, user)
        return user.admin
    def delete(self, request, id):
        # Deleting the User removes the linked Admin record as well
        # (presumably via the model's on-delete cascade -- TODO confirm).
        try:
            user = User.objects.get(id=id)
        except User.DoesNotExist:
            return Response({"message": "Admin not found."}, status=404)
        else:
            user.delete()
            return Response({"message": "Admin and relevant data have been deleted."}, status=204)
class EmpidHandler(APIView):  # For a list of users
    """ Class based API View to handle listing and creation of Employee IDs
    through GET and POST requests
    """
    serializer_class = EmployeeIDSerializer
    queryset = EmployeeID.objects.all()

    def get(self, request, format=None):
        """List every employee ID record."""
        ids = EmployeeID.objects.all()
        serializer = EmployeeIDSerializer(ids, many=True, context={'request': request})
        return Response(serializer.data)

    def post(self, request, format=None):
        """Create a new employee ID of the requested type.

        Bug fixes: the failure branch referenced an undefined
        ``user_serial`` (NameError -> 500) and now returns
        ``serializer.errors``; a missing or unknown ``emp_type`` used to
        raise ``KeyError`` and now yields a clean 400.
        """
        emp_type = request.data.get('emp_type')
        if emp_type not in EMPLOYEE_PREFIXES:
            return Response({"error": "Valid emp_type is required"}, status=status.HTTP_400_BAD_REQUEST)
        creator = request.user  # Will be an Admin User only (Permission Controlled)
        serializer = (self.serializer_class)(data=request.data, context={'request': request})
        if serializer.is_valid():
            pre = EMPLOYEE_PREFIXES[emp_type]  # GENERATING EMPLOYEE ID
            # NOTE(review): count()+1 is race-prone under concurrent POSTs;
            # a unique constraint on emp_id would surface collisions.
            gen_empid = pre + (str(EmployeeID.objects.filter(emp_type=emp_type).count() + 1).rjust(3, '0'))
            serializer.save(emp_id=gen_empid, creator=creator)
            empid_inst = EmployeeID.objects.get(emp_id=gen_empid)
            send_generated_key(empid_inst)  # SEND AN EMAIL to ADMIN
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class EmpidDetail(generics.RetrieveDestroyAPIView):  # Read-Only for an individual employee ID
    """ Class based API View to display and delete specific Employee ID
    details through GET and DELETE requests. Deletion only results in deletion of
    the concerned employee User and his rooms but retains ID instance for reuse
    """
    queryset = EmployeeID.objects.all()
    serializer_class = EmployeeIDSerializer
    lookup_field = 'emp_id'

    def delete(self, request, emp_id):
        """Delete the assigned employee but keep the ID row for reuse.

        Bug fixes: the lookup raises ``EmployeeID.DoesNotExist``, but the
        old handler caught ``User.DoesNotExist``, so an unknown ID produced
        a 500; and an unassigned ID (``assignee is None``) crashed on
        ``None.delete()``.  Both now take the graceful path.
        """
        try:
            empid_inst = EmployeeID.objects.get(emp_id=emp_id)
        except EmployeeID.DoesNotExist:
            return Response({"message": "Employee ID not found."}, status=404)
        user = empid_inst.assignee
        if user is not None:
            user.delete()
            empid_inst.assignee = None
            # Delete the employee, unassign the ID; the ID itself remains
            # AVAILABLE FOR REUSE.
            empid_inst.save()
        return Response({"message": "Emplyee Deleted. ID Avalaible for Re-Assignment"}, status=204)
class RoomHandler(generics.RetrieveAPIView):
    """List every room through GET requests (token-authenticated)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Room.objects.all()

    def get(self, request, format=None):
        """Serialize and return all Room records."""
        serializer = RoomSerializer(
            Room.objects.all(), many=True, context={'request': request}
        )
        return Response(serializer.data)
class RoomDetail(generics.RetrieveDestroyAPIView):  # Read-Only for an individual room
    """ Class based API View to display and delete specific Room
    details through GET and DELETE requests
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Room.objects.all()
    serializer_class = RoomSerializer
    lookup_field = 'room_no'

    def delete(self, request, room_no):  # Overriding the default delete method
        """Delete a room; its reservations are cancelled by a signal.

        Bug fix: an unknown room_no used to raise ``Room.DoesNotExist``
        and surface as a 500; ``get_object_or_404`` now yields a clean 404.
        """
        this_room = get_object_or_404(self.queryset, room_no=room_no)
        # Reservation statuses flip to Cancelled via the pre_delete signal
        # attached to the Reservation model.
        this_room.delete()
        return Response({"message": "Room and relevant data have been deleted."}, status=204)
class SlotHandler(generics.RetrieveAPIView):
    """List every room slot through GET requests (token-authenticated)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Slot.objects.all()

    def get(self, request, format=None):
        """Serialize and return all Slot records."""
        serializer = SlotSerializer(
            Slot.objects.all(), many=True, context={'request': request}
        )
        return Response(serializer.data)
class SlotDetail(generics.RetrieveDestroyAPIView):
    """ Class based API View to display and delete specific Slot
    details through GET and DELETE requests
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Slot.objects.all()
    serializer_class = SlotSerializer
    lookup_field = 'id'

    def delete(self, request, id):  # Overriding the default delete method
        """Delete a slot; its reservations are cancelled by a signal.

        Bug fix: an unknown id used to raise ``Slot.DoesNotExist`` and
        surface as a 500; ``get_object_or_404`` now yields a clean 404.
        """
        this_slot = get_object_or_404(self.queryset, id=id)
        # Reservation statuses flip to Cancelled via the pre_delete signal
        # attached to the Reservation model.
        this_slot.delete()
        return Response({"message": "Slot and relevant data have been deleted."}, status=204)
class AllReservations(APIView): # Entry point listing the reservation categories
    """ Class based API View to handle listing all reservation type (past, future, etc)
    URLs through GET requests
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def get(self, request, format=None):
        # NOTE(review): a single-entry User queryset is used purely as a
        # vehicle for ReservationLinkSerializer, which presumably renders
        # hyperlinks to the per-category endpoints -- confirm against the
        # serializer definition.
        reserves = User.objects.filter(email=request.user.email) # Dummy QuerySet with on Entry
        serializer = ReservationLinkSerializer(reserves, many=True, context={'request':request})
        return Response(serializer.data)
class PastReservations(APIView): # For a list of users
    """ Class based API View to handle listing past reservations
    through GET request
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def get(self, request, format=None):
        today = datetime.date(datetime.now())
        now = datetime.time(datetime.now())
        # Past = an earlier date, or today with an end time already gone
        # by; ``|`` unions the two querysets.  Only still-"Active" records
        # count (cancelled ones are listed by CancelledReservations).
        reserves = IsolatedResData.objects.filter(date__lt=today,
                                                  status="Active") | (
                   IsolatedResData.objects.filter(date=today,
                                                  end_time__lt=now,
                                                  status="Active"))
        serializer = ReservationSerializer(reserves, many=True, context={'request':request})
        return Response(serializer.data)
class FutureReservations(APIView): # For a list of users
    """ Class based API View to handle listing future reservations
    through GET requests
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def get(self, request, format=None):
        today = datetime.date(datetime.now())
        now = datetime.time(datetime.now())
        # Future = a later date, or today with a slot that has not started
        # yet; ``|`` unions the two querysets.
        reserves = Reservation.objects.filter(date__gt=today) | (
                   Reservation.objects.filter(date=today,
                                              slot__start_time__gt=now)) # All reservations in this model are "Active"
        serializer = ActiveReservationSerializer(reserves, many=True, context={'request':request})
        return Response(serializer.data)
class OngoingReservations(APIView):  # For a list of users
    """ Class based API View to handle listing currently occupied reservations
    through GET requests
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def get(self, request, format=None):
        """List reservations that are occupied right now.

        Bug fix: the original OR-ed "started before now" with "ends after
        now", which also matched today's already-finished and not-yet-
        started reservations.  An ongoing reservation must satisfy both
        bounds, so the conditions are combined into one filter (AND).
        """
        today = datetime.date(datetime.now())
        now = datetime.time(datetime.now())
        reserves = IsolatedResData.objects.filter(date=today,
                                                  start_time__lte=now,
                                                  end_time__gte=now,
                                                  status="Active")
        serializer = ReservationSerializer(reserves, many=True, context={'request':request})
        return Response(serializer.data)
class CancelledReservations(APIView):  # For a list of users
    """List every reservation whose status is "Cancelled"."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        """Serialize and return all cancelled reservation records."""
        cancelled = IsolatedResData.objects.filter(status='Cancelled')
        payload = ReservationSerializer(
            cancelled, many=True, context={'request': request}
        ).data
        return Response(payload)
class InactiveReservationDetail(generics.RetrieveAPIView): # Read-Only for an individual reservation
    """ Class based API View to display individual Reservation
    through GET requests, either in Past or Cancelled
    """
    # Purely declarative DRF view: lookup by ``id`` against the isolated
    # (historical) reservation records; no custom behaviour needed.
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = IsolatedResData.objects.all()
    serializer_class = ReservationSerializer
    lookup_field = 'id'
class ActiveReservationManage(generics.RetrieveDestroyAPIView):
    """ Class based API View to handle deletion and display of a specific Reservation
    through GET and DELETE requests
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Reservation.objects.all()
    serializer_class = ActiveReservationSerializer
    lookup_field = 'id'
    def delete(self, request, id): # Overriding the default delete method
        # NOTE(review): ``queryset.get`` raises Reservation.DoesNotExist
        # (HTTP 500) for an unknown id -- consider get_object_or_404.
        this_reserve = self.queryset.get(id=id)
        # Simoultaneously setting status as Cancelled in Isolated Reservation Data
        # This is done using a pre_delete signal attached to Reservation model
        this_reserve.delete()
        return Response({"message": "Reservation has been deleted."}, status=204)
| karthik-d/room-slot-booking | roomBookingManager/api/views.py | views.py | py | 22,909 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 32,
"usage_type": "attri... |
36120976493 | from typing import Any, Dict
import os
import sys
from forte.data.caster import MultiPackBoxer
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.data.readers import OntonotesReader, DirPackReader
from forte.data.readers.deserialize_reader import MultiPackDirectoryReader
from forte.pipeline import Pipeline
from forte.processors.base import MultiPackProcessor, MultiPackWriter
from forte.processors.writers import PackNameJsonPackWriter
from fortex.nltk import NLTKWordTokenizer, NLTKPOSTagger, NLTKSentenceSegmenter
from ft.onto.base_ontology import EntityMention, CrossDocEntityRelation
class PackCopier(MultiPackProcessor):
    """
    Copy the text and entity mentions of an existing pack into a new pack.
    """

    def _process(self, input_pack: MultiPack):
        source: DataPack = input_pack.get_pack(self.configs.copy_from)
        target: DataPack = input_pack.add_pack(self.configs.copy_to)
        target.set_text(source.text)

        base_name = source.pack_name
        target.pack_name = "copy" if base_name is None else base_name + "_copy"

        # Mirror every entity mention span into the freshly created pack.
        for mention in source.get(EntityMention):
            EntityMention(target, mention.begin, mention.end)

    @classmethod
    def default_configs(cls) -> Dict[str, Any]:
        return {"copy_from": "default", "copy_to": "duplicate"}
class ExampleCoreferencer(MultiPackProcessor):
    """
    Mark example coreference relations between the two packs in a multi-pack.
    """

    def _process(self, input_pack: MultiPack):
        default_pack = input_pack.get_pack("default")
        duplicate_pack = input_pack.get_pack("duplicate")

        # Pair mentions positionally; the duplicate pack mirrors the default.
        mention_pairs = zip(
            default_pack.get(EntityMention), duplicate_pack.get(EntityMention)
        )
        for left, right in mention_pairs:
            relation = CrossDocEntityRelation(input_pack, left, right)
            relation.rel_type = "coreference"
            input_pack.add_entry(relation)
class ExampleCorefCounter(MultiPackProcessor):
    """Count cross-document coreference relations across all multi-packs."""

    def __init__(self):
        super().__init__()
        self.coref_count = 0

    def _process(self, input_pack: MultiPack):
        relations = input_pack.get_entries_of(CrossDocEntityRelation)
        self.coref_count += sum(1 for _ in relations)

    def finish(self, _):
        print(f"Found {self.coref_count} pairs in the multi packs.")
def pack_example(input_path, output_path):
    """
    This example reads data from input path and serializes to output path.

    Args:
        input_path: Directory containing the Ontonotes-format input data.
        output_path: Directory where the serialized packs are written.

    Returns:
        None. One JSON file per pack is written as a side effect.
    """
    print("Pack serialization example.")
    # Read Ontonotes data and annotate it with the NLTK-based processors.
    nlp = Pipeline[DataPack]()
    nlp.set_reader(OntonotesReader())
    nlp.add(NLTKSentenceSegmenter())
    nlp.add(NLTKWordTokenizer())
    nlp.add(NLTKPOSTagger())
    # This is a simple writer that serialize the result to the current
    # directory and will use the DocID field in the data pack as the file name.
    nlp.add(
        PackNameJsonPackWriter(),
        {
            "output_dir": output_path,
            "indent": 2,
            "overwrite": True,
        },
    )
    nlp.run(input_path)
def multi_example(input_path, output_path):
    """
    This example reads data from input path, and writes multi pack output
    to output path.

    Args:
        input_path: Directory of serialized data packs (e.g. the output of
            ``pack_example``).
        output_path: Directory where the multi-pack output is serialized.

    Returns:
        None. Results are written to *output_path* as a side effect.
    """
    print("Multi Pack serialization example.")
    print(
        "We first read the data, and add multi-packs to them, and then "
        "save the results."
    )
    # Build a pipeline that boxes each pack into a multi-pack, copies the
    # pack, links entities across the two copies and counts the links.
    coref_pl = Pipeline()
    coref_pl.set_reader(DirPackReader())
    coref_pl.add(MultiPackBoxer())
    coref_pl.add(PackCopier())
    coref_pl.add(ExampleCoreferencer())
    coref_pl.add(ExampleCorefCounter())
    coref_pl.add(
        MultiPackWriter(),
        config={
            "output_dir": output_path,
            "indent": 2,
            "overwrite": True,
        },
    )
    coref_pl.run(input_path)
    print(
        "We can then load the saved results, and see if everything is OK. "
        "We should see the same number of multi packs there. "
    )
    # Read the serialized multi-packs back and re-count the relations to
    # verify the round trip.
    reading_pl = Pipeline()
    reading_pl.set_reader(
        MultiPackDirectoryReader(),
        config={
            "multi_pack_dir": os.path.join(output_path, "multi"),
            "data_pack_dir": os.path.join(output_path, "packs"),
        },
    )
    reading_pl.add(ExampleCorefCounter())
    reading_pl.run()
def main(data_path: str):
    """Run the pack example, then the multi-pack example on its output."""
    pack_dir = "pack_out"
    multipack_dir = "multi_out"
    pack_example(data_path, pack_dir)
    multi_example(pack_dir, multipack_dir)


if __name__ == "__main__":
    main(sys.argv[1])
| asyml/forte | examples/serialization/serialize_example.py | serialize_example.py | py | 4,579 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "forte.processors.base.MultiPackProcessor",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "forte.data.multi_pack.MultiPack",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "forte.data.data_pack.DataPack",
"line_number": 23,
"usage_type":... |
8798334453 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""key.py: Handles the keysubmissions for groups"""
import json
import os
import sqlite3
import sys
import string
import auth
from httperror import HTTPError
RETURN_HEADERS = []
def __do_get():
    """Reject GET requests with a 403; this CGI endpoint is POST-only."""
    RETURN_HEADERS.append('Status: 403')
    return "This script is NOT get-able"
def __do_post():
    """Decode the POST body and dispatch it to the matching action handler."""
    raw_body = sys.stdin.read()
    try:
        payload = json.loads(raw_body)
    except json.JSONDecodeError:
        RETURN_HEADERS.append('Status: 400')
        return "Malformed Request. Data not JSON-decodable"
    if 'action' in payload and payload['action'] == 'submitkey':
        return __submitkey(payload)
    if 'action' in payload and payload['action'] == 'groupstatus':
        return __groupstatus(payload)
    RETURN_HEADERS.append('Status: 500')
    return "Not implemented"
def __submitkey(postdata):
    """Validate the required fields, then delegate to ``submitkey``."""
    if 'authtoken' not in postdata or 'key' not in postdata:
        raise HTTPError("Missing Required Attributes")
    return submitkey(postdata['authtoken'], postdata['key'])
def cleanstring(dirtystring):
    """Normalize a submitted key: lowercase, strip, drop non-printable chars.

    Fix: the original bound its intermediate result to a local named
    ``cleanstring``, shadowing the function itself; the rewrite uses a
    distinct name and a generator expression instead of filter+lambda.
    """
    normalized = dirtystring.lower().strip()
    printable = set(string.printable)
    return ''.join(ch for ch in normalized if ch in printable)
def submitkey(authtoken, key):
    """Verify a key submission, record it, and return the group's state.

    Raises HTTPError carrying the current group state when the token is
    bad (401), the guess budget is exhausted (403), the key was already
    claimed or already tried and wrong (410), or the key is unknown (400).
    """
    group = auth.verify_token(authtoken)
    if group is None:
        raise HTTPError("Invalid Authtoken, please relogin", 401)
    group_status = json.loads(groupstate(authtoken))
    if int(group_status['remain_guess']) < 1:
        raise HTTPError(groupstate(authtoken), 403)
    key = cleanstring(key)
    database = sqlite3.connect('database.sqlite3')
    # Reject keys this group has already claimed successfully.
    submitted = database.execute(('SELECT count() FROM claims'
                                  ' WHERE groupname=:groupname AND key=:key'),
                                 {"groupname": group, "key": key}).fetchone()[0]
    if submitted != 0:
        raise HTTPError(groupstate(authtoken), 410)
    # Reject keys this group has already tried and gotten wrong.
    badkey = database.execute(('SELECT count() FROM badkeys'
                               ' WHERE groupname=:groupname AND key=:key'),
                              {"groupname": group, "key": key}).fetchone()[0]
    if badkey != 0:
        raise HTTPError(groupstate(authtoken), 410)
    # Unknown keys are logged as bad guesses; they count against the
    # group's guess budget (see groupstate's 10-minute window).
    keyexist = database.execute('SELECT count() FROM keys WHERE LOWER(key)=:key',
                                {'key': key}).fetchone()[0]
    if keyexist == 0:
        database.execute('INSERT INTO badkeys(groupname, key) values(:groupname, :key)',
                         {'groupname': group, 'key': key})
        database.commit()
        raise HTTPError(groupstate(authtoken), 400)
    # Valid new key: record the claim and return the refreshed state.
    database.execute('INSERT INTO claims(groupname, key) values(:groupname, :key)',
                     {'groupname': group, 'key': key})
    database.commit()
    return groupstate(authtoken)
def __groupstatus(request):
    """Look up the group status for the authtoken carried in *request*."""
    if 'authtoken' not in request:
        raise HTTPError("Missing Authtoken")
    status = groupstate(request['authtoken'])
    if status is None:
        raise HTTPError("Authtoken is not valid. Please relogin")
    return status
def groupstate(authtoken):
    """Calculates the groups state, and returns it as a json-string.

    Returns None when the authtoken does not resolve to a group.
    The state includes remaining guesses (3 minus bad guesses in the
    last 10 minutes) and the epoch time when a new guess frees up.
    """
    group = auth.verify_token(authtoken)
    if group is None:
        return None
    database = sqlite3.connect('database.sqlite3')
    try:
        status = database.execute(('SELECT count(),'
                                   ' strftime("%s", datetime(min(submittime), "+10 minute"))'
                                   ' FROM badkeys WHERE'
                                   ' groupname=:groupname AND '
                                   ' submittime > datetime("now", "-10 minute")'),
                                  {"groupname": group}).fetchone()
    finally:
        # FIX: close the connection instead of leaking it on every call.
        database.close()
    returnvalue = {
        "group": group,
        "points": get_all_points(),
        "remain_guess": 3 - status[0],
        # strftime returns a string when rows matched, None otherwise;
        # isinstance replaces the `type(x) == str` anti-pattern.
        "time_to_new_guess": int(status[1]) if isinstance(status[1], str) else None
    }
    return json.dumps(returnvalue)
def get_all_points():
    """Retrieves a calculated list of all groups points.

    Claims are ordered by key, then by catch time: the first three groups
    to claim a key earn its first/second/third point values, later groups
    earn the 'other' value.

    :return: list of ``{"name": <group>, "score": <points>}`` dicts
    """
    database = sqlite3.connect('database.sqlite3')
    try:
        allrows = database.execute(('select cl.groupname, cl.catchtime, ke.key,'
                                    ' ke.first, ke.second, ke.third, ke.other'
                                    ' from claims as cl inner join keys as ke'
                                    ' on (ke.key == cl.key collate nocase)'
                                    ' order by ke.key asc, cl.catchtime asc;')).fetchall()
    finally:
        # FIX: close the connection instead of leaking it on every call.
        database.close()
    groups = {}
    current_key = None
    num_in_key = 0
    for groupname, _catchtime, row_key, first, second, third, other in allrows:
        if row_key != current_key:
            # New key: restart the first/second/third ranking counter.
            num_in_key = 0
            current_key = row_key
        groups.setdefault(groupname, 0)
        num_in_key += 1
        if num_in_key == 1:
            points = first
        elif num_in_key == 2:
            points = second
        elif num_in_key == 3:
            points = third
        else:
            points = other
        groups[groupname] += points
    return [{"name": name, "score": score} for name, score in groups.items()]
def __main():
    """Dispatch the CGI request to the GET or POST handler.

    Raises HTTPError when REQUEST_METHOD is missing or unsupported.
    """
    method = os.environ.get('REQUEST_METHOD')
    if method is None:
        raise HTTPError("Missing REQUEST_METHOD")
    if method == 'GET':
        return __do_get()
    if method == 'POST':
        return __do_post()
    # FIX: corrected the "Undhandled" typo in the error message.
    raise HTTPError("Unhandled REQUEST_METHOD")
# CGI entry point: run the dispatcher, turn HTTPError into a status header,
# then emit headers followed by the response body.
if __name__ == '__main__':
    try:
        RESPONSE = __main()
    except HTTPError as err:
        # Use the error's status when it carries one, otherwise a generic 400.
        if err.status:
            RETURN_HEADERS.append('Status: %d' % err.status)
        else:
            RETURN_HEADERS.append('Status: 400')
        RESPONSE = err.message
    NUM_HEADERS = len(RETURN_HEADERS)
    if NUM_HEADERS == 0:
        # No handler set a status, so the request succeeded.
        print('Status: 200')
    else:
        for header in RETURN_HEADERS:
            print(header)
    print('Content-Length: %d' % len(RESPONSE))
    print()
    print(RESPONSE)
| daGnutt/skvaderhack | api/key.py | key.py | py | 6,279 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.read",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"... |
37219887215 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Sets Spanish verbose names for the UserGiallorosso model's admin display.

    dependencies = [
        ('community', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='usergiallorosso',
            options={'verbose_name': 'Usuario Giallorosso', 'verbose_name_plural': 'Usuarios Giallorossos'},
        ),
    ]
| vaquer/ilGiallorosso | ilGiallorosso/community/migrations/0002_auto_20151127_1820.py | 0002_auto_20151127_1820.py | py | 434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterModelOptions",
"line_number": 14,
"usage_type": "call"
... |
28592053848 | import random
import time
#These have to do with importing the pictures
import io
import os
import PySimpleGUI as sg
import PIL
from PIL import Image
# Global image frame counter; chr(49) == '1', so frames are HM_1.png, HM_2.png, ...
n = 49
# NOTE(review): hard-coded absolute Windows path — breaks on any other machine.
image = Image.open(r'C:\Users\carte\OneDrive\Desktop\Coding\Hangman\HM_' + chr(n) + '.png')
image.thumbnail((200, 200))
bio = io.BytesIO()
image.save(bio, format="PNG")
sg.theme ( 'DarkPurple' )
# Window layout: image, three output text rows, input field, and buttons.
layout =[ [sg.Image( key="-IMAGE-")],
          [sg.Text(('')), sg.Text(size=(50,1), key='-OUTPUT-')],
          [sg.Text(('')), sg.Text(size=(50,1), key='-OUTPUT2-')],
          [sg.Text(('')), sg.Text(size=(50,1), key='-OUTPUT3-')],
          [sg.Input(key='-IN-', do_not_clear=False)],
          [sg.Button("ENTER"), sg.Exit("EXIT GAME")]]
window = sg.Window("Hangman Game", layout, margins = (150, 150),finalize=True, resizable = True)
window['-OUTPUT-'].update('Hello, please enter a word or phrase for the Hangman Game')
#Opens the window and asks for an input to play hangman
def getInput():
    """Prompt loop: read the answer phrase, validate it, then start the game.

    Rejects empty input, input containing digits, and input ending with a
    space. On a valid phrase, hands control to PlayGame().
    """
    valid = False
    values = ""
    length = 0
    while valid == False:
        event , values = window.read()
        # Re-validate the field contents after every ENTER press.
        if event == "ENTER":
            length = len(values['-IN-'])
            inputString = values['-IN-']
            if (length == 0 or (has_numbers(values['-IN-']) == True)):
                # Empty input or digits present: not a playable phrase.
                print('Invalid Entry')
                window['-OUTPUT2-'].update('Invalid Entry - No Input')
            # Guarded by the length check above so inputString[-1] is safe.
            else:
                last_char = inputString[-1]
                if ( last_char == ' '):
                    # A trailing space would create an unguessable blank.
                    print ( "Invalid Entry - Ends with a Space" )
                    window['-OUTPUT2-'].update('Invalid Entry - Ends with a Space')
                else:
                    print('Valid Entry')
                    window['-OUTPUT2-'].update('')
                    PlayGame(values['-IN-'])
                    valid = True
        if event == sg.WIN_CLOSED or event =='EXIT GAME':
            break
def PlayGame(inputString):
    """Run the guessing loop for *inputString* until win, loss, or exit.

    Spaces count as pre-revealed characters; each wrong guess advances the
    global image counter ``n``; the game is lost when ``n`` reaches 55.
    """
    x = 0
    correctGuesses = 0
    # Switch the window from the prompt screen to the game screen.
    Refresh( n )
    arr = list(inputString)
    arrGuessed = []
    # Spaces are revealed from the start, so they count as already correct.
    correctGuesses = numSpaces(arr)
    root = arrayToList(arr, len(arr))
    String = update(root)
    window['-OUTPUT2-'].update(String)
    # Main guessing loop: one iteration per window event.
    while(correctGuesses != len(arr)):
        x = 0
        event , values = window.read()
        inVal = values['-IN-']
        guessed = alreadyGuessed(arrGuessed, inVal )
        if(event == sg.WIN_CLOSED or event =='EXIT GAME'):
            break
        elif( n == 55 ):
            # Final hangman frame reached: the game is lost.
            newImage(n)
            GameLost(inputString)
            return 0
        elif( len(inVal) == 1 and (inVal.isdigit() == False and guessed == False)):
            arrGuessed.append(inVal)
            # NOTE(review): prints the function object, not the list —
            # likely a debug leftover meant to be print(arrGuessed).
            print(alreadyGuessed)
            print("Valid Input")
            root, x = CheckGuess( inVal, root )
            if(x == 0):
                # No letters revealed: advance the hangman drawing.
                print("Incorrect Guess")
                newImage(n)
            window['-OUTPUT2-'].update(update(root))
            correctGuesses = correctGuesses + x
        else:
            print( "Invalid" )
    if(correctGuesses == len(arr)):
        window['-OUTPUT-'].update("You won the Game!")
        window['-OUTPUT2-'].update("The answer was: "+ inputString)
        window['-OUTPUT3-'].update("")
        # Two reads keep the result on screen until the user interacts twice.
        event , values = window.read()
        event , values = window.read()
def newImage(i):
    """Advance the global frame counter and show the next hangman image.

    The ``i`` parameter is unused; the function works on the global ``n``.
    """
    global n
    print( n )
    n +=1
    # chr(n) maps the counter to the frame's filename digit/character.
    # NOTE(review): hard-coded absolute Windows path — breaks elsewhere.
    image = Image.open(r'C:\Users\carte\OneDrive\Desktop\Coding\Hangman\HM_' + chr(n) + '.png')
    image.thumbnail((200, 200))
    bio = io.BytesIO()
    image.save(bio, format="PNG")
    window['-IMAGE-'].update(data=bio.getvalue())
def alreadyGuessed(arr, char):
    """Return True if *char* was already guessed (i.e. is in *arr*).

    Replaces a hand-rolled linear scan with the idiomatic membership test;
    behavior (and boolean return) is identical.
    """
    return char in arr
#Checks if the input has numbers in ( we don't want numbers in their)
def has_numbers(inputString):
    """Return True when *inputString* contains at least one digit."""
    for character in inputString:
        if character.isdigit():
            return True
    return False
#Now it will update the text.
#Needs to update the text box to get rid of it
#Needs to input my picture
def Refresh( a ):
    """Switch the window to the game screen: reset the image and the prompt.

    The ``a`` parameter is unused; relies on module globals window and bio.
    """
    window['-IMAGE-'].update(data=bio.getvalue())
    window['-OUTPUT-'].update(("Please Enter a letter to guess"))
def GameLost(inputString):
    """Show the loss screen with the answer, then wait for one event."""
    window['-OUTPUT-'].update("You fucking lost the Game!")
    window['-OUTPUT2-'].update("The answer was: "+ inputString)
    window['-OUTPUT3-'].update("You suck")
    # Blocks until the user interacts so the result stays visible.
    event , values = window.read()
def playAgain():
    """Reset the global image counter back to the first hangman frame."""
    global n
    n = 49  # chr(49) == '1', i.e. HM_1.png
#---------------------------------------------------------------------
#--------Input, Node, Checkguess, New Image Functions-----------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Representation of a node
class Node:
    """Singly linked list node holding one character of the answer phrase."""

    def __init__(self, val, show = False):
        self.val = val    # the character this node stores
        self.next = None  # next node in the list (None == tail)
        # FIX: the constructor previously ignored the `show` argument and
        # always set False; now the default still yields False, so existing
        # callers (e.g. insert()) are unaffected.
        self.show = show  # whether the character has been revealed
# Function to insert node
def insert(root, item):
    """Append *item* to the list rooted at *root*; spaces start revealed.

    Returns the (possibly new) root of the list.
    """
    node = Node(item)
    # Spaces are visible from the start; every other char begins hidden.
    node.show = (item == ' ')
    if root is None:
        return node
    tail = root
    while tail.next is not None:
        tail = tail.next
    tail.next = node
    return root
def update(root):
    """Render the current guess state as a display string.

    Revealed characters appear prefixed by a space; hidden ones as " _ ".
    The two revealed-character branches in the original were byte-identical
    (the space/non-space distinction did nothing), so they are collapsed.
    """
    Str = ""
    while (root != None):
        if( root.show == True ):
            Str = Str + " " + root.val
        else:
            Str = Str + " _ "
        root = root.next
    return Str
def arrayToList(arr, n):
    """Build a linked list from the first *n* items of *arr*."""
    root = None
    index = 0
    while index < n:
        root = insert(root, arr[index])
        index += 1
    return root
#Finds the number of spaces in the array of characters
def numSpaces(arr):
    """Count the space characters in *arr* (a sequence of characters).

    FIX: the original did ``arr[x]`` with *x* being an element (not an
    index), which raises TypeError for character lists; compare the
    element itself instead. (This def is shadowed by a later duplicate,
    so the bug was latent dead code.)
    """
    count = 0
    for ch in arr:
        if ch == ' ':
            count += 1
    return count
def CheckGuess( char, head ):
    """Reveal every hidden node matching *char* (case-insensitively).

    Returns the unchanged head and the number of newly revealed nodes.
    """
    hits = 0
    node = head
    while node is not None:
        matches = node.val in (char, char.upper(), char.lower())
        if matches and node.show == False:
            hits = hits + 1
            node.show = True
        node = node.next
    print( "You found ", hits ," instances of -" , char , "-" )
    return head, hits
def numSpaces(array):
    """Return how many elements of *array* are a space character."""
    return sum(1 for element in array if element == ' ')
# Script entry: run the prompt/game loop, then tear the window down.
getInput()
print( "Window Closed")
window.close()
| CarterDFluckiger/Hangman | Hangman.py | Hangman.py | py | 7,079 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.theme",
"line_num... |
2884245779 | # coding:utf-8
# @Time : 2019-04-28 11:13
# @Author: Xiawang
from utils.util import delete_requests, get_app_header
host = 'https://gate.lagou.com/v1/entry'  # API gateway base URL
header = get_app_header(100018934)  # app request headers; built once at import time
def delete_orderId(orderIds):
    """Issue a DELETE for the given delivery-record order ids."""
    url = "{base}/order/orderId?orderIds={orderIds}".format(
        base=host, orderIds=orderIds)
    return delete_requests(url=url, remark='删除投递记录')
| Ariaxie-1985/aria | api_script/entry/order/orderId.py | orderId.py | py | 391 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.util.get_app_header",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.util.delete_requests",
"line_number": 13,
"usage_type": "call"
}
] |
13395821581 | """
Entrypoints.
@author: gjorando
"""
import os
import json
from datetime import datetime
import torch
import click
from PIL import Image
import neurartist
def odd_int(value):
    """click type: parse *value* as an int, rejecting even numbers."""
    parsed = int(value)
    if parsed % 2 != 0:
        return parsed
    raise ValueError("Odd number required")
def threshold_or_neg(value):
    """click type: a float threshold (<= 1) or any negative sentinel."""
    parsed = float(value)
    if parsed <= 1:
        return parsed
    raise ValueError("Value should be between 0 and 1, or negative")
def list_parameter(value_type=int):
    """Build a click converter turning a JSON list string into typed values.

    The returned callable yields None for a None input, otherwise parses
    the string as JSON, asserts it is a list, and coerces each element
    with *value_type*.
    """
    def convert(param_value):
        if param_value is None:
            return None
        parsed = json.loads(param_value)
        assert isinstance(parsed, list), "parameter should be a list"
        return [value_type(item) for item in parsed]
    return convert
@click.command()
# General
@click.option(
    "--content", "-c",
    "content_path",
    required=True,
    type=click.Path(exists=True, dir_okay=False),
    help="Content image"
)
@click.option(
    "--style", "-s",
    "style_path",
    required=True,
    type=click.Path(exists=True, dir_okay=False),
    help="Style image"
)
@click.option(
    "--output", "-o",
    "output_path",
    default="./",
    type=click.Path(dir_okay=True, writable=True),
    help="Output path"
)
@click.option(
    "--size", "-S",
    "img_size",
    default=512,
    type=click.INT,
    help="Output size"
)
@click.option(
    "--epochs", "-e",
    "num_epochs",
    default=250,
    type=click.INT,
    help="Maximum number of epochs"
)
@click.option(
    "--trade-off",
    "trade_off",
    default=3,
    type=click.FLOAT,
    help="Trade-off between content (>1) and style (<1) faithfullness"
)
@click.option(
    "--init-random/--init-image",
    "random_init",
    default=False,
    help="Init optimizer either from random noise, or image (default)"
)
@click.option(
    "--init-image-path",
    "random_init_path",
    default=None,
    type=click.Path(exists=True, dir_okay=False),
    help="If --init-image is set, path to an image (default: content image)"
)
# Layers options
@click.option(
    "--content-layers",
    default=None,
    type=list_parameter(),
    help="Indexes of content layers (as a string representing a list)"
)
@click.option(
    "--style-layers",
    default=None,
    type=list_parameter(),
    help="Indexes of style layers (as a string representing a list)"
)
@click.option(
    "--content-weights",
    default=None,
    type=list_parameter(float),
    help="Content weights (as a string representing a list)"
)
@click.option(
    "--style-weights",
    default=None,
    type=list_parameter(float),
    help="Style weights (as a string representing a list)"
)
# Color control
@click.option(
    "--color-control",
    default="none",
    type=click.Choice(["histogram_matching", "luminance_only", "none"]),
    help="Color control method (default: none)"
)
@click.option(
    "--cc-luminance-only-normalize",
    "luminance_only_normalize",
    is_flag=True,
    help="For color control/luminance only method, normalize output luma"
)
# Spatial control
@click.option(
    "--content-guidance",
    "content_guidance_path",
    default=None,
    type=click.Path(exists=True, dir_okay=True, file_okay=False),
    help="Content guidance channels folder path"
)
@click.option(
    "--style-guidance",
    "style_guidance_path",
    default=None,
    type=click.Path(exists=True, dir_okay=True, file_okay=False),
    help="Style guidance channels folder path"
)
@click.option(
    "--guidance-propagation-method",
    default="simple",
    type=click.Choice(["simple", "inside", "all"]),
    help="Propagation method for guidance channels"
)
@click.option(
    "--guidance-threshold",
    default=.5,
    type=threshold_or_neg,
    help="Threshold between 0 and 1 for guidance channels thresholding, or any"
    " negative value for non thresholding"
)
@click.option(
    "--guidance-propagation-kernel-size",
    default=None,
    type=odd_int,
    help="Kernel size for propagation of guidance channels (relevant for "
    "inside and all methods)"
)
@click.option(
    "--guidance-propagation-dilation",
    default=None,
    type=click.INT,
    help="Dilation for propagation of guidance channels (relevant for "
    "inside and all methods)"
)
# Meta
@click.option(
    "--device", "-d",
    default=None,
    help="PyTorch device to use (default: cuda if available, otherwise cpu)"
)
@click.option(
    "--verbose/--quiet",
    "verbose",
    default=True,
    help="Verbose flag prints info during computation (default: verbose)"
)
@click.version_option(version=neurartist.__version__)
def main(
    content_path,
    style_path,
    output_path,
    img_size,
    num_epochs,
    trade_off,
    random_init,
    random_init_path,
    content_layers,
    style_layers,
    content_weights,
    style_weights,
    color_control,
    luminance_only_normalize,
    content_guidance_path,
    style_guidance_path,
    guidance_propagation_method,
    guidance_threshold,
    guidance_propagation_kernel_size,
    guidance_propagation_dilation,
    device,
    verbose
):
    """
    Create beautiful art using deep learning.

    CLI entry point for neural style transfer: loads the content/style
    images, builds the model, runs L-BFGS optimization for the requested
    number of epochs, and saves the stylized output image.
    """
    # Check that content_guidance_path and style_guidance_path are either both
    # None or both set
    guidance_check = int(content_guidance_path is None)
    guidance_check += int(style_guidance_path is None)
    if guidance_check not in (0, 2):
        raise ValueError(
            "content_guidance and style_guidance must be both set or both None"
        )

    # If a negative value is set, no thresholding is done
    if guidance_threshold < 0:
        guidance_threshold = None

    # If the output path is a directory, we append a generated filename
    if os.path.isdir(output_path):
        output_path = os.path.join(
            output_path,
            "{}.png".format(datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
        )

    # Automatic detection of optimal device
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    # RuntimeError if we use a non-valid device
    torch.device(device)

    # Load and transform the input images
    content_image, style_image = neurartist.utils.load_input_images(
        content_path,
        style_path,
        img_size,
        device
    )

    # If color control mode is histogram matching, update style image
    if color_control == "histogram_matching":
        neurartist.utils.color_histogram_matching(content_image, style_image)

    # Instantiate the model
    model = neurartist.models.NeuralStyle(
        content_layers=content_layers,
        style_layers=style_layers,
        content_weights=content_weights,
        style_weights=style_weights,
        trade_off=trade_off,
        device=device
    )

    # Load guidance channels if desired
    if content_guidance_path is None:
        content_guidance = None
        style_guidance = None
    else:
        # Only pass kernel parameters the user actually supplied, so the
        # library defaults apply otherwise.
        kernel_params = {}
        if guidance_propagation_kernel_size is not None:
            kernel_params["kernel_size"] = \
                (guidance_propagation_kernel_size,)*2
        if guidance_propagation_dilation is not None:
            kernel_params["dilation"] = \
                (guidance_propagation_dilation,)*2
        content_guidance = neurartist.utils.load_guidance_channels(
            content_guidance_path,
            img_size,
            model,
            method=guidance_propagation_method,
            threshold=guidance_threshold,
            kernel_parameters=kernel_params,
            fallback_channel=True,
            device=device
        )
        style_guidance = neurartist.utils.load_guidance_channels(
            style_guidance_path,
            img_size,
            model,
            method=guidance_propagation_method,
            kernel_parameters=kernel_params,
            fallback_channel=True,
            device=device
        )

    # Initialize the optimizer
    if random_init:
        # Despite what's described in the article, initializing the gradient
        # descent with a random input doesn't produce good results at all
        output = torch.randn(content_image.size()).type_as(content_image.data)
    elif random_init_path is None:
        output = content_image.clone()
    else:
        output = neurartist.utils.input_transforms(
            content_image.shape[-2:],  # Use actual content size
            device=device
        )(Image.open(random_init_path))

    # The output image is updated by backward propagation
    output.requires_grad_(True)
    optimizer = torch.optim.LBFGS([output])

    # Fetch the target style and content
    content_targets, style_targets = model.get_images_targets(
        content_image,
        style_image,
        style_guidance
    )

    if verbose:
        print(f"Device={device}")
        print(f"Content={content_path}")
        print(f"Style={style_path}")
        print(f"Output={output_path}")
        print(f"Size={img_size}")
        print(f"Epochs={num_epochs}")
        print(f"Trade-off={trade_off}")
        print(f"Random init={random_init}")
        print(f"Color control={color_control}")
        print(f"Guidance={content_guidance_path is not None}")
        if content_guidance_path is not None:
            print(f"Guidance propagation method={guidance_propagation_method}")
        print(f"Model={model}")
        print()
        print("Ctrl-C to prematurely end computations")
        print("Epoch\tContent loss\tStyle loss\tOverall")

    try:
        for i in range(num_epochs):
            # Run a forward/backward pass
            content_loss, style_loss, overall_loss = model.epoch(
                output,
                content_targets,
                style_targets,
                optimizer,
                content_guidance
            )
            if verbose:
                print("{}/{}\t{:.2f}\t{:.2f}\t{:.2f}".format(
                    str(i+1).zfill(len(str(num_epochs))),
                    num_epochs,
                    content_loss,
                    style_loss,
                    overall_loss
                ))
    except KeyboardInterrupt:  # Handle manual interruption through Ctrl-C
        if verbose:
            print("Manual interruption")

    # Convert the output image
    output_image = neurartist.utils.output_transforms()(
        output
    )

    # Luminance-only
    if color_control == "luminance_only":
        output_image = neurartist.utils.luminance_only(
            neurartist.utils.output_transforms()(
                content_image
            ),
            output_image,
            luminance_only_normalize
        )

    # Finally save the output
    output_image.save(output_path)
| gjorando/style-transfer | neurartist/cli.py | cli.py | py | 10,810 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_numb... |
33512429756 | # -*- coding: utf-8 -*-
#
# File: setuphandlers.py
#
#
# GNU General Public License (GPL)
#
__docformat__ = 'plaintext'
from collective.contact.core.interfaces import IContactCoreParameters
from plone import api
from z3c.relationfield.relation import RelationValue
from zope import component
from zope.intid.interfaces import IIntIds
import datetime
import logging
# from plone.registry.interfaces import IRegistry
logger = logging.getLogger('collective.contact.core: setuphandlers')
def isNotCollectiveContactContentProfile(context):
    """True when this GenericSetup run is not the collective.contact.core profile."""
    marker = context.readDataFile("collective_contact_core_marker.txt")
    return marker is None
def isNotTestDataProfile(context):
    """True when this GenericSetup run is not the test-data profile."""
    marker = context.readDataFile("collective_contact_core_test_data_marker.txt")
    return marker is None
def postInstall(context):
    """Called as at the end of the setup process.

    Guards against reinstalling through portal_quickinstaller (which would
    drop manually added dexterity fields) and seeds registry defaults.
    """
    # Only act when our own profile is the one being imported.
    if isNotCollectiveContactContentProfile(context):
        return
    # we CAN NOT reinstall the product using portal_quickinstaller because
    # it removes manually added fields for dexterity types; detect that
    # call path by inspecting the current stack.
    import traceback
    for line in traceback.format_stack():
        if 'QuickInstallerTool.py' in line and 'reinstallProducts' in line:
            raise Exception('You can not reinstall this product, use portal_setup to re-apply the relevant profile !')
    # Set default values in registry (only when not already configured).
    for name in ('person_contact_details_private', 'person_title_in_title', 'use_held_positions_to_search_person',
                 'use_description_to_search_person'):
        val = api.portal.get_registry_record(name=name, interface=IContactCoreParameters)
        if val is None:
            api.portal.set_registry_record(name=name, value=True, interface=IContactCoreParameters)
    # we need to remove the default model_source added to our portal_types
    # XXX to be done
def create_test_contact_data(portal):
    """Create test contact data in portal.

    Builds a 'Military directory' with example organizations (army/corps/
    division/regiment/squad hierarchy), positions, persons, and the
    held_position objects linking persons to organizations/positions.
    """
    position_types = [{'name': u'General', 'token': u'general'},
                      {'name': u'Sergeant', 'token': u'sergeant'},
                      {'name': u'Colonel', 'token': u'colonel'},
                      {'name': u'Lieutenant', 'token': u'lieutenant'},
                      {'name': u'Captain', 'token': u'captain'},
                      {'name': u'Admiral', 'token': u'admiral'},
                      ]
    organization_types = [{'name': u'Navy', 'token': u'navy'},
                          {'name': u'Army', 'token': u'army'},
                          {'name': u'Air force', 'token': u'air_force'},
                          ]
    organization_levels = [{'name': u'Corps', 'token': u'corps'},
                           {'name': u'Division', 'token': u'division'},
                           {'name': u'Regiment', 'token': u'regiment'},
                           {'name': u'Squad', 'token': u'squad'},
                           ]
    # Examples structure
    # ------------------
    # organizations (* = organization, £ = position)
    # * Armée de terre
    #   * Corps A
    #     * Division Alpha
    #       * Régiment H
    #         * Brigade LH
    #           £ Sergent
    #       £ Capitaine
    #     * Division Beta
    #   * Corps B
    #   £ Général
    #
    # persons (> = person, @ = held_position)
    # > De Gaulle
    #   @ Armée de terre
    #   @ Général
    # > Pepper
    #   @ Sergent
    # > Rambo
    #   @ Brigade LH
    # > Draper
    #   @ Capitaine
    #   @ Division Beta
    params = {'title': u"Military directory",
              'position_types': position_types,
              'organization_types': organization_types,
              'organization_levels': organization_levels,
              }
    portal.invokeFactory('directory', 'mydirectory', **params)
    mydirectory = portal['mydirectory']

    params = {'lastname': u'De Gaulle',
              'firstname': u'Charles',
              'gender': u'M',
              'person_title': u'Général',
              'birthday': datetime.date(1901, 11, 22),
              'email': u'charles.de.gaulle@private.com',
              'country': u'France',
              'city': u"Colombey les deux églises",
              'number': u'6bis',
              'street': u'rue Jean Moulin',
              'zip_code': u'52330',
              'additional_address_details': u'bâtiment D',
              'use_parent_address': False,
              'website': 'www.charles-de-gaulle.org'
              }
    mydirectory.invokeFactory('person', 'degaulle', **params)
    degaulle = mydirectory['degaulle']

    params = {'lastname': u'Pepper',
              'gender': u'M',
              'person_title': u'Mister',
              'birthday': datetime.date(1967, 6, 1),
              'email': u'stephen.pepper@private.com',
              'phone': u'0288443344',
              'city': u'Liverpool',
              'country': u'England',
              'use_parent_address': False,
              'website': 'http://www.stephen-pepper.org'
              }
    mydirectory.invokeFactory('person', 'pepper', **params)
    pepper = mydirectory['pepper']

    params = {'lastname': u'Rambo',
              'firstname': u'John',
              'phone': u'0788556644',
              'use_parent_address': True,
              }
    mydirectory.invokeFactory('person', 'rambo', **params)
    rambo = mydirectory['rambo']

    params = {'lastname': u'Draper',
              'firstname': u'John',
              'person_title': u'Mister',
              'use_parent_address': False,
              }
    mydirectory.invokeFactory('person', 'draper', **params)
    draper = mydirectory['draper']

    params = {'title': u"Armée de terre",
              'organization_type': u'army',
              'phone': u'01000000001',
              'email': u'contact@armees.fr',
              'use_parent_address': False,
              'city': u'Paris',
              'street': u'Avenue des Champs-Élysées',
              'number': u'1',
              'zip_code': u'75008',
              'country': u'France',
              'enterprise_number': 'BE123456789',
              }
    mydirectory.invokeFactory('organization', 'armeedeterre', **params)
    armeedeterre = mydirectory['armeedeterre']

    params = {'title': u"Corps A",
              'organization_type': u'corps',
              'street': u"rue Philibert Lucot",
              'city': u'Orléans',
              'country': u'France',
              'use_parent_address': False,
              }
    armeedeterre.invokeFactory('organization', 'corpsa', **params)
    corpsa = armeedeterre['corpsa']

    params = {'title': u"Corps B",
              'organization_type': u'corps',
              'use_parent_address': True,
              }
    armeedeterre.invokeFactory('organization', 'corpsb', **params)

    params = {'title': u"Division Alpha",
              'organization_type': u'division',
              'use_parent_address': True,
              }
    corpsa.invokeFactory('organization', 'divisionalpha', **params)

    params = {'title': u"Division Beta",
              'organization_type': u'division',
              'use_parent_address': True,
              }
    corpsa.invokeFactory('organization', 'divisionbeta', **params)
    divisionalpha = corpsa['divisionalpha']
    divisionbeta = corpsa['divisionbeta']

    params = {'title': u"Régiment H",
              'organization_type': u'regiment',
              'number': u"11",
              'street': u"rue de l'harmonie",
              'city': u"Villeneuve d'Ascq",
              'zip_code': u'59650',
              'country': u'France',
              'use_parent_address': False,
              }
    divisionalpha.invokeFactory('organization', 'regimenth', **params)
    regimenth = divisionalpha['regimenth']

    params = {'title': u"Brigade LH",
              'organization_type': u'squad',
              'use_parent_address': True,
              }
    regimenth.invokeFactory('organization', 'brigadelh', **params)
    brigadelh = regimenth['brigadelh']

    params = {'title': u"Général de l'armée de terre",
              'position_type': u'general',
              'email': u'general@armees.fr',
              'use_parent_address': False,
              'city': u'Lille',
              'street': u"Rue de la Porte d'Ypres",
              'number': u'1',
              'zip_code': u'59800',
              'country': u'France',
              }
    armeedeterre.invokeFactory('position', 'general_adt', **params)

    params = {'title': u"Capitaine de la division Alpha",
              'position_type': u'captain',
              'use_parent_address': True,
              }
    divisionalpha.invokeFactory('position', 'capitaine_alpha', **params)
    capitaine_alpha = divisionalpha['capitaine_alpha']

    params = {'title': u"Sergent de la brigade LH",
              'position_type': u'sergeant',
              'cell_phone': u'0654875233',
              'email': u'brigade_lh@armees.fr',
              'im_handle': u'brigade_lh@jabber.org',
              'use_parent_address': True,
              }
    brigadelh.invokeFactory('position', 'sergent_lh', **params)
    sergent_lh = brigadelh['sergent_lh']

    # held_position objects reference their position/organization through
    # z3c.relationfield RelationValues keyed by intid.
    intids = component.getUtility(IIntIds)

    params = {'start_date': datetime.date(1940, 5, 25),
              'end_date': datetime.date(1970, 11, 9),
              'position': RelationValue(intids.getId(armeedeterre)),
              }
    degaulle.invokeFactory('held_position', 'adt', **params)
    general_adt = armeedeterre['general_adt']

    params = {'start_date': datetime.date(1940, 5, 25),
              'end_date': datetime.date(1970, 11, 9),
              'position': RelationValue(intids.getId(general_adt)),
              'label': u"Émissaire OTAN",
              'phone': u'0987654321',
              'country': u'France',
              'use_parent_address': True,
              }
    degaulle.invokeFactory('held_position', 'gadt', **params)

    params = {'start_date': datetime.date(1980, 6, 5),
              'position': RelationValue(intids.getId(sergent_lh)),
              'email': u'sgt.pepper@armees.fr',
              'phone': u'0288552211',
              'city': u'Liverpool',
              'street': u'Water Street',
              'number': u'1',
              'zip_code': u'L3 4FP',
              'country': u'England',
              'use_parent_address': False,
              'website': 'http://www.sergent-pepper.org'
              }
    pepper.invokeFactory('held_position', 'sergent_pepper', **params)

    params = {'position': RelationValue(intids.getId(capitaine_alpha)),
              'use_parent_address': True,
              }
    draper.invokeFactory('held_position', 'captain_crunch', **params)

    params = {'position': RelationValue(intids.getId(divisionbeta)),
              'use_parent_address': True,
              }
    draper.invokeFactory('held_position', 'divisionbeta', **params)

    params = {'position': RelationValue(intids.getId(brigadelh)),
              'use_parent_address': True,
              }
    rambo.invokeFactory('held_position', 'brigadelh', **params)
def createTestData(context):
    """GenericSetup import step: populate the site with example contacts."""
    if isNotTestDataProfile(context):
        return
    create_test_contact_data(context.getSite())
| collective/collective.contact.core | src/collective/contact/core/setuphandlers.py | setuphandlers.py | py | 11,311 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "traceback.format_stack",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "plone.api.portal.get_registry_record",
"line_number": 48,
"usage_type": "call"
},
{
"api... |
35217427072 | from urllib.request import urlopen
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
import http.client
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
import json
import re
import copy
import string
http.client._MAXHEADERS = 1000
def urllib_download(IMAGE_URL, pName):
    """Best-effort download of IMAGE_URL to '<sanitized pName>.jpg' in cwd.

    Path separators are stripped from the name so it stays a flat filename.
    Failures are swallowed so the scrape continues (the original behavior),
    but a bare ``except:`` that also caught KeyboardInterrupt/SystemExit is
    narrowed to Exception.
    """
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        # NOTE(review): installs a process-global opener on every call.
        urllib.request.install_opener(opener)
        urllib.request.urlretrieve(IMAGE_URL, pName.replace("/", "").replace("\\", "") + '.jpg')
    except Exception:
        print('no')
def getNodeText(node):
    """Stripped text content of a soup node; '' when the node is missing."""
    return "" if node is None else node.get_text().strip()
retryCount = 0  # global retry budget shared by every call
# NOTE(review): never reset, so at most 5 retries over the whole run — confirm intent.
loadCount = 0
def getHtmlFromUrl(url, type="get", para={}):
    """Fetch *url* and return the raw response bytes, retrying on failure.

    ``type`` and ``para`` are kept for interface compatibility but unused.
    Returns None once the shared retry budget is exhausted.
    """
    global retryCount
    try:
        quoted = urllib.parse.quote(url, safe=string.printable).replace(' ', '%20')
        request_obj = urllib.request.Request(url=quoted)
        response_obj = urllib.request.urlopen(request_obj)
        return response_obj.read()
    except Exception:  # narrowed from a bare except
        print("retry" + url)
        retryCount += 1
        print(retryCount)
        if retryCount < 5:
            # FIX: the retried result was previously discarded (no return),
            # so a successful retry still returned None.
            return getHtmlFromUrl(url)
        return None
def getRenderdHtmlFromUrl(url):
    """Render *url* in headless Chrome and return the resulting page source."""
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument("window-size=1024,768")
    chrome_options.add_argument("--no-sandbox")
    browser = webdriver.Chrome(chrome_options=chrome_options)
    try:
        browser.get(url)
        return browser.page_source
    finally:
        # FIX: the browser was never quit, leaking a chromedriver/Chrome
        # process for every page fetched.
        browser.quit()
def writeExcel(workSheet, headers, rowIndex, info):
    """Write one worksheet row: info values in header order, '' when absent.

    Illegal XML characters are stripped before writing. Cell writes remain
    best-effort, but a failure no longer skips the column increment (the
    original bumped cellIndex inside the try, so one error shifted every
    subsequent value into the wrong column).
    """
    for cellIndex, head in enumerate(headers, start=1):
        try:
            if head in info:
                content = ILLEGAL_CHARACTERS_RE.sub(r'', info[head])
                workSheet.cell(rowIndex, cellIndex).value = content.strip()
            else:
                workSheet.cell(rowIndex, cellIndex).value = ""
        except Exception:  # narrowed from a bare except
            print(rowIndex)
def getProductInfo(url, products):
    """Scrape one product page and append its details to *products*.

    Collects the product name, downloads its image, and copies every
    'pull-left' label/value spec pair into the record.
    """
    print(str(len(products)) + url)
    tempPinfo = {}
    productHtml = getRenderdHtmlFromUrl(url)
    if productHtml != None:
        tempPinfo["link"] = url
        sope = BeautifulSoup(productHtml, "html.parser",from_encoding="utf-8")
        nameArea = sope.find("h3",attrs={"itemprop":"name"})
        imgArea = sope.find("img",attrs={"itemprop":"image"})
        tempPinfo["name"] = getNodeText(nameArea)
        if imgArea!=None:
            src = imgArea["src"]
            # Protocol-relative URLs ("//host/...") need the scheme prepended.
            if src.find("https:") < 0:
                src = "https:"+src
            urllib_download(src, tempPinfo["name"])
            tempPinfo["img"] = tempPinfo["name"]+".jpg"
        # Each spec is a label; its value sits two siblings later in the DOM.
        specs = sope.find_all("label",attrs={"class":"pull-left"})
        for spec in specs:
            title = getNodeText(spec)
            tempPinfo[title] = getNodeText(spec.nextSibling.nextSibling)
        products.append(tempPinfo.copy())
def getProductList(url, products):
    """Scrape one category listing page and follow every product link on it.

    :param url: listing-page URL.
    :param products: accumulator list passed through to getProductInfo.

    Idiom fix only: ``!= None`` replaced with ``is not None``.
    """
    print(url)
    productListHtml = getRenderdHtmlFromUrl(url)
    sope = BeautifulSoup(productListHtml, "html.parser", from_encoding="utf-8")
    # Each product on the listing page sits in its own <article>.
    pros = sope.find_all("article")
    for pro in pros:
        link = pro.find("a")
        if link is not None:
            getProductInfo("https://www.001chemical.com" + link["href"], products)
# ---- Scrape 001chemical.com organic-silicones listings into an Excel file ----
excelFileName="cfmats.xlsx"
wb = Workbook()
workSheet = wb.active
# Accumulator: one dict of scraped fields per product.
products = []
# getProductInfo('https://www.001chemical.com/chem/204580-28-9', products)
# getProductList('https://www.001chemical.com/chem/organic-siliconeslist1', products)
# Walk listing pages 1..19 of the category; each call appends to `products`.
for pageIndex in range(1, 20):
    getProductList("https://www.001chemical.com/chem/organic-siliconeslist"+str(pageIndex), products)
# Output column order; keys missing from a product leave the cell blank.
headers=['link','name','img','CAS Number','Catalog Number','Molecular Formula',
    'Molecular Weight']
rindex = 1
for p in products:
    writeExcel(workSheet, headers, rindex, p)
    # Checkpoint every 100 rows so a crash mid-run loses little work.
    if rindex%100 == 0:
        wb.save(excelFileName)
    rindex = rindex+1
# NOTE(review): presumably meant "finish" — runtime string left unchanged.
print("flish")
wb.save(excelFileName) | Just-Doing/python-caiji | src/work/2021年3月15日/chemical.py | chemical.py | py | 3,863 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "http.client.client",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "urllib.request.build_opener",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "urlli... |
72467060903 | from edgar3.filing_13f import Filing_13F
from edgar3 import __version__
import os
import datetime
import csv
from google.cloud import storage
from distutils import util
from io import StringIO
def save_filing(fil: Filing_13F, year: int, quarter: int):
    """Serialize a parsed 13F filing as CSV and upload it to the data-lake bucket.

    One header row, then one row per holding; the manager/filing columns are
    repeated on every holding row so each row is self-describing.
    """
    destination = f"etl-13f/processed/reports/{year}/{quarter}/{fil.accession_number}.csv"
    blob = storage_bucket.blob(destination)
    buffer = StringIO()
    writer = csv.writer(buffer, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer.writerow([
        "ManagerName",
        "CIK",
        "Street1",
        "Street2",
        "City",
        "StateOrCountry",
        "ZipCode",
        "AccessionNumber",
        "PeriodOfReport",
        "SignatureDate",
        "CUSIP",
        "SecurityName",
        "TitleOfClass",
        "ValueOfShares",
        "NumberOfShares",
        "TypeOfShares",
    ])
    # Filing-level columns are identical for every holding row.
    manager_columns = [
        fil.manager_name,
        fil.cik,
        fil.street1,
        fil.street2,
        fil.city,
        fil.state_or_country,
        fil.zip_code,
        fil.accession_number,
        fil.period_of_report,
        fil.signature_date,
    ]
    for holding in fil.holdings:
        writer.writerow(manager_columns + [
            holding.cusip,
            holding.name_of_issuer,
            holding.title_of_class,
            holding.value,
            holding.number,
            holding.type,
        ])
    # Drop the trailing CRLF before uploading.
    blob.upload_from_string(buffer.getvalue().strip("\r\n"))
def process_filing(path: str, year: int, quarter: int) -> bool:
    """Download one raw filing blob, parse it, and persist the report CSV.

    Returns True when the blob is fully handled — including deliberately
    skipped notice/amendment document types and bad downloads that were
    logged — and False when parsing or saving fails.
    """
    text = storage_bucket.blob(path).download_as_string().decode("utf-8")
    if not text:
        print("Zero length")
        log_failed_process(path, year, quarter)
        return True  # allowed failure??
    if text.startswith("<!DOCTYPE html>"):
        # An HTML page means the original fetch hit an error page.
        print("Invalid download")
        log_failed_process(path, year, quarter)
        return True
    fil = Filing_13F(text)
    # Notice filings and amendments carry nothing we want to extract.
    for skipped_type in ("13F-NT", "13F-NT/A", "13F-HR/A"):
        if skipped_type in fil.documents:
            return True
    try:
        if not fil.process():
            return False
        save_filing(fil, year, quarter)
    except Exception as e:
        print(f"Exception on {path}: {e}")
        print(path)
        log_failed_process(path, year, quarter)
        return False
    return True
def log_failed_process(path: str, year: int, quarter: int):
    """Copy a filing that could not be processed into the failed_reports folder.

    The destination name is prefixed with the period (e.g. ``2020Q3_``) so
    failures from different quarters never collide.
    """
    base_name = path.split("/")[-1]
    destination = f"etl-13f/failed_reports/{year}Q{quarter}_{base_name}"
    print(f"Failed on {path}, copied to {destination}")
    storage_bucket.copy_blob(storage_bucket.blob(path), storage_bucket, destination)
print(f"Using Edgar Version: {__version__}", flush=True)
# Processing window defaults to the current quarter; overridable via env vars.
now = datetime.datetime.now()
start_year = int(os.getenv("START_YEAR", now.year))
start_quarter = int(os.getenv("START_QUARTER", (now.month - 1) // 3 + 1))
end_year = int(os.getenv("END_YEAR", now.year))
end_quarter = int(os.getenv("END_QUARTER", (now.month - 1) // 3 + 1))
bucket_name = os.getenv("BUCKET_NAME", "farr-ai-data-lake")
# NOTE(review): force_process is parsed but never read below — verify intent.
force_process = bool(util.strtobool(os.getenv("FORCE_PROCESS", "False")))
print(f"Processing 13Fs for {start_year}:Q{start_quarter}-{end_year}:Q{end_quarter} into {bucket_name}", flush=True)
storage_client = storage.Client()
# Module-level bucket handle used by process_filing/save_filing/log_failed_process.
storage_bucket = storage_client.get_bucket(bucket_name)
for year in range(start_year, end_year + 1):
    # if we're starting, the first quarter of the year can be passed in
    if year == start_year:
        quarter_low = start_quarter
    else:
        quarter_low = 1
    # and if we're ending, the last quarter of the year can be passed in
    if year == end_year:
        quarter_high = end_quarter
    else:
        # Never run past the current quarter of the current year.
        if year == now.year:
            quarter_high = (now.month - 1) // 3 + 1
        else:
            quarter_high = 4
    for quarter in range(quarter_low, quarter_high + 1):
        print(f"Processing {year}:Q{quarter}", end="", flush=True)
        base_path = f"etl-13f/reports/{year}/{quarter}"
        # List every raw filing blob for the quarter, then process each one.
        known_blobs = [blob.name for blob in storage_bucket.list_blobs(prefix=base_path)]
        for file in known_blobs:
            process_filing(file, year, quarter)
        print(f" {len(known_blobs)} processed", flush=True)
| kfarr3/etl-13f | process_filings/src/process_filings.py | process_filings.py | py | 4,530 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "edgar3.filing_13f.Filing_13F",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "io.StringIO",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL... |
19500865293 | import csv
from ctypes import pointer
import math
from time import sleep
from unittest import result
from matplotlib import pyplot as plt
import matplotlib.animation as animation
from matplotlib.pyplot import MultipleLocator
import numpy as np
def write_csv_list_a(sk_list, path):
    """Append the rows in *sk_list* to the CSV file at *path*."""
    with open(path, 'a', newline='') as handle:
        csv.writer(handle).writerows(sk_list)
def write_csv_list_w(sk_list, path):
    """Write the rows in *sk_list* to *path* as CSV, replacing any existing file."""
    with open(path, 'w', newline='') as handle:
        csv.writer(handle).writerows(sk_list)
def eval_list(sk_list):
    """Evaluate each string in *sk_list* into a Python value (e.g. "1.5" -> 1.5).

    NOTE(security): eval() executes arbitrary expressions — only feed this
    CSV files produced by this project, never untrusted input.
    (Idiom fix: manual append loop replaced with a list comprehension.)
    """
    return [eval(item) for item in sk_list]
def read_csv_list(path):
    """Read the CSV at *path* and evaluate every cell; returns a list of row lists."""
    with open(path, 'r') as handle:
        return [eval_list(row) for row in csv.reader(handle)]
def str_list_2_float(str_list):
    """Convert a list of numeric strings to a list of floats.

    (Idiom fix: manual append loop replaced with a list comprehension.)
    """
    return [float(item) for item in str_list]
def read_csv_17_list(path, epoch):
    """Read the 17 keypoint rows belonging to frame *epoch* of a skeleton CSV.

    Each frame occupies 17 consecutive rows, so frame i spans rows
    [i*17, (i+1)*17). Rows are converted to float lists.

    Fix: the original kept scanning the whole file after the 17 wanted rows
    were collected; we now break out as soon as the frame is read.
    """
    start_index = epoch * 17
    end_index = (epoch + 1) * 17 - 1  # inclusive
    result_list = []
    with open(path, 'r') as f:
        for count, item in enumerate(csv.reader(f)):
            if count > end_index:
                break  # frame complete — no need to read the rest of the file
            if count >= start_index:
                result_list.append(str_list_2_float(item))
    # print(result_list)
    return result_list
# _________________________________________________________________________________________________
# _________________________________________________________________________________________________
# print(sk_list[0][0])
# print(type(sk_list[0][0]))
# print(sk_list)
# print(len(sk_list))
# Set the cs_p P17 as the initial centre point, to entablish the whole spherical coordinates system
# Pre-set the distance of each two skeleton segment points set
# 1 shoulder centre point to left shoulder point
d_cs_l_s = 1
# 2 shoulder centre point to right shoulder point
d_cs_r_s = 1
# 3 left shoulder point to left elbow point
d_l_s_eb = 1.1
# 4 left elbow point to left wrist point
d_l_eb_w = 1.5
# 5 right shoulder point to right elbow point
d_r_s_eb = 1.1
# 6 right elbow point to right wrist point
d_r_eb_w = 1.5
# 7 shoulder centre point to nose point
d_cs_n = 1
# 8 nose point to left eye point
d_n_l_e = 0.3
# 9 nose point to rigth eye point
d_n_r_e = 0.3
# 10 left point eye to left ear point
d_l_e_er = 0.35
# 11 rigth eye point to rigth ear point
d_r_e_er = 0.35
# 12 shoulder centre point to hip centre point
d_cs_ch = 3
# 13 hip centre point to left hip point
d_ch_l_h = 0.9
# 14 hip centre point to right hip point
d_ch_r_h = 0.9
# 15 left hip point to left knee point
d_l_h_k = 1.8
# 16 right hip point to right knee point
d_r_h_k = 1.8
# 17 left knee point to left ankle point
d_l_k_a = 1.8
# 18 right knee point to right ankle point
d_r_k_a = 1.8
# COCO_PERSON_KEYPOINT_NAMES = [0'nose', 1'left_eye', 2'right_eye', 3'left_ear',
# 4'right_ear', 5'left_shoulder', 6'right_shoulder', 7'left_elbow',
# 8'right_elbow', 9'left_wrist', 10'right_wrist', 11'left_hip', 12'right_hip',
# 13'left_knee', 14'right_knee', 15'left_ankle', 16'right_ankle']
# ratio_d = [0:d_cs_l_s, 1:d_cs_r_s, 2:d_l_s_eb, 3:d_l_eb_w, 4:d_r_s_eb, 5:d_r_eb_w, 6:d_cs_n, 7:d_n_l_e, 8:d_n_r_e, 9:d_l_e_er,
# 10:d_r_e_er, 11:d_cs_ch, 12:d_ch_l_h, 13:d_ch_r_h, 14:d_l_h_k, 15:d_r_h_k, 16:d_l_k_a, 17:d_r_k_a]
# Canonical length of each skeleton segment, ordered to match seq_train_set.
ratio_d = [d_cs_l_s, d_cs_r_s, d_l_s_eb, d_l_eb_w, d_r_s_eb, d_r_eb_w, d_cs_n, d_n_l_e, d_n_r_e, d_l_e_er,
           d_r_e_er, d_cs_ch, d_ch_l_h, d_ch_r_h, d_l_h_k, d_r_h_k, d_l_k_a, d_r_k_a]
# Segment processing order as [parent_index, child_index] pairs;
# indices 0-16 are COCO keypoints, 17 = shoulder centre, 18 = hip centre.
seq_train_set = [[17, 5], [17, 6], [5, 7], [7, 9], [6, 8], [8, 10],
                 [17, 0], [0, 1], [0, 2], [1, 3], [2, 4], [17, 18], [18, 11], [18, 12],
                 [11, 13], [12, 14], [13, 15], [14, 16]]
# Per-segment zoom ratio (all 1 = use ratio_d lengths unscaled); 18 entries.
zr = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# Front/back sign of the reconstructed depth (x) for each of the 19 points.
plus_minus = [1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
pre_plus_minus = [1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# Segments max available zoom ratio set (currently disabled)
# max_zr = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
# Reconstructed depth (x) value per keypoint, updated each frame by sk_list_to_3d.
x_set = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Learning rate — declared but not referenced anywhere in this file.
lr = 0.1
# Segment 2-D lengths of the previous frame (consumed by update_plus_minus).
sk_list_last_d2d = []
# _________________________________________________________________________________________
# Add the addictional points P17 and P18
def add_p1718(sk_list):
    """Append the shoulder-centre (P17) and hip-centre (P18) midpoints.

    P17 is the midpoint of the two shoulders (indices 5, 6); P18 is the
    midpoint of the two hips (indices 11, 12). The list is mutated in place
    and also returned for convenience.
    """
    shoulder_centre = [(sk_list[5][0] + sk_list[6][0]) / 2,
                       (sk_list[5][1] + sk_list[6][1]) / 2]
    hip_centre = [(sk_list[11][0] + sk_list[12][0]) / 2,
                  (sk_list[11][1] + sk_list[12][1]) / 2]
    sk_list.append(shoulder_centre)  # P17
    sk_list.append(hip_centre)       # P18
    return sk_list
# ______________________________________________________________________________________________________________________
# Get 2d distance of specific two points in sk_list
def get_points_d2d(sk_list, p_in__1, p_in__2):
    """Return the 2-D Euclidean distance between two keypoints of *sk_list*.

    Only the first two components of each row are used.
    (Idiom fix: manual sqrt-of-squares replaced with math.hypot, which is
    also more robust against intermediate overflow/underflow.)
    """
    return math.hypot(sk_list[p_in__1][0] - sk_list[p_in__2][0],
                      sk_list[p_in__1][1] - sk_list[p_in__2][1])
# ______________________________________________________________________________________________________________________
# Normalizing the 3d data
def normalizing(sk_list):
    """Translate all points so P17 (index 17) becomes the origin, flipping the
    second axis so it points upward.

    Returns a new list of [y, -z] rows; the input is not modified.
    """
    origin_y = sk_list[17][0]
    origin_z = sk_list[17][1]
    return [[point[0] - origin_y, -(point[1] - origin_z)] for point in sk_list]
# ______________________________________________________________________________________________________________________
# ______________________________________________________________________________________________________________________
# Transforms the sk_list from 2d to 3d
# d2d^2 = (y1-y2)^2 + (z1-z2)^2
# d3d^2 = (x1-x2)^2 + d2d^2
# x1=0, x2 = -(x1-x2) = -math.sqrt(d3d^2-d2d^2)
def sk_list_to_3d(sk_list):
    """Reconstruct a depth (x) coordinate for every 2-D keypoint, in place.

    :param sk_list: 19 normalized [y, z] rows (17 COCO keypoints + P17/P18).
    :return: the same list, mutated so each row becomes [x, y, z].

    Principle (per the comments above this function): a segment whose 2-D
    projection is shorter than its expected 3-D length must be tilted toward
    or away from the camera; Pythagoras gives |x|, and the plus_minus
    heuristics choose the sign. Depends on module globals zr, x_set,
    ratio_d, plus_minus, seq_train_set, pre_plus_minus, and uses the torso
    segment P17-P18 (seq_train_set index 11) as the length reference.
    """
    global zr
    global x_set
    global ratio_d
    global plus_minus
    global seq_train_set
    global plus_minus
    global pre_plus_minus
    # Reference segment: 2-D length and assumed 3-D length of the torso.
    d2d_17_18 = get_points_d2d(sk_list, 17, 18)
    d3d_17_18 = zr[11] * d2d_17_18
    # =============================================
    update_plus_minus(get_d2d_set(sk_list))
    # =============================================
    # print('d3d_17_18 = ', d3d_17_18)
    # Shoulder signs from the relative eye-ear spans (larger span = nearer side).
    if get_points_d2d(sk_list, 1, 3) >= get_points_d2d(sk_list, 2, 4):
        plus_minus[5] = -1
        plus_minus[6] = 1
    else:
        plus_minus[5] = 1
        plus_minus[6] = -1
    # plus_minus[11] = 1
    # plus_minus[16] = -1
    # print(len(x_set))
    # Walk the skeleton parent->child; each child's depth builds on its parent's.
    for i in range(len(seq_train_set)):
        global ratio_d
        d2d_seg = get_points_d2d(sk_list, seq_train_set[i][0], seq_train_set[i][1])
        # print('ratio_d[i] = ', ratio_d[i])
        # print('ratio_d[11] = ', ratio_d[11])
        # Expected 3-D length of this segment, scaled from the torso reference.
        d3d_seg = zr[i] * (ratio_d[i]/ratio_d[11]) * d3d_17_18
        x_f = x_set[seq_train_set[i][0]]
        # Depth offset of the child relative to its parent (Pythagoras on abs()).
        x_b = -(math.sqrt(abs(d3d_seg**2 - d2d_seg**2)) - x_f)
        zoom = 0.25
        x_set[seq_train_set[i][1]] = -zoom*plus_minus[seq_train_set[i][1]]*x_b
    plus_minus = pre_plus_minus
    # Prepend the reconstructed x to every [y, z] row: row -> [x, y, z].
    temp_list = sk_list.copy()
    for i in range(len(sk_list)):
        sk_list[i] = [x_set[i]] + temp_list[i]
    # Bending direction of the left hip-knee-ankle chain: flip the ankle's
    # depth sign when the thigh's x/y slope exceeds the shin's.
    d1 = sk_list[11][0]-sk_list[13][0]
    d2 = sk_list[13][0]-sk_list[15][0]
    if d1 != 0 and d2 != 0:
        k_l_h_k = abs((sk_list[11][1]-sk_list[13][1]) / d1)
        k_l_k_a = abs((sk_list[13][1]-sk_list[15][1]) / d2)
        # print(k_l_h_k)
        # print(k_l_k_a)
        if k_l_h_k > k_l_k_a:
            sk_list[15][0] = -(abs(sk_list[15][0]))
            # print('sk_list[15][0] = ', sk_list[15][0], '\n')
    # Same bending test for the right hip-knee-ankle chain.
    d3 = sk_list[12][0]-sk_list[14][0]
    d4 = sk_list[14][0]-sk_list[16][0]
    if d3 != 0 and d4 != 0:
        k_r_h_k = abs((sk_list[12][1]-sk_list[14][1]) / d3)
        k_r_k_a = abs((sk_list[14][1]-sk_list[16][1]) / d4)
        # print(k_r_h_k)
        # print(k_r_k_a)
        if k_r_h_k > k_r_k_a:
            sk_list[16][0] = -(abs(sk_list[16][0]))
    # Front/back placement of shoulders and hips: opposite shoulders get
    # opposite depth signs, and the left hip follows the left shoulder.
    if sk_list[5][0] >= 0:
        # sk_list[11][0] = -(abs(sk_list[11][0]))
        sk_list[11][0] = abs(sk_list[11][0])
        sk_list[6][0] = -(abs(sk_list[6][0]))
    else:
        sk_list[11][0] = -(abs(sk_list[11][0]))
        sk_list[6][0] = abs(sk_list[6][0])
    # The two hips always get opposite depth signs.
    if sk_list[11][0] >= 0:
        sk_list[12][0] = -(abs(sk_list[12][0]))
    else:
        sk_list[12][0] = abs(sk_list[12][0])
    # Push an elbow behind its wrist when it would otherwise sit in front.
    if sk_list[7][0] >= sk_list[9][0]:
        sk_list[7][0] = -(abs(sk_list[7][0]))
    if sk_list[8][0] >= sk_list[10][0]:
        sk_list[8][0] = -(abs(sk_list[8][0]))
    # print('sk_list_to_3d: sk_list = ', sk_list)
    return sk_list
# ______________________________________________________________________________________________________________________
# Get draw set
def get_draw_set(points_list, sk_list_3d):
    """Gather the x, y and z coordinate lists of the given keypoint indices.

    Returns [xs, ys, zs] — the column-wise transpose of the selected
    [x, y, z] rows, ready to feed to Axes3D.plot3D.
    """
    selected = [sk_list_3d[index] for index in points_list]
    xs = [point[0] for point in selected]
    ys = [point[1] for point in selected]
    zs = [point[2] for point in selected]
    return [xs, ys, zs]
# ______________________________________________________________________________________________________________________
# Get d2d set of each segments
def get_d2d_set(sk_list):
    """Return the 2-D length of each of the 18 skeleton segments in seq_train_set order."""
    global seq_train_set
    return [get_points_d2d(sk_list, seq_train_set[i][0], seq_train_set[i][1])
            for i in range(18)]
# ______________________________________________________________________________________________________________________
# Update the plus_minus
def update_plus_minus(sk_list_new_d2d):
    """Update each segment's depth sign from frame-to-frame length changes.

    Heuristic: if a keypoint already has non-zero depth and its segment's
    2-D projection shrank (or stayed equal) since the last frame, keep the
    sign on the side indicated by the current depth. Also stores the new
    lengths as the reference for the next frame.

    Bug fix: the original iterated ``range(19)`` although both distance
    lists hold only 18 segment lengths — an IndexError once x_set[18]
    becomes non-zero (and sk_list_last_d2d starts out empty). We now only
    visit indices present in both lists; on the very first frame the limit
    is 0, matching the original's effective no-op (x_set is all zeros then).
    """
    global x_set
    global plus_minus
    global sk_list_last_d2d
    limit = min(len(sk_list_new_d2d), len(sk_list_last_d2d))
    for i in range(limit):
        if x_set[i] > 0 and sk_list_new_d2d[i] <= sk_list_last_d2d[i]:
            plus_minus[i] = 1
        elif x_set[i] < 0 and sk_list_new_d2d[i] <= sk_list_last_d2d[i]:
            plus_minus[i] = -1
    sk_list_last_d2d = sk_list_new_d2d
# ______________________________________________________________________________________________________________________
# Define the ax 3d drawing constraint
def ax3d_constraint(ax, sk_list_3d):
    """Draw the 3-D skeleton polylines onto *ax* and style the axes.

    :param ax: an Axes3D instance.
    :param sk_list_3d: 19 [x, y, z] keypoint rows (see sk_list_to_3d).
    :return: the same axes, for chaining.

    Refactor: the original repeated the identical plot3D call eight times;
    the shared styling now lives in one helper, with the exact same chains
    drawn in the exact same order (including the spine being drawn twice).
    """
    left_line_color = 'r'
    central_line_color = 'gold'
    right_line_color = 'lime'
    msize = 8

    def plot_chain(indices, color):
        # One polyline through the listed keypoints with the shared marker style.
        xs, ys, zs = get_draw_set(indices, sk_list_3d)
        ax.plot3D(xs=xs,
                  ys=ys,
                  zs=zs,
                  zdir='z',
                  c=color,        # line color
                  marker='o',     # mark style
                  mfc='cyan',     # marker facecolor
                  mec='g',        # marker edgecolor
                  ms=msize,       # marker size
                  linewidth=3.0)

    plot_chain([0, 1, 3], left_line_color)           # nose -> left eye -> left ear
    # NOTE(review): [0, 0, 4] repeats the nose; the right-eye chain was
    # probably meant to be [0, 2, 4] — kept as-is to preserve output.
    plot_chain([0, 0, 4], right_line_color)
    plot_chain([0, 17, 18], central_line_color)      # nose -> shoulder centre -> hip centre
    plot_chain([17, 5, 7, 9], left_line_color)       # left arm
    plot_chain([17, 6, 8, 10], right_line_color)     # right arm
    plot_chain([18, 11, 13, 15], left_line_color)    # left leg
    plot_chain([18, 12, 14, 16], right_line_color)   # right leg
    plot_chain([0, 17, 18], central_line_color)      # spine, drawn a second time as in the original

    # Axis titles and tick spacing
    ax.set(
        xlabel='X',
        ylabel='Y',
        zlabel='Z',
    )
    x_major_locator = MultipleLocator(100)
    y_major_locator = MultipleLocator(300)
    ax.xaxis.set_major_locator(x_major_locator)
    ax.yaxis.set_major_locator(y_major_locator)
    # Camera angle
    ax.view_init(elev=30,  # elevation
                 azim=-20  # azimuth
                 )
    return ax
# ______________________________________________________________________________________________________________________
# Draw the skeleton
# def show3Dske(sk_list_3d):
def show3Dske(csv_path, mod):
    """Rebuild and display a 3-D skeleton from 2-D keypoints stored in *csv_path*.

    :param csv_path: CSV of 17-row keypoint frames (see read_csv_17_list).
    :param mod: 'p' renders frame 0 as a still picture; 'v' animates every
                frame and saves the animation as <csv_path>.gif.

    Side effects: writes the reconstructed 3-D rows to <csv_path>_3d.csv
    ('w' mode for stills, append mode per video frame) and opens a window.
    """
    # print('show3Dske: sk_list_3d = ', sk_list_3d)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # ===================================
    # ===================================
    # Stretch the box so the (tall, thin) skeleton is not squashed.
    ax.set_box_aspect((1, 2, 5))
    # ===================================
    # ===================================
    if mod == 'p':
        # Picture mode: reconstruct only frame 0.
        sk_list_i = read_csv_17_list(csv_path, 0)
        sk_list19 = add_p1718(sk_list_i)
        # print('1 sk_list19 = ', sk_list19)
        sk_list19 = normalizing(sk_list19)
        # print('2 sk_list19 = ', sk_list19)
        sk_list_3d = sk_list_to_3d(sk_list19)
        # Save the 3D data
        path_data_3d = csv_path + '_3d.csv'
        write_csv_list_w(sk_list_3d, path_data_3d)
        ax3d_constraint(ax, sk_list_3d)
    if mod == 'v':
        def update(i):
            # Per-frame callback: reconstruct frame i and redraw the axes.
            # print(i)
            sk_list_i = read_csv_17_list(csv_path, i)
            sk_list19 = add_p1718(sk_list_i)
            # print('1 sk_list19 = ', sk_list19)
            sk_list19 = normalizing(sk_list19)
            # print('sk_list19 = ', sk_list19)
            sk_list_3d = sk_list_to_3d(sk_list19)
            # Save the 3D data (appended: one 19-row block per frame)
            path_data_3d = csv_path + '_3d.csv'
            write_csv_list_a(sk_list_3d, path_data_3d)
            plt.cla()
            ax3d_constraint(ax, sk_list_3d)
        # anim = animation.FuncAnimation(fig, func=update, blit=False, interval=50, frames=600,
        anim = animation.FuncAnimation(fig, func=update, blit=False, interval=50,
                                       repeat=False, cache_frame_data=False)
        # Set here to save the result gif
        # _____________________________________
        anim.save(csv_path + '.gif')
        # _____________________________________
        plt.autoscale(False)
    # Show picture
    plt.show()
# Test()
# =============================================================================
# =============================================================================
# =============================================================================
def test_cycle():
    """Demo entry point: render the skeleton in pictures/7.jpg.csv as a still image."""
    show3Dske('./pictures/7.jpg.csv', 'p')
    # csv_path2 = './csv/3.mp4.csv'
    # show3Dske(csv_path2, 'v')
    # csv_path3 = './longvideos/1.mp4.csv'
    # show3Dske(csv_path3, 'v')
test_cycle()
| JYLinOK/3DSKeleton | showSkeleton.py | showSkeleton.py | py | 19,714 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "csv.writer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 50,
... |
34603270416 | import tensorflow as tf
import pandas as pd
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
#mnist = input_data.read_data_sets('./data/', one_hot = True)
def numtolist(num):
    """One-hot encode a grade *num* (0-20) into a length-21 0/1 array.

    The hot index counts from the end of the array: position 20 - num is 1.
    (Docstring translated from the original Chinese.)
    """
    encoded = np.asarray([0] * 21)
    encoded[20 - num] = 1
    return encoded
def numto2D(a):
    """Bucket a grade into a one-hot 3-class label: high (>=18), medium (>=12), low.

    (Docstring translated from the original Chinese.)
    """
    if a >= 18:
        bucket = [0, 0, 1]
    elif a >= 12:
        bucket = [0, 1, 0]
    else:
        bucket = [1, 0, 0]
    return np.asarray(bucket)
def train(hidden_units_size,training_iterations):
    """Train a one-hidden-layer softmax classifier on student-mat.csv (TF1 graph API).

    :param hidden_units_size: number of units in the single hidden layer.
    :param training_iterations: number of mini-batch training steps.
    :return: (train_acc, test_acc, train_loss) — per-step metric lists.

    Reads "student-mat.csv" from the working directory; grades (G3) are
    bucketed into 3 classes via numto2D. Comments translated from Chinese.
    """
    num_classes = 3 # output size (the 3 grade buckets of numto2D)
    input_size = 32 # input size (number of feature columns below)
    batch_num = 10
    # hidden_units_size = 60 # hidden layer node count
    # training_iterations = 20
    data = pd.read_csv("student-mat.csv")
    # print(data.head())
    # Input data
    # Type conversion - map each categorical column to integer codes
    for col in data.columns:
        if (not str(data[col].dtype).startswith("int")):
            # print("Coloum Name ", col, " Type ", data[col].dtype)
            # print("Unique values for ", col, data[col].unique(), "\n")
            values = data[col].unique() # all possible values of this column
            convertor = dict(zip(values, range(len(values)))) # value -> integer code
            data[col] = [convertor[item] for item in data[col]] # replace categories with codes
            # print("Coloum Name ", col, " Type ", data[col].dtype)
            # print("Unique values for ", col, data[col].unique(), "\n")
    #print(data.describe()) # data summary: mean, max, min
    print('Data Size ', data.shape)
    training_features = ['school', 'sex', 'age', 'address', 'famsize', 'Pstatus', 'Medu', 'Fedu','Mjob', 'Fjob', 'reason', 'guardian', 'traveltime', 'studytime',
    'failures', 'schoolsup', 'famsup', 'paid', 'activities', 'nursery',
    'higher', 'internet', 'romantic', 'famrel', 'freetime', 'goout', 'Dalc',
    'Walc', 'health', 'absences', 'G1', 'G2']
    label_feature = ['G3']
    selected_feature_data = data
    # Split into training and testing sets (70/30, by row order)
    num_data = data.shape[0]
    num_input = data.shape[1] - 1
    number_training = int(0.7 * num_data) # number of training rows
    number_testing = int(num_data - number_training) # number of testing rows
    print("number of traning :",number_training)
    print("number of testing :",number_testing)
    # -- training set --
    training_data_features = np.array(selected_feature_data.head(number_training)[training_features])
    # one-hot label matrix
    training_data_labels = np.zeros([number_training,num_classes], dtype = float, order = 'C')
    temp_label = np.array(selected_feature_data.head(number_training)[label_feature])
    for i in range(number_training):
        label = temp_label[i] # current row's label
        training_data_labels[i] = numto2D(int(label))
    # print('training:')
    # print(training_data_features[0:5])
    # print(training_data_labels[0:5])
    # -- testing set --house_info.loc[3:6]
    # NOTE(review): .loc[number_testing:] slices by label, not by the
    # training/testing boundary (number_training) — verify the split.
    testing_data_features = np.array(selected_feature_data.loc[number_testing:][training_features])
    # NOTE(review): sized with number_training instead of number_testing;
    # only the first number_testing rows are ever filled below.
    testing_data_labels = np.zeros([number_training,num_classes], dtype = float, order = 'C')
    temp_label = np.array(selected_feature_data.loc[number_testing:][label_feature])
    for i in range(number_testing):
        label = temp_label[i] # current row's label
        testing_data_labels[i] = numto2D(int(label))
    # --------------- data sets are now prepared ---------------
    # print('training set features:',training_data_features.shape)
    # print('training set labels:',training_data_labels.shape)
    # print('testing set features:',testing_data_features.shape)
    # print('testing set labels:',testing_data_labels.shape)
    # 300 20 0.05
    X = tf.placeholder(tf.float32, shape = [None, input_size])
    Y = tf.placeholder(tf.float32, shape = [None, num_classes])
    # W1 = tf.Variable(tf.random_normal ([input_size, num_classes], stddev = 0.1))
    # B1 = tf.Variable(tf.constant (0.1), [num_classes])
    #final_opt = tf.matmul(X, W1) + B1 # input-to-output forward pass (no hidden layer)
    # Network with one hidden layer
    W1 = tf.Variable(tf.random_normal ([input_size, hidden_units_size], stddev = 0.1))
    B1 = tf.Variable(tf.constant (0.1), [hidden_units_size])
    W2 = tf.Variable(tf.random_normal ([hidden_units_size, num_classes], stddev = 0.1))
    B2 = tf.Variable(tf.constant (0.1), [num_classes])
    hidden_opt = tf.matmul(X, W1) + B1 # input layer -> hidden layer forward pass
    hidden_opt = tf.nn.relu(hidden_opt) # activation producing the hidden node outputs
    final_opt = tf.matmul(hidden_opt, W2) + B2 # hidden layer -> output layer forward pass
    # Cross-entropy loss on the output layer
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=final_opt))
    # Gradient descent; back-propagation updates the weights to reduce the loss
    opt = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
    # Variable initialization
    init = tf.global_variables_initializer()
    # Accuracy: fraction of argmax matches between labels and logits
    correct_prediction =tf.equal (tf.argmax (Y, 1), tf.argmax(final_opt, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    sess = tf.Session ()
    sess.run (init)
    train_acc = []
    test_acc = []
    train_loss = []
    for i in range (training_iterations) :
        # Sequential (non-shuffled) mini-batches of batch_num rows.
        batch_input = training_data_features[i*batch_num:i*batch_num+batch_num]
        batch_labels = training_data_labels[i*batch_num:i*batch_num+batch_num]
        # One optimization step; training_loss[1] holds the loss value.
        training_loss = sess.run ([opt, loss], feed_dict = {X: batch_input, Y: batch_labels})
        train_accuracy = accuracy.eval (session = sess, feed_dict = {X: batch_input,Y: batch_labels})
        test_accuracy = accuracy.eval(session=sess, feed_dict={X: testing_data_features, Y: testing_data_labels})
        train_acc.append(train_accuracy)
        test_acc.append(test_accuracy)
        train_loss.append(training_loss[1])
        print ("step : %d, training accuracy = %2f ,training loss %2f,test_accuracy %2f "% (i, train_accuracy,training_loss[1],test_accuracy))
    return train_acc,test_acc,train_loss
# print('testing -----')
# test_accuracy = accuracy.eval(session=sess, feed_dict={X: testing_data_features, Y: testing_data_labels})
# print("tesing accuracy = %2f " % (test_accuracy))
def averagenum(num):
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Fixes: removed the stray debug print of the entire input list and
    replaced the manual index-based accumulation loop with built-in sum().
    Raises ZeroDivisionError on an empty sequence, as before.
    """
    return sum(num) / len(num)
def lineplot(x_data, y_data, x_label="", y_label="", title=""):
    """Draw *y_data* against *x_data* as a single styled line and return the axes."""
    # New figure/axes pair; the figure handle itself is not needed.
    _, axes = plt.subplots()
    # Fixed width, colour and opacity for the fit line.
    axes.plot(x_data, y_data, lw=2, color='#539caf', alpha=1)
    axes.set_title(title)
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
    return axes
def plot_trendline1(x_num, y, n):
    """Scatter the training-accuracy series into subplot 1 with a cubic trendline.

    :param x_num: number of points (x runs 1..x_num).
    :param y: metric values, one per step.
    :param n: polynomial fit degree passed to np.polyfit.

    NOTE(review): the trendline evaluation hard-codes a cubic with a fixed
    +0.6 intercept and ignores parameter[3]; it also evaluates at i (0-based)
    while fitting against x (1-based). Presumably a hand-tuned visual aid —
    verify before reuse.
    """
    ax1 = plt.subplot(2, 2, 1)
    plt.sca(ax1) # select subplot 1
    plt.legend() # required for the legend to appear
    x = np.linspace(1, x_num, x_num)
    plt.xticks(x)
    parameter = np.polyfit(x, y, n) # fit the trendline coefficients
    y2 = [0]*len(y)
    for i in range(len(y)) :
        y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 0.6
    plt.xlabel('training step', color='black')
    plt.ylabel('training accuracy', color='black')
    plt.scatter(x, y,label='training accuracy') # scatter plot of the raw metric
    plt.plot(x, y2, color='g',label='trendline')# fitted trendline
    plt.legend() # required for the legend to appear
def plot_trendline2(x_num, y, n):
    """Scatter the testing-accuracy series into subplot 2 with a cubic trendline.

    Same hand-tuned trendline caveats as plot_trendline1 (fixed +0.6
    intercept, parameter[3] ignored, 0-based evaluation).
    """
    ax2 = plt.subplot(2, 2, 2)
    plt.sca(ax2) # select subplot 2
    x = np.linspace(1, x_num, x_num)
    plt.xticks(x)
    parameter = np.polyfit(x, y, n)
    y2 = [0]*len(y)
    print(y2)
    for i in range(len(y)) :
        y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 0.6
    avg = averagenum(y)
    # for i in range(len(y2)):
    #     y2[i] = y2[i] + avg
    print('----- training svg',avg)
    plt.xlabel('training step', color='black')
    plt.ylabel('testing accuracy', color='black')
    plt.scatter(x, y,label='testing accuracy')
    plt.plot(x, y2, color='g',label='trendline')
    plt.legend()
def plot_trendline3(x_num, y, n):
    """Scatter the training-cross-entropy series into subplot 3, then show the figure.

    Same hand-tuned trendline caveats as plot_trendline1 (here with a fixed
    +1.2 intercept). This is the last panel: it calls plt.show(), which
    blocks until the window is closed.
    """
    ax3 = plt.subplot(2, 2, 3)
    plt.sca(ax3) # select subplot 3
    x = np.linspace(1, x_num, x_num)
    plt.xticks(x)
    # plt.bar(x, y, width=0.6, tick_label=x, fc='y',color='blue')
    # plt.xlabel('training step', color='black')
    # plt.ylabel('cross entropy', color='black')
    # plt.legend()
    # plt.show()
    parameter = np.polyfit(x, y, n)
    y2 = [0] * len(y)
    print(y2)
    for i in range(len(y)):
        y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 1.2
    avg = averagenum(y)
    # for i in range(len(y2)):
    #     y2[i] = y2[i] + avg
    print('----- training svg', avg)
    plt.xlabel('training step', color='black')
    plt.ylabel('training cross entropy', color='black')
    plt.scatter(x, y, label='training cross entropy')
    plt.plot(x, y2, color='r', label='trendline')
    plt.legend() # required for the legend to appear
    plt.show()
# plt.barh(range(len(x)), y, tick_label=x)
# plt.show()
# parameter = np.polyfit(x, y, n)
# y2 = [0]*len(y)
# print(y2)
# for i in range(len(y)) :
# y2[i] = parameter[0] * i ** 3 + parameter[1] * i ** 2 + parameter[2] * i + 1.0
# avg = averagenum(y2)
# for i in range(len(y2)):
# y2[i] = y2[i] + avg
# plt.xlabel('testing step', color='black')
# plt.ylabel('cross entropy', color='black')
# plt.scatter(x, y)
# plt.plot(x, y2, color='g')
# plt.show()
if __name__ == '__main__':
    # 15 hidden units, 18 training steps; plots one point per step.
    train_acc, test_acc, train_loss = train(15, 18)
    # print(train_acc)
    # print(test_acc)
    # print(train_loss)
    # lineplot(range(28), test_acc, x_label="step", y_label="test_acc", title="")
    # lineplot(range(28), train_acc, x_label="step", y_label="train_acc", title="")
    # plt.show(lineplot(range(28), train_loss, x_label="step", y_label="train_acc", title=""))
    # NOTE(review): plot_trendline1 is labeled "training accuracy" but gets
    # test_acc, and plot_trendline2 ("testing accuracy") gets train_acc —
    # the series appear swapped; verify before trusting the figure.
    plot_trendline1(18, test_acc,3)
    plot_trendline2(18, train_acc, 3)
    plot_trendline3(18, train_loss, 3)
    # dotImg(range(21), test_acc)
    # dotImg(range(21), train_acc)
    # dotImg(range(21), train_loss)
# choose step17 | cloud0606/AI | BP神经网络/bp3d_2l.py | bp3d_2l.py | py | 10,717 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.asarray",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_n... |
16185502227 | import os
from sanic import Sanic
from sanic_session import Session, AIORedisSessionInterface
from ..config import config, log_config
from ..container import Container
from ..adapter.blueprint import handle_exception, message_blueprint,\
post_blueprint, file_blueprint, user_blueprint
# Ensure the data directory exists before the app serves or stores files.
os.makedirs(config['DATA_PATH'], 0o755, True)
app = Sanic(config['NAME'].capitalize(), log_config=log_config)
app.config.update(config)
# Route every unhandled exception through the project's JSON error handler.
app.error_handler.add(Exception, handle_exception)
# Serve uploaded files directly from disk under /files.
app.static('/files', os.path.join(config['DATA_PATH'], config['UPLOAD_DIR']),
           stream_large_files=True)
# Register the API route groups.
app.blueprint(message_blueprint)
app.blueprint(post_blueprint)
app.blueprint(file_blueprint)
app.blueprint(user_blueprint)
@app.listener('before_server_start')
async def server_init(app, loop):
    """Build the DI container and wire Redis-backed sessions before serving."""
    container = Container(config, log_config)
    # Wait for the container's async initialization (connections, pools).
    await container.on_init
    Session(app, AIORedisSessionInterface(
        container.cache, expiry=config['SESSION_EXPIRY']))
@app.listener('after_server_stop')
async def server_clean(app, loop):
    """Release the container's resources once the server has stopped."""
    await Container().clean()
if __name__ == '__main__':
    # All runtime knobs (host/port/debug/workers/...) come from the config module.
    app.run(host=config['HOST'], port=config['PORT'], debug=config['DEBUG'],
            auto_reload=config['AUTO_RELOAD'], access_log=config['ACCESS_LOG'],
            workers=config['WORKERS'])
| jaggerwang/sanic-in-practice | weiguan/api/app.py | app.py | py | 1,296 | python | en | code | 42 | github-code | 36 | [
{
"api_name": "os.makedirs",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.config",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sanic.Sanic",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.config",
"line_numbe... |
35206134452 | from typing import List
import random
##################################
# GENERAL STUFF
class Queen():
    """A queen on the board: its row position and how many queens attack it."""

    def __init__(self, pos: int, threats: int = -1):
        # the position (row) of the queen in its column
        self.pos = pos
        # the number of threats on the queen (-1 = not yet computed)
        self.threats = threats

    def __eq__(self, o: object) -> bool:
        # BUG FIX: guard against non-Queen operands — the original used
        # `o.pos` unconditionally and raised AttributeError; returning
        # NotImplemented lets Python fall back to the default comparison.
        # Also use `and` instead of bitwise `&` on the boolean results.
        if not isinstance(o, Queen):
            return NotImplemented
        return self.pos == o.pos and self.threats == o.threats

    def __str__(self) -> str:
        return f'Queen(pos: {self.pos}, threats: {self.threats})'
def calc_threats(queens: "List[Queen]") -> "List[int]":
    """Count, for each queen, how many other queens attack it.

    Two queens in columns i and j (rows ``p_i``/``p_j`` given by
    ``queen.pos``) attack each other iff they share a row
    (``p_i == p_j``) or share a diagonal (``|p_i - p_j| == |i - j|``).
    This single pairwise rule covers the same-row, main-diagonal and
    anti-diagonal cases that the original six hand-rolled loops
    enumerated separately, and produces identical per-queen counts.

    :param queens: board state, one Queen per column
        (list index = column, ``queen.pos`` = row).
    :return: list of threat counts, parallel to ``queens``.
    """
    n = len(queens)
    threats = [0] * n
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            same_row = queens[i].pos == queens[j].pos
            same_diag = abs(queens[i].pos - queens[j].pos) == abs(i - j)
            if same_row or same_diag:
                threats[i] += 1
    return threats
def update_threats(queens: List[Queen], threats: List[int]) -> None:
    """Copy each freshly computed threat count onto its queen object."""
    for index in range(len(queens)):
        queens[index].threats = threats[index]
def copy_queens(queens: List[Queen]) -> List[Queen]:
    """Return independent Queen copies so moves never mutate the source board."""
    return [Queen(queen.pos, queen.threats) for queen in queens]
##################################
# A* STUFF
class BoardNode():
    """An A* search node: a full board state plus its f = g + h cost."""

    # no. of steps default to 0 representing the initial (start) state
    def __init__(self, queens: "List[Queen]" = None, steps: int = 0):
        # Board encoding: list index is the column, queens[col].pos is the
        # row of the queen in that column.  Example: [2, 0, 1] puts queens
        # at (row 2, col 0), (row 0, col 1), (row 1, col 2).
        self.queens = queens
        # h: total number of threats across the whole board
        self.total_threats = sum(queen.threats for queen in queens)
        # g: number of vertical squares a queen moved to reach this state
        self.steps = steps
        # f = g + h
        self.cost = self.steps + self.total_threats

    def __eq__(self, o: object) -> bool:
        # BUG FIX: the original read `o.queens` unconditionally and raised
        # AttributeError for non-BoardNode operands.
        if not isinstance(o, BoardNode):
            return NotImplemented
        return self.queens == o.queens

    def __str__(self) -> str:
        # Avoid shadowing the builtin `str` (the original used it as a local).
        positions = ''.join(f'{q.pos}, ' for q in self.queens)
        return f'BoardNode({self.cost} = {self.steps} + {self.total_threats}, queens: [{positions}])'
def generate_states(queensList: List[Queen]) -> List[BoardNode]:
    """Enumerate every board reachable by sliding one queen vertically.

    For each column, the queen is slid one square at a time upward and
    then downward from its current row, and a BoardNode is recorded for
    every intermediate board.  Each node's cost is f = g + h, where g is
    the number of squares moved and h the board's total threat count.
    The resulting list is sorted by cost in descending order so that the
    cheapest state sits at the end (ready for ``pop()``).
    """
    size = len(queensList)
    possible_states: List[BoardNode] = []

    def slide(column: int, direction: int) -> None:
        # Slide the queen in `column` square by square in `direction`
        # (+1 = toward higher rows, -1 = toward row 0), recording every
        # intermediate board it passes through.
        board = copy_queens(queensList)
        if direction > 0:
            moves = size - 1 - board[column].pos
        else:
            moves = board[column].pos
        steps = 0
        for _ in range(moves):
            board[column].pos += direction
            steps += 1
            update_threats(board, calc_threats(board))
            possible_states.append(BoardNode(copy_queens(board), steps))

    # Upward moves for every column first, then downward moves, matching
    # the original generation order (sorting is stable).
    for column in range(size):
        slide(column, +1)
    for column in range(size):
        slide(column, -1)

    possible_states.sort(reverse=True, key=lambda node: node.cost)
    return possible_states
def rand_initial_state(N: int) -> BoardNode:
    """Create a random start board: one queen per column in a random row."""
    queens: List[Queen] = [Queen(random.randint(0, N - 1)) for _ in range(N)]
    update_threats(queens, calc_threats(queens))
    return BoardNode(queens)
def a_star(state: BoardNode, visited_states: List[BoardNode], steps_count):
    """Perform one A* expansion step from `state`.

    Generates every reachable board, picks the cheapest unvisited one
    (f = steps + threats), appends it to `visited_states`, and bumps
    `steps_count[0]` (a one-element list used as an out-parameter).
    A state with zero threats is the goal; reaching it clears the
    visited list and returns immediately.

    NOTE(review): if the cheapest states are all already visited,
    `states.pop()` can exhaust the list and `states[-1]` below can raise
    IndexError — confirm this cannot occur for the board sizes used.
    """
    # generate possible next moves/states
    states: List[BoardNode] = generate_states(state.queens)
    # get the move/state with lowest cost
    next_state: BoardNode = states.pop()
    # if the popped state and the one before it has equal cost (f),
    # check if the one before it has lower threats (h), if yes choose it.
    if next_state.cost == states[-1].cost:
        if states[-1].total_threats < next_state.total_threats:
            next_state = states.pop()
    # check if the goal state has been reached.
    # the goal states is defined by the threats (h) being 0
    if next_state.total_threats == 0:
        visited_states.clear()
        print('HOLAAA')
        print(f'final state: {next_state}')
        steps_count[0] += 1
        return next_state
    # check if the popped state has already been visited before
    # if yes, get the next possible state/move, and repeat.
    # (resetting i to 0 restarts the scan against the full visited list
    # every time a replacement candidate is popped)
    i = 0
    while i < len(visited_states):
        if next_state == visited_states[i]:
            if (len(states) > 0):
                next_state = states.pop()
                i = 0
                continue
        i += 1
    steps_count[0] += 1
    visited_states.append(next_state)
    return next_state
##################################
# GENETIC STUFF
# Probability that each gene (queen position) is randomised in a child.
MUTATE_RATE: float = 0.05
# Probability that a child is produced by crossover rather than cloning a parent.
CROSSOVER_RATE: float = 1.0
# Whether crossover uses multiple cut points (single-point when False).
MULTIPOINT: bool = False
class Solution():
    """A GA chromosome: queen positions are the genes, total threats the fitness."""

    def __init__(self, queens: List[Queen]):
        # Genes: one Queen per column; queen.pos is the gene value.  The
        # Queen object merely wraps a position and the threats on it.
        self.queens = queens
        # Fitness: summed threats over every queen — lower is better,
        # 0 means the board is solved.
        self.total_threats = sum(queen.threats for queen in queens)

    def __str__(self) -> str:
        genes = ''.join(f'{q.pos}, ' for q in self.queens)
        return f'Solution(fitness: {self.total_threats}, queens: [{genes}])'
# creates a random solution (random queen positions)
def create_solution(N) -> Solution:
    """Build a random chromosome: one queen per column with a random row gene."""
    queens: List[Queen] = [Queen(random.randint(0, N - 1)) for _ in range(N)]
    update_threats(queens, calc_threats(queens))
    return Solution(queens)
# returns a mutated gene (a new position for a queen)
def mutated_gene(N: int) -> int:
    """Draw a fresh random row index in [0, N-1] for a mutated queen gene."""
    low, high = 0, N - 1
    return random.randint(low, high)
# where the magic happens,
# depending on the passe paras it will crossover and mutate to produce a new solution out of the two passed solutions.
def mate(parent1: Solution, parent2: Solution, mutate_rate: float = MUTATE_RATE, multipoint: bool = MULTIPOINT, crossover_rate: float = CROSSOVER_RATE) -> Solution:
    """Produce one child from two parents via crossover and mutation.

    With probability `crossover_rate` the child comes from `crossover`;
    otherwise it is a copy of the fitter parent.  Each of the child's
    genes is then re-randomised with probability `mutate_rate`.

    :return: the (possibly mutated) child Solution.
    """
    child: Solution = None
    prob = random.random()
    if prob < crossover_rate:
        child = crossover(parent1, parent2, multipoint)
    else:
        fitter = parent1 if parent1.total_threats < parent2.total_threats else parent2
        # BUG FIX: the original aliased the parent itself, so the mutation
        # loop below corrupted individuals still in the population
        # (including elites carried over unchanged).  Copy instead.
        child = Solution(copy_queens(fitter.queens))
    mutated = False
    for queen in child.queens:
        prob = random.random()
        if prob < mutate_rate:
            queen.pos = mutated_gene(len(child.queens))
            mutated = True
    if mutated:
        # BUG FIX: refresh the threat counts so the child's fitness
        # reflects its post-mutation genes instead of stale parent data.
        update_threats(child.queens, calc_threats(child.queens))
        child.total_threats = sum(q.threats for q in child.queens)
    return child
# takes two solutions and crosses them over on a random point,
# this produces to children, the fittest is returned.
def crossover(parent1: Solution, parent2: Solution, multipoint: bool = False) -> Solution:
    """Cross two parents over and return the fitter of the two children.

    Single-point (default): genes are swapped after one random cut.
    Multipoint: genes between two random cuts are swapped.

    BUG FIX: when ``multipoint=True`` the original function had no code
    path at all and silently returned None; a two-point crossover is now
    implemented.  The single-point path is unchanged.
    """
    if multipoint:
        a = random.randint(0, len(parent1.queens) - 1)
        b = random.randint(0, len(parent1.queens) - 1)
        a, b = min(a, b), max(a, b)
        queens1: List[Queen] = copy_queens(
            parent1.queens[:a] + parent2.queens[a:b] + parent1.queens[b:])
        queens2: List[Queen] = copy_queens(
            parent2.queens[:a] + parent1.queens[a:b] + parent2.queens[b:])
    else:
        point: int = random.randint(0, len(parent1.queens) - 1)
        queens1: List[Queen] = copy_queens(
            parent1.queens[:point] + parent2.queens[point:])
        queens2: List[Queen] = copy_queens(
            parent2.queens[:point] + parent1.queens[point:])
    new_threats = calc_threats(queens1)
    update_threats(queens1, new_threats)
    new_threats = calc_threats(queens2)
    update_threats(queens2, new_threats)
    child1: Solution = Solution(queens1)
    child2: Solution = Solution(queens2)
    return child1 if child1.total_threats < child2.total_threats else child2
def genetic(N: int, population_size: int, generations: int, elitism: bool = True, mutate_rate: float = MUTATE_RATE, multipoint: bool = MULTIPOINT, crossover_rate: float = CROSSOVER_RATE, generation_count=[0]) -> Solution:
    """Solve N-queens with a genetic algorithm.

    Evolves `population_size` random Solutions for at most `generations`
    rounds and returns the fittest one found (fitness 0 = solved).
    `generation_count` is a one-element list out-parameter that receives
    the number of generations used.  NOTE(review): it is a mutable
    default argument and is therefore shared between calls that do not
    pass their own list.

    NOTE(review): with elitism=False only 90% of the population is
    regenerated each round, so the population shrinks by 10% per
    generation — confirm that is intended.
    """
    generation: int = 1
    solved: bool = False
    population: List[Solution] = []
    for _ in range(population_size):
        population.append(create_solution(N))
    while (generation <= generations) & (not solved):  # bitwise & on bools acts like `and` here
        # sort the population based on fitness (threats)
        population.sort(key=lambda solution: solution.total_threats)
        if population[0].total_threats == 0:
            solved = True
            print('Hola FOUND ITTTT')
            print(population[0])
            generation_count[0] = generation
            return population[0]
        new_generation: List[Solution] = []
        if elitism:
            # pass the top 10% solutions to the next generation
            top_ten = int((10 * population_size) / 100)
            new_generation.extend(population[:top_ten])
        # pick and mate parents for the next genration randomly from the top 50%
        top_fifty = int((50 * population_size) / 100)
        for _ in range(int((90 * population_size) / 100)):
            parent1 = random.choice(population[:top_fifty])
            parent2 = random.choice(population[:top_fifty])
            child = mate(parent1, parent2, mutate_rate,
                         multipoint, crossover_rate)
            new_generation.append(child)
        population = new_generation
        # print(f'gen: {generation}, {population[0]}')
        generation += 1
    # Generation budget exhausted: report the best solution found so far.
    generation_count[0] = generation
    population.sort(key=lambda solution: solution.total_threats)
    return population[0]
# print(genetic(8, 100, 100))
| Just-Hussain/n-queen | nqueen.py | nqueen.py | py | 12,806 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number"... |
28493766698 | import time
import os
import numpy as np
import pyaudio
import tensorflow as tf
import speech_recognition as sr
from datetime import datetime
import wave
import threading
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from ThreeCharacterClassicInference import ThreeCharacterClassicInference
from tts import text_to_speech
# Set seeds for reproducibility
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
# Audio capture constants
FRAMES_PER_BUFFER = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
TAILING_DURATION = 1.5 # Tailing audio duration in seconds
KEYWORD = "你好"
# Global variables
# NOTE(review): this flag appears to be written nowhere in this module — likely dead.
stop_plotting_thread = False
# Load the wake-word TFLite model once at import time; the interpreter and its
# tensor handles are shared by predict_mic().
interpreter = tf.lite.Interpreter(model_path="hey_ego_44100_obama_5.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Three Character Classic continuation model used by three_char_classic_reply().
three_char_classic_model = ThreeCharacterClassicInference(
    model_path="3character.tflite", dictionary_path="3character_dict.pickle"
)
def get_spectrogram(waveform):
    """Convert a 1-D audio waveform into a magnitude STFT spectrogram.

    The waveform is truncated/zero-padded to exactly 66150 samples
    (1.5 s at 44.1 kHz — NOTE(review): mic capture RATE here is 16000 Hz,
    so 1.5 s of mic audio is only 24000 samples; confirm the model was
    trained on 44.1 kHz input as the model filename suggests).

    :param waveform: 1-D numeric tensor/array of samples (the caller
        scales int16 PCM into [-1, 1) first).
    :return: float32 tensor of shape (frames, bins, 1).
    """
    input_len = 66150
    waveform = waveform[:input_len]
    # Pad the tail with zeros so every input has an identical length.
    zero_padding = tf.zeros([input_len] - tf.shape(waveform), dtype=tf.float32)
    waveform = tf.cast(waveform, dtype=tf.float32)
    equal_length = tf.concat([waveform, zero_padding], 0)
    # 512-sample windows with 50% overlap (step 256).
    spectrogram = tf.signal.stft(equal_length, frame_length=512, frame_step=256)
    spectrogram = tf.abs(spectrogram)
    # Trailing channel axis for the convolutional model.
    spectrogram = spectrogram[..., tf.newaxis]
    return spectrogram
def preprocess_audiobuffer(waveform):
    """Scale int16 PCM to [-1, 1) and convert it to a batched spectrogram.

    :param waveform: int16 numpy array of raw PCM samples.
    :return: float32 tensor of shape (1, frames, bins, 1), ready to be
        fed to the TFLite interpreter.
    """
    # int16 full-scale is 32768, so this maps samples into [-1, 1).
    waveform = waveform / 32768
    waveform = tf.convert_to_tensor(waveform, dtype=tf.float32)
    spectogram = get_spectrogram(waveform)
    # Add the batch dimension expected by the model.
    spectogram = tf.expand_dims(spectogram, 0)
    return spectogram
def predict_mic(audio):
    """Run the wake-word TFLite model on an audio buffer.

    Uses the module-level ``interpreter`` / ``input_details`` /
    ``output_details`` loaded at import time, and prints the raw
    probabilities plus timing for debugging.

    :param audio: int16 numpy array of PCM samples.
    :return: integer index of the most probable class (the caller treats
        0 as the wake word).
    """
    start = time.time()
    spec = preprocess_audiobuffer(audio)
    interpreter.set_tensor(input_details[0]["index"], spec)
    interpreter.invoke()
    # Logits -> probabilities; argmax over the class axis picks the winner.
    prediction = tf.nn.softmax(interpreter.get_tensor(output_details[0]["index"]))
    label_pred = np.argmax(prediction, axis=1)
    time_taken = time.time() - start
    print(prediction)
    print(label_pred)
    print(f"Predicted in: {time_taken}")
    return label_pred[0]
def save_audio_to_wav(audio_buffer, output_folder=None, rate=44100):
    """Save the audio buffer to a mono channel WAV file with a unique name.

    :param audio_buffer: int16 numpy array of PCM samples.
    :param output_folder: destination directory (defaults to the CWD);
        created if missing.
    :param rate: sample rate written to the WAV header.  NOTE(review):
        defaults to 44100 while capture uses RATE=16000 — pass rate=RATE
        for correctly pitched playback.
    :return: full path of the written file.
    """
    output_folder = output_folder or os.getcwd()
    os.makedirs(output_folder, exist_ok=True)
    # Generate a unique name for the WAV file (timestamped to the second).
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    file_name = f"audio_{timestamp}.wav"
    output_file = os.path.join(output_folder, file_name)
    # Open a WAV file for writing
    with wave.open(output_file, "wb") as wav_file:
        wav_file.setnchannels(1)  # Mono channel
        wav_file.setsampwidth(
            pyaudio.get_sample_size(pyaudio.paInt16)
        )  # 16-bit samples
        wav_file.setframerate(rate)  # Set the frame rate
        wav_file.writeframes(audio_buffer.tobytes())  # Write the audio buffer data
    return output_file
# def plot_spectrogram(audio_buffer, spectrogram_func, stop_event):
# while not stop_event.is_set():
# # Set up the initial plot
# fig, ax = plt.subplots()
# spec = spectrogram_func(audio_buffer)
# im = ax.imshow(
# spec,
# aspect="auto",
# origin="lower",
# cmap="viridis",
# vmin=0.0,
# vmax=1.0,
# )
# ax.set_xlabel("Time")
# ax.set_ylabel("Frequency")
# plt.colorbar(im, ax=ax)
# ax.set_title("Spectrogram")
# # Add a text element to display the update frequency
# freq_text = ax.text(
# 0.01, 0.95, "", transform=ax.transAxes, fontsize=10, color="white"
# )
# # Update function for the plot
# def update(frame):
# nonlocal audio_buffer
# start_time = time.time()
# spec = spectrogram_func(audio_buffer)
# im.set_data(spec)
# im.set_clim(vmin=0.0, vmax=1.0)
# # Calculate and display the update frequency
# end_time = time.time()
# update_freq = 1 / (end_time - start_time)
# freq_text.set_text(f"{update_freq:.2f} fps")
# return [im, freq_text]
# # Create the animation
# ani = FuncAnimation(fig, update, blit=True, interval=RATE / FRAMES_PER_BUFFER)
# # Show the plot
# plt.show()
# if stop_event.is_set():
# plt.close()
# break
def plot_spectrogram(audio_buffer, spectrogram_func, stop_event):
    """Live-plot the spectrogram of `audio_buffer` until `stop_event` fires.

    Intended to run on a daemon thread: `audio_buffer` is shared with the
    recording loop, which overwrites it in place, so each animation frame
    shows the latest audio.  Closing the plot window (or setting
    `stop_event`) ends the loop.

    NOTE(review): Matplotlib GUI calls from a non-main thread are
    backend-dependent — confirm the chosen backend tolerates this.
    """
    # Initialize the stop_event flag (list so the closure can mutate it).
    stopped = [False]
    def handle_close(evt):
        stopped[0] = True
    while not stop_event.is_set():
        # Set up the initial plot
        fig, ax = plt.subplots()
        spec = spectrogram_func(audio_buffer)
        im = ax.imshow(
            spec,
            aspect="auto",
            origin="lower",
            cmap="viridis",
            vmin=0.0,
            vmax=1.0,
        )
        ax.set_xlabel("Time")
        ax.set_ylabel("Frequency")
        plt.colorbar(im, ax=ax)
        ax.set_title("Spectrogram")
        # Add a text element to display the update frequency
        freq_text = ax.text(
            0.01, 0.95, "", transform=ax.transAxes, fontsize=10, color="white"
        )
        # Update function for the plot
        def update(frame):
            nonlocal audio_buffer
            start_time = time.time()
            spec = spectrogram_func(audio_buffer)
            im.set_data(spec)
            im.set_clim(vmin=0.0, vmax=1.0)
            # Calculate and display the update frequency
            end_time = time.time()
            update_freq = 1 / (end_time - start_time)
            freq_text.set_text(f"{update_freq:.2f} fps")
            if stopped[0]:
                return []
            return [im, freq_text]
        # Create the animation (keep the reference: a garbage-collected
        # FuncAnimation stops animating).
        ani = FuncAnimation(fig, update, blit=True, interval=RATE / FRAMES_PER_BUFFER)
        # Connect the event handler
        fig.canvas.mpl_connect("close_event", handle_close)
        # Show the plot
        plt.show(block=False)
        # Pump GUI events until either side asks us to stop.
        while not stop_event.is_set() and not stopped[0]:
            fig.canvas.flush_events()
            time.sleep(0.1)
        if stop_event.is_set() or stopped[0]:
            plt.close()
            break
def record_and_detect_keyword():
    """Record from the microphone until the wake word is confirmed.

    Keeps a rolling buffer of the last TAILING_DURATION seconds of audio.
    Each chunk is first screened by the local TFLite wake-word model;
    when that fires, Google Speech Recognition transcribes the buffer and
    the function returns once KEYWORD appears in the transcript.

    NOTE(review): the PyAudio stream is never stop_stream()/close()d
    before p.terminate(), and the broad `except Exception` swallows all
    errors after printing them — confirm both are acceptable.
    """
    global stop_plotting_thread
    audio_buffer = np.zeros(int(TAILING_DURATION * RATE), dtype=np.int16)
    p = pyaudio.PyAudio()
    stream = p.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        frames_per_buffer=FRAMES_PER_BUFFER,
    )
    stop_event = threading.Event()
    try:
        # Live spectrogram view runs on a daemon thread sharing audio_buffer.
        plot_thread = threading.Thread(
            target=plot_spectrogram,
            args=(
                audio_buffer,
                lambda buf: preprocess_audiobuffer(buf).numpy().squeeze(),
                stop_event,
            ),
            daemon=True,
        )
        plot_thread.start()
        is_awake = False
        recognizer = sr.Recognizer()
        while True and not is_awake:
            data = stream.read(FRAMES_PER_BUFFER)
            new_audio = np.frombuffer(data, dtype=np.int16)
            # Update the audio buffer (shift left, append the new chunk).
            audio_buffer[:-FRAMES_PER_BUFFER] = audio_buffer[FRAMES_PER_BUFFER:]
            audio_buffer[-FRAMES_PER_BUFFER:] = new_audio
            # Save the audio buffer to a WAV file
            # output_file = save_audio_to_wav(
            #     audio_buffer, output_folder="recorded_audio"
            # )
            # Predict using the tailing audio data
            if not is_awake:
                result = predict_mic(audio_buffer)
                if result == 0:
                    print(f"Obama model detected {KEYWORD}")
                    # is_awake = True
                    # Second-stage check: ask Google SR to confirm the keyword.
                    audio_data = sr.AudioData(
                        audio_buffer.tobytes(), sample_rate=RATE, sample_width=2
                    )
                    try:
                        text = recognizer.recognize_google(audio_data, language="zh-CN")
                        print("You said: ", text)
                        if KEYWORD in text:
                            is_awake = True
                    except sr.UnknownValueError:
                        print("Google Speech Recognition could not understand audio")
                    except sr.RequestError as e:
                        print(
                            "Could not request results from Google Speech Recognition service; {0}".format(
                                e
                            )
                        )
            if is_awake:
                p.terminate()
                stop_event.set()
    except Exception as e:
        print(e)
        p.terminate()
        stop_event.set()
def three_char_classic_reply():
    """Listen for a Three Character Classic line and speak the continuation.

    Maintains a rolling 5-second buffer that grows by ~2 s of audio per
    loop.  Each transcript of at least 3 characters (and different from
    the previous one) is fed to the continuation model, and the predicted
    next line is spoken via TTS.  Returns after 10 s of idle time,
    saying good night first.

    NOTE(review): the PyAudio instance and stream are never closed before
    returning — confirm this is acceptable for repeated calls.
    """
    previous_sr_text = ""
    audio_buffer = np.zeros(int(5 * RATE), dtype=np.int16)
    p = pyaudio.PyAudio()
    stream = p.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        frames_per_buffer=FRAMES_PER_BUFFER,
    )
    recognizer = sr.Recognizer()
    # Number of reads making up ~2 seconds of audio per outer iteration.
    buffers_per_second = int(RATE / FRAMES_PER_BUFFER) * 2
    idel_start_time = time.time()
    while True:
        audio_data = np.empty((buffers_per_second, FRAMES_PER_BUFFER), dtype=np.int16)
        for i in range(buffers_per_second):
            audio_data[i] = np.frombuffer(
                stream.read(FRAMES_PER_BUFFER), dtype=np.int16
            )
        audio_data = audio_data.flatten()
        # Roll the 5-second window: shift left, append the fresh audio.
        audio_buffer[: -audio_data.shape[0]] = audio_buffer[audio_data.shape[0] :]
        audio_buffer[-audio_data.shape[0] :] = audio_data
        audio_data = sr.AudioData(
            audio_buffer.tobytes(), sample_rate=RATE, sample_width=2
        )
        try:
            text = recognizer.recognize_google(audio_data, language="zh-CN")
            print("You said: ", text)
            # Only react to new utterances of 3+ characters; the model
            # continues from the first three characters heard.
            if len(text) >= 3 and text != previous_sr_text:
                previous_sr_text = text
                reply = three_char_classic_model.predict_next_3(text[0:3])
                text_to_speech(reply, "zh")
                print(f"Model reply: {reply}")
                idel_start_time = time.time()
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print(
                "Could not request results from Google Speech Recognition service; {0}".format(
                    e
                )
            )
        # Give up after 10 seconds without a successful exchange.
        if (time.time() - idel_start_time) > 10:
            text_to_speech("晚安寶貝兒", "zh")
            return
if __name__ == "__main__":
text_to_speech("開始了", "zh")
while True:
print("start...")
record_and_detect_keyword()
print("awake...")
print("Your three char classic?..")
text_to_speech("你好呀。 請讀出你的三字經典三連音。", "zh")
three_char_classic_reply()
| charis2324/SoundCube | src/main.py | main.py | py | 11,703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.random.set_seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.random",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "n... |
43361270196 | from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from api.utils import paginate
from message.models import Message
from message.serializer import MessageSerializer, MessageUserSerializer
from people.models import Friend
from user.models import User
from django.db.models import Q
from django.utils import timezone
@api_view(["POST"])
@permission_classes([IsAuthenticated])
def send_message(request:Request, pk):
receiver=get_object_or_404(User, pk=pk)
user = request.user
message = request.data.get("message")
if not message:
return Response("Provide message")
message = Message.objects.create(
sender=user,
receiver=receiver,
content=message
)
return Response(MessageSerializer(message).data)
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def get_message(request, pk):
user = get_object_or_404(User, pk=pk)
messages=Message.objects.filter(Q(
Q(sender=user, receiver=request.user)|
Q(sender=request.user, receiver=user))).order_by("-dateSend")
paginated = paginate(messages, request, MessageSerializer)
return Response(paginated)
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def get_user_last_message(request:Request):
user = request.user
friends = Friend.objects.filter(user1=user)
last_message=[]
for friend in friends:
message = Message.objects.filter(Q(
Q(sender=user, receiver=friend.user2)|
Q(sender=friend.user2, receiver=user)
)).order_by("-dateSend").first()
if not message:
last_message.append({
"friend":friend.user2,
"content":"You can send message to your friend",
"dateSend":timezone.now(),
"lastSenderMe":False
})
else:
last_message.append({
"friend":friend.user2,
"content":message.content,
"dateSend":message.dateSend,
"lastSenderMe":message.sender.id==user.id
})
paginated = paginate(last_message, request, MessageUserSerializer)
return Response(paginated) | Hosea2003/meet-alumni-back | message/views.py | views.py | py | 2,371 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rest_framework.request.Request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "user.models.User",
"line_number": 18,
"usage_type": "argument"
},
{... |
8926252017 | ##############################################################################################
# File: diceSim.py
# Author: Sam Wareing
# Description: script to determine probability of dice rolls using monte carlo simulation
#
#
#
##############################################################################################
import sys
from random import randint
from collections import Counter
def simulateDiceRolls(dice, num_simulations):
    """Estimate the distribution of dice-sum outcomes by Monte Carlo.

    Rolls every die in `dice` (each entry is a side count) for
    `num_simulations` trials and prints the estimated probability of
    each possible total.
    """
    counts = Counter(
        sum(randint(1, sides) for sides in dice)
        for _ in range(num_simulations)
    )
    print("\nOUTCOME\tPROBABILITY")
    lowest, highest = len(dice), sum(dice)
    for outcome in range(lowest, highest + 1):
        percentage = counts[outcome] * 100 / num_simulations
        print('{}\t{:0.2f}%'.format(outcome, percentage))
def usage():
    """Print the command-line usage hint for this script."""
    print("diceSim.py # # #....")
if __name__ == "__main__":
print("let's sim some dice")
if len(sys.argv) < 2:
usage()
exit()
num_simulations = input("How many simulations? press enter for default 1000000 ")
if num_simulations == "":
num_simulations = 1000000
else:
num_simulations = int(num_simulations)
n = len(sys.argv)
dice = [int(sys.argv[i]) for i in range(1, n)]
simulateDiceRolls(dice, num_simulations)
| sjwar455/PythonCodeChallenges | diceSim.py | diceSim.py | py | 1,183 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line... |
28678725652 | from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.firefox import GeckoDriverManager
browser = webdriver.Firefox(executable_path=GeckoDriverManager().install())

# Redmine issue ids to export.
codigos = ['22316', '21897', '22469', '22192', '22567', '22153', '21778', '22281', '22941', '22882', '21603', '22740', '21587', '23009', '22500', '22424',
           '23167', '22423', '22816', '23103', '22592', '23317', '23067', '22804']

url = 'http://redmine.rs.dbseller.com.br/issues/'

try:
    # BUG FIX: the output file was never closed; `with` guarantees it is
    # flushed and closed even if scraping raises partway through.
    # (Also removed three unused list variables from the original.)
    with open('redmines.txt', 'w') as report:
        for codigo in codigos:
            browser.get(url + codigo)
            # The page title carries the issue id and subject line.
            report.write(browser.title)
            report.write("\n\n")
            # The second "wiki" element on the page holds the description.
            descricao = browser.find_elements(By.CLASS_NAME, "wiki")
            resultado = descricao[1].find_elements(By.TAG_NAME, "p")
            for paragrafo in resultado:
                linha = paragrafo.get_attribute('innerHTML')
                linha = linha.replace('<br>', '')
                report.write(linha + "\n")
            report.write("\n\n")
            report.write("\n\n")
finally:
    # BUG FIX: always release the browser process, even on failure.
    browser.quit()
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "webdriver_manager.firefox.GeckoDriverManager",
"line_number": 5,
"usage_type": "call"
},
... |
626133283 |
import os
from collections import OrderedDict
import tensorflow as tf
class KBest(tf.keras.callbacks.Callback):
    """
    A subclass of the callback preset class which implements the functionality to keep only the best k checkpoints of
    the execution (instead of the best one implemented in Tf).

    Attributes
    ----------
    output_path: str
        Path where the checkpoints are saved
    files_loss: dict
        Dictionary mapping each saved checkpoint file name to its loss
    k: int
        Number of checkpoints to keep (None disables pruning)

    Methods:
    ----------
    on_epoch_end(self, epoch, logs=None)
        At the end of each epoch, delete the worst surplus checkpoints (if any)
    """

    def __init__(self, output_path, k=None):
        """
        Parameters
        ----------
        output_path: str
            Path where the checkpoints are saved
        k: int
            Number of checkpoints to keep
        """
        super().__init__()
        self.output_path = output_path
        self.files_loss = {}
        self.k = k

    def on_epoch_end(self, epoch, logs=None):
        """
        Parameters
        ----------
        epoch: int
            Epoch number
        logs: dict
            Dictionary with the information of the current epoch
        """
        if logs is None:
            logs = {}
        if self.k is None:
            # Pruning disabled: keep every checkpoint.
            return
        loss = logs["loss"]
        # BUG FIX: the original formatted ``self.epoch``, an attribute
        # Keras callbacks never define (AttributeError at runtime); the
        # epoch number is the ``epoch`` argument.
        name = "weights." + str("{:02d}".format(epoch)) + '-' + str("{:.2f}".format(loss)) + '.hdf5'
        self.files_loss[name] = loss
        if len(self.files_loss) >= self.k:
            # Sort by loss in decreasing order so the worst files come first.
            d_descending = OrderedDict(sorted(self.files_loss.items(), key=lambda kv: kv[1], reverse=True))
            n = len(d_descending)
            # Delete the (n - k) worst checkpoints.
            num_deletions = n - self.k
            file_delete = list(d_descending.items())[0:num_deletions]
            for name, _ in file_delete:
                path = self.output_path + '/ckpt/' + name
                os.remove(path)
                del self.files_loss[name]
| BNN-UPC/ignnition | ignnition/custom_callbacks.py | custom_callbacks.py | py | 2,203 | python | en | code | 40 | github-code | 36 | [
{
"api_name": "tensorflow.keras",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "collections.OrderedDict",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 69,
"usage_type": "call"
}
] |
17736050506 | import re
import requests
import json
import osascript
import hashlib
from bs4 import BeautifulSoup
def main():
    """Find and print lyrics for the track currently playing on this Mac.

    Reads player (Spotify/Music) and browser (YouTube tab) state via
    AppleScript, picks whichever is playing, searches the Genius API for
    the track, and prints the scraped lyrics (or a music quote when no
    match is found).
    """
    # Get browser and player data via AppleScript
    code, output, err = getBrowserAndPlayerData()
    # print(output,err)
    current_data = output.split(', ')
    # Separate output: first four fields are the player, the rest the browser.
    player_data = current_data[0:4]
    browser_data = current_data[4:]
    # Process player and browser data
    player_type, player_artist, player_song, player_state = processPlayerData(
        player_data)
    browser_type, browser_artist, browser_song, browser_state = processBrowserData(
        browser_data)
    # Determine priority, player or browser
    priority = (playerOrBrowser(
        player_type, player_state, browser_type, browser_state))
    # print(priority)
    if priority == "player":
        artist = player_artist
        song = player_song
    elif priority == "browser":
        artist = browser_artist
        song = browser_song
    else:
        # Nothing is playing anywhere: nothing to look up.
        return
    # Remove extra information from title
    song = cleanSong(song)
    artist_1, artist_2 = multipleArtistCheck(artist)
    # Prepare array of artists (full name plus each half of an "A & B" pair)
    artist_array = [artist, artist_1, artist_2]
    # print('\nPlayer Full Artist: ' + player_artist + '\nPlayer Artist 1: ' + player_artist_1 + '\nPlayer Artist 2: ' + player_artist_2 + '\nPlayer Song: ' + player_song)
    # Access Genius API 'https://docs.genius.com'
    # NOTE(review): API token committed in source — move to an env var.
    accesstoken = 'ORYExHGED-rUDNu6wEqCt42NCg9nFuBiCiVKAYkjSrS6aQ1RHdyyjp5gl7GlpXZH'
    headers = {'Authorization': 'Bearer ' + accesstoken, 'User-Agent': 'Kashi',
               'Accept': 'application/json', 'Host': 'api.genius.com'}
    params = {'q': artist + ' ' + song}
    hits = requests.get('https://api.genius.com/search',
                        params=params, headers=headers).json()['response']['hits']
    # for hit in hits:
    #     print ("Artist: " + hit['result']['primary_artist']['name'] + "\nSong: " + hit['result']['full_title'])
    hitcount = 0
    if len(hits) > 0:
        # Get info from top search hit that contains player artist
        while hitcount < len(hits) - 1 and not any([x in hits[hitcount]['result']['primary_artist']['name'].lower() for x in artist_array]):
            hitcount += 1  # Go to next hit
        genius_artist = hits[hitcount]['result']['primary_artist']['name'].lower(
        )
        genius_song = hits[hitcount]['result']['full_title'].lower()
        genius_url = hits[hitcount]['result']['url']
        # print('\nGenius Artist: ' + genius_artist + '\nGenius Song: ' + genius_song + '\nGenius URL: ' + genius_url + '\n')
        if any([y in genius_artist for y in artist_array]):
            # Parse Genius HTML with BeautifulSoup and format lyrics
            lyrics = parseAndFormat(genius_url)
            # FINAL STEP: Print to touch bar
            print(lyrics)
        else:
            # Print music quote if lyrics not found
            printWisdom(song)
    else:
        printWisdom(song)
    return
def getBrowserAndPlayerData():
    """Query macOS apps for the current track and active browser tab.

    Runs one AppleScript that returns eight comma-separated fields:
    (player name, artist, title, player state, browser name, tab title),
    substituting "none" where an app is not running.

    :return: (code, output, err) tuple from ``osascript.run``.
    """
    applescript = '''
    on run
        if application "Spotify" is running then
            tell application "Spotify"
                set playerData to {"Spotify", artist of current track, name of current track, player state}
            end tell
        else if application "Music" is running then
            tell application "Music"
                set playerData to {"Music", artist of current track, name of current track, player state}
            end tell
        else
            set playerData to {"none", "none", "none", "none"}
        end if
        if (application "Google Chrome" is running) and (exists (front window of application "Google Chrome")) then
            tell application "Google Chrome"
                set browserData to {"Chrome", title of active tab of front window}
            end tell
        else if (application "Safari" is running) and (exists (front window of application "Safari")) then
            tell application "Safari"
                set browserData to {"Safari", name of current tab of front window}
            end tell
        else
            set browserData to {"none", "none"}
        end if
        set currentData to {playerData, browserData}
        return currentData
    end run
    '''
    return osascript.run(applescript, background=False)
def processBrowserData(browser_data):
    """Extract (browser, artist, song, state) from [browser_name, tab_title].

    A tab counts as "playing" only when it is a YouTube page whose title
    looks like "Artist - Song"; otherwise artist/song are empty and the
    state is "paused".  The " - YouTube" suffix is stripped off the title
    in place (mutating ``browser_data[1]``).
    """
    browser_name = browser_data[0]
    artist = ""
    song = ""
    state = 'paused'
    if " - YouTube" in browser_data[1]:
        # Drop the trailing " - YouTube" suffix (10 characters).
        browser_data[1] = browser_data[1][0:-10]
        if " - " in browser_data[1]:
            # Likely a music video: text before the first dash is the
            # artist, text after the last dash is the song.
            artist = re.search(
                r'^([^\-]+)', browser_data[1]).group(0).strip().lower()
            song = re.search(
                r'([^\-]+)$', browser_data[1]).group(0).strip().lower()
            state = 'playing'
    return browser_name, artist, song, state
def processPlayerData(player_data):
    """Normalize raw AppleScript player data into (type, artist, song, state).

    Artist/song/state are lower-cased; fields that AppleScript split on
    embedded commas are first re-joined by normalizeCommas.
    """
    engine = player_data[0]
    # Recombine artist or title that may have been split up if commas in title.
    player_data = normalizeCommas(engine, player_data)
    return engine, player_data[1].lower(), player_data[2].lower(), player_data[3].lower()
def playerOrBrowser(player_type, player_state, browser_type, browser_state):
    """Choose the lyrics source: a playing desktop player beats the browser.

    Returns "player", "browser", or None when neither is playing.
    player_type/browser_type are currently unused but kept so existing
    call sites keep working.
    """
    if player_state == "playing":
        return "player"
    if browser_state == "playing":
        return "browser"
    return None
def normalizeCommas(engine, player_data):
    """Re-join AppleScript list fields that were split on embedded commas.

    Music splits the artist field, Spotify the track-name field, so the
    merge position depends on the engine. Mutates and returns player_data.
    NOTE(review): merging stops once the list has 5 items — confirm against
    the caller whether the intended final length is 5 rather than 4.
    """
    # Index of the field to grow: artist for Music, song for Spotify.
    merge_at = 1 if engine == 'Music' else 2
    while len(player_data) > 5:
        player_data[merge_at] = player_data[merge_at] + ', ' + player_data[merge_at + 1]
        player_data.pop(merge_at + 1)
    return player_data
def cleanSong(songtitle):
    """Strip common title noise: " - ..." suffixes, (parentheses), [brackets].

    Patterns are applied in the original order; the dash rule runs first so
    remaster/version suffixes disappear before bracket stripping.
    """
    for pattern in (r' -.*$', r' \(.*\)', r' \[.*\]'):
        songtitle = re.sub(pattern, '', songtitle)
    return songtitle
def multipleArtistCheck(artist):
    """Split an "A & B" artist credit into its two halves.

    Returns ('n/a', 'n/a') when there is no ampersand. With several
    ampersands the first and last names are returned, mirroring the
    greedy regexes of the original implementation.
    """
    if '&' not in artist:
        return 'n/a', 'n/a'
    first = re.sub(r' \&.*$', '', artist)
    second = re.sub(r'^.*\& ', '', artist)
    return first, second
def parseAndFormat(url):
    """Fetch a Genius lyrics page and flatten the lyrics into one line.

    Downloads the page, pulls the text out of the 'lyrics' <div>, removes
    [section headers] and (parentheticals), then joins the lines with
    commas and tidies the resulting punctuation. The replace chain below
    is order-dependent — do not reorder it.
    """
    source_soup = BeautifulSoup(requests.get(
        url).text, 'html.parser')  # Parse HTML
    # Get text from the lyrics <div>
    # NOTE(review): assumes the legacy Genius page layout with a
    # class="lyrics" div — .find() returns None on the newer layout.
    lyricstext = source_soup.find('div', class_='lyrics').get_text()
    # Remove song sections in brackets
    lyricstext = re.sub(r'\[.*\n*.*\]', '', lyricstext).strip()
    # Remove parentheticals
    lyricstext = re.sub(r'\(.*\n*.*\)', '', lyricstext).strip()
    while '\n\n' in lyricstext:  # Line breaks, flatten, and replace
        lyricstext = lyricstext.replace('\n\n', '\n')
    # Join lines with ", ", then clean up punctuation artifacts the join
    # creates (e.g. "?," -> "?"); the doubled '...' pass is intentional.
    lyricstext = lyricstext.replace('\n', ', ').replace('?,', '?').replace('!,', '!').replace(' ,', ',').replace(
        ' .', '.').replace('.,', '.').replace(',.', '.').replace('...', '..').replace('...', '..').replace(' ', ' ')
    return lyricstext
def printWisdom(player_song):
    """Print a music quote chosen deterministically from *player_song*.

    The song title is hashed (SHA-224) so the same song always yields the
    same quote when the touch-bar script refires.
    """
    wisdom = [
        '\"Music expresses that which cannot be said and on which it is impossible to be silent.\" - Victor Hugo ',
        '\"If music be the food of love, play on.\" - William Shakespeare ',
        '\"Where words fail, music speaks.\" - Hans Christian Anderson ',
        '\"One good thing about music, when it hits you, you feel no pain.\" - Bob Marley ',
        '\"And those who were seen dancing were thought to be insane by those who could not hear the music.\" - Nietzsche ',
        '\"There is geometry in the humming of the strings, there is music in the spacing of the spheres.\" - Pythagoras ',
        '\"You are the music while the music lasts.\" - T. S. Eliot ',
        '\"After silence, that which comes nearest to expressing the inexpressible is music.\" - Aldous Huxley '
    ]
    # Hash songname for constant quote when script refires
    songhash = hashlib.sha224(player_song.encode('utf-8')).hexdigest()
    songhash_int = int(songhash, base=16)
    # FIX: map the hash uniformly onto the quote list. The previous
    # `% (len(wisdom) + 1) - 1` produced indices in [-1, len-1], so the
    # last quote (index -1 and index len-1) was picked twice as often.
    print(wisdom[songhash_int % len(wisdom)])
# Script entry point: inspect player/browser state and print lyrics or a quote.
if __name__ == '__main__':
    main()
| jimu-gh/Kashi | kashi.py | kashi.py | py | 8,841 | python | en | code | 50 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "osascript.run",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number":... |
14029690592 | from django import forms
from . models import Contact
class ContactForm(forms.ModelForm):
name = forms.CharField(label = "",widget = forms.TextInput(attrs={
'class':'form-control',
'placeholder' : 'Full Name',
'required' : 'required',
}))
email = forms.EmailField(label= '',widget = forms.EmailInput(attrs={
'class':'form-control',
'placeholder' : 'Email',
'required' : 'required',
}))
phone = forms.CharField(label='',widget = forms.TextInput(attrs={
'class':'form-control',
'placeholder' : 'Phone',
'required' : 'required',
}))
message = forms.CharField(label='',widget = forms.Textarea(attrs={
'class':'form-control',
'placeholder' : 'Message',
'required' : 'required',
}))
class Meta:
model = Contact
fields = ['name','email','phone','message']
labels = {
'name':'İsim Soyisim',
'email':'eposta',
} | IbrahimFarukInce/Django-Course-App | smartedu_con/pages/forms.py | forms.py | py | 996 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.f... |
72774382185 | import tkinter as tk
from tkinter import messagebox
from pymongo import MongoClient
from UserInterface.GlobalResources.GuiObjectsFactories import \
MessageBox, \
ImagedButtonWithText
from UserInterface.MainMenu.MonitoredLeagues.ManageMonitoredLeagues.ManageMonitoredLeagues import \
manage_monitored_leagues
from UserInterface.MainMenu.MonitoredLeagues.AddLeague.AddLeague import add_new_league
def mon_leagues_window(parent):
    """Open the full-screen "Manage Monitored Leagues" window.

    Destroys *parent* (if given), fetches the leagues collection from
    MongoDB, and builds a two-panel Tk window: a navigation panel on the
    left and an activities panel that the sub-menus populate.
    """
    # ------------------------------ Data handling functions ------------------------------------------
    def call_league_data():
        """
        Compile league data required for sub-menus
        """
        def retrieve_all_leagues():
            # NOTE(review): the connection string embeds a username/password in
            # plain text — move credentials to config/env vars and rotate them.
            client = MongoClient('mongodb+srv://RubenFerreira:TPVXAliOZt3OqFpk@11sixteen.zzyri.mongodb.net/test?')
            db = client['football_data']
            collection = db['leagues']
            return collection
        def collection_to_list(collection):
            # Materialize every document in the collection into a plain list.
            leagues_lst = []
            for document in collection.find():
                leagues_lst.append(document)
            return leagues_lst
        # --------- main DB call processes ------------
        collection = retrieve_all_leagues()
        all_leagues_list = collection_to_list(collection)
        return all_leagues_list
    # ---------------------- Window sub-menu initialisation functions --------------------------
    def manage_mon_lea():
        """Swap the activities panel to the 'manage monitored leagues' view."""
        # Clear activities panel
        for child in act_panel.winfo_children():
            child.destroy()
        # Send activities panel to function to populated
        manage_monitored_leagues(act_panel, leagues_list)
        act_panel.grid(column=1, row=0, padx=10, pady=10, sticky='NW')
    def add_new_lea():
        """Swap the activities panel to the 'add new league' view."""
        # Clear activities panel
        for child in act_panel.winfo_children():
            child.destroy()
        # Send activities panel to function to populated
        add_new_league(act_panel, leagues_list)
        act_panel.grid(column=1, row=0, padx=10, pady=10, sticky='NW')
    # ---------------------------------------- Window management --------------------------------
    def on_closing():
        """Confirm before closing; unsaved sub-menu changes are discarded."""
        MsgBox = messagebox.askquestion('Exit Window',
                                        f'Are you sure you want to close this window - Any unsaved changes will be '
                                        f'lost?',
                                        icon='question')
        if MsgBox == 'yes':
            mon_league_win.destroy()
    # ------------------------------------ Main processes -----------------------------------------------------
    if parent is not None:
        parent.destroy()
    # Window Setup
    mon_league_win = tk.Tk()
    mon_league_win.title("11Sixteen Database Management Controller - Manage Monitored Leagues")
    mon_league_win.geometry("%dx%d+0+0" % (mon_league_win.winfo_screenwidth(), mon_league_win.winfo_screenheight()))
    mon_league_win.protocol("WM_DELETE_WINDOW", on_closing)
    # ----------------- DB call -------------------------
    # Message user of delay (while gathering data from DB)
    message_box = MessageBox(mon_league_win)
    message_box.place(relx=0.5, rely=0.5, anchor='center')
    message_box.update_content(mon_league_win, "Collecting monitored league data - one moment")
    leagues_list = call_league_data()
    message_box.destroy()
    # Object creation
    nav_panel = tk.Frame(mon_league_win, borderwidth=1, highlightbackground="black", relief='solid')
    nav_messenger = MessageBox(nav_panel, "Sub-menu navigation panel", width=25, height=4,
                               wraplength=100, justify='center')
    act_panel = tk.Frame(mon_league_win, borderwidth=1, highlightbackground="black", relief='solid')
    # NOTE(review): the icon paths below are absolute, machine-specific
    # Windows paths — consider resolving them relative to the package.
    man_mon_lea_btn = ImagedButtonWithText(nav_panel,
                                           'C:\\Users\\rferreira\\GitHub\\11Sixteen\\UserInterface\\GlobalResources\\Images_Icons\\manage_monitored_leagues_icon.png',
                                           "LargeGroove", "Manage Monitored Leagues")
    add_lea_btn = ImagedButtonWithText(nav_panel,
                                       'C:\\Users\\rferreira\\GitHub\\11Sixteen\\UserInterface\\GlobalResources\\Images_Icons\\add_league.png',
                                       "LargeGroove", "Add New League")
    # Object binding
    man_mon_lea_btn.btn.config(command=manage_mon_lea)
    add_lea_btn.btn.config(command=add_new_lea)
    # Object placement
    nav_panel.grid(column=0, row=0, padx=10, pady=10, sticky='NW')
    nav_messenger.grid(column=0, row=0, padx=10, pady=10, sticky='N')
    act_panel.grid(column=1, row=0, padx=10, pady=10, sticky='NW')
    man_mon_lea_btn.frame.grid(column=0, row=1, padx=10, pady=10, sticky='N')
    add_lea_btn.frame.grid(column=0, row=2, padx=10, pady=10, sticky='N')
    # Main window mainloop
    mon_league_win.mainloop()
# Standalone entry point: open the window with no parent to destroy.
if __name__ == "__main__":
    mon_leagues_window(None)
| SigmaFireFox/SigmaFox | apps/eleven10ths/src/app/11sixteen-desktop-app/UserInterface/MainMenu/MonitoredLeagues/MonitoredLeagues.py | MonitoredLeagues.py | py | 4,954 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "UserInterface.MainMenu.MonitoredLeagues.ManageMonitoredLeagues.ManageMonitoredLeagues.manage_monitored_leagues",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "UserInterface.... |
15062690781 | from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
# Astra/Cassandra connection settings.
# NOTE(review): the secure-connect bundle path is machine-specific and the
# client ID/secret below are committed in plain text — move them to
# environment variables or a secrets store and rotate the credentials.
cloud_config= {
        'secure_connect_bundle': 'C:/Users/Damian/Documents/leren-programmeren/python/databasestuff/secure-connect-signup.zip'
}
auth_provider = PlainTextAuthProvider('ckjSgHZotmWyYFbJXRYYcYxU', 'FwJ1SjYdckK26ur43yzeZJQci5uvXzffDF1z31P+E-zBlQFNbNARf.pvEw8YA33A2Q1+XhJOxeq9Y1DqM4n1HK.,_mo2sZ1zTqlQnoeSdnKgm74Z,6BIR70+AKdN.k+J')
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
session = cluster.connect()
# Smoke test: query the cluster for its release version.
row = session.execute("select release_version from system.local").one()
if row:
    print(row[0])
else:
print("An error occurred.") | damianslavenburg/leren-programmeren | python/databasestuff/connect_database.py | connect_database.py | py | 668 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cassandra.auth.PlainTextAuthProvider",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cassandra.cluster.Cluster",
"line_number": 9,
"usage_type": "call"
}
] |
16830841390 | #!/usr/bin/env python
# coding: utf-8
# # Random forest DMS
#
# This script runs the random forest model on the data from the differences in fitness effects: deltaS_weak (S_weak - S_opt)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from IPython.display import HTML
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from statistics import mean
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from rfpimp import permutation_importances
from sklearn.base import clone
from sklearn import tree
import graphviz
from sklearn.tree import _tree
import sklearn
# Notebook-exported script: bare expressions (`dataset`, `df`, ...) and the
# get_ipython() magics below only have an effect when run under IPython.
sklearn.__version__
# Load the dataset of differences in fitness effects (deltaS, ara 0.2 vs 0.01).
dataset = pd.read_csv('../../Data/Complete_datasets/dataset_diffNorm_ara0.2_ara0.01_index_differences.txt', sep='\t')
dataset
# Remove stop codons and rows that are not present in the PDB structure
dataset = dataset[(dataset['Residue'] != '*') &
                  (pd.notna(dataset['Mean_ddG_stab_HET'])) &
                  (pd.notna(dataset['diffNormScore']))
                  ]
# Drop identifier/annotation columns so only model features remain.
df = dataset.drop(['Entropy', 'Position', 'WT_Residue', 'Residue', 'Arabinose',
                   'Secondary_structure', 'Solvent_accessibility', 'mean_sel_coeff',
                   'mean_sel_coeff_2', 'Arabinose_2'], axis=1)
df
X = df.drop(['diffNormScore'], axis=1)
Y = df['diffNormScore']
# Normalize all the features to the range of -1 , 1
X = X / X.max()
# Train a random forest model on an 80/20 train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=100)
model_rf = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=100)
model_rf.fit(X_train, y_train)
pred_train_rf = model_rf.predict(X_train)
print('Mean squared error (train):', np.sqrt(mean_squared_error(y_train, pred_train_rf)))
print('R2 score (train):', r2_score(y_train, pred_train_rf))
pred_test_rf = model_rf.predict(X_test)
print('Mean squared error (test):', np.sqrt(mean_squared_error(y_test, pred_test_rf)))
print('R2 score (test):', r2_score(y_test, pred_test_rf))
# Train the random forest again but adding a random variable: features whose
# importance falls below this pure-noise column are uninformative.
np.random.seed(100)
X['random_var'] = np.random.normal(loc=0, scale=1, size=X.shape[0])
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=100)
model_rf = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=100)
model_rf.fit(X_train, y_train)
pred_train_rf = model_rf.predict(X_train)
print('Mean squared error (train):', np.sqrt(mean_squared_error(y_train, pred_train_rf)))
print('R2 score (train):', r2_score(y_train, pred_train_rf))
pred_test_rf = model_rf.predict(X_test)
print('Mean squared error (test):', np.sqrt(mean_squared_error(y_test, pred_test_rf)))
print('R2 score (test):', r2_score(y_test, pred_test_rf))
# Use cross-validation on this preliminary model
cross_val_n = 5
print('Five-fold cross validation of the random forest model:')
# FIX: these two calls originally passed `scoring=r2`, but the `r2` helper is
# only defined further down the file, so a top-to-bottom run raised NameError.
# sklearn's built-in 'r2' scorer string computes the same r2_score.
cross_validations = cross_val_score(estimator=model_rf, X=X_train, y=y_train, cv=cross_val_n, scoring='r2')
print(cross_validations)
print(np.mean(cross_validations), np.std(cross_validations) / np.sqrt(cross_val_n))
print('------')
cross_validations = cross_val_score(estimator=model_rf, X=X_train, y=y_train, cv=5, scoring='r2')
print(cross_validations)
# Check cross-validation accuracy with out-of-fold predictions.
cross_validations_pred = cross_val_predict(estimator=model_rf, X=X_train, y=y_train, cv=5)
get_ipython().run_line_magic('matplotlib', 'inline')
# Scatterplot of the random forest cross-validation predictions.
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(cross_validations_pred, y_train)
ax.set_xlabel('Predicted deltaS (random forest, cross-validations)', fontsize=20)
ax.set_ylabel('Observed deltaS', fontsize=20)
cross_validations_pred[1, ]
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Figures for the accuracy of the predictions and selecting the best variables
get_ipython().run_line_magic('matplotlib', 'inline')
# Scatterplot of the random forest predictions on the held-out test set.
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(pred_test_rf, y_test)
ax.set_xlabel('Predicted fitness effects (random forest)', fontsize=20)
ax.set_ylabel('Observed fitness effects', fontsize=20)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Save the corresponding files for predictions on validation set and test set.
# Results with all variables
df_pred_test = pd.DataFrame(list(zip(y_test, pred_test_rf)), columns=['test_data', 'pred_data'])
df_pred_test.to_csv('../../Data/Random_forest_results/diffNorm_ara0.2_ara0.01/pred_rf_allVariables.txt', sep='\t')
# Results of predictions in the cross-validation
df_pred_crossval = pd.DataFrame(list(zip(y_train, cross_validations_pred)), columns=['test_data', 'pred_data'])
df_pred_crossval.to_csv('../../Data/Random_forest_results/diffNorm_ara0.2_ara0.01/crossval_rf_allVariables.txt', sep='\t')
# ## Feature selection
# Define a function to use permutation to estimate relative importances
def r2(rf, X_train, y_train):
    """Scorer callable for cross-validation: R^2 of *rf* on the given data."""
    predicted = rf.predict(X_train)
    return r2_score(y_train, predicted)
# Use permutation to estimate relative importances
perm_imp_rfpimp = permutation_importances(model_rf, X_train, y_train, r2)
get_ipython().run_line_magic('matplotlib', 'inline')
# Horizontal bar plot of permutation importances for every feature.
fig = plt.figure()
ax = fig.add_axes([0,0,1,3])
ax.barh(list(perm_imp_rfpimp.index), perm_imp_rfpimp['Importance'])
ax.set_xlabel('Relative importance', fontsize = 20)
ax.set_ylabel('Feature', fontsize = 20)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Keep only features at least as important as the random noise column.
best_features = perm_imp_rfpimp[perm_imp_rfpimp['Importance'] >= perm_imp_rfpimp['Importance']['random_var']]
best_features
new_X = X[list(best_features.index)]
# Train a new random forest with the selected variables and the random variable
X_train,X_test,y_train,y_test =train_test_split(new_X,Y,test_size=0.2, random_state = 100)
model_rf = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=100)
model_rf.fit(X_train, y_train)
pred_train_rf= model_rf.predict(X_train)
print('Mean squared error (train):', np.sqrt(mean_squared_error(y_train,pred_train_rf)))
print('R2 score (train):', r2_score(y_train, pred_train_rf))
pred_test_rf = model_rf.predict(X_test)
print('Mean squared error (test):', np.sqrt(mean_squared_error(y_test,pred_test_rf)))
print('R2 score (test):', r2_score(y_test, pred_test_rf))
get_ipython().run_line_magic('matplotlib', 'inline')
## Scatterplot of the random forest predictions
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.scatter(pred_test_rf, y_test)
ax.set_xlabel('Predicted fitness effects (random forest)', fontsize = 20)
ax.set_ylabel('Observed fitness effects', fontsize = 20)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Cross-validation
cross_val_n = 5
print('Five-fold cross validation of the random forest model:')
# Here the custom `r2` scorer is defined above, so the callable form works.
cross_validations = cross_val_score(estimator = model_rf, X = X_train, y = y_train, cv=cross_val_n, scoring = r2)
print(cross_validations)
print(np.mean(cross_validations), np.std(cross_validations) / np.sqrt(cross_val_n))
print('------')
cross_validations_pred = cross_val_predict(estimator = model_rf, X = X_train, y = y_train, cv=cross_val_n)
get_ipython().run_line_magic('matplotlib', 'inline')
## Scatterplot of the random forest predictions
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.scatter(cross_validations_pred, y_train)
ax.set_xlabel('Predicted deltaS (random forest, cross-validations)', fontsize = 20)
ax.set_ylabel('Observed deltaS', fontsize = 20)
cross_validations_pred[1, ]
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Since this is a simpler model, we can test relative importance by leaving one
# variable out and retraining
# Function from https://explained.ai/rf-importance/index.html#intro
def dropcol_importances(rf, X_train, y_train):
    """Leave-one-column-out feature importances via the drop in OOB score.

    Fits a clone of *rf* on all columns to get a baseline OOB score, then
    refits once per column with that column removed; a feature's importance
    is baseline minus the reduced model's OOB score. Returns a DataFrame
    indexed by feature, sorted ascending by importance.
    (Adapted from https://explained.ai/rf-importance/index.html#intro)
    """
    base_model = clone(rf)
    base_model.random_state = 100
    base_model.fit(X_train, y_train)
    baseline_score = base_model.oob_score_
    drops = []
    for column in X_train.columns:
        reduced_X = X_train.drop(column, axis=1)
        candidate = clone(rf)
        candidate.random_state = 100
        candidate.fit(reduced_X, y_train)
        drops.append(baseline_score - candidate.oob_score_)
    table = pd.DataFrame(
        data={'Feature': X_train.columns,
              'Importance': np.array(drops)})
    table = table.set_index('Feature')
    return table.sort_values('Importance', ascending=True)
# Drop-column importances for the reduced (best-features) model.
importances_drop_col = dropcol_importances(model_rf, X_train, y_train)
get_ipython().run_line_magic('matplotlib', 'inline')
# Bar plot of the drop-column importances.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.barh(list(importances_drop_col.index), importances_drop_col['Importance'])
ax.set_xlabel('Relative importance', fontsize = 16)
ax.set_ylabel('Feature', fontsize = 16)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(12)
plt.show()
### Save the tables
## Relative importances permutation (all)
perm_imp_rfpimp.to_csv('../../Data/Random_forest_results/model_diffFit_permImportances_allVariables.txt', sep = '\t')
## Predictions for test set(best variables)
df_pred_test_best = pd.DataFrame(list(zip(y_test, pred_test_rf)), columns = ['test_data', 'pred_data'])
df_pred_test_best.to_csv('../../Data/Random_forest_results/pred_rf_bestVariables.txt', sep = '\t')
## Predictions for cross-validation (best variables)
df_pred_crossval_best = pd.DataFrame(list(zip(y_train, cross_validations_pred)), columns = ['test_data', 'pred_data'])
df_pred_crossval_best.to_csv('../../Data/Random_forest_results/crossval_rf_bestVariables.txt', sep = '\t')
## Relative importances drop column (best variables)
importances_drop_col.to_csv('../../Data/Random_forest_results/model_diffFit_dropCol_bestVariables.txt', sep = '\t')
| Landrylab/DfrB1_DMS_2022 | Scripts/Random_forest/Random_forest_DfrB1_DMS.py | Random_forest_DfrB1_DMS.py | py | 10,422 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.__version__",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.notna",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.notna",
... |
36132831318 | """
Dialog for editing first arrivals.
"""
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
import pyqtgraph as qtg
class FirstArrivalDlg(QtWidgets.QDialog):
    """Dialog for reviewing/editing first-arrival picks of one measurement.

    Shows one normalized trace per receiver with two vertical markers:
    the automatic pick (green, fixed) and the manual pick (blue, draggable).
    Edited picks are written back to the inversion config when the dialog
    closes.
    """
    def __init__(self, measurement, genie, parent=None):
        """Build the dialog UI for *measurement* and populate the trace plots."""
        super().__init__(parent)
        self._measurement = measurement
        self.genie = genie
        # All traces share the sampling rate of the first one.
        self._sampling_rate = self._measurement.data["data"][0].stats.sampling_rate
        title = "First arrival editor - source: {}, file: {}".format(self._measurement.source_id, self._measurement.file)
        self.setWindowTitle(title)
        grid = QtWidgets.QGridLayout(self)
        # plot axis wiget — a single thin plot that only shows the time axis
        qtg.setConfigOptions(background="w", foreground="k")
        graphic_axis_wiget = qtg.GraphicsLayoutWidget(self)
        plot = graphic_axis_wiget.addPlot(enableMenu=False)
        plot.setLabel('left', "")
        plot.setMouseEnabled(False, False)
        x_max = len(self._measurement.data["data"][0].data) / self._sampling_rate
        plot.setXRange(0, x_max * 1.001, padding=0)
        plot.getAxis('bottom').setStyle(showValues=False)
        plot.getAxis('bottom').hide()
        plot.getAxis('left').setStyle(showValues=False)
        plot.getAxis('left').setHeight(0)
        plot.hideButtons()
        plot.setLabel('top', "Time", units='s')
        plot.getAxis('top').setStyle(showValues=True)
        plot.setLabel('left', " ")
        scroll = QtWidgets.QScrollArea()
        scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        scroll.verticalScrollBar().setVisible(False)
        scroll.setWidgetResizable(True)
        grid.addWidget(scroll, 0, 0, 1, 6)
        sw=QtWidgets.QWidget()
        sw.setMaximumHeight(80)
        scroll.setMaximumHeight(85)
        scroll.setMinimumHeight(85)
        scroll.setWidget(sw)
        hbox = QtWidgets.QHBoxLayout()
        sw.setLayout(hbox)
        # "Use" column header above the per-trace checkboxes.
        label = QtWidgets.QLabel("Use")
        label.setMinimumWidth(30)
        label.setMaximumWidth(30)
        hbox.addWidget(label)
        hbox.addWidget(graphic_axis_wiget)
        # plot wiget — scrollable area holding one plot row per receiver
        self._graphic_wiget = qtg.GraphicsLayoutWidget(self)
        self._plot_list = []
        self._line_list = []
        self._line_auto_list = []
        self._checkbox_list = []
        scroll = QtWidgets.QScrollArea()
        scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        scroll.setWidgetResizable(True)
        grid.addWidget(scroll, 1, 0, 3, 6)
        sw=QtWidgets.QWidget()
        scroll.setWidget(sw)
        hbox = QtWidgets.QHBoxLayout()
        sw.setLayout(hbox)
        self._vbox = QtWidgets.QVBoxLayout()
        hbox.addLayout(self._vbox)
        hbox.addWidget(self._graphic_wiget)
        # Legend: green = automatic pick, blue = manual pick.
        lay = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Auto")
        label.setStyleSheet("QLabel { color: #00ff00;}")
        lay.addWidget(label)
        label = QtWidgets.QLabel("Manual")
        label.setStyleSheet("QLabel { color : blue; }")
        lay.addWidget(label)
        lay.addStretch()
        grid.addLayout(lay, 6, 0)
        self._close_button = QtWidgets.QPushButton("Close", self)
        self._close_button.clicked.connect(self.reject)
        grid.addWidget(self._close_button, 6, 5)
        self.setLayout(grid)
        self.setMinimumSize(500, 250)
        self.resize(1000, 800)
        self._create_plot()
    def _create_plot(self):
        """Create one plot row per receiver channel with pick markers."""
        row = 0
        meas = self._measurement
        data = meas.data["data"]
        for i in range(meas.channel_start - 1, meas.channel_start + abs(meas.receiver_stop - meas.receiver_start)):
            row += 1
            trace = data[i]
            # Receiver numbering may run up or down the spread.
            inc = 1 if meas.receiver_stop > meas.receiver_start else -1
            title = "receiver: {}".format(meas.receiver_start + i * inc)
            plot = self._graphic_wiget.addPlot(row=row, col=1, enableMenu=False)
            plot.setLabel('left', title)
            plot.setMouseEnabled(False, False)
            self._plot_list.append(plot)
            checkbox = QtWidgets.QCheckBox()
            checkbox.setMinimumSize(30, 150)
            checkbox.setMaximumWidth(30)
            self._checkbox_list.append(checkbox)
            self._vbox.addWidget(checkbox)
            # Normalize the trace amplitude to [-1, 1] for display.
            x_max = len(trace.data) / self._sampling_rate
            x = np.linspace(0, x_max, len(trace.data))
            y = trace.data / np.max(np.abs(trace.data))
            plot.plot(x, y, pen="r")
            plot.setXRange(0, x_max * 1.001, padding=0)
            plot.setYRange(-1, 1, padding=0)
            plot.getAxis('bottom').setStyle(showValues=False)
            plot.getAxis('left').setStyle(showValues=False)
            plot.showGrid(x=True, y=True)
            plot.hideButtons()
            # cross hair auto (green, not movable)
            vLineAuto = qtg.InfiniteLine(angle=90, movable=False, pen=qtg.mkPen(qtg.mkColor("g")))
            self._line_auto_list.append(vLineAuto)
            plot.addItem(vLineAuto, ignoreBounds=True)
            # cross hair (blue, user-draggable manual pick)
            vLine = qtg.InfiniteLine(angle=90, movable=True, pen=qtg.mkPen(qtg.mkColor("b")))
            self._line_list.append(vLine)
            plot.addItem(vLine, ignoreBounds=True)
            fa = self._find_fa(i)
            if fa is not None:
                # Unverified picks are parked at -0.1 s, off the visible range.
                if fa.verified:
                    t = fa.time
                else:
                    t = -0.1
                vLine.setPos(t)
                checkbox.setChecked(fa.use)
                vLineAuto.setPos(fa.time_auto)
        if self._plot_list:
            self._plot_list[0].scene().sigMouseClicked.connect(self.mouseClickEvent)
        #plot.setLabel('bottom', "Time", units='s')
        #plot.getAxis('bottom').setStyle(showValues=True)
        self._graphic_wiget.setMinimumSize(100, 150 * row)
    def mouseClickEvent(self, ev):
        """Right-click on a trace snaps its manual pick to the automatic pick."""
        if ev.button() == QtCore.Qt.RightButton:
            for i, plot in enumerate(self._plot_list):
                if plot.sceneBoundingRect().contains(ev.scenePos()):
                    self._line_list[i].setPos(self._line_auto_list[i].getPos()[0])
                    ev.accept()
                    break
    def _find_fa(self, channel):
        """Return the first-arrival record for this file/channel, or None."""
        for fa in self.genie.current_inversion_cfg.first_arrivals:
            if fa.file == self._measurement.file and fa.channel == channel:
                return fa
        return None
    def reject(self):
        """Persist edited picks back into the config, then close the dialog."""
        for i, vLine in enumerate(self._line_list):
            fa = self._find_fa(i)
            if fa is not None:
                line_pos = float(vLine.getPos()[0])
                # A marker dragged to a positive time counts as a verified pick;
                # anything at or below zero resets the pick to unverified.
                if line_pos > 0:
                    fa.time = line_pos
                    fa.verified = True
                else:
                    fa.time = 0.0
                    fa.verified = False
                fa.use = self._checkbox_list[i].isChecked()
        super().reject()
| GeoMop/Genie | src/genie/ui/dialogs/first_arrival_dialog.py | first_arrival_dialog.py | py | 6,825 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGridLayout",
"line_number": 23,
"usage_type": "call"
},
{
"api_name... |
1828127496 | import json
import os
from pathlib import Path
from typing import List, Tuple
import numpy as np
from arch import arch_model
from systems_util import get_futures_list, get_settings, normalize_weights
def myTradingSystem(DATE: List[int], CLOSE: np.ndarray, settings) -> Tuple[np.ndarray, dict]:
    """Quantiacs trading system: one position per market from a GARCH forecast.

    For each non-CASH market, loads pre-fitted GARCH(p, q) parameters from
    disk, forecasts next-period variance, and goes long/short/flat depending
    on how the forecast volatility compares to historical volatility and on
    the ticker's stored correlation value. Returns normalized weights and
    the (unchanged) settings dict.
    """
    print(f"Predicting for: {DATE[-1]}")
    # Transpose so markets are rows; drop the first row (CASH) before
    # computing log returns.
    CLOSE = np.transpose(CLOSE)[1:]
    log_return = np.diff(np.log(CLOSE))
    positions: List[int] = []
    storage_dir = Path(os.path.dirname(__file__)).parent / "../models/garch/correlation.txt"
    with open(storage_dir) as f:
        cor_dict = json.load(f)
    for index, ticker in enumerate(settings["markets"]):
        if ticker == "CASH":
            positions.append(0)
            continue
        print(f"Predicting for: {ticker}")
        params_dir = Path(os.path.dirname(__file__)).parent / f"../models/garch/params/{ticker}_params.txt"
        # NOTE(review): log_return is (markets, time), so `[:, index-1]`
        # selects a single TIME column across all markets rather than this
        # ticker's return series — `log_return[index - 1]` looks intended.
        # Confirm the orientation of CLOSE before changing.
        ticker_returns = log_return[:, index-1]
        with open(params_dir) as f:
            params = json.load(f)
        p = params['p']
        q = params['q']
        fixed_params = params['params']
        # Returns are scaled by 10 for the GARCH fit (arch works better on
        # percent-scale data); the model uses frozen, pre-fitted parameters.
        model = arch_model(ticker_returns * 10 , p=p, q=q)
        fixed_model = model.fix(fixed_params)
        forecast_vol = fixed_model.forecast()
        var = forecast_vol.variance.iloc[-1:]['h.1']
        # flip the inequality signs lol
        # if (cor_dict[ticker] > 0.03)
        """
        if (float(np.sqrt(var)) > np.std(ticker_returns)):
            positions.append(1)
        elif (float(np.sqrt(var)) < np.std(ticker_returns)):
            positions.append(-1)
        else:
            positions.append(0)
        """
        # Direction depends on the stored correlation: below 0.3 go long on
        # elevated forecast volatility, above 0.3 go short, otherwise flat.
        if (cor_dict[ticker] < 0.3):
            if (float(np.sqrt(var)) > np.std(ticker_returns)):
                positions.append(1)
            else:
                positions.append(0)
        elif (cor_dict[ticker] > 0.3):
            if (float(np.sqrt(var)) > np.std(ticker_returns)):
                positions.append(-1)
            else:
                positions.append(0)
        else:
            positions.append(0)
    positions = normalize_weights(weights=positions)
    return positions, settings
def mySettings():
    """Build the Quantiacs settings dict for this system.

    Markets are CASH followed by every future returned by
    get_futures_list(filter_insignificant_lag=2).
    """
    settings = get_settings()
    futures = get_futures_list(filter_insignificant_lag=2)
    # (Removed an unused hard-coded `futures_list` local — the system trades
    # the full filtered futures universe.)
    settings["markets"] = ["CASH", *futures]
    return settings
if __name__ == '__main__':
    # Run a Quantiacs backtest of this file's trading system.
    import quantiacsToolbox
    results = quantiacsToolbox.runts(__file__)
| weixue123/quantiacs_algo_trading | systems/garch_system.py | garch_system.py | py | 2,506 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.transpose",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line... |
15947746512 | '''
用于将dea模型中的构件拆分到单独的dae文件中
'''
def process(dae_path, base_output_dae_path):
    '''
    Split each component (<node>) of a dae file into its own dae file.

    For every <node> in the source COLLADA document, builds a new document
    containing that node plus the asset, geometry, material and effect
    elements it references, and writes it under a subfolder of
    base_output_dae_path named after the source file.
    '''
    import xml.dom.minidom
    import os
    import time
    if not os.path.exists(dae_path):
        # NOTE(review): only prints a (Chinese) "path does not exist" message;
        # execution continues and parse() below will raise.
        print('路径%s不存在' % dae_path)
    # Output folder path: <base>\<source file name without extension>
    output_dir = base_output_dae_path + '\\' + dae_path[dae_path.rfind('\\') + 1: dae_path.find('.')]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    doc = xml.dom.minidom.parse(dae_path)
    impl = xml.dom.minidom.getDOMImplementation()
    # Collect the base tags shared/looked up by every per-node document.
    asset_tag = doc.getElementsByTagName('asset')[0]
    effect_tags = doc.getElementsByTagName('effect')
    material_tags = doc.getElementsByTagName('material')
    geometry_tags = doc.getElementsByTagName('geometry')
    node_tags = doc.getElementsByTagName('node')
    scene_tag = doc.getElementsByTagName('scene')[0]
    print('effect_tags: ', len(effect_tags))
    print('material_tags: ', len(material_tags))
    print('geometry_tags: ', len(geometry_tags))
    print('node_tags: ', len(node_tags))
    # Iterate over every node, emitting one dae file each.
    count = 0
    for node in node_tags:
        # Create a fresh dae document for this node.
        new_doc = impl.createDocument(None, 'COLLADA', None)
        # Grab the new document's COLLADA root element.
        new_doc_root = new_doc.documentElement
        new_doc_root.setAttribute('xmlns', 'http://www.collada.org/2005/11/COLLADASchema')
        new_doc_root.setAttribute('version', '1.4.1')
        # Attach the shared asset element to the new document.
        new_doc_root.appendChild(asset_tag)
        # Attach the current node inside a fresh visual scene.
        library_visual_scenes = new_doc.createElement('library_visual_scenes')
        visual_scene = new_doc.createElement('visual_scene')
        visual_scene.setAttribute('id', 'IfcOpenShell')
        visual_scene.appendChild(node)
        library_visual_scenes.appendChild(visual_scene)
        new_doc_root.appendChild(library_visual_scenes)
        # Resolve the geometry id referenced by this node ("#id" -> "id").
        instance_geometry = node.getElementsByTagName('instance_geometry')[0]
        geometry_id = instance_geometry.getAttribute('url')[1:]
        geometrys = getElementsById(geometry_tags, geometry_id)
        # Attach the matching geometry element(s) to the new document.
        library_geometries = new_doc.createElement('library_geometries')
        for geometry in geometrys:
            library_geometries.appendChild(geometry)
        new_doc_root.appendChild(library_geometries)
        # Parent containers for the material and effect elements.
        library_materials = new_doc.createElement('library_materials')
        library_effects = new_doc.createElement('library_effects')
        instance_materials = node.getElementsByTagName('instance_material')
        for instance_material in instance_materials:
            material_id = instance_material.getAttribute('target')[1:]
            materials = getElementsById(material_tags, material_id)
            for material in materials:
                library_materials.appendChild(material)
                instance_effect = material.getElementsByTagName('instance_effect')[0]
                effect_id = instance_effect.getAttribute('url')[1:]
                effects = getElementsById(effect_tags, effect_id)
                for effect in effects:
                    library_effects.appendChild(effect)
        # Attach the collected material and effect elements.
        new_doc_root.appendChild(library_materials)
        new_doc_root.appendChild(library_effects)
        # Attach the shared scene element.
        new_doc_root.appendChild(scene_tag)
        # encoding='utf-8' matters here — it fixes the character-encoding issue.
        output_file = output_dir + '\\' + dae_path[dae_path.rfind('\\') + 1: dae_path.find('.')] + '-' + geometry_id + '-' + str(count) + '.dae'
        with open(output_file, mode='w', encoding='utf-8') as f:
            print('start writing...')
            print(count)
            new_doc.writexml(f, addindent='', newl='', encoding='utf-8')
            print('done writing...')
            print('#'*100)
        count += 1
        print('current count', count)
    print('-'*20)
def getElementsById(doms, id):
    '''
    Return every DOM node in *doms* whose 'id' attribute equals *id*.

    :param doms: iterable of DOM elements (each supporting getAttribute)
    :param id: the id value to match (note: shadows the builtin `id`,
               kept for backward compatibility with existing callers)
    :return: list of matching elements, in input order
    '''
    # Comprehension replaces the original append loop (same order, same result).
    return [dom for dom in doms if dom.getAttribute('id') == id]
# Script entry point: split a COLLADA (.dae) file using hard-coded paths.
# Swap in the commented-out input() lines for interactive use.
if __name__ == '__main__':
    dae_path = 'C:\\Users\\dell\\Desktop\\Lab.dae'
    base_output_dae_path = 'C:\\Users\\dell\\Desktop'
    # Note: geometry id 1iUTeNLx945xp4Fd_hV3Bb occurs twice in this sample file
    # dae_path = input('请输入dae文件路径: ')
    # base_output_dae_path = input('请输入生成文件所在的路径: ')
    process(dae_path, base_output_dae_path)
{
"api_name": "os.path.exists",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
71193874663 | #Importing the Dependencies
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
# Reading the data folder and assigning class names: each subfolder of
# `data` holds the images for one class.
data_file = r"data"
classes = ["with_mask", "without_mask"]

data = []
labels = []

# Load every image, resize to the MobileNetV2 input size (224x224), and apply
# the MobileNetV2-specific preprocessing (scales pixel values to [-1, 1]).
for c in classes:
    path = os.path.join(data_file, c)
    for i in os.listdir(path):
        image_path = os.path.join(path, i)
        img = load_img(image_path, target_size=(224, 224))
        img = img_to_array(img)
        image_array = preprocess_input(img)
        data.append(image_array)
        labels.append(c)

# Binarizing labels: with two classes LabelBinarizer yields a single 0/1
# column, which to_categorical expands to one-hot vectors of length 2.
binarizer = LabelBinarizer()  # creating an instance of LabelBinarizer
binary_labels = binarizer.fit_transform(labels)
labels = to_categorical(binary_labels)

# Converting image_array and labels to numpy array
X = np.array(data, dtype="float32")
y = np.array(labels)

# Splitting the data and labels (stratified so both classes keep their ratio)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size = 0.2, stratify = labels, random_state = 42)

# Generating batches of image data with data augmentation (train-time only)
aug = ImageDataGenerator(rotation_range = 20,
                         zoom_range = 0.15,
                         width_shift_range = 0.2,
                         height_shift_range = 0.2,
                         shear_range = 0.15,
                         horizontal_flip = True)

# Transfer learning using the MobileNetV2 architecture: the convolutional
# base is frozen and only a small classification head is trained on top.
base_model = MobileNetV2(include_top = False, input_tensor = Input(shape = (224, 224, 3)))
head_model = base_model.output
head_model = AveragePooling2D(pool_size=(7, 7))(head_model)
head_model = Flatten(name="flatten")(head_model)
head_model = Dense(128, activation="relu")(head_model)
head_model = Dropout(0.5)(head_model)  # regularization against overfitting
head_model = Dense(2, activation="softmax")(head_model)
model = Model(inputs=base_model.input, outputs=head_model)

# Keeping parameters of base_model layers fixed (only the head is trainable)
for layer in base_model.layers:
    layer.trainable = False

print("Compiling model...")
# Compiling the model
model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate= 0.0001),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['accuracy'])
print("Model compiled.")

# Training the model: augmented batches for training, raw test set as validation
print("Training head...")
hist = model.fit(
    aug.flow(X_train, y_train, batch_size=32),
    steps_per_epoch=len(X_train) // 32,
    validation_data=(X_test, y_test),
    validation_steps=len(X_test) // 32,
    epochs=20)

# Evaluating the model on the held-out test set
print("Evaluating model...")
pred_prob = model.predict(X_test, batch_size=32)
pred_id = np.argmax(pred_prob, axis=1)  # highest-probability class per sample
print(classification_report(y_test.argmax(axis=1), pred_id, target_names=binarizer.classes_))

# Saving the model
print("Saving mask detector model...")
model.save("mask_detector2.model", save_format="h5")

# Plotting the training loss and accuracy against epoch
epochs = 20  # must match the epochs passed to model.fit above
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), hist.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), hist.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), hist.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), hist.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.png")
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
26446464278 | from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from .forms import SignUpForm, ProfileForm, LoginForm
from django.shortcuts import render, redirect
from .models import Profile, Search
from product.models import Product
import json
def signup_view(request):
    """Register a new user.

    On a valid POST: save the account, authenticate and log the user in,
    attach a fresh Profile, then redirect home. Otherwise render the
    signup form (bound on an invalid POST, empty on GET).
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            # Log the freshly created user in with their own credentials.
            user = authenticate(
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password1'),
            )
            login(request, user)
            # Every new account gets an empty Profile record.
            new_profile = Profile()
            new_profile.user = request.user
            new_profile.save()
            return redirect('home')
    else:
        form = SignUpForm()
    return render(request, 'user/signup.html', {'form': form})
def login_view(request):
    """Authenticate an existing user and redirect home on success;
    otherwise render the login form (bound on an invalid POST)."""
    if request.method != 'POST':
        return render(request, 'user/login.html', {'form': LoginForm()})
    form = LoginForm(request, data=request.POST)
    if form.is_valid():
        user = authenticate(
            username=form.cleaned_data.get('username'),
            password=form.cleaned_data.get('password'),
        )
        login(request, user)
        return redirect('home')
    return render(request, 'user/login.html', {'form': form})
@login_required
def profile(request):
    """Display and update the logged-in user's profile.

    GET renders a form pre-filled with the current user data; a valid POST
    saves the submitted user fields plus an optional profile image.
    """
    try:
        profile = Profile.objects.get(user=request.user)
    except Profile.DoesNotExist:
        # We make a new profile if it doesn't exist for the current user.
        # BUG FIX: Model.save() returns None, so the original
        # `profile = Profile(user=request.user).save()` left `profile` as
        # None and crashed below on `profile.image.url`.
        profile = Profile(user=request.user)
        profile.save()
    if request.method == 'POST':
        form = ProfileForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            form_profile = form.save(commit=False)
            # Copy the editable user fields straight from the POST payload.
            request.user.username = request.POST['username']
            request.user.first_name = request.POST['first_name']
            request.user.last_name = request.POST['last_name']
            request.user.email = request.POST['email']
            request.user.save()
            form_profile.user = request.user
            # If the user uploaded a new profile picture, attach it.
            if 'image' in request.FILES:
                image = request.FILES['image']
                if image:
                    form_profile.image = image
            form_profile.save()
            return redirect('profile')
    # When the user opens the profile page all the fields are already filled out
    form = ProfileForm()
    form.fields['image'].label = 'Mynd'
    form.fields['username'].initial = request.user.username
    form.fields['first_name'].initial = request.user.first_name
    form.fields['last_name'].initial = request.user.last_name
    form.fields['email'].initial = request.user.email
    return render(request, 'user/profile.html', {
        'form': form,
        'image': profile.image.url
    })
def __get_product_list(user_id):
    """Return the Product objects the given user has viewed, most recent first.

    :param user_id: primary key of the user whose history is fetched
    :return: list of Product instances ordered by descending search date
    """
    # Ids of all products the user has viewed, newest search first.
    search = Search.objects.filter(profile__user_id=user_id).order_by('-date_of_search').values_list('product_id',
                                                                                                     flat=True)
    # Comprehension replaces the original append loop. Each product is fetched
    # individually to preserve the recency ordering of `search`.
    return [Product.objects.get(id=i) for i in search]
@login_required
def viewed_products(request):
    """Render the 10 most recently viewed products for the logged-in user."""
    recent_products = __get_product_list(request.user.id)[:10]
    return render(request, 'user/viewed_products.html', {'products': recent_products})
# Utilized for viewed products: (profile_id, product_id) pairs recorded during
# this process lifetime, used to short-circuit duplicate Search rows.
# NOTE(review): module-level mutable state — not shared across workers or restarts.
search_list = []
def add_to_search(request):
    """Record that the logged-in user viewed a product.

    Expects a POST whose body is a JSON-encoded product id.
    Responds 201 on success, 409 for duplicates, 404 for an unknown product,
    400 for invalid JSON or a non-POST request.
    """
    if request.method != 'POST':
        return send_json('Request method not supported', 400)
    if not request.user.is_authenticated:
        # Preserve original behavior for anonymous posters: echo the query data.
        return JsonResponse({'data': request.GET})
    try:
        user_profile = Profile.objects.get(user__id=request.user.id)
    except Profile.DoesNotExist:
        # The user has no profile so we create one
        user_profile = Profile()
        user_profile.user = request.user
        user_profile.save()
    # We make a new search instance and use the profile of the user
    search = Search()
    search.profile = user_profile
    # Get the id for the viewed product
    try:
        product_id = json.loads(request.body)
    except json.JSONDecodeError:
        return send_json('JSON was invalid', 400)
    # If the user has already viewed the product, don't record it again.
    if (user_profile.id, product_id) in search_list:
        return send_json('The search already exists', 409)
    if Search.objects.filter(profile__id=user_profile.id, product__id=product_id).exists():
        # BUG FIX: the original called search_list.remove((user_profile.id,
        # product_id)) here, but the branch above guarantees that pair is NOT
        # in search_list, so list.remove() always raised ValueError (a 500).
        # A duplicate in the database is simply reported as a conflict.
        return send_json('The search already exists', 409)
    search_list.append((user_profile.id, product_id))
    try:
        product = Product.objects.get(id=product_id)
    except Product.DoesNotExist:
        return send_json('The viewed product was not found', 404)
    search.product = product
    search.save()
    return send_json('', 201, product_id)
def send_json(message, status_code, data=None):
    """Build a JsonResponse with {'data': ..., 'message': ...} and the given status.

    :param message: human-readable status message
    :param status_code: HTTP status code to set on the response
    :param data: optional payload; defaults to an empty dict. The original
                 used a mutable default argument (`data={}`) — the classic
                 shared-default pitfall — replaced with a None sentinel.
    """
    response = JsonResponse({
        'data': {} if data is None else data,
        'message': message
    })
    response.status_code = status_code
    return response
| RunarVestmann/verklegtnamskeid2 | captain_console/user/views.py | views.py | py | 6,004 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.SignUpForm",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 20,
"usage_type": "call"
},
{
"api_n... |
11604655493 | import ipaddress
import json
import os
import uuid
import encryption_helper
import phantom.app as phantom
import phantom.rules as ph_rules
import phantom.utils as ph_utils
import requests
import xmltodict
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from requests.auth import AuthBase, HTTPBasicAuth
from requests.structures import CaseInsensitiveDict
import ews_soap
from ewsonprem_consts import *
try:
from urllib.parse import quote_plus, urlparse
except ImportError:
from urllib import quote_plus
from urlparse import urlparse
import base64
import email
import quopri
import re
import time
from datetime import datetime, timedelta
from email.header import decode_header
from email.parser import HeaderParser
import outlookmsgfile
import six
from bs4 import BeautifulSoup, UnicodeDammit
from process_email import ProcessEmail
from request_handler import RequestStateHandler # noqa
from request_handler import _get_dir_name_from_app_name
# Make the app's bundled EWS dependencies importable before anything uses them.
app_dir = os.path.dirname(os.path.abspath(__file__))
os.sys.path.insert(0, '{}/dependencies/ews_dep'.format(app_dir))  # noqa
class RetVal3(tuple):
    """Immutable 3-tuple return value whose trailing members default to None."""

    def __new__(cls, val1, val2=None, val3=None):
        # Delegate to tuple construction; subclass instances stay hashable tuples.
        return super(RetVal3, cls).__new__(cls, (val1, val2, val3))
class RetVal2(tuple):
    """Immutable 2-tuple return value whose second member defaults to None."""

    def __new__(cls, val1, val2=None):
        # Delegate to tuple construction; subclass instances stay hashable tuples.
        return super(RetVal2, cls).__new__(cls, (val1, val2))
class OAuth2TokenAuth(AuthBase):
    """requests auth hook that attaches an OAuth2 token as an Authorization header."""

    def __init__(self, token, token_type="Bearer"):
        # token: raw access-token string; token_type: scheme prefix (usually "Bearer")
        self._token = token
        self._token_type = token_type

    def __call__(self, r):
        # modify and return the request: set "Authorization: <type> <token>"
        r.headers['Authorization'] = "{0} {1}".format(self._token_type, self._token)
        return r
class EWSOnPremConnector(BaseConnector):
    """Phantom/SOAR connector for Exchange Web Services (on-prem and O365)."""

    # actions supported by this script; each maps to a handler dispatched by
    # the platform via get_action_identifier()
    ACTION_ID_RUN_QUERY = "run_query"
    ACTION_ID_DELETE_EMAIL = "delete_email"
    ACTION_ID_UPDATE_EMAIL = "update_email"
    ACTION_ID_COPY_EMAIL = "copy_email"
    ACTION_ID_MOVE_EMAIL = "move_email"
    ACTION_ID_BLOCK_SENDER = "block_sender"
    ACTION_ID_UNBLOCK_SENDER = "unblock_sender"
    ACTION_ID_EXPAND_DL = "expand_dl"
    ACTION_ID_RESOLVE_NAME = "resolve_name"
    ACTION_ID_ON_POLL = "on_poll"
    ACTION_ID_GET_EMAIL = "get_email"
    ACTION_ID_TRACE_EMAIL = "trace_email"

    # Placeholder token substituted into templated strings elsewhere in the app
    REPLACE_CONST = "C53CEA8298BD401BA695F247633D0542"  # pragma: allowlist secret
    def __init__(self):
        """Initialize connector attributes; real setup happens later in initialize()."""
        # Cache of id -> display-name lookups
        self.__id_to_name = {}
        # Call the BaseConnectors init first
        super(EWSOnPremConnector, self).__init__()
        self._session = None  # requests session created during initialization
        # Target user in case of impersonation
        self._target_user = None
        self._state_file_path = None
        self._state = {}  # persisted app state (tokens, client_id, ...)
        self._headers = None
        self._base_url = None
        self._host = None
        self._impersonate = False  # True when impersonating _target_user
        self._less_data = False
        self._dup_data = 0
        self._is_token_test_connectivity = False
        self._is_client_id_changed = False
        self._timeout = None  # per-request timeout taken from asset config
        self.auth_type = None
        self.rsh = None  # RequestStateHandler used during interactive OAuth
        self._skipped_emails = 0
    def _handle_preprocess_scipts(self):
        """Load the optional user-supplied preprocess script from the asset config.

        On success self._preprocess_container is bound to the script's
        preprocess_container function; otherwise it remains an identity lambda.
        Returns phantom.APP_SUCCESS, or an error status if the script is invalid.
        """
        config = self.get_config()
        script = config.get('preprocess_script')

        # Default: pass containers through unchanged.
        self._preprocess_container = lambda x: x

        if script:
            try:  # Try to load in script to preprocess artifacts
                import importlib.util
                # Build an empty in-memory module and exec the user script into it.
                preprocess_methods = importlib.util.spec_from_loader('preprocess_methods', loader=None)
                self._script_module = importlib.util.module_from_spec(preprocess_methods)
                exec(script, self._script_module.__dict__)
            except Exception as e:
                self.save_progress("Error loading custom script. Error: {}".format(str(e)))
                return self.set_status(phantom.APP_ERROR, EWSONPREM_CONNECTIVITY_TEST_ERROR)

            try:
                self._preprocess_container = self._script_module.preprocess_container
            except Exception:
                self.save_progress("Error loading custom script. Does not contain preprocess_container function")
                return self.set_status(phantom.APP_ERROR, EWSONPREM_CONNECTIVITY_TEST_ERROR)

        return phantom.APP_SUCCESS
    def _get_ping_fed_request_xml(self, config):
        """Build the WS-Trust SOAP request body sent to the PingFederate endpoint.

        The request is valid for a 10-minute window starting now (UTC) and
        embeds the asset's username/password.

        :param config: asset configuration dict
        :return: (xml string, "Done") on success, (None, error message) on failure
        """
        try:
            dt_now = datetime.utcnow()
            dt_plus = dt_now + timedelta(minutes=10)
            # ISO-8601 timestamps truncated to milliseconds, with a 'Z' suffix
            dt_now_str = "{0}Z".format(dt_now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3])
            dt_plus_str = "{0}Z".format(dt_plus.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3])
            ret_val = EWS_FED_REQUEST_XML.format(
                # strip any query string from the configured ping URL
                ping_url=config[EWS_JSON_FED_PING_URL].split('?')[0],
                created_date=dt_now_str,
                expiry_date=dt_plus_str,
                username=config[phantom.APP_JSON_USERNAME],
                password=config[phantom.APP_JSON_PASSWORD]
            )
        except Exception as e:
            return None, "Unable to create request xml data. Error: {0}".format(self._get_error_message_from_exception(e))

        return ret_val, "Done"
    def _set_federated_auth(self, config):
        """Perform Azure/Federated (PingFederate SAML bearer) authentication.

        Posts a WS-Trust request to the ping URL, extracts the SAML assertion,
        exchanges it at the Azure OAuth2 token endpoint, and wraps the result
        in an OAuth2TokenAuth.

        :param config: asset configuration dict
        :return: (OAuth2TokenAuth, "") on success, (None, error message) on failure
        """
        ret_val, message = self._check_password(config)
        if phantom.is_fail(ret_val):
            self.save_progress(message)
            return None, message

        required_params = [EWS_JSON_CLIENT_ID, EWS_JSON_FED_PING_URL, EWS_JSON_AUTH_URL, EWS_JSON_FED_VERIFY_CERT]
        for required_param in required_params:
            if required_param not in config:
                return None, "ERROR: {0} is a required parameter for Azure/Federated Authentication, please specify one.".format(required_param)

        client_id = config[EWS_JSON_CLIENT_ID]

        # create the xml request that we need to send to the ping fed
        fed_request_xml, message = self._get_ping_fed_request_xml(config)
        if fed_request_xml is None:
            return None, message

        # Now create the request to the server
        headers = {'Content-Type': 'application/soap_xml; charset=utf8'}
        url = config[EWS_JSON_FED_PING_URL]

        # POST the request
        try:
            r = requests.post(
                url,
                data=fed_request_xml,
                headers=headers,
                verify=config[EWS_JSON_FED_VERIFY_CERT],
                timeout=self._timeout
            )
        except Exception as e:
            return None, "Unable to send POST to ping url: {0}, Error: {1}".format(url, self._get_error_message_from_exception(e))

        if r.status_code != 200:
            return None, "POST to ping url failed. Status Code: {0}".format(r.status_code)

        # process the xml response: carve out the <saml:Assertion> element
        xml_response = r.text
        start_pos = xml_response.find('<saml:Assertion')
        end_pos = xml_response.find('</saml:Assertion>') + len('</saml:Assertion>')

        # validate that the saml assertion is present
        if start_pos == -1 or end_pos == -1:
            return None, "Could not find Saml Assertion"

        saml_assertion = xml_response[start_pos:end_pos]

        # base64 encode the assertion
        saml_assertion_encoded = base64.encodebytes(saml_assertion.encode('utf8'))

        # Now work on sending the assertion, to get the token
        url = '{0}/oauth2/token'.format(config[EWS_JSON_AUTH_URL])

        # headers
        client_req_id = str(uuid.uuid4())
        headers = {'Accept': 'application/json', 'client-request-id': client_req_id, 'return-client-request-id': 'True'}

        # URL: the resource is the EWS server this asset points at
        parsed_auth_url = urlparse(self._base_url)

        # Form data for the SAML 1.1 bearer grant
        data = {
            'resource': '{0}://{1}'.format(parsed_auth_url.scheme, parsed_auth_url.netloc),
            'client_id': client_id,
            'grant_type': 'urn:ietf:params:oauth:grant-type:saml1_1-bearer',
            'assertion': saml_assertion_encoded,
            'scope': 'openid'
        }

        try:
            r = requests.post(url, data=data, headers=headers, timeout=self._timeout)
        except Exception as e:
            return None, "Failed to acquire token. POST request failed for {0}, Error: {1}".format(
                url, self._get_error_message_from_exception(e))

        if r.status_code != 200:
            return None, "POST to office365 url failed. Status Code: {0}".format(r.status_code)

        resp_json = None
        try:
            resp_json = r.json()
        except Exception as e:
            return None, "Unable to parse auth token response as JSON. Error: {0}".format(self._get_error_message_from_exception(e))

        if 'token_type' not in resp_json:
            return None, "token_type not found in response from server"

        if 'access_token' not in resp_json:
            return None, "token not found in response from server"

        self.save_progress("Got Access Token")

        return OAuth2TokenAuth(resp_json['access_token'], resp_json['token_type']), ""
    def _make_rest_calls_to_phantom(self, action_result, url):
        """GET a platform-local REST endpoint and parse the JSON response.

        :param action_result: ActionResult used to record failures
        :param url: full URL on this Phantom instance
        :return: (phantom.APP_SUCCESS, parsed json) or (error status, None)
        """
        # Ignored the verify semgrep check as the following is a call to the phantom's REST API on the instance itself
        r = requests.get(url, verify=False)  # nosemgrep
        if not r:
            message = 'Status Code: {0}'.format(r.status_code)
            if r.text:
                # escape braces so the text survives later .format() calls
                message = "{} Error from Server: {}".format(message, r.text.replace('{', '{{').replace('}', '}}'))
            return action_result.set_status(phantom.APP_ERROR, "Error retrieving system info, {0}".format(message)), None

        try:
            resp_json = r.json()
        except Exception as e:
            return action_result.set_status(phantom.APP_ERROR, "Error processing response JSON", e), None

        return phantom.APP_SUCCESS, resp_json
    def _get_phantom_base_url_ews(self, action_result):
        """Read the externally-reachable base URL from the platform's system settings.

        :return: (phantom.APP_SUCCESS, base url without trailing slash) or
                 (error status, None) when unset or unreachable
        """
        ret_val, resp_json = self._make_rest_calls_to_phantom(action_result, '{}rest/system_info'.format(self.get_phantom_base_url()))

        if phantom.is_fail(ret_val):
            return action_result.get_status(), None

        phantom_base_url = resp_json.get('base_url')
        if not phantom_base_url:
            return action_result.set_status(
                phantom.APP_ERROR, "Phantom Base URL is not configured, please configure it in System Settings"), None

        phantom_base_url = phantom_base_url.strip("/")
        return phantom.APP_SUCCESS, phantom_base_url
def _get_asset_name(self, action_result):
ret_val, resp_json = self._make_rest_calls_to_phantom(
action_result, '{}rest/asset/{}'.format(self.get_phantom_base_url(), self.get_asset_id()))
if phantom.is_fail(ret_val):
return action_result.get_status(), None
asset_name = resp_json.get('name')
if not asset_name:
return action_result.set_status(phantom.APP_ERROR, "Error retrieving asset name"), None
return phantom.APP_SUCCESS, asset_name
    def _get_url_to_app_rest(self, action_result=None):
        """Build this app's REST handler URL (used as the OAuth redirect URI).

        :param action_result: optional ActionResult; a throwaway one is created
                              if the caller does not supply one
        :return: (phantom.APP_SUCCESS, url) or (error status, error message)
        """
        if not action_result:
            action_result = ActionResult()
        # get the phantom ip to redirect to
        ret_val, phantom_base_url = self._get_phantom_base_url_ews(action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status(), action_result.get_message()
        # get the asset name
        ret_val, asset_name = self._get_asset_name(action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status(), action_result.get_message()
        self.save_progress('Using Phantom base URL as: {0}'.format(phantom_base_url))
        app_json = self.get_app_json()
        app_name = app_json['name']
        app_dir_name = _get_dir_name_from_app_name(app_name)
        url_to_app_rest = "{0}/rest/handler/{1}_{2}/{3}".format(phantom_base_url, app_dir_name, app_json['appid'], asset_name)
        return phantom.APP_SUCCESS, url_to_app_rest
    def _azure_int_auth_initial(self, client_id, client_secret):
        """Run the interactive (authorization-code) Azure OAuth flow.

        Stores the request context in the shared RequestStateHandler state,
        prints the authorize URL for the user, then polls (up to 5 minutes)
        for the token written back by the app's REST handler.

        :return: (OAuth2TokenAuth, "") on success, (None, error message) otherwise
        """
        state = self.rsh.load_state()
        asset_id = self.get_asset_id()

        ret_val, message = self._get_url_to_app_rest()
        if phantom.is_fail(ret_val):
            return None, message

        app_rest_url = message

        request_url = 'https://login.microsoftonline.com/common/oauth2'

        # Capture proxy settings so the REST handler uses the same ones.
        proxy = {}
        if 'HTTP_PROXY' in os.environ:
            proxy['http'] = os.environ.get('HTTP_PROXY')
        if 'HTTPS_PROXY' in os.environ:
            proxy['https'] = os.environ.get('HTTPS_PROXY')

        state['proxy'] = proxy
        state['client_id'] = client_id
        state['redirect_url'] = app_rest_url
        state['request_url'] = request_url

        # This handling is for the python version 3, working fine with both the python version 2 and 3
        # (the secret is stored base64-encoded in the handler state)
        client_secret = client_secret.encode('ascii')
        client_secret = base64.b64encode(client_secret)
        state['client_secret'] = client_secret.decode('ascii')

        self.rsh.save_state(state)
        self.save_progress("Redirect URI: {}".format(app_rest_url))

        params = {
            'response_type': 'code',
            'response_mode': 'query',
            'client_id': client_id,
            'state': asset_id,
            'redirect_uri': app_rest_url
        }
        url = requests.Request('GET', '{}/authorize'.format(request_url), params=params).prepare().url
        url = '{}&'.format(url)

        self.save_progress("To continue, open this link in a new tab in your browser")
        self.save_progress(url)

        # Poll every 5 seconds (60 attempts) for the handler to store a token.
        for _ in range(0, 60):
            time.sleep(5)
            state = self.rsh.load_state()
            oauth_token = state.get('oauth_token')
            if oauth_token:
                break
            elif state.get('error'):
                self._reset_the_state()
                return None, "Error retrieving OAuth token"
        else:
            # for-else: no break occurred, so we never received a token
            return None, "Timed out waiting for login"

        self._state['oauth_token'] = oauth_token

        # NOTE: This state is in the app directory, it is
        # different from the app state (i.e. self._state)
        self.rsh.delete_state()
        return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), ""
    def _azure_int_auth_refresh(self, client_id, client_secret):
        """Refresh the Azure OAuth token using the stored refresh token.

        Resets the persisted state (forcing a new Test Connectivity) when no
        refresh token exists, the client id changed, or the server reports a
        known invalid-parameter error.

        :return: (OAuth2TokenAuth, "") on success, (None, error message) otherwise
        """
        oauth_token = self._state.get('oauth_token')
        if not (oauth_token and oauth_token.get("refresh_token")):
            self._reset_the_state()
            return None, "Unable to get refresh token. Please run Test Connectivity again"

        if client_id != self._state.get('client_id', ''):
            self._reset_the_state()
            return None, "Client ID has been changed. Please run Test Connectivity again"

        refresh_token = oauth_token['refresh_token']
        request_url = 'https://login.microsoftonline.com/common/oauth2/token'

        body = {
            'grant_type': 'refresh_token',
            'resource': 'https://outlook.office365.com/',
            'client_id': client_id,
            'refresh_token': refresh_token,
            'client_secret': client_secret
        }

        try:
            r = requests.post(request_url, data=body, timeout=self._timeout)
        except Exception as e:
            return None, "Error refreshing token: {}".format(str(e))

        try:
            oauth_token = r.json()
            if "error" in oauth_token:
                if oauth_token["error"] in EWS_ASSET_PARAM_CHECK_LIST_ERRORS:
                    self._reset_the_state()
                return None, oauth_token["error_description"]
        except Exception:
            return None, "Error retrieving OAuth Token"

        self._state['oauth_token'] = oauth_token
        return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), ""
    def _set_azure_int_auth(self, config):
        """Authenticate with Azure interactive OAuth.

        Test Connectivity runs the full authorization-code flow; all other
        actions reuse the stored refresh token.

        :return: (OAuth2TokenAuth, "") on success, (None, error message) otherwise
        """
        client_id = config.get(EWS_JSON_CLIENT_ID)
        client_secret = config.get(EWS_JSON_CLIENT_SECRET)

        if not client_id:
            return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_ID)
        if not client_secret:
            return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_SECRET)

        if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY:
            self.debug_print("Try to generate token from refresh token")
            ret = self._azure_int_auth_refresh(client_id, client_secret)
        else:
            self.debug_print("Try to generate token from authorization code")
            ret = self._azure_int_auth_initial(client_id, client_secret)

        # Remember the client id only when auth succeeded (ret[0] is the auth object)
        if ret[0]:
            self._state['client_id'] = client_id

        return ret
    def _get_domain(self, username, client_req_id):
        """ This method is used to obtain domain from the username.

        Queries the Azure UserRealm endpoint for the tenant domain that owns
        the given user.

        :param username: Username
        :param client_req_id: Request ID (correlates the request in Azure logs)
        :return: status, domain/message
        """
        headers = {'Accept': 'application/json', 'client-request-id': client_req_id, 'return-client-request-id': 'True'}
        url = "{0}/common/UserRealm/{1}".format(EWS_LOGIN_URL, username)
        params = {'api-version': '1.0'}

        try:
            r = self._session.get(url, params=params, headers=headers, timeout=self._timeout)
        except Exception as e:
            return phantom.APP_ERROR, str(e)

        if r.status_code != 200:
            return phantom.APP_ERROR, r.text

        resp_json = None
        try:
            resp_json = r.json()
        except Exception as e:
            return phantom.APP_ERROR, str(e)

        domain = resp_json.get('domain_name')
        if not domain:
            return phantom.APP_ERROR, "Did not find domain in response. Cannot continue"

        return phantom.APP_SUCCESS, domain
    def _set_header_for_rest_call(self, config):
        """This function is used to update the headers with access_token before making REST call.

        For non-test actions it reuses whichever stored token exists (the
        client-credentials token wins over the user token). During Test
        Connectivity it performs a one-time full authentication instead.
        """
        if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY:
            resp_json = None
            if self._state.get("oauth_token", {}):
                resp_json = self._state.get("oauth_token", {})
            # client-credentials token takes precedence if both are stored
            if self._state.get("oauth_client_token", {}):
                resp_json = self._state.get("oauth_client_token", {})
            if resp_json:
                self._session.auth = OAuth2TokenAuth(resp_json['access_token'], resp_json['token_type'])
        elif self.get_action_identifier() == phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY and not self._is_token_test_connectivity:
            # guard prevents infinite recursion: set_authentication_method
            # ends up calling back into this function
            self._is_token_test_connectivity = True
            return self.set_authentication_method(config)
        return phantom.APP_SUCCESS, ""
    def _set_azure_auth(self, config):
        """Authenticate with Azure using the resource-owner password grant.

        Reuses the refresh token when one is stored (and the client id did
        not change); otherwise exchanges username/password for a new token
        at the tenant's OAuth2 endpoint.

        :param config: asset configuration dict
        :return: (OAuth2TokenAuth, "") on success, (None, error message) otherwise
        """
        ret_val, message = self._check_password(config)
        if phantom.is_fail(ret_val):
            self.save_progress(message)
            return None, message

        username = config[phantom.APP_JSON_USERNAME]
        password = config[phantom.APP_JSON_PASSWORD]
        client_id = config.get(EWS_JSON_CLIENT_ID)
        client_secret = config.get(EWS_JSON_CLIENT_SECRET)

        if not client_id:
            return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_ID)
        if not client_secret:
            return None, "ERROR: {0} is a required parameter for Azure Authentication, please specify one.".format(EWS_JSON_CLIENT_SECRET)

        oauth_token = self._state.get('oauth_token')
        is_oauth_token = oauth_token and oauth_token.get("access_token") and oauth_token.get("refresh_token")
        # Prefer the cheap refresh path outside of Test Connectivity.
        if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY and is_oauth_token and not self._is_client_id_changed:
            self.debug_print("Try to generate token from refresh token")
            ret = self._azure_int_auth_refresh(client_id, client_secret)
            return ret

        client_req_id = str(uuid.uuid4())
        ret_val, domain = self._get_domain(username, client_req_id)
        if phantom.is_fail(ret_val):
            return None, domain

        headers = {'client-request-id': client_req_id, 'return-client-request-id': 'True'}
        url = "{0}/{1}/oauth2/token".format(EWS_LOGIN_URL, domain)
        params = None
        # the OAuth resource is the EWS server this asset points at
        parsed_base_url = urlparse(self._base_url)
        data = {
            'resource': '{0}://{1}'.format(parsed_base_url.scheme, parsed_base_url.netloc),
            'client_id': client_id,
            'username': username,
            'password': password,
            'grant_type': 'password',
            'scope': 'openid',
            'client_secret': client_secret
        }

        try:
            r = self._session.post(url, params=params, headers=headers, data=data, verify=True, timeout=self._timeout)
        except Exception as e:
            return None, str(e)

        if r.status_code != 200:
            return None, self._extract_error(r)

        resp_json = None
        try:
            resp_json = r.json()
        except Exception as e:
            return None, str(e)

        if 'token_type' not in resp_json:
            return None, "token_type not found in response from server"

        if 'access_token' not in resp_json:
            return None, "token not found in response from server"

        self._state["oauth_token"] = resp_json
        self._state['client_id'] = client_id
        self.save_progress("Got Access Token")

        return OAuth2TokenAuth(resp_json['access_token'], resp_json['token_type']), ""
def _check_password(self, config):
if phantom.APP_JSON_PASSWORD not in list(config.keys()):
return phantom.APP_ERROR, "Password not present in asset configuration"
return phantom.APP_SUCCESS, ''
def _validate_integer(self, action_result, parameter, key, allow_zero=False):
try:
if not float(parameter).is_integer():
return action_result.set_status(
phantom.APP_ERROR, "Please provide a valid integer value in the '{0}' parameter".format(key)), None
parameter = int(parameter)
except Exception:
return action_result.set_status(phantom.APP_ERROR, "Please provide a valid integer value in the '{0}' parameter".format(key)), None
if not allow_zero and parameter <= 0:
return action_result.set_status(
phantom.APP_ERROR, "Please provide a non-zero positive integer in the '{0}' parameter".format(key)), None
elif allow_zero and parameter < 0:
return action_result.set_status(
phantom.APP_ERROR, "Please provide a valid non-negative integer value in the '{0}' parameter".format(key)), None
return phantom.APP_SUCCESS, parameter
    def _get_error_message_from_exception(self, e):
        """ This method is used to get appropriate error message from the exception.
        :param e: Exception object
        :return: error message
        """
        error_code = None
        error_message = "Error message unavailable. Please check the asset configuration and|or action parameters."

        self.error_print("Error occurred.", e)
        try:
            if hasattr(e, "args"):
                if len(e.args) > 1:
                    # convention: (code, message)
                    error_code = e.args[0]
                    error_message = e.args[1]
                elif len(e.args) == 1:
                    error_message = e.args[0]
        except Exception as e:
            # NOTE: `e` is rebound here; the recursive call formats the *new*
            # exception raised while inspecting the original one.
            self.error_print("Error occurred while fetching exception information. Details: {}".format(
                self._get_error_message_from_exception(e)))

        if not error_code:
            error_text = "Error Message: {}".format(error_message)
        else:
            error_text = "Error Code: {}. Error Message: {}".format(error_code, error_message)

        return error_text
    def _get_string(self, input_str, charset):
        """Best-effort round-trip of *input_str* through *charset*.

        UnicodeDammit guesses the original encoding before re-encoding.
        Returns the input unchanged (and logs) when conversion fails or the
        input is falsy.
        """
        try:
            if input_str:
                input_str = UnicodeDammit(input_str).unicode_markup.encode(charset).decode(charset)
        except Exception:
            self.debug_print("Error occurred while converting to string with specific encoding")
        return input_str
def _is_ip(self, input_ip_address):
"""
Function that checks given address and return True if address is valid IPv4 or IPV6 address.
:param input_ip_address: IP address
:return: status (success/failure)
"""
try:
ipaddress.ip_address(input_ip_address)
except Exception:
return False
return True
    def _extract_error(self, r):
        """ This method generates an error message from the error response.
        :param r: Response object
        :return: error message
        """
        try:
            error_json = r.json()
            error = error_json["error"]
            # Known invalid-parameter errors invalidate the cached tokens.
            if error in EWS_ASSET_PARAM_CHECK_LIST_ERRORS:
                self._reset_the_state()
            error_desc = error_json["error_description"]
            error_text = "An error occurred. Error: {}, description: {}".format(error, error_desc)
            return error_text
        except Exception:
            # Non-JSON (or malformed) body: fall back to the raw response text.
            return r.text
def _set_client_cred_auth(self, config):
    """ This method generates OAuth token using the client credentials grant.
    :param config: Dictionary of asset configuration variables
    :return: An OAuth2TokenAuth object in case of success otherwise, an error message
    """
    # Reuse the cached token for normal actions; test connectivity always
    # requests a fresh token.
    oauth_token = self._state.get("oauth_client_token", {})
    if self.get_action_identifier() != phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY and oauth_token:
        if oauth_token.get('access_token') and oauth_token.get('token_type'):
            return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), ""
    client_id = config.get("client_id")
    client_secret = config.get("client_secret")
    if not (client_id and client_secret):
        return None, MISSING_CLIENT_CREDS
    # Correlation id used while resolving the tenant (domain) for the user.
    client_req_id = str(uuid.uuid4())
    username = config[phantom.APP_JSON_USERNAME]
    ret_val, domain = self._get_domain(username, client_req_id)
    if phantom.is_fail(ret_val):
        # On failure _get_domain returns the error message in place of the domain.
        return None, domain
    url = "{0}/{1}/oauth2/token".format(EWS_LOGIN_URL, domain)
    parsed_base_url = urlparse(self._base_url)
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'}
    data = {
        "client_id": client_id,
        "client_secret": client_secret,
        "grant_type": "client_credentials",
        # Token audience is the scheme+host of the configured EWS URL.
        "resource": "{0}://{1}".format(parsed_base_url.scheme, parsed_base_url.netloc),
    }
    self.debug_print("Requesting a new token for OAuth client credentials authentication")
    try:
        r = self._session.post(url, headers=headers, data=data, verify=True, timeout=self._timeout)
    except Exception as e:
        # Drop the cached token so the next run starts from scratch.
        self._state.pop("oauth_client_token", None)
        return None, str(e)
    if r.status_code != 200:
        return None, self._extract_error(r)
    oauth_token = None
    try:
        oauth_token = r.json()
    except Exception as e:
        return None, str(e)
    self.save_progress("Received access token")
    # Cache token and client id; the client id is compared on the next run
    # to detect asset configuration changes.
    self._state['oauth_client_token'] = oauth_token
    self._state['client_id'] = client_id
    return OAuth2TokenAuth(oauth_token['access_token'], oauth_token['token_type']), ""
def _encrypt_client_token(self, state):
    """Encrypt the cached OAuth client token's access_token inside *state*.

    :param state: state dictionary (mutated in place)
    :return: the state dictionary; if encryption fails, the token entry is
             removed so a broken token is never persisted
    """
    try:
        if "oauth_client_token" in state and self.auth_type == AUTH_TYPE_CLIENT_CRED:
            self.debug_print("Encrypting the oauth client token")
            access_token = state["oauth_client_token"]["access_token"]
            state["oauth_client_token"]["access_token"] = encryption_helper.encrypt(access_token, self.get_asset_id())
    except Exception as e:
        self.debug_print("Error occurred while encrypting the token: {}. Deleting the token".format(
            self._get_error_message_from_exception(e)))
        state.pop("oauth_client_token", None)
    return state
def _decrypt_client_token(self, state):
    """Decrypt the cached OAuth client token's access_token inside *state*.

    :param state: state dictionary (mutated in place)
    :return: the state dictionary; if decryption fails, the token entry is
             removed so a stale/corrupt token is not reused
    """
    try:
        if "oauth_client_token" in state:
            self.debug_print("Decrypting the oauth client token")
            plaintext = encryption_helper.decrypt(state["oauth_client_token"]["access_token"], self.get_asset_id())
            state["oauth_client_token"]["access_token"] = plaintext
    except Exception as e:
        self.debug_print("Error occurred while decrypting the token: {}. Deleting the token".format(
            self._get_error_message_from_exception(e)))
        state.pop("oauth_client_token", None)
    return state
def finalize(self):
    """Encrypt any cached tokens and persist the state file after an action run."""
    if self.auth_type == AUTH_TYPE_CLIENT_CRED:
        encrypted_state = self._encrypt_client_token(self._state)
    else:
        encrypted_state = self.rsh._encrypt_state(self._state)
    self._state = encrypted_state
    self.save_state(self._state)
    return phantom.APP_SUCCESS
def _clean_the_state(self):
    """Drop cached tokens that do not belong to the configured auth type."""
    self.debug_print("Cleaning the state")
    client_cred_in_use = self.auth_type == AUTH_TYPE_CLIENT_CRED
    azure_in_use = self.auth_type in (AUTH_TYPE_AZURE_INTERACTIVE, AUTH_TYPE_AZURE)
    if not client_cred_in_use:
        self._state.pop("oauth_client_token", None)
    if not azure_in_use:
        self._state.pop("oauth_token", None)
def _reset_the_state(self):
    """Replace the state file contents with a minimal fresh state."""
    self.debug_print("Resetting the state file")
    app_version = self.get_app_json().get("app_version")
    self._state = {"app_version": app_version}
def initialize(self):
    """ Called once for every action, all member initializations occur here"""
    config = self.get_config()
    self.auth_type = config.get(EWS_JSON_AUTH_TYPE, AUTH_TYPE_AZURE)
    # Handles state encryption for the non-client-credential auth types.
    self.rsh = RequestStateHandler(self.get_asset_id())
    self._state = self.load_state()
    if not isinstance(self._state, dict):
        # Corrupt/legacy state file: start over with a minimal state.
        self.debug_print("Resetting the state file with the default format")
        self._state = {"app_version": self.get_app_json().get("app_version")}
        if self.auth_type == AUTH_TYPE_AZURE_INTERACTIVE:
            # Interactive auth cannot silently recover a lost token cache.
            return self.set_status(phantom.APP_ERROR, EWSONPREM_STATE_FILE_CORRUPT_ERROR)
    if self.auth_type == AUTH_TYPE_CLIENT_CRED:
        self._state = self._decrypt_client_token(self._state)
    else:
        self._state, message = self.rsh._decrypt_state(self._state)
        if message:
            return self.set_status(phantom.APP_ERROR, message)
    # The headers, initialize them here once and use them for all other REST calls
    self._headers = {'Content-Type': 'text/xml; charset=utf-8', 'Accept': 'text/xml'}
    self._session = requests.Session()
    self._base_url = config[EWSONPREM_JSON_DEVICE_URL]
    message = ''
    self._clean_the_state()
    # Despite the names, these flags are True when the relevant cached token
    # is MISSING for the selected auth type (i.e. re-auth is required).
    is_oauth_token_exist = self.auth_type in [AUTH_TYPE_AZURE, AUTH_TYPE_AZURE_INTERACTIVE] and \
        not self._state.get("oauth_token", {}).get("access_token")
    is_oauth_client_token_exist = self.auth_type == AUTH_TYPE_CLIENT_CRED and \
        not self._state.get("oauth_client_token", {}).get("access_token")
    # A changed client id invalidates the cached token (see _set_client_cred_auth).
    self._is_client_id_changed = (self._state.get('client_id') and config.get("client_id")) and \
        self._state.get('client_id') != config.get("client_id")
    if self._is_client_id_changed or is_oauth_token_exist or is_oauth_client_token_exist:
        self._is_token_test_connectivity = self.get_action_identifier() == phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY
        ret, message = self.set_authentication_method(config)
        if phantom.is_fail(ret):
            return self.set_status(ret, message)
    if self._base_url.endswith('/'):
        self._base_url = self._base_url[:-1]
    # The host member extracts the host from the URL, is used in creating status messages
    self._host = self._base_url[self._base_url.find('//') + 2:]
    self._impersonate = config[EWS_JSON_USE_IMPERSONATE]
    ret = self._handle_preprocess_scipts()
    if phantom.is_fail(ret):
        return ret
    self.set_validator('ipv6', self._is_ip)
    self._timeout = config.get("timeout", DEFAULT_REQUEST_TIMEOUT)
    # NOTE(review): `self` doubles as the action_result argument here;
    # _validate_integer only calls set_status on it, which the connector
    # base class also provides — confirm.
    ret_val, self._timeout = self._validate_integer(self, self._timeout, "Request Timeout")
    if phantom.is_fail(ret_val):
        return self.get_status()
    return phantom.APP_SUCCESS
def _get_error_details(self, resp_json):
    """ Function that parses the error json received from the device and placed into a json"""
    if not resp_json:
        return {"message": "Not Found", "code": "Not supplied"}
    return {
        'message': resp_json.get('m:MessageText', 'Not Specified'),
        'code': resp_json.get('m:ResponseCode', 'Not Specified'),
    }
def _create_aqs(self, subject, sender, body):
    """Build an AQS query string from the optional subject/from/body terms."""
    terms = []
    for field, value in (('subject', subject), ('from', sender), ('body', body)):
        if value:
            terms.append('{}:"{}"'.format(field, value))
    return ' '.join(terms)
# TODO: Should change these function to be parameterized, instead of one per type of request
def _check_get_attachment_response(self, resp_json):
    """Return the GetAttachment response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:GetAttachmentResponse']['m:ResponseMessages']['m:GetAttachmentResponseMessage'])
def _check_getitem_response(self, resp_json):
    """Return the GetItem response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:GetItemResponse']['m:ResponseMessages']['m:GetItemResponseMessage'])
def _check_find_response(self, resp_json):
    """Return the FindItem response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:FindItemResponse']['m:ResponseMessages']['m:FindItemResponseMessage'])
def _check_delete_response(self, resp_json):
    """Return the DeleteItem response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:DeleteItemResponse']['m:ResponseMessages']['m:DeleteItemResponseMessage'])
def _check_update_response(self, resp_json):
    """Return the UpdateItem response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:UpdateItemResponse']['m:ResponseMessages']['m:UpdateItemResponseMessage'])
def _check_copy_response(self, resp_json):
    """Return the CopyItem response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:CopyItemResponse']['m:ResponseMessages']['m:CopyItemResponseMessage'])
def _check_markasjunk_response(self, resp_json):
    """Return the MarkAsJunk response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:MarkAsJunkResponse']['m:ResponseMessages']['m:MarkAsJunkResponseMessage'])
def _check_move_response(self, resp_json):
    """Return the MoveItem response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:MoveItemResponse']['m:ResponseMessages']['m:MoveItemResponseMessage'])
def _check_expand_dl_response(self, resp_json):
    """Return the ExpandDL response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:ExpandDLResponse']['m:ResponseMessages']['m:ExpandDLResponseMessage'])
def _check_findfolder_response(self, resp_json):
    """Return the FindFolder response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:FindFolderResponse']['m:ResponseMessages']['m:FindFolderResponseMessage'])
def _check_getfolder_response(self, resp_json):
    """Return the GetFolder response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:GetFolderResponse']['m:ResponseMessages']['m:GetFolderResponseMessage'])
def _check_resolve_names_response(self, resp_json):
    """Return the ResolveNames response message node from the SOAP envelope."""
    return (resp_json['s:Envelope']['s:Body']
            ['m:ResolveNamesResponse']['m:ResponseMessages']['m:ResolveNamesResponseMessage'])
def _parse_fault_node(self, result, fault_node):
    """Turn a SOAP <Fault> node into an error status on *result*."""
    code = fault_node.get('faultcode', {}).get('#text', 'Not specified')
    detail = fault_node.get('faultstring', {}).get('#text', 'Not specified')
    return result.set_status(phantom.APP_ERROR, 'Error occurred, Code: {0} Detail: {1}'.format(code, detail))
def _clean_xml(self, input_xml):
    """Strip XML-invalid character references before parsing.

    MS is known to send invalid xml chars that its own msxml library deems
    as invalid: https://support.microsoft.com/en-us/kb/315580
    """
    invalid_char_ref = r"&#x([0-8]|[b-cB-C]|[e-fE-F]|1[0-9]|1[a-fA-F]);"
    clean_xml, substitutions = re.subn(invalid_char_ref, '', input_xml)
    self.debug_print("Cleaned xml with {0} substitutions".format(substitutions))
    return clean_xml
def _get_http_error_details(self, r):
    """Extract a readable error message from a failed HTTP response.

    :param r: Response object
    :return: the SOAP fault detail text when present, a short placeholder
             when the XML body cannot be parsed, or "" otherwise
    """
    if 'text/xml' in r.headers.get('Content-Type', ''):
        # Try a xmltodict parse
        try:
            resp_json = xmltodict.parse(self._clean_xml(r.text))
            # convert from OrderedDict to plain dict
            resp_json = json.loads(json.dumps(resp_json))
        except Exception as e:
            error_text = self._get_error_message_from_exception(e)
            self.debug_print("Error occurred while parsing the HTTP error response. {0}".format(error_text))
            return "Unable to parse error details"
        try:
            # SOAP fault detail message, when the server supplied one.
            return resp_json['s:Envelope']['s:Body']['s:Fault']['detail']['e:Message']['#text']
        except Exception:
            pass
    return ""
def set_authentication_method(self, config):
    """Configure self._session.auth for the selected auth type.

    :param config: dictionary of asset configuration variables
    :return: (status, message) tuple — message is "" on success
    """
    # Each branch sets self._session.auth (None on failure) and a message.
    if self.auth_type == AUTH_TYPE_AZURE:
        self.save_progress("Using Azure AD authentication")
        self._session.auth, message = self._set_azure_auth(config)
    elif self.auth_type == AUTH_TYPE_AZURE_INTERACTIVE:
        self.save_progress("Using Azure AD authentication (interactive)")
        self._session.auth, message = self._set_azure_int_auth(config)
    elif self.auth_type == AUTH_TYPE_FEDERATED:
        self.save_progress("Using Federated authentication")
        self._session.auth, message = self._set_federated_auth(config)
    elif self.auth_type == AUTH_TYPE_CLIENT_CRED:
        # Decrypt any cached token before deciding whether a new one is needed.
        self._state = self._decrypt_client_token(self._state)
        self.save_progress("Using Client credentials authentication")
        self._session.auth, message = self._set_client_cred_auth(config)
    else:
        # Make sure username and password are set
        ret_val, message = self._check_password(config)
        if phantom.is_fail(ret_val):
            self.save_progress(message)
            # Fixed: previously returned a bare status here, but every caller
            # unpacks two values (status, message) — keep the tuple contract.
            return ret_val, message
        password = config[phantom.APP_JSON_PASSWORD]
        username = config[phantom.APP_JSON_USERNAME]
        # EWS expects DOMAIN\user; tolerate DOMAIN/user in the asset config.
        username = username.replace('/', '\\')
        self._session.auth = HTTPBasicAuth(username, password)
        self.save_progress("Using HTTP Basic authentication")
    if not self._session.auth:
        return phantom.APP_ERROR, message
    return phantom.APP_SUCCESS, ""
def _make_rest_call(self, result, data, check_response, data_string=False):
    """ Function that makes the REST call to the device, generic function that can be called from various action handlers
    Needs to return two values, 1st the phantom.APP_[SUCCESS|ERROR], 2nd the response

    :param result: ActionResult (or connector) to record failures on
    :param data: SOAP body element; wrapped in an envelope before posting
    :param check_response: callable that extracts the response message node
        from the parsed SOAP envelope
    :param data_string: NOTE(review): unused in this body — confirm before removing
    """
    config = self.get_config()
    resp_json = None
    ret, message = self._set_header_for_rest_call(config)
    if phantom.is_fail(ret):
        return result.set_status(ret, message), resp_json
    if self._impersonate and (not self._target_user):
        return result.set_status(phantom.APP_ERROR, "Impersonation is required, but target user not set. Cannot continue execution"), None
    if self._impersonate:
        data = ews_soap.add_to_envelope(data, self._target_user)
    else:
        data = ews_soap.add_to_envelope(data)
    data = ews_soap.get_string(data)
    # Make the call
    try:
        r = self._session.post(self._base_url, data=data, headers=self._headers, timeout=self._timeout, verify=True)
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        return result.set_status(phantom.APP_ERROR, EWSONPREM_SERVER_CONNECTIVITY_ERROR, error_text), resp_json
    if hasattr(result, 'add_debug_data'):
        result.add_debug_data({'r_status_code': r.status_code})
        result.add_debug_data({'r_text': r.text if r else 'r is None'})
        result.add_debug_data({'r_headers': r.headers})
    if r.status_code == 401:
        # Auth may have expired: re-authenticate once and retry the request.
        if self.auth_type == AUTH_TYPE_CLIENT_CRED:
            # Force a fresh token instead of reusing the rejected one.
            self._state.pop("oauth_client_token", None)
        ret, message = self.set_authentication_method(config)
        if phantom.is_fail(ret):
            return result.set_status(ret, message), resp_json
        try:
            r = self._session.post(self._base_url, data=data, headers=self._headers, timeout=self._timeout, verify=True)
        except Exception as e:
            error_text = self._get_error_message_from_exception(e)
            return result.set_status(phantom.APP_ERROR, EWSONPREM_SERVER_CONNECTIVITY_ERROR, error_text), resp_json
    if not (200 <= r.status_code <= 399):
        # error
        detail = self._get_http_error_details(r)
        if r.status_code == 401:
            detail = "{0}. {1}".format(detail, EWS_MODIFY_CONFIG)
        message = "Call failed with HTTP Code: {0}".format(r.status_code)
        if r.reason:
            message = "{}. Reason: {}".format(message, r.reason)
        if detail:
            message = "{}. Details: {}".format(message, detail)
        return result.set_status(phantom.APP_ERROR, message), None
    # Try a xmltodict parse
    try:
        resp_json = xmltodict.parse(self._clean_xml(r.text))
        # convert from OrderedDict to plain dict
        resp_json = json.loads(json.dumps(resp_json))
    except Exception as e:
        # r.text is guaranteed to be NON None, it will be empty, but not None
        msg_string = EWSONPREM_JSON_PARSE_ERROR.format(raw_text=r.text)
        error_text = self._get_error_message_from_exception(e)
        return result.set_status(phantom.APP_ERROR, msg_string, error_text), resp_json
    # Check if there is a fault node present
    fault_node = resp_json.get('s:Envelope', {}).get('s:Body', {}).get('s:Fault')
    if fault_node:
        return self._parse_fault_node(result, fault_node), None
    # Now try getting the response message
    try:
        resp_message = check_response(resp_json)
    except Exception as e:
        msg_string = EWSONPREM_JSON_PARSE_ERROR.format(raw_text=r.text)
        error_text = self._get_error_message_from_exception(e)
        return result.set_status(phantom.APP_ERROR, msg_string, error_text), resp_json
    if not isinstance(resp_message, dict):
        return phantom.APP_SUCCESS, resp_message
    resp_class = resp_message.get('@ResponseClass', '')
    if resp_class == 'Error':
        return result.set_status(phantom.APP_ERROR, EWSONPREM_FROM_SERVER_ERROR.format(**(self._get_error_details(resp_message)))), resp_json
    return phantom.APP_SUCCESS, resp_message
def _test_connectivity(self, param):
    """Handler for 'test connectivity': fetch a single email to validate the setup."""
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    action_result = self.add_action_result(ActionResult(dict(param)))
    ret_val, email_infos = self._get_email_infos_to_process(0, 1, action_result)
    if phantom.is_fail(ret_val):
        # Surface the underlying failure in the log, then fail the run.
        self.debug_print(action_result.get_message())
        action_result.set_status(phantom.APP_ERROR, action_result.get_message())
        self.save_progress(EWSONPREM_CONNECTIVITY_TEST_ERROR)
        return phantom.APP_ERROR
    self.save_progress(EWSONPREM_CONNECTIVITY_TEST_SUCCESS)
    return action_result.set_status(phantom.APP_SUCCESS)
def _get_child_folder_infos(self, user, action_result, parent_folder_info):
    """Page through FindFolder results and collect info on every child folder.

    :param user: mailbox (email address) whose folders are listed
    :param action_result: ActionResult to record failures on
    :param parent_folder_info: dict carrying the parent folder's 'id'
    :return: (status, list of folder-info dicts) — list is None on failure
    """
    # Page through the listing in windows of 500 entries, up to 10000 total.
    step_size = 500
    folder_infos = list()
    for curr_step_value in range(0, 10000, step_size):
        curr_range = "{0}-{1}".format(curr_step_value, curr_step_value + step_size - 1)
        input_xml = ews_soap.xml_get_children_info(user, parent_folder_id=parent_folder_info['id'], query_range=curr_range)
        ret_val, resp_json = self._make_rest_call(action_result, input_xml, self._check_findfolder_response)
        if phantom.is_fail(ret_val):
            return action_result.get_status(), None
        total_items = resp_json.get('m:RootFolder', {}).get('@TotalItemsInView', '0')
        if total_items == '0':
            # total_items gives the total items in the view, not just items returned in the current call
            return action_result.set_status(phantom.APP_ERROR, "Children not found, possibly not present."), None
        folders = []
        # Regular folders and search folders arrive under different keys;
        # either can be a single dict instead of a list when only one exists.
        if resp_json.get('m:RootFolder', {}).get('t:Folders', {}):
            folders_list = resp_json.get('m:RootFolder', {}).get('t:Folders', {}).get('t:Folder', [])
            if not isinstance(folders_list, list):
                folders_list = [folders_list]
            folders.extend(folders_list)
            search_folders_list = resp_json.get('m:RootFolder', {}).get('t:Folders', {}).get('t:SearchFolder', [])
            if not isinstance(search_folders_list, list):
                search_folders_list = [search_folders_list]
            folders.extend(search_folders_list)
        if not folders:
            return action_result.set_status(phantom.APP_ERROR, "Folder information not found in response, possibly not present"), None
        folder_infos.extend([{
            'id': x['t:FolderId']['@Id'],
            'display_name': x['t:DisplayName'],
            'children_count': x['t:ChildFolderCount'],
            'folder_path': self._extract_folder_path(x.get('t:ExtendedProperty'))} for x in folders])
        curr_folder_len = len(folders)
        if curr_folder_len < step_size:
            # got less than what we asked for, so looks like we got all that we wanted
            break
    '''
    for folder_info in folder_infos:
        if (int(folder_info['children_count']) <= 0):
            continue
        curr_ar = ActionResult()
        ret_val, child_folder_infos = self._get_child_folder_infos(user, curr_ar, folder_info)
        if (ret_val):
            folder_infos.extend(child_folder_infos)
    '''
    return phantom.APP_SUCCESS, folder_infos
def _cleanse_key_names(self, input_dict):
    """Recursively replace ':' with '_' in dictionary keys.

    Only keys containing ':' are renamed; their values are cleansed
    recursively (dicts directly, lists element-wise). Non-dict or empty
    inputs are returned unchanged.
    """
    if not input_dict or not isinstance(input_dict, dict):
        return input_dict
    for key in list(input_dict):
        if ':' not in key:
            continue
        value = input_dict.pop(key)
        cleaned_key = key.replace(':', '_')
        if isinstance(value, dict):
            value = self._cleanse_key_names(value)
        elif isinstance(value, list):
            value = [self._cleanse_key_names(item) for item in value]
        input_dict[cleaned_key] = value
    return input_dict
def _validate_range(self, email_range, action_result):
    """Validate a 'min_offset-max_offset' range string.

    Sets an error status on *action_result* and returns it on failure;
    returns phantom.APP_SUCCESS when the range is valid.
    """
    try:
        # Exactly-two unpack: "1-2-3" raises and is rejected as unparseable.
        mini, maxi = [int(chunk) for chunk in email_range.split('-')]
    except Exception:
        return action_result.set_status(phantom.APP_ERROR, "Unable to parse the range. Please specify the range as min_offset-max_offset")
    if mini < 0 or maxi < 0:
        return action_result.set_status(phantom.APP_ERROR, "Invalid min or max offset value specified in range")
    if mini > maxi:
        return action_result.set_status(phantom.APP_ERROR, "Invalid range value, min_offset greater than max_offset")
    if maxi > EWSONPREM_MAX_END_OFFSET_VAL:
        return action_result.set_status(
            phantom.APP_ERROR, "Invalid range value. The max_offset value cannot be greater than {0}".format(EWSONPREM_MAX_END_OFFSET_VAL))
    return phantom.APP_SUCCESS
def _process_query(self, action_result, params, flag=False):
    """Search one or more folders for messages matching the given criteria.

    :param action_result: ActionResult to record data/failures on
    :param params: dict of criteria (subject, sender, body, int_msg_id, aqs,
        is_public_folder, user, folder_path, email_range, ignore_subfolders)
    :param flag: when True, return the matched items themselves instead of
        adding each to action_result
    :return: (status, items list) when flag is True,
             (status, count of matched items) otherwise
    """
    subject = params.get("subject")
    sender = params.get("sender")
    body = params.get("body")
    int_msg_id = params.get("int_msg_id")
    aqs = params.get("aqs")
    is_public_folder = params.get("is_public_folder", False)
    user = params.get("user")
    folder_path = params.get("folder_path")
    email_range = params.get("email_range", "0-10")
    ignore_subfolders = params.get("ignore_subfolders")
    folder_infos = []
    if folder_path:
        # get the id of the folder specified
        ret_val, folder_info = self._get_folder_info(user, folder_path, action_result, is_public_folder)
    else:
        ret_val, folder_info = self._get_root_folder_id(action_result, is_public_folder)
    if phantom.is_fail(ret_val):
        return action_result.get_status(), None
    parent_folder_info = folder_info
    folder_infos.append(folder_info)
    if not ignore_subfolders:
        if int(parent_folder_info['children_count']) != 0:
            ret_val, child_folder_infos = self._get_child_folder_infos(user, action_result, parent_folder_info=parent_folder_info)
            if phantom.is_fail(ret_val):
                return action_result.get_status(), None
            folder_infos.extend(child_folder_infos)
    items_matched = 0
    msg_items = list()
    num_folder_ids = len(folder_infos)
    self.save_progress('Will be searching in {0} folder{1}', num_folder_ids, 's' if num_folder_ids > 1 else '')
    for i, folder_info in enumerate(folder_infos):
        folder_id = folder_info['id']
        # Per-folder ActionResult so one failing folder does not fail the action.
        ar_folder = ActionResult()
        if aqs:
            data = ews_soap.get_search_request_aqs([folder_id], aqs, email_range)
        else:
            data = ews_soap.get_search_request_filter([folder_id], subject=subject, sender=sender,
                body=body, int_msg_id=int_msg_id, email_range=email_range)
        ret_val, resp_json = self._make_rest_call(ar_folder, data, self._check_find_response)
        # Process errors
        if phantom.is_fail(ret_val):
            self.debug_print("Rest call failed: {0}".format(ar_folder.get_message()))
            continue
        resp_json = resp_json.get('m:RootFolder')
        if not resp_json:
            self.debug_print('Result does not contain RootFolder key')
            continue
        items = resp_json.get('t:Items')
        if items is None:
            self.debug_print("There are no items in the response")
            continue
        # A single match arrives as a dict rather than a list; normalize.
        items = resp_json.get('t:Items', {}).get('t:Message', [])
        if not isinstance(items, list):
            items = [items]
        items_matched += len(items)
        for curr_item in items:
            self._cleanse_key_names(curr_item)
            curr_item['folder'] = folder_info['display_name']
            curr_item['folder_path'] = folder_info.get('folder_path')
            if flag:
                msg_items.append(curr_item)
            else:
                action_result.add_data(curr_item)
    if flag:
        return phantom.APP_SUCCESS, msg_items
    return phantom.APP_SUCCESS, items_matched
def _run_query(self, param):
    """ Action handler for the 'run query' action"""
    action_result = self.add_action_result(ActionResult(dict(param)))
    subject = param.get(EWSONPREM_JSON_SUBJECT, "")
    sender = param.get(EWSONPREM_JSON_FROM, "")
    body = param.get(EWSONPREM_JSON_BODY, "")
    int_msg_id = param.get(EWSONPREM_JSON_INT_MSG_ID, "")
    aqs = param.get(EWSONPREM_JSON_QUERY, "")
    is_public_folder = param.get(EWS_JSON_IS_PUBLIC_FOLDER, False)
    # Reject AQS values that cannot be represented as unicode text.
    try:
        if aqs:
            UnicodeDammit(aqs).unicode_markup
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Parameter validation failed for the AQS query. {0}".format(error_text))
        return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the query. Unicode value found.")
    if not subject and not sender and not aqs and not body and not int_msg_id:
        return action_result.set_status(phantom.APP_ERROR, "Please specify at-least one search criteria")
    # Use parameters to create an aqs string
    '''
    if (not aqs):
        aqs = self._create_aqs(subject, sender, body)
    '''
    self.debug_print("AQS_STR: {}".format(aqs))
    # Connectivity
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    user = param[EWSONPREM_JSON_EMAIL]
    folder_path = param.get(EWSONPREM_JSON_FOLDER)
    # The searched mailbox becomes the impersonation target for the REST call.
    self._target_user = user
    ignore_subfolders = param.get('ignore_subfolders', False)
    # self.save_progress("Searching in {0}\\{1}{2}".format(
    #     self._clean_str(user),
    #     folder_path if folder_path else 'All Folders',
    #     ' (and the children)' if (not ignore_subfolders) else ''))
    email_range = param.get(EWSONPREM_JSON_RANGE, "0-10")
    ret_val = self._validate_range(email_range, action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    params = {
        "subject": subject,
        "sender": sender,
        "body": body,
        "int_msg_id": int_msg_id,
        "aqs": aqs,
        "is_public_folder": is_public_folder,
        "user": user,
        "folder_path": folder_path,
        "email_range": email_range,
        "ignore_subfolders": ignore_subfolders
    }
    ret_val, items_matched = self._process_query(action_result, params)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    action_result.update_summary({'emails_matched': items_matched})
    # Set the Status
    return action_result.set_status(phantom.APP_SUCCESS)
def _get_container_id(self, email_id):
    """Look up the Phantom container that was ingested for *email_id*.

    :param email_id: source data identifier of the email
    :return: container id, or None when the lookup fails or nothing matches
    """
    encoded_id = quote_plus(email_id)
    url = '{}rest/container?_filter_source_data_identifier="{}"&_filter_asset={}'.format(
        self.get_phantom_base_url(), encoded_id, self.get_asset_id())
    try:
        # Ignored the verify semgrep check as the following is a call to the phantom's REST API on the instance itself
        response = requests.get(url, verify=False)  # nosemgrep
        resp_json = response.json()
    except Exception as e:
        self.debug_print("Unable to query Email container", self._get_error_message_from_exception(e))
        return None
    if resp_json.get('count', 0) <= 0:
        self.debug_print("No container matched")
        return None
    try:
        return resp_json.get('data', [])[0]['id']
    except Exception as e:
        self.debug_print("Container results, not proper", self._get_error_message_from_exception(e))
        return None
def _get_email_data_from_container(self, container_id, action_result):
    """Fetch the raw email text and source id stored in a Phantom container.

    :param container_id: id of the container to read
    :param action_result: ActionResult to record failures on
    :return: RetVal3(status, raw email text or None, email id or None)
    """
    email_data = None
    email_id = None
    resp_data = {}
    try:
        ret_val, resp_data, status_code = self.get_container_info(container_id)
    except ValueError as e:
        error_text = self._get_error_message_from_exception(e)
        return RetVal3(action_result.set_status(
            phantom.APP_ERROR, 'Validation failed for the container_id. {0}'.format(error_text)), email_data, email_id)
    if phantom.is_fail(ret_val):
        return RetVal3(action_result.set_status(phantom.APP_ERROR, str(resp_data)), email_data, email_id)
    # Keep pylint happy
    resp_data = dict(resp_data)
    email_data = resp_data.get('data', {}).get('raw_email')
    # NOTE(review): indexing (not .get) — a container without this key raises
    # KeyError here; confirm whether that can occur for app-created containers.
    email_id = resp_data['source_data_identifier']
    if not email_data:
        return RetVal3(action_result.set_status(
            phantom.APP_ERROR, "Container does not seem to be created by the same app, raw_email data not found."), None, None)
    return RetVal3(phantom.APP_SUCCESS, email_data, email_id)
def _get_email_data_from_vault(self, vault_id, action_result):
    """Load a .msg file from the vault and parse it into a mail object.

    :param vault_id: vault identifier of the .msg file
    :param action_result: ActionResult to record failures on
    :return: (status, parsed mail object or None)
    """
    file_path = None
    try:
        success, message, file_info = ph_rules.vault_info(vault_id=vault_id)
        # vault_info can match several entries; the first one is used.
        file_info = list(file_info)[0]
        file_path = file_info.get('path')
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print(error_text)
        return RetVal2(action_result.set_status(phantom.APP_ERROR, "Could not get file path for vault item"), None)
    if not file_path:
        return RetVal2(action_result.set_status(phantom.APP_ERROR, "Could not get file path for vault item"), None)
    try:
        mail = outlookmsgfile.load(file_path)
    except UnicodeDecodeError as e:
        # Decoding problems get the underlying message surfaced verbatim.
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, "Failed to parse message. {0}".format(error_text)), None
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Failed to parse message. {0}".format(error_text))
        return action_result.set_status(
            phantom.APP_ERROR, "Failed to parse message. Please check if the provided file is valid msg file."), None
    return phantom.APP_SUCCESS, mail
def _decode_subject(self, subject, charset):
    """Decode a MIME encoded-word subject (UTF-8 B/Q encodings) to text.

    :param subject: raw subject header value, possibly folded across lines
    :param charset: charset used to decode the recovered byte payload
    :return: decoded subject string
    """
    decoded_subject = ''
    subject = subject.split('?=\r\n\t=')
    for sub in subject:
        if '?UTF-8?B?' in sub:
            sub = sub.replace('?UTF-8?B?', '').replace('?=', '')
            sub = base64.b64decode(sub)
        elif '?UTF-8?Q?' in sub:
            sub = sub.replace('?UTF-8?Q?', '').replace('?=', '')
            sub = quopri.decodestring(sub)
        # Fixed: only decode when a marker matched and produced bytes — a
        # plain-text fragment is already a str and has no decode() method.
        if isinstance(sub, bytes):
            sub = sub.decode(charset)
        decoded_subject = "{}{}".format(decoded_subject, sub)
    return decoded_subject
def _decode_uni_string(self, input_str, def_name):
    """Decode RFC 2047 encoded-words embedded in *input_str*.

    :param input_str: string possibly containing '=?...?=' encoded chunks
    :param def_name: fallback value returned if decoding fails entirely
    :return: input_str with every decodable encoded chunk replaced in place
    """
    # try to find all the decoded strings, we could have multiple decoded strings
    # or a single decoded string between two normal strings separated by \r\n
    # YEAH...it could get that messy
    input_str = input_str.replace('\r\n', '')
    encoded_strings = re.findall(r'=\?.*\?=', input_str, re.I)
    # return input_str as is, no need to do any conversion
    if not encoded_strings:
        return input_str
    # get the decoded strings
    try:
        decoded_strings = [decode_header(x)[0] for x in encoded_strings]
        decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings]
    except Exception as e:
        # Fixed: this handler used to unpack two values from the single-string
        # return of _get_error_message_from_exception, via a nonexistent
        # self._base_connector, and call nonexistent self._debug_print — the
        # handler itself raised instead of returning the fallback.
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Decoding: {0}. {1}".format(encoded_strings, error_text))
        return def_name
    # convert to dict for safe access, if it's an empty list, the dict will be empty
    decoded_strings = dict(enumerate(decoded_strings))
    for i, encoded_string in enumerate(encoded_strings):
        decoded_string = decoded_strings.get(i)
        if not decoded_string:
            # nothing to replace with
            continue
        value = decoded_string.get('value')
        encoding = decoded_string.get('encoding')
        if not encoding or not value:
            # nothing to replace with
            continue
        try:
            # Some non-ascii characters were causing decoding issue with
            # the UnicodeDammit and working correctly with the decode function.
            # keeping previous logic in the except block incase of failure.
            value = value.decode(encoding)
            input_str = input_str.replace(encoded_string, value)
        except Exception:
            try:
                if encoding != 'utf-8':
                    value = str(value, encoding)
            except Exception:
                pass
            try:
                if value:
                    value = UnicodeDammit(value).unicode_markup
                    input_str = input_str.replace(encoded_string, value)
            except Exception:
                pass
    return input_str
def _get_email_headers_from_mail(self, mail, charset=None, email_headers=None):
    """Build a CaseInsensitiveDict of headers from a parsed mail object.

    :param mail: parsed email message; when given, its headers and charset
        take precedence over the other arguments
    :param charset: charset used to normalize header values (default utf-8)
    :param email_headers: pre-extracted (name, value) header tuples
    :return: CaseInsensitiveDict of headers, or {} when none are available
    """
    if mail:
        email_headers = list(mail.items())  # it's gives message headers
        # TODO: the next 2 ifs can be condensed to use 'or'
        if charset is None:
            charset = mail.get_content_charset()
    if not charset:
        charset = 'utf-8'
    if not email_headers:
        return {}
    # Convert the header tuple into a dictionary
    headers = CaseInsensitiveDict()
    try:
        [headers.update({x[0]: self._get_string(x[1], charset)}) for x in email_headers]
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Error occurred while converting the header tuple into a dictionary. {}".format(error_text))
    # Decode unicode subject
    # if '?UTF-8?' in headers['Subject']:
    #     chars = 'utf-8'
    #     headers['Subject'] = self._decode_subject(headers['Subject'], chars)
    # Handle received separately: there are usually several Received headers
    # and the dict above keeps only one, so collect them all into a list.
    received_headers = list()
    try:
        received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Error occurred while handling the received header tuple separately. {}".format(error_text))
    if received_headers:
        headers['Received'] = received_headers
    # handle the subject string, if required add a new key
    subject = headers.get('Subject')
    if subject:
        if isinstance(subject, str):
            headers['decodedSubject'] = self._decode_uni_string(subject, subject)
    return headers
def _get_mail_header_dict(self, email_data, action_result):
    """Parse raw RFC822 text and return its headers as a plain dict.

    :param email_data: raw email text
    :param action_result: ActionResult to record failures on
    :return: RetVal2(status, dict of headers or None)
    """
    try:
        mail = email.message_from_string(email_data)
    except Exception:
        return RetVal2(action_result.set_status(
            phantom.APP_ERROR, "Unable to create email object from data. Does not seem to be valid email"), None)
    headers = mail.__dict__.get('_headers')
    if not headers:
        return RetVal2(action_result.set_status(
            phantom.APP_ERROR, "Could not extract header info from email object data. Does not seem to be valid email"), None)
    header_dict = {name: value for name, value in headers}
    return RetVal2(phantom.APP_SUCCESS, header_dict)
def _handle_email_with_container_id(self, action_result, container_id):
    """Fetch the email stored in the given container, record its headers, and return its id."""
    status, email_data, email_id = self._get_email_data_from_container(container_id, action_result)
    if phantom.is_fail(status):
        return action_result.get_status(), None
    action_result.update_summary({"email_id": email_id})
    status, header_dict = self._get_mail_header_dict(email_data, action_result)
    if phantom.is_fail(status):
        return action_result.get_status(), None
    action_result.add_data(header_dict)
    return phantom.APP_SUCCESS, email_id
def _handle_email_with_vault_id(self, action_result, vault_id, ingest_email, target_container_id=None, charset=None, user=None):
    """Load an MSG file from the vault, surface its headers, and optionally locate it for ingestion.

    :param action_result: ActionResult to record status/data/summary on
    :param vault_id: vault id of the MSG file
    :param ingest_email: when falsy, stop after adding the headers to the result
    :param target_container_id: unused here; passed through by the caller
    :param charset: optional charset forwarded to the header extraction
    :param user: unused
    :return: (status, EWS message id or None)
    """
    ret_val, mail = self._get_email_data_from_vault(vault_id, action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status(), None
    # BUGFIX: 'headers' was previously unbound when 'mail' was falsy, causing a
    # NameError at the 'if not headers' check below; initialize it up-front
    headers = None
    try:
        if mail:
            headers = self._get_email_headers_from_mail(mail, charset)
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, "Unable to get email header string from message. {0}".format(error_text)), None
    if not headers:
        return action_result.set_status(phantom.APP_ERROR, "Unable to fetch the headers information from the provided MSG file"), None
    action_result.add_data(dict(headers))
    if not ingest_email:
        return phantom.APP_SUCCESS, None
    # To ingest, the message must be found in the mailbox via its Message-ID
    int_msg_id = headers.get("Message-ID")
    if not int_msg_id:
        return action_result.set_status(phantom.APP_ERROR, "Unable to fetch the message_id information from the provided MSG file"), None
    params = {
        "int_msg_id": str(int_msg_id)
    }
    ret_val, item_matched = self._process_query(action_result, params, flag=True)
    if phantom.is_fail(ret_val):
        return action_result.get_status(), None
    if not item_matched:
        err_msg = "Unable to ingest the message from the provided MSG file, " \
            "the MSG file should be associated with the logged in SMTP user to ingest message from vault item."
        return action_result.set_status(phantom.APP_ERROR, err_msg), None
    # Use the first matched item's EWS item id
    item = item_matched[0]
    message_id = item.get("t_ItemId", {}).get("@Id")
    return phantom.APP_SUCCESS, message_id
def _handle_email_with_message_id(self, action_result, email_id):
    """Fetch an email by its EWS item id and add its (trimmed) data to the action result.

    :param action_result: ActionResult to record status/data/summary on
    :param email_id: EWS item id of the email
    :return: (status, email_id or None)
    """
    try:
        data = ews_soap.xml_get_emails_data([email_id])
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. {0}".format(error_text)), None
    action_result.update_summary({"email_id": email_id})
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response)
    # Process errors
    if phantom.is_fail(ret_val):
        message = "Error while getting email data for id {0}. Error: {1}".format(email_id, action_result.get_message())
        self.debug_print(message)
        self.send_progress(message)
        return action_result.set_status(phantom.APP_ERROR, message), None
    self._cleanse_key_names(resp_json)
    """
    ret_val, rfc822_format = self._get_rfc822_format(resp_json, action_result)
    if (phantom.is_fail(ret_val)):
        return phantom.APP_ERROR
    if (not rfc822_format):
        return action_result.set_status(phantom.APP_ERROR, 'Result does not contain rfc822 data')
    """
    message = resp_json.get('m_Items', {}).get('t_Message', {})
    # Remove mime content because it can be very large
    if 't_MimeContent' in message:
        message.pop('t_MimeContent')
    action_result.add_data(message)
    # Normalize recipients to a list: a single recipient comes back as a dict
    recipients_mailbox = message.get('t_ToRecipients', {}).get('t_Mailbox')
    if recipients_mailbox and (not isinstance(recipients_mailbox, list)):
        message['t_ToRecipients']['t_Mailbox'] = [recipients_mailbox]
    summary = {
        'subject': message.get('t_Subject'),
        'create_time': message.get('t_DateTimeCreated'),
        'sent_time': message.get('t_DateTimeSent')
    }
    action_result.update_summary(summary)
    return phantom.APP_SUCCESS, email_id
def _get_email(self, param):
    """Handler for the 'get email' action.

    The email can be referenced by message id, container_id or vault_id
    (container/vault sources take precedence over message id). When
    ingest_email is set, the email is additionally processed into a container.

    :param param: action parameter dictionary
    :return: action status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    message_id = param.get(EWSONPREM_JSON_ID)
    container_id = param.get(EWS_JSON_CONTAINER_ID)
    vault_id = param.get(EWS_JSON_VAULT_ID)
    self._target_user = param.get(EWSONPREM_JSON_EMAIL)
    use_current_container = param.get('use_current_container')
    target_container_id = None
    flag = False
    email_id = None
    # container_id must be a valid integer when supplied
    if container_id is not None:
        ret_val, container_id = self._validate_integer(action_result, container_id, "container_id")
        if phantom.is_fail(ret_val):
            return action_result.get_status()
    if use_current_container:
        target_container_id = self.get_container_id()
    ingest_email = param.get(EWSONPREM_JSON_INGEST_EMAIL, False)
    if not message_id and not container_id and not vault_id:
        return action_result.set_status(phantom.APP_ERROR, "Please specify id, container_id or vault_id to get the email")
    if container_id or vault_id:
        if container_id:
            ret_val, email_id = self._handle_email_with_container_id(action_result, container_id)
            if phantom.is_fail(ret_val):
                return action_result.set_status(phantom.APP_ERROR, action_result.get_message())
            # Without ingestion, surfacing the headers is all there is to do
            if not ingest_email:
                return action_result.set_status(phantom.APP_SUCCESS, "Successfully retrieved an email for container ID")
        elif vault_id:
            ret_val, email_id = self._handle_email_with_vault_id(action_result, vault_id, ingest_email, target_container_id)
            if phantom.is_fail(ret_val):
                return action_result.set_status(phantom.APP_ERROR, action_result.get_message())
            if not ingest_email:
                return action_result.set_status(phantom.APP_SUCCESS, "Successfully retrieved an email for vault item")
    elif message_id:
        ret_val, email_id = self._handle_email_with_message_id(action_result, message_id)
        if phantom.is_fail(ret_val):
            return action_result.set_status(phantom.APP_ERROR, action_result.get_message())
        if not ingest_email:
            return action_result.set_status(phantom.APP_SUCCESS, "Successfully retrieved an email for message ID")
    else:
        return action_result.set_status(phantom.APP_ERROR, "Please specify id, container_id or vault_id to get the email")
    # if the container_id or vault_id is given to fetch email and ingest_email is True, then,
    # while ingesting email to create artifacts of attachments, domains, hashes, ips and urls, flag has been set to True.
    # if message_id is given then artifacts has been created on the basis of asset configuration parameter while ingesting.
    if container_id or vault_id:
        flag = True
    if not email_id:
        return action_result.set_status(phantom.APP_ERROR, "Unable to get message ID from the given parameters")
    try:
        self._process_email_id(email_id, target_container_id, flag=flag)
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Error occurred in _process_email_id with Message ID: {0}. {1}".format(email_id, error_text))
        action_result.update_summary({"container_id": None})
        return action_result.set_status(phantom.APP_ERROR, "Error processing email. {0}".format(error_text))
    if target_container_id is None:
        # get the container id that of the email that was ingested
        container_id = self._get_container_id(email_id)
        action_result.update_summary({"container_id": container_id})
    else:
        action_result.update_summary({"container_id": target_container_id})
    return action_result.set_status(phantom.APP_SUCCESS)
def _valid_xml_char_ordinal(self, c):
codepoint = ord(c)
# conditions ordered by presumed frequency
return 0x20 <= codepoint <= 0xD7FF or codepoint in (0x9, 0xA, 0xD) or 0xE000 <= codepoint <= 0xFFFD or 0x10000 <= codepoint <= 0x10FFFF
def _update_email(self, param):
    """Handler for the 'update email' action: set an email's category and/or subject.

    The email is fetched first to obtain its change key, updated, then
    fetched again so the returned data reflects the new state.

    :param param: action parameter dictionary (requires the email id; at
        least one of 'category'/'subject')
    :return: action status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Connectivity
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    email_id = param[EWSONPREM_JSON_ID]
    self._target_user = param.get(EWSONPREM_JSON_EMAIL)
    category = param.get('category')
    subject = param.get('subject')
    if subject is None and category is None:
        return action_result.set_status(phantom.APP_ERROR, "Please specify one of the email properties to update")
    # do a get on the message to get the change id
    try:
        data = ews_soap.xml_get_emails_data([email_id])
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. {0}".format(error_text))
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response)
    # Process errors
    if phantom.is_fail(ret_val):
        message = "Error while getting email data for id {0}. Error: {1}".format(email_id, action_result.get_message())
        self.debug_print(message)
        self.send_progress(message)
        # NOTE(review): returns the bare status constant instead of
        # action_result.set_status(...) like the other error paths — confirm intentional
        return phantom.APP_ERROR
    try:
        change_key = resp_json['m:Items']['t:Message']['t:ItemId']['@ChangeKey']
    except Exception:
        return action_result.set_status(phantom.APP_ERROR, "Unable to get the change key of the email to update")
    # category is supplied as a comma-separated string
    if category is not None:
        category = [x.strip() for x in category.split(',')]
    try:
        data = ews_soap.get_update_email(email_id, change_key, category, subject)
    except ValueError as e:
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, "Validation failed for the given input parameter. {0}".format(error_text))
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_update_response)
    # Process errors
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    if not resp_json:
        return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
    # Re-fetch the email so the returned data shows the updated values
    try:
        data = ews_soap.xml_get_emails_data([email_id])
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. Error: {}".format(error_text))
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response)
    # Process errors
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    self._cleanse_key_names(resp_json)
    message = resp_json.get('m_Items', {}).get('t_Message', {})
    # Normalize categories to a list: a single category comes back as a string
    categories = message.get('t_Categories', {}).get('t_String')
    if categories:
        if not isinstance(categories, list):
            categories = [categories]
        message['t_Categories'] = categories
    action_result.add_data(message)
    # Normalize recipients to a list: a single recipient comes back as a dict
    recipients_mailbox = message.get('t_ToRecipients', {}).get('t_Mailbox')
    if recipients_mailbox and (not isinstance(recipients_mailbox, list)):
        message['t_ToRecipients']['t_Mailbox'] = [recipients_mailbox]
    summary = {
        'subject': message.get('t_Subject'),
        'create_time': message.get('t_DateTimeCreated'),
        'sent_time': message.get('t_DateTimeSent')
    }
    action_result.update_summary(summary)
    # Set the Status
    return action_result.set_status(phantom.APP_SUCCESS)
def _delete_email(self, param):
    """Handler for the 'delete email' action: delete one or more emails by id.

    The first ActionResult is only registered (add_action_result) on failure
    of the bulk call; on success one ActionResult per message id is added
    instead, matched positionally with the server's responses.

    :param param: action parameter dictionary; the id value may be a
        comma-separated list of ids
    :return: action status (APP_SUCCESS / APP_ERROR)
    """
    action_result = ActionResult(dict(param))
    # Connectivity
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    message_id = param[EWSONPREM_JSON_ID]
    self._target_user = param.get(EWSONPREM_JSON_EMAIL)
    message_ids = ph_utils.get_list_from_string(message_id)
    try:
        data = ews_soap.get_delete_email(message_ids)
    except Exception as e:
        self.add_action_result(action_result)
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, 'Parameter validation failed for the ID. {0}'.format(error_text))
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_delete_response)
    # Process errors
    if phantom.is_fail(ret_val):
        self.add_action_result(action_result)
        return action_result.get_status()
    if not resp_json:
        self.add_action_result(action_result)
        return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
    # A single response comes back as a dict; normalize to a list
    if not isinstance(resp_json, list):
        resp_json = [resp_json]
    # One ActionResult per message id, paired with its response by position
    for msg_id, resp_message in zip(message_ids, resp_json):
        curr_param = dict(param)
        curr_param.update({"id": msg_id})
        curr_ar = self.add_action_result(ActionResult(curr_param))
        resp_class = resp_message.get('@ResponseClass', '')
        if resp_class == 'Error':
            curr_ar.set_status(phantom.APP_ERROR, EWSONPREM_FROM_SERVER_ERROR.format(**(self._get_error_details(resp_message))))
            continue
        curr_ar.set_status(phantom.APP_SUCCESS, "Email deleted successfully")
    # Set the Status
    return phantom.APP_SUCCESS
def _clean_str(self, string):
if not string:
return ''
return string.replace('{', '-').replace('}', '-')
def _extract_folder_path(self, extended_property):
if not extended_property:
return ''
# As of right now, the folder path is the only extended property
# that the app extracts, so parse the value directly, once the app starts
# parsing other extended properties, the 't:ExtendedFieldURI dictionary will
# require to be parsed and validated
value = extended_property.get('t:Value')
if not value:
return ''
value = value.lstrip('\\')
# I don't know why exchange gives back the path with
# '\\' separators since '\' is a valid char allowed in a folder name
# makes things confusing and extra parsing code to be written.
# Therefore, the app treats folder paths with '/' as the separator, keeps
# things less confusing for users.
# value = value.replace('\\', '/')
if not value:
return ''
try:
str(value)
except UnicodeEncodeError:
return UnicodeDammit(value).unicode_markup
return value
def _get_root_folder_id(self, action_result, is_public_folder=False):
    """Return a synthetic folder-info dict for the mailbox (or public-folder) root."""
    root_folder_id = 'publicfoldersroot' if is_public_folder else 'root'
    folder_info = {
        'id': root_folder_id,
        'display_name': root_folder_id,
        'children_count': -1,
        'folder_path': ''
    }
    return phantom.APP_SUCCESS, folder_info
def _get_matching_folder_path(self, folder_list, folder_name, folder_path, action_result):
    """The input folder is a list, meaning the folder name matched multiple folders.

    Given the full folder path, return the entry whose extracted path matches,
    or fail.

    :param folder_list: list of folder dicts returned by the server
    :param folder_name: the (leaf) folder name that was searched for
    :param folder_path: the full path the caller expects
    :param action_result: ActionResult to record failure status on
    :return: (status, matching folder dict or None)
    """
    if not folder_list:
        # BUGFIX: previously called action_result(...) directly, which raises
        # TypeError since the object is not callable; use set_status() like
        # the matching error path below
        return action_result.set_status(
            phantom.APP_ERROR, "Unable to find info about folder '{0}'. Returned info list empty".format(folder_name)), None
    for curr_folder in folder_list:
        curr_folder_path = self._extract_folder_path(curr_folder.get('t:ExtendedProperty'))
        if UnicodeDammit(curr_folder_path).unicode_markup == UnicodeDammit(folder_path).unicode_markup:
            return phantom.APP_SUCCESS, curr_folder
    return action_result.set_status(
        phantom.APP_ERROR, "Folder paths did not match while searching for folder: '{0}'".format(folder_path)), None
def _get_folder_info(self, user, folder_path, action_result, is_public_folder=False):
    """Walk a '/'-separated folder path in *user*'s mailbox and return the leaf folder's info.

    A literal '/' inside a folder name is escaped as '\\/'.

    :param user: mailbox owner
    :param folder_path: '/'-separated path to resolve
    :param action_result: ActionResult to record failure status on
    :param is_public_folder: start from 'publicfoldersroot' instead of 'root'
    :return: (status, folder_info dict or None)
    """
    # hindsight is always 20-20, set the folder path separator to be '/', thinking folder names allow '\' as a char.
    # turns out even '/' is supported by office365, so let the action escape the '/' char if it's part of the folder name
    folder_path = folder_path.replace('\\/', self.REPLACE_CONST)
    folder_names = folder_path.split('/')
    folder_names = list(filter(None, folder_names))
    if not folder_names:
        return action_result.set_status(phantom.APP_ERROR, "Please provide a valid value for folder path"), None
    # Restore the escaped '/' characters inside individual folder names
    for i, folder_name in enumerate(folder_names):
        folder_names[i] = folder_name.replace(self.REPLACE_CONST, '/')
    if is_public_folder:
        parent_folder_id = 'publicfoldersroot'
    else:
        parent_folder_id = 'root'
    # Descend one level at a time, resolving each name under the previously
    # found parent folder id
    for i, folder_name in enumerate(folder_names):
        curr_valid_folder_path = '\\'.join(folder_names[:i + 1])
        self.save_progress('Getting info about {0}\\{1}'.format(self._clean_str(user), curr_valid_folder_path))
        input_xml = ews_soap.xml_get_children_info(user, child_folder_name=folder_name, parent_folder_id=parent_folder_id)
        ret_val, resp_json = self._make_rest_call(action_result, input_xml, self._check_findfolder_response)
        if phantom.is_fail(ret_val):
            return ret_val, None
        total_items = resp_json.get('m:RootFolder', {}).get('@TotalItemsInView', '0')
        if total_items == '0':
            return action_result.set_status(
                phantom.APP_ERROR, "Folder '{0}' not found, possibly not present".format(curr_valid_folder_path)), None
        folder = resp_json.get('m:RootFolder', {}).get('t:Folders', {}).get('t:Folder')
        if not folder:
            return action_result.set_status(
                phantom.APP_ERROR,
                "Information about '{0}' not found in response, possibly not present".format(curr_valid_folder_path)
            ), None
        if not isinstance(folder, list):
            folder = [folder]
        # Multiple folders can share a name; pick the one whose full path matches
        ret_val, folder = self._get_matching_folder_path(folder, folder_name, curr_valid_folder_path, action_result)
        if phantom.is_fail(ret_val):
            return ret_val, None
        if not folder:
            return action_result.set_status(
                phantom.APP_ERROR,
                "Information for folder '{0}' not found in response, possibly not present".format(curr_valid_folder_path)
            ), None
        folder_id = folder.get('t:FolderId', {}).get('@Id')
        if not folder_id:
            return action_result.set_status(
                phantom.APP_ERROR,
                "Folder ID information not found in response for '{0}', possibly not present".format(curr_valid_folder_path)
            ), None
        parent_folder_id = folder_id
        folder_info = {
            'id': folder_id,
            'display_name': folder.get('t:DisplayName'),
            'children_count': folder.get('t:ChildFolderCount'),
            'folder_path': self._extract_folder_path(folder.get('t:ExtendedProperty'))
        }
    return phantom.APP_SUCCESS, folder_info
def _mark_as_junk(self, param, action):
    """Handler for block/unblock sender: mark an email as junk or not-junk.

    :param param: action parameter dictionary
    :param action: 'block' marks the email as junk; any other value unmarks it
    :return: action status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Connectivity
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    message_id = param[EWSONPREM_JSON_ID]
    # Either move-to or move-from parameter triggers moving the item
    move_email = param.get('move_to_junk_folder', param.get('move_from_junk_folder', False))
    is_junk = True if action == 'block' else False
    if EWSONPREM_JSON_EMAIL in param:
        self._target_user = param[EWSONPREM_JSON_EMAIL]
    message = "Sender blocked" if action == "block" else "Sender unblocked"
    try:
        data = ews_soap.xml_get_mark_as_junk(message_id, is_junk=is_junk, move_item=move_email)
    except Exception as e:
        return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. Error: {}".format(
            self._get_error_message_from_exception(e)))
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_markasjunk_response)
    # Process errors
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    if move_email:
        try:
            new_email_id = resp_json['m:MovedItemId']['@Id']
        except Exception:
            # Still reported as success: only the moved item id is missing
            return action_result.set_status(phantom.APP_SUCCESS, "Unable to get moved Email ID")
        action_result.add_data({'new_email_id': new_email_id})
        action_result.update_summary({'new_email_id': new_email_id})
        if new_email_id != message_id:
            # Looks like the email was actually moved
            message = "{}{}".format(message, ". Message moved to Junk Folder" if action == "block" else ". Message moved out of Junk Folder")
    # Set the Status
    return action_result.set_status(phantom.APP_SUCCESS, message)
def _copy_move_email(self, param, action="copy"):
    """Handler for copy/move email into a destination folder of *user*'s mailbox.

    :param param: action parameter dictionary (id, folder path, destination user)
    :param action: 'copy' (default) or 'move'
    :return: action status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Connectivity
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    message_id = param[EWSONPREM_JSON_ID]
    folder_path = param[EWSONPREM_JSON_FOLDER]
    user = param[EWSONPREM_JSON_EMAIL]
    is_public_folder = param.get(EWS_JSON_IS_PUBLIC_FOLDER, False)
    # Set the user to impersonate (i.e. target_user), by default it is the destination user
    self._target_user = user
    # finally, see if impersonation has been enabled/disabled for this action
    # as of right now copy or move email is the only action that allows over-ride
    impersonate = not(param.get(EWS_JSON_DONT_IMPERSONATE, False))
    # Use a different email if specified
    impersonate_email = param.get(EWS_JSON_IMPERSONATE_EMAIL)
    if impersonate_email:
        self._target_user = impersonate_email
    self._impersonate = impersonate
    ret_val, folder_info = self._get_folder_info(user, folder_path, action_result, is_public_folder)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    try:
        data = ews_soap.get_copy_email(message_id, folder_info['id'])
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, 'Parameter validation failed for the ID. {0}'.format(error_text))
    response_checker = self._check_copy_response
    # For 'move', replace the request payload and the response checker
    if action == "move":
        try:
            data = ews_soap.get_move_email(message_id, folder_info['id'])
        except Exception as e:
            error_text = self._get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, 'Parameter validation failed for the ID. {0}'.format(error_text))
        response_checker = self._check_move_response
    ret_val, resp_json = self._make_rest_call(action_result, data, response_checker)
    # Process errors
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    if not resp_json:
        return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
    new_email_id = None
    action_verb = 'copied' if action == "copy" else 'moved'
    try:
        new_email_id = resp_json['m:Items']['t:Message']['t:ItemId']['@Id']
    except Exception:
        # The operation succeeded even if the new id cannot be read back
        return action_result.set_status(
            phantom.APP_SUCCESS, "Email {0} successfully, but its message ID could not be retrieved".format(action_verb))
    action_result.add_data({'new_email_id': new_email_id})
    # Set the Status
    return action_result.set_status(phantom.APP_SUCCESS, "Email {0} successfully".format(action_verb))
def _resolve_name(self, param):
    """Handler for the 'lookup email' action: resolve an alias/email via ResolveNames.

    :param param: action parameter dictionary
    :return: action status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Connectivity
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    # NOTE(review): this local name shadows the stdlib 'email' module within this method
    email = param[EWSONPREM_JSON_EMAIL]
    self._target_user = param.get(EWS_JSON_IMPERSONATE_EMAIL)
    data = ews_soap.xml_get_resolve_names(email)
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_resolve_names_response)
    # Process errors
    if phantom.is_fail(ret_val):
        message = action_result.get_message()
        # Give a friendlier message when the server reports no matches
        if 'ErrorNameResolutionNoResults' in message:
            message = 'No email found. The input parameter might not be a valid alias or email.'
        return action_result.set_status(phantom.APP_ERROR, message)
    if not resp_json:
        return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
    resolution_set = resp_json.get('m:ResolutionSet', {}).get('t:Resolution')
    if not resolution_set:
        return action_result.set_summary({'total_entries': 0})
    # Normalize to a list: a single resolution comes back as a dict
    if not isinstance(resolution_set, list):
        resolution_set = [resolution_set]
    action_result.update_summary({'total_entries': len(resolution_set)})
    for curr_resolution in resolution_set:
        self._cleanse_key_names(curr_resolution)
        contact = curr_resolution.get('t_Contact')
        if contact:
            email_addresses = contact.get('t_EmailAddresses', {}).get('t_Entry', [])
            if email_addresses:
                # Normalize to a list: a single entry comes back as a dict
                if not isinstance(email_addresses, list):
                    email_addresses = [email_addresses]
                contact['t_EmailAddresses'] = email_addresses
        action_result.add_data(curr_resolution)
    # Set the Status
    return action_result.set_status(phantom.APP_SUCCESS)
def _expand_dl(self, param):
    """Handler for the 'expand dl' action: list the mailboxes in a distribution list.

    With 'recursive' set, nested distribution lists are expanded too by
    calling this handler again with the nested group.

    :param param: action parameter dictionary (mutated in place when recursing)
    :return: action status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Connectivity
    self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
    group = param[EWSONPREM_JSON_GROUP]
    self._target_user = param.get(EWS_JSON_IMPERSONATE_EMAIL)
    data = ews_soap.get_expand_dl(group)
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_expand_dl_response)
    # Process errors
    if phantom.is_fail(ret_val):
        message = action_result.get_message()
        # Clarify the common "not a DL" failure and echo the input address back
        if 'ErrorNameResolutionNoResults' in message:
            message = '{} The input parameter might not be a distribution list.'.format(message)
            action_result.add_data({"t_EmailAddress": group})
        return action_result.set_status(phantom.APP_ERROR, message)
    if not resp_json:
        return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
    mailboxes = resp_json.get('m:DLExpansion', {}).get('t:Mailbox')
    if not mailboxes:
        action_result.set_summary({'total_entries': 0})
        return action_result.set_status(phantom.APP_SUCCESS)
    # Normalize to a list: a single mailbox comes back as a dict
    if not isinstance(mailboxes, list):
        mailboxes = [mailboxes]
    action_result.update_summary({'total_entries': len(mailboxes)})
    for mailbox in mailboxes:
        # Recurse into nested distribution lists when requested
        if param.get('recursive', False) and "DL" in mailbox['t:MailboxType']:
            param[EWSONPREM_JSON_GROUP] = mailbox['t:EmailAddress']
            self._expand_dl(param)
        self._cleanse_key_names(mailbox)
        action_result.add_data(mailbox)
    # Set the Status
    return action_result.set_status(phantom.APP_SUCCESS)
def _get_email_epoch(self, resp_json):
return None
def _get_rfc822_format(self, resp_json, action_result):
    """Decode the base64 MimeContent of a GetItem response into raw RFC822 data.

    :param resp_json: parsed GetItem response
    :param action_result: ActionResult to record failure status on
    :return: (phantom.APP_SUCCESS, decoded bytes) on success; on failure a
        bare error status (no tuple) — callers must account for both shapes
    """
    try:
        mime_content = resp_json['m:Items']['t:Message']['t:MimeContent']['#text']
    except Exception:
        return action_result.set_status(phantom.APP_ERROR, "Email MimeContent missing in response.")
    try:
        rfc822_email = base64.b64decode(mime_content)
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Unable to decode Email Mime Content. {0}".format(error_text))
        return action_result.set_status(phantom.APP_ERROR, "Unable to decode Email Mime Content")
    return phantom.APP_SUCCESS, rfc822_email
def _get_attachment_meta_info(self, attachment, curr_key, parent_internet_message_id, parent_guid):
attach_meta_info = dict()
try:
attach_meta_info['attachmentId'] = attachment['t:AttachmentId']['@Id']
except Exception:
pass
try:
attach_meta_info['attachmentType'] = curr_key[2:].replace('Attachment', '').lower()
except Exception:
pass
attach_meta_info['parentInternetMessageId'] = parent_internet_message_id
attach_meta_info['parentGuid'] = parent_guid
# attachmentID, attachmentType
for k, v in six.iteritems(attachment):
if not isinstance(v, str):
continue
# convert the key to the convention used by cef
cef_key_name = k[2:]
cef_key_name = "{}{}".format(cef_key_name[0].lower(), cef_key_name[1:])
attach_meta_info[cef_key_name] = v
return attach_meta_info
def _extract_ext_properties_from_attachments(self, resp_json):
    """Collect header dicts and attachment meta-info from an email's attachments.

    Works in two passes: first harvest attachment ids and readily available
    meta info from the given response, then fetch the full attachment data
    and recurse into item (email) attachments.

    :param resp_json: parsed response for a single email (must carry 'emailGuid')
    :return: (status, list of header dicts, list of attachment meta-info dicts)
    """
    email_headers_ret = list()
    attach_meta_info_ret = list()
    # Normalize the top-level key to 'm:Items'
    if 'm:Items' not in resp_json:
        k = list(resp_json.keys())[0]
        resp_json['m:Items'] = resp_json.pop(k)
    # Get the attachments
    try:
        attachments = resp_json['m:Items']['t:Message']['t:Attachments']
    except Exception:
        # No attachments: nothing to collect
        return RetVal3(phantom.APP_SUCCESS)
    attachment_ids = list()
    internet_message_id = None
    try:
        internet_message_id = resp_json['m:Items']['t:Message']['t:InternetMessageId']
    except Exception:
        internet_message_id = None
    email_guid = resp_json['emailGuid']
    # First pass: collect attachment ids and the meta info already present
    for curr_key in list(attachments.keys()):
        attachment_data = attachments[curr_key]
        if not isinstance(attachment_data, list):
            attachment_data = [attachment_data]
        for curr_attachment in attachment_data:
            attachment_ids.append(curr_attachment['t:AttachmentId']['@Id'])
            # Add the info that we have right now
            curr_attach_meta_info = self._get_attachment_meta_info(curr_attachment, curr_key, internet_message_id, email_guid)
            if curr_attach_meta_info:
                attach_meta_info_ret.append(curr_attach_meta_info)
    if not attachment_ids:
        return RetVal3(phantom.APP_SUCCESS)
    # Second pass: fetch the full data for all collected attachment ids
    data = ews_soap.xml_get_attachments_data(attachment_ids)
    action_result = ActionResult()
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_get_attachment_response)
    # Process errors
    if phantom.is_fail(ret_val):
        return RetVal3(action_result.get_status())
    if not isinstance(resp_json, list):
        resp_json = [resp_json]
    for curr_attachment_data in resp_json:
        try:
            curr_attachment_data = curr_attachment_data['m:Attachments']
        except Exception as e:
            error_text = self._get_error_message_from_exception(e)
            self.debug_print("Could not parse the attachments response", error_text)
            continue
        if curr_attachment_data is None:
            self.debug_print("Could not parse the attachments response")
            continue
        curr_attachment_data['emailGuid'] = str(uuid.uuid4())
        ret_val, data = self._extract_ext_properties(curr_attachment_data, internet_message_id, email_guid)
        if data:
            # Headers were extracted: treat as an item (email) attachment
            # and recurse into its own attachments
            email_headers_ret.append(data)
            ret_val, email_headers_info, attach_meta_info = self._extract_ext_properties_from_attachments(curr_attachment_data)
            if email_headers_info:
                email_headers_ret.extend(email_headers_info)
            if attach_meta_info:
                attach_meta_info_ret.extend(attach_meta_info)
        else:
            # This is a file attachment, we most probably already have the info from the resp_json
            # But update it with the call to the xml_get_attachments_data(..) There might be more info
            # that has to be updated
            curr_attach_meta_info = self._get_attachment_meta_info(
                curr_attachment_data['m:Items'], 't:FileAttachment', internet_message_id, email_guid)
            if curr_attach_meta_info:
                # find the attachment in the list and update it
                matched_meta_info = list(
                    [x for x in attach_meta_info_ret if x.get('attachmentId', 'foo1') == curr_attach_meta_info.get('attachmentId', 'foo2')]
                )
                if matched_meta_info:
                    matched_meta_info[0].update(curr_attach_meta_info)
    return phantom.APP_SUCCESS, email_headers_ret, attach_meta_info_ret
def _extract_email_headers(self, email_headers):
    """Parse a raw header blob into a dict, collecting all Received headers into a list.

    :param email_headers: raw email header string
    :return: dict of header name -> decoded value; 'Received' maps to a list
        of every occurrence
    """
    header_parser = HeaderParser()
    try:
        email_part = header_parser.parsestr(email_headers)
    except UnicodeEncodeError:
        email_part = header_parser.parsestr(UnicodeDammit(email_headers).unicode_markup)
    email_headers = list(email_part.items())
    headers = {}
    charset = 'utf-8'
    try:
        # Plain loop instead of a side-effect list comprehension
        for header_name, header_value in email_headers:
            headers[header_name] = self._get_string(header_value, charset)
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Error occurred while converting the header tuple into a dictionary. {}".format(error_text))
    # Handle 'Received' separately: it can occur multiple times, so collect
    # every occurrence into a list (initialized outside the try so it is
    # always defined even if the extraction fails)
    received_headers = list()
    try:
        received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Error occurred while handling the received header tuple separately. {}".format(error_text))
    if received_headers:
        headers['Received'] = received_headers
    return headers
def _extract_ext_properties(self, resp_json, parent_internet_message_id=None, parent_guid=None):
    """Build a headers dict for one email item from its extended properties and body.

    :param resp_json: parsed response for a single email (must carry 'emailGuid')
    :param parent_internet_message_id: InternetMessageId of the parent email, if any
    :param parent_guid: GUID of the parent email, if any
    :return: (phantom.APP_SUCCESS, headers dict)
    """
    # Normalize the top-level key to 'm:Items'
    if 'm:Items' not in resp_json:
        k = list(resp_json.keys())[0]
        resp_json['m:Items'] = resp_json.pop(k)
    headers = dict()
    extended_properties = list()
    # Get the Extended Properties
    try:
        extended_properties = resp_json['m:Items']['t:Message']['t:ExtendedProperty']
    except Exception:
        pass
    if extended_properties:
        # A single property comes back as a dict; normalize to a list
        if not isinstance(extended_properties, list):
            extended_properties = [extended_properties]
        for curr_ext_property in extended_properties:
            property_tag = curr_ext_property.get('t:ExtendedFieldURI', {}).get('@PropertyTag')
            value = curr_ext_property.get('t:Value')
            if not property_tag:
                continue
            # Transport-headers property: parse the raw header blob
            if property_tag.lower() == ews_soap.EXTENDED_PROPERTY_HEADERS.lower() or \
                    property_tag.lower() == ews_soap.EXTENDED_PROPERTY_HEADERS_RESPONSE.lower():
                email_headers = self._extract_email_headers(value)
                if email_headers is not None:
                    headers.update(email_headers)
                continue
            # Plain-text body property
            if property_tag == ews_soap.EXTENDED_PROPERTY_BODY_TEXT:
                headers.update({'bodyText': value})
    # now parse the body in the main resp_json
    try:
        body_text = resp_json['m:Items']['t:Message']['t:Body']['#text']
    except Exception:
        body_text = None
    try:
        body_type = resp_json['m:Items']['t:Message']['t:Body']['@BodyType']
    except Exception:
        body_type = None
    if body_text is not None:
        if body_type is not None:
            # Key derived from @BodyType, e.g. 'bodyHtml' / 'bodyText'
            body_key = "body{0}".format(body_type.title().replace(' ', ''))
            headers.update({body_key: body_text})
    # In some cases the message id is not part of the headers, in this case
    # copy the message id from the envelope to the header
    headers_ci = CaseInsensitiveDict(headers)
    message_id = headers_ci.get('message-id')
    if message_id is None:
        try:
            message_id = resp_json['m:Items']['t:Message']['t:InternetMessageId']
            headers['Message-ID'] = message_id
        except Exception:
            pass
    if parent_internet_message_id is not None:
        headers['parentInternetMessageId'] = parent_internet_message_id
    if parent_guid is not None:
        headers['parentGuid'] = parent_guid
    headers['emailGuid'] = resp_json['emailGuid']
    return phantom.APP_SUCCESS, headers
def _parse_email(self, resp_json, email_id, target_container_id, flag=False):
    """Decode an email's MIME content and hand it to ProcessEmail.

    :param resp_json: GetItem response for the email
    :param email_id: EWS id of the email
    :param target_container_id: container to ingest into, or None
    :param flag: when True, force-enable every extraction option
    :return: (status, message) as returned by ProcessEmail.process_email
    """
    try:
        mime_content = resp_json['m:Items']['t:Message']['t:MimeContent']['#text']
    except Exception:
        return phantom.APP_ERROR, "Email MimeContent missing in response."
    try:
        rfc822_email = base64.b64decode(mime_content)
        # UnicodeDammit guesses the encoding and yields a unicode string
        rfc822_email = UnicodeDammit(rfc822_email).unicode_markup
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        self.debug_print("Unable to decode Email Mime Content. {0}".format(error_text))
        return phantom.APP_ERROR, "Unable to decode Email Mime Content"
    epoch = self._get_email_epoch(resp_json)
    email_header_list = list()
    attach_meta_info_list = list()
    # Tag the email with a GUID so attachments can reference their parent
    resp_json['emailGuid'] = str(uuid.uuid4())
    ret_val, data = self._extract_ext_properties(resp_json)
    if data:
        email_header_list.append(data)
    ret_val, attach_email_headers, attach_meta_info = self._extract_ext_properties_from_attachments(resp_json)
    if attach_email_headers:
        email_header_list.extend(attach_email_headers)
    if attach_meta_info:
        attach_meta_info_list.extend(attach_meta_info)
    config = self.get_config()
    if flag:
        # Force all artifact extraction on, overriding the asset config
        config.update({
            "extract_attachments": True,
            "extract_domains": True,
            "extract_hashes": True,
            "extract_ips": True,
            "extract_urls": True,
            "extract_eml": True
        })
    process_email = ProcessEmail()
    return process_email.process_email(self, rfc822_email, email_id, config, epoch, target_container_id, email_headers=email_header_list,
                                       attachments_data=attach_meta_info_list)
def _process_email_id(self, email_id, target_container_id=None, flag=False):
    """Fetch a single email by id and run it through the ingestion parser.

    :param email_id: EWS id of the email to fetch
    :param target_container_id: container to ingest into, or None
    :param flag: passed through to _parse_email to force full extraction
    :return: phantom.APP_SUCCESS or phantom.APP_ERROR
    """
    # Local ActionResult: failures here are logged/counted, not reported
    action_result = ActionResult()
    try:
        data = ews_soap.xml_get_emails_data([email_id])
    except Exception as e:
        error_text = self._get_error_message_from_exception(e)
        return action_result.set_status(phantom.APP_ERROR, "Parameter validation failed for the ID. {0}".format(error_text))
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_getitem_response)
    # Process errors
    if phantom.is_fail(ret_val):
        message = "Error while getting email data for id {0}. Error: {1}".format(email_id, action_result.get_message())
        self.debug_print(message)
        self.send_progress(message)
        # Count the skip so polling can report it at the end of the run
        self._skipped_emails += 1
        return phantom.APP_ERROR
    ret_val, message = self._parse_email(resp_json, email_id, target_container_id, flag)
    if phantom.is_fail(ret_val):
        return phantom.APP_ERROR
    return phantom.APP_SUCCESS
def _get_email_infos_to_process(self, offset, max_emails, action_result, restriction=None):
    """Query EWS for ids of emails to ingest from the configured folder.

    :param offset: paging offset into the folder
    :param max_emails: maximum number of email ids to fetch
    :param action_result: ActionResult used for status reporting
    :param restriction: optional EWS restriction XML (e.g. a date bound)
    :return: (status, email_infos); email_infos is a list of dicts with
        'id' and 'last_modified_time', or None on error
    """
    config = self.get_config()
    # get the user
    poll_user = config.get(EWS_JSON_POLL_USER, config[phantom.APP_JSON_USERNAME])
    if not poll_user:
        return action_result.set_status(phantom.APP_ERROR, "Polling User Email not specified, cannot continue"), None
    self._target_user = poll_user
    folder_path = config.get(EWS_JSON_POLL_FOLDER, 'Inbox')
    is_public_folder = config.get(EWS_JSON_IS_PUBLIC_FOLDER, False)
    ret_val, folder_info = self._get_folder_info(poll_user, folder_path, action_result, is_public_folder)
    if phantom.is_fail(ret_val):
        return ret_val, None
    manner = config[EWS_JSON_INGEST_MANNER]
    folder_id = folder_info['id']
    # Latest-first ingestion needs a descending sort on the server side
    order = "Descending" if manner == EWS_INGEST_LATEST_EMAILS else "Ascending"
    data = ews_soap.xml_get_email_ids(
        poll_user, order=order, offset=offset, max_emails=max_emails, folder_id=folder_id, restriction=restriction)
    ret_val, resp_json = self._make_rest_call(action_result, data, self._check_find_response)
    # Process errors
    if phantom.is_fail(ret_val):
        # Dump error messages in the log
        self.debug_print(action_result.get_message())
        return ret_val, None
    resp_json = resp_json.get('m:RootFolder')
    if not resp_json:
        return action_result.set_status(phantom.APP_ERROR, 'Result does not contain required RootFolder key'), None
    items = resp_json.get('t:Items')
    if items is None:
        self.debug_print("Items is None")
        return action_result.set_status(phantom.APP_SUCCESS, 'Result does not contain items key. Possibly no emails in folder'), None
    # Reuse the already-fetched 't:Items' dict (previously re-queried from
    # resp_json); a single message comes back as a dict, not a list
    items = items.get('t:Message', [])
    if not isinstance(items, list):
        items = [items]
    email_infos = [{'id': x['t:ItemId']['@Id'], 'last_modified_time': x['t:LastModifiedTime']} for x in items]
    return phantom.APP_SUCCESS, email_infos
def _pprint_email_id(self, email_id):
return "{0}.........{1}".format(email_id[:20], email_id[-20:])
def _process_email_ids(self, email_ids, action_result):
    """Ingest a batch of email ids, tracking per-email parse failures.

    :param email_ids: list of EWS item ids to ingest
    :param action_result: ActionResult used for status reporting
    :return: APP_SUCCESS unless every id in a non-empty batch failed
    """
    if email_ids is None:
        return action_result.set_status(phantom.APP_ERROR, "Did not get access to email IDs")
    self.save_progress("Got {0} email{1}".format(len(email_ids), '' if len(email_ids) == 1 else 's'))
    failed_emails_parsing_list = []
    for i, email_id in enumerate(email_ids):
        self.send_progress("Querying email # {0} with id: {1}".format(i + 1, self._pprint_email_id(email_id)))
        try:
            ret_val = self._process_email_id(email_id)
            if phantom.is_fail(ret_val):
                failed_emails_parsing_list.append(email_id)
        except Exception as e:
            # One bad email must not abort the whole batch
            error_text = self._get_error_message_from_exception(e)
            self.debug_print("Error occurred in _process_email_id # {0} with Message ID: {1}. {2}".format(i, email_id, error_text))
            failed_emails_parsing_list.append(email_id)
    # An empty batch is not a failure; only error out when every id failed
    # (previously an empty list matched len(failed) == len(email_ids) == 0)
    if email_ids and len(failed_emails_parsing_list) == len(email_ids):
        return action_result.set_status(
            phantom.APP_ERROR, "ErrorExp in _process_email_id for all the email IDs: {}".format(str(failed_emails_parsing_list)))
    if self._skipped_emails > 0:
        self.save_progress("Skipped emails: {}. (For more details, check the logs)".format(self._skipped_emails))
    return action_result.set_status(phantom.APP_SUCCESS)
def _get_fips_enabled(self):
try:
from phantom_common.install_info import is_fips_enabled
except ImportError:
return False
fips_enabled = is_fips_enabled()
if fips_enabled:
self.debug_print('FIPS is enabled')
else:
self.debug_print('FIPS is not enabled')
return fips_enabled
def _poll_now(self, param):
    """Handle "poll now": ingest one given email id or the latest N emails.

    :param param: action parameters (container_count, optional container_id)
    :return: the action_result status
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    config = self.get_config()
    # Get the maximum number of emails that we can pull, same as container count
    try:
        max_emails = int(param[phantom.APP_JSON_CONTAINER_COUNT])
    except Exception:
        # Report the failure on the action result (previously set on the
        # connector via self.set_status, inconsistent with the other paths)
        return action_result.set_status(phantom.APP_ERROR, "Invalid container count")
    # Simplified from the old str(...).isdigit() dance: after int() the only
    # invalid values are zero and negatives
    if max_emails <= 0:
        return action_result.set_status(
            phantom.APP_ERROR, "Please provide a valid non-zero positive integer value in 'container_count' parameter")
    self.save_progress("Will be ingesting all possible artifacts (ignoring max artifacts value) for POLL NOW")
    email_id = param.get(phantom.APP_JSON_CONTAINER_ID)
    email_ids = [email_id]
    # get the user
    poll_user = UnicodeDammit(config.get(EWS_JSON_POLL_USER, config[phantom.APP_JSON_USERNAME])).unicode_markup
    if not poll_user:
        # Return just the status (the old trailing ", None" made this path
        # return a tuple while every other path returns a status scalar)
        return action_result.set_status(phantom.APP_ERROR, "Polling User Email not specified, cannot continue")
    self._target_user = poll_user
    if not email_id:
        self.save_progress("POLL NOW Getting {0} '{1}' email ids".format(max_emails, config[EWS_JSON_INGEST_MANNER]))
        ret_val, email_infos = self._get_email_infos_to_process(0, max_emails, action_result)
        if phantom.is_fail(ret_val) or email_infos is None:
            return action_result.get_status()
        if not email_infos:
            return action_result.set_status(phantom.APP_SUCCESS, "No emails found for the ingestion process")
        email_ids = [x['id'] for x in email_infos]
    else:
        self.save_progress("POLL NOW Getting the single email id")
    ret_val = self._process_email_ids(email_ids, action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    return action_result.set_status(phantom.APP_SUCCESS)
def _get_restriction(self):
    """Build the EWS date restriction for polling, or None on a first run.

    The state key depends on the ingest manner: latest-first polling resumes
    from 'last_ingested_format', oldest-first from 'last_email_format'.
    """
    config = self.get_config()
    if config[EWS_JSON_INGEST_MANNER] == EWS_INGEST_LATEST_EMAILS:
        emails_after_key = 'last_ingested_format'
    else:
        emails_after_key = 'last_email_format'
    date_time_string = self._state.get(emails_after_key)
    if not date_time_string:
        return None
    return ews_soap.xml_get_restriction(date_time_string)
def _get_next_start_time(self, last_time):
    """Return *last_time* advanced by one second, in DATETIME_FORMAT.

    Used so the next poll window starts just after the last seen email.
    """
    parsed = datetime.strptime(last_time, DATETIME_FORMAT)
    bumped = parsed + timedelta(seconds=1)
    return bumped.strftime(DATETIME_FORMAT)
def _manage_data_duplication(self, email_infos, email_index, max_emails, total_ingested, limit):
    """Compute how many emails the next polling cycle should fetch.

    Persists checkpoints to state, then decides whether to keep cycling:
    returns (None, None) to stop, otherwise (next_max_emails,
    updated_total_ingested).

    :param email_infos: infos returned by _get_email_infos_to_process
    :param email_index: index of the boundary email (0 for latest-first,
        -1 for oldest-first)
    :param max_emails: number of emails requested this cycle
    :param total_ingested: running count of ingested emails
    :param limit: overall ingestion cap for this poll
    """
    # Define current time to store as starting reference for the next run of scheduled | interval polling
    utc_now = datetime.utcnow()
    self._state['last_ingested_format'] = utc_now.strftime('%Y-%m-%dT%H:%M:%SZ')
    self._state['last_email_format'] = email_infos[email_index]['last_modified_time']
    self.save_state(self._encrypt_client_token(self._state.copy()))
    if max_emails:
        # Stop when at the newest email or when the folder returned fewer
        # emails than requested (nothing more to page through)
        if email_index == 0 or self._less_data:
            return None, None
        # Emails actually ingested = requested minus duplicates and skips
        total_ingested += max_emails - (self._dup_data + self._skipped_emails)
        self._remaining = limit - total_ingested
        if total_ingested >= limit:
            return None, None
        # Count trailing items sharing the boundary timestamp: the next
        # restriction query will return them again, so fetch that many extra
        next_cycle_repeat_data = 0
        last_modified_time = email_infos[email_index]['last_modified_time']
        for x in reversed(email_infos):
            if x["last_modified_time"] == last_modified_time:
                next_cycle_repeat_data += 1
            else:
                break
        max_emails = next_cycle_repeat_data + self._remaining
        return max_emails, total_ingested
    else:
        return None, None
def _on_poll(self, param):
    """Scheduled/interval polling entry point.

    Delegates to _poll_now for manual "poll now" runs; otherwise loops,
    fetching batches restricted to emails newer than the saved checkpoint
    until the configured limit is reached or data runs out.

    :param param: on-poll action parameters
    :return: the action_result status
    """
    # on poll action that is supposed to be scheduled
    if self.is_poll_now():
        self.debug_print("DEBUGGER: Starting polling now")
        return self._poll_now(param)
    config = self.get_config()
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Fetch first_run_max_emails for asset configuration
    first_run_max_emails = config[EWS_JSON_FIRST_RUN_MAX_EMAILS]
    ret_val, first_run_max_emails = self._validate_integer(action_result, first_run_max_emails, "Maximum Emails to Poll First Time")
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    # Fetch max_containers for asset configuration
    max_containers = config[EWS_JSON_POLL_MAX_CONTAINERS]
    ret_val, max_containers = self._validate_integer(action_result, max_containers, "Maximum Containers for Scheduled Polling")
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    # handle poll_now i.e. scheduled poll
    # Get the email ids that we will be querying for, different set for first run
    if self._state.get('first_run', True):
        max_emails = first_run_max_emails
        self.save_progress("First time Ingestion detected.")
    else:
        max_emails = max_containers
    total_ingested = 0
    limit = max_emails
    # Keep cycling until _manage_data_duplication says to stop (returns a
    # falsy max_emails) or an error occurs
    while True:
        # Per-cycle duplicate/skip counters, consumed by
        # _manage_data_duplication
        self._dup_data = 0
        self._skipped_emails = 0
        restriction = self._get_restriction()
        ret_val, email_infos = self._get_email_infos_to_process(0, max_emails, action_result, restriction)
        if phantom.is_fail(ret_val) or email_infos is None:
            return action_result.get_status()
        if not email_infos:
            return action_result.set_status(phantom.APP_SUCCESS, "No emails found for the restriction: {}".format(str(restriction)))
        if len(email_infos) < max_emails:
            self._less_data = True
        # if the config is for latest emails, then the 0th is the latest in the list returned, else
        # The last email is the latest in the list returned
        email_index = 0 if config[EWS_JSON_INGEST_MANNER] == EWS_INGEST_LATEST_EMAILS else -1
        email_ids = [x['id'] for x in email_infos]
        ret_val = self._process_email_ids(email_ids, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        max_emails, total_ingested = self._manage_data_duplication(email_infos, email_index, max_emails, total_ingested, limit)
        if not max_emails:
            break
    # Save the state file data only if the ingestion gets successfully completed
    if self._state.get('first_run', True):
        self._state['first_run'] = False
    return action_result.set_status(phantom.APP_SUCCESS)
def _get_trace_error_details(self, response):
    """Derive a readable error message from a failed trace API response.

    JSON responses are parsed for the service's error code/message; HTML
    responses are stripped to their visible text via BeautifulSoup.

    :param response: the requests.Response from the reporting web service
    :return: error text (capped at 500 chars, braces escaped for .format())
    """
    self.debug_print("Response text: {}".format(response.text))
    if 'json' in response.headers.get('Content-Type', ''):
        try:
            r_json = json.loads(response.text)
            # Fall back to the HTTP status when the payload omits a code
            error_code = r_json["error"]["code"] or response.status_code
            error_msg = r_json["error"]["message"]["value"]
            error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        except Exception:
            error_text = "API returned an error. Status: {}, Response: {}. Please check your input parameters/configuration." \
                .format(response.status_code, response.text)
        self.save_progress(error_text)
        return error_text
    try:
        soup = BeautifulSoup(response.text, "html.parser")
        # Remove the script, style, footer and navigation part from the HTML message
        for element in soup(["script", "style", "footer", "nav"]):
            element.extract()
        error_text = soup.text
        split_lines = error_text.split('\n')
        split_lines = [x.strip() for x in split_lines if x.strip()]
        error_text = '\n'.join(split_lines)
        # Escape braces so the text is safe inside later .format() calls
        error_text = error_text.replace('{', '{{').replace('}', '}}')
    except Exception:
        error_text = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
            response.status_code, response.text.replace('{', '{{').replace('}', '}}'))
    if len(error_text) > 500:
        # Overly long HTML dumps are unhelpful; use a generic message
        error_text = "Error while connecting to the server"
    return error_text
def _create_filter_string(self, action_result, param):
    """Build the OData $filter string for the message-trace query.

    :param action_result: ActionResult used for error reporting
    :param param: Parameter dictionary
    :return: (status, filter_str); filter_str is {} on validation error
    """
    sender_address = param.get('sender_address', '')
    recipient_address = param.get('recipient_address', '')
    status = param.get('status', '')
    start_date = param.get('start_date', '')
    end_date = param.get('end_date', '')
    from_ip = param.get('from_ip', '')
    to_ip = param.get('to_ip', '')
    internet_message_id = param.get('internet_message_id', '')
    message_trace_id = param.get('message_trace_id', '')
    # The date bounds only make sense as a pair
    if (start_date and not end_date) or (end_date and not start_date):
        return action_result.set_status(phantom.APP_ERROR, "Please specify both the 'start date' and 'end date' parameters"), {}
    params = {}
    # NOTE: dict.fromkeys de-duplicates while keeping the user-supplied
    # order; the previous set() made the filter string non-deterministic
    if sender_address:
        sender_list = list(filter(None, [x.strip() for x in sender_address.split(",")]))
        params['SenderAddress'] = "'{}'".format(",".join(dict.fromkeys(sender_list)))
    if recipient_address:
        recipient_list = list(filter(None, [x.strip() for x in recipient_address.split(",")]))
        params['RecipientAddress'] = "'{}'".format(",".join(dict.fromkeys(recipient_list)))
    if status:
        status_list = list(filter(None, [x.strip() for x in status.split(",")]))
        # The literal value "none" means "no status" to the service
        status_list = ["" if x.lower() == "none" else x for x in status_list]
        params['Status'] = "'{}'".format(",".join(dict.fromkeys(status_list)))
    if start_date:
        params['StartDate'] = "datetime'{}'".format(start_date)
    if end_date:
        params['EndDate'] = "datetime'{}'".format(end_date)
    if from_ip:
        params['FromIP'] = "'{}'".format(from_ip)
    if to_ip:
        params['ToIP'] = "'{}'".format(to_ip)
    if internet_message_id:
        params['MessageId'] = "'{}'".format(internet_message_id)
    if message_trace_id:
        params['MessageTraceId'] = "guid'{}'".format(message_trace_id)
    # "K1 eq V1 and K2 eq V2 ..." in insertion order
    filter_str = " and ".join("{} eq {}".format(key, value) for key, value in params.items())
    return phantom.APP_SUCCESS, filter_str
def _trace_email(self, param):
    """Run the 'trace email' action against the message-trace web service.

    Builds an OData filter from the parameters, pages through the results
    via the '__next' links, optionally trims the result range, and reports
    the matching messages.

    :param param: action parameters
    :return: the action_result status
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    ret_val, filter_str = self._create_filter_string(action_result, param)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    parameter = {
        "$format": "Json",
        "$filter": filter_str
    }
    self.save_progress("Query parameter: {}".format(repr(parameter)))
    email_range = param.get("range")
    mini, maxi = 0, EWSONPREM_MAX_END_OFFSET_VAL
    if email_range:
        ret_val = self._validate_range(email_range, action_result)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        mini, maxi = (int(x) for x in email_range.split('-'))
    config = self.get_config()
    if phantom.APP_JSON_PASSWORD not in config:
        return action_result.set_status(phantom.APP_ERROR, "Password is required for the 'trace email' action")
    password = config[phantom.APP_JSON_PASSWORD]
    # The trace service wants DOMAIN\\user, not DOMAIN/user
    username = config[phantom.APP_JSON_USERNAME].replace('/', '\\')
    auth = HTTPBasicAuth(username, password)
    trace_url = EWS_TRACE_URL
    results = []
    while True:
        response = requests.get(trace_url, auth=auth, params=parameter, timeout=self._timeout)
        if response.status_code != 200:
            error_text = self._get_trace_error_details(response)
            return action_result.set_status(phantom.APP_ERROR, error_text)
        # format as json data
        try:
            r_json = json.loads(response.text)
            results.extend(r_json["d"]["results"])
            trace_url = r_json["d"].get("__next")
            # Break if we got all the data we want or if there is no more data to fetch
            if not trace_url or len(results) > maxi:
                break
        except Exception as e:
            # Log the exception details
            error_text = self._get_error_message_from_exception(e)
            self.debug_print("Error while parsing response: {}".format(error_text))
            # Fetch the error message from the API response
            error_text = self._get_trace_error_details(response)
            return action_result.set_status(phantom.APP_ERROR, error_text)
        # Subsequent pages follow the '__next' URL, which already encodes
        # the filter, so only the format parameter is needed
        parameter = {"$format": "Json"}
    if param.get('widget_filter', False):
        # Strip angle brackets so widgets can render/link the Message-ID
        for email_dict in results:
            email_dict['MessageId'] = email_dict['MessageId'].replace('>', '').replace('<', '')
    results = results[mini:maxi + 1]
    action_result.add_data(results)
    summary = action_result.update_summary({})
    summary['emails_found'] = len(results)
    return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
    """Dispatch the current action to its handler.

    :param param: action parameters
    :return: the handler's status (APP_SUCCESS for unknown actions)
    """
    # Get the action that we are supposed to carry out, set it in the connection result object
    action = self.get_action_identifier()
    # Initialize it to success
    ret_val = phantom.APP_SUCCESS
    # Dispatch via an if..elif chain on the action identifier
    if action == self.ACTION_ID_RUN_QUERY:
        ret_val = self._run_query(param)
    elif action == self.ACTION_ID_DELETE_EMAIL:
        ret_val = self._delete_email(param)
    elif action == self.ACTION_ID_UPDATE_EMAIL:
        ret_val = self._update_email(param)
    elif action == self.ACTION_ID_GET_EMAIL:
        ret_val = self._get_email(param)
    elif action == self.ACTION_ID_COPY_EMAIL:
        ret_val = self._copy_move_email(param)
    elif action == self.ACTION_ID_MOVE_EMAIL:
        ret_val = self._copy_move_email(param, action='move')
    elif action == self.ACTION_ID_BLOCK_SENDER:
        ret_val = self._mark_as_junk(param, action='block')
    elif action == self.ACTION_ID_UNBLOCK_SENDER:
        ret_val = self._mark_as_junk(param, action='unblock')
    elif action == self.ACTION_ID_EXPAND_DL:
        ret_val = self._expand_dl(param)
    elif action == self.ACTION_ID_RESOLVE_NAME:
        ret_val = self._resolve_name(param)
    elif action == self.ACTION_ID_ON_POLL:
        ret_val = self._on_poll(param)
    elif action == phantom.ACTION_ID_TEST_ASSET_CONNECTIVITY:
        ret_val = self._test_connectivity(param)
    elif action == self.ACTION_ID_TRACE_EMAIL:
        ret_val = self._trace_email(param)
    return ret_val
if __name__ == '__main__':
    # Stand-alone debug harness: replay an action JSON against the connector
    # (optionally logging into the platform for a session token), or parse a
    # raw email through ProcessEmail directly.
    import argparse
    import sys
    import pudb
    # NOTE(review): unconditional interactive breakpoint - this harness is
    # intended to be run by a developer under pudb
    pudb.set_trace()
    in_json = None
    in_email = None
    argparser = argparse.ArgumentParser()
    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)
    args = argparser.parse_args()
    session_id = None
    username = args.username
    password = args.password
    verify = args.verify
    if username is not None and password is None:
        # User specified a username but not a password, so ask
        import getpass
        password = getpass.getpass("Password: ")
    if username and password:
        # Log into the platform to obtain a session token for the action run
        try:
            print("Accessing the Login page")
            phantom_url = "{}login".format(BaseConnector._get_phantom_base_url())
            r = requests.get(phantom_url, verify=verify, timeout=DEFAULT_REQUEST_TIMEOUT)
            csrftoken = r.cookies['csrftoken']
            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken
            headers = dict()
            headers['Cookie'] = 'csrftoken={}'.format(csrftoken)
            headers['Referer'] = phantom_url
            print("Logging into Platform to get the session id")
            r2 = requests.post(phantom_url, verify=verify,
                               data=data, headers=headers, timeout=DEFAULT_REQUEST_TIMEOUT)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print("Unable to get session id from the platform. Error: {}".format(e))
            sys.exit(1)
    with open(args.input_test_json) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
    connector = EWSOnPremConnector()
    connector.print_progress_message = True
    data = in_json.get('data')
    raw_email = in_json.get('raw_email')
    # if neither present then treat it as a normal action test json
    if not data and not raw_email:
        print(json.dumps(in_json, indent=4))
        if session_id is not None:
            in_json['user_session_token'] = session_id
        result = connector._handle_action(json.dumps(in_json), None)
        print(result)
        sys.exit(0)
    if data:
        raw_email = data.get('raw_email')
    if raw_email:
        # Manual parse path: run the raw RFC822 email through ProcessEmail
        # with every extraction option enabled
        config = {
            "extract_attachments": True,
            "extract_domains": True,
            "extract_hashes": True,
            "extract_ips": True,
            "extract_urls": True,
            "extract_eml": True,
            "add_body_to_header_artifacts": True
        }
        process_email = ProcessEmail()
        ret_val, message = process_email.process_email(connector, raw_email, "manual_parsing", config, None)
    sys.exit(0)
| splunk-soar-connectors/office365 | ewsonprem_connector.py | ewsonprem_connector.py | py | 129,772 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.sys.path.insert",
... |
7752973710 | from fastapi import FastAPI, Request, Form, HTTPException
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse, RedirectResponse, JSONResponse
from pydantic import BaseModel
import psycopg2
import datetime
from uuid import uuid4
from fastapi.responses import StreamingResponse
from io import BytesIO
from typing import Optional
import tempfile
import os
from PIL import Image
import os
from .routers import background_tasks, create_qr_codes
from dotenv import load_dotenv
from os import getenv
load_dotenv() # take environment variables from .env.
# FastAPI application and its sub-routers
app = FastAPI()
app.include_router(background_tasks.router)
app.include_router(create_qr_codes.router)
# Jinja2 templates live in a "templates" directory next to this module
templates_path = os.path.join(os.path.dirname(__file__), "templates")
templates = Jinja2Templates(directory=templates_path)
# Connect to the database
def connect_to_db():
    """Open a new PostgreSQL connection using the DATABASE_URL env var.

    SSL is required unless the URL points at localhost (local development).

    :raises RuntimeError: if DATABASE_URL is not set (previously this
        crashed with ``TypeError: argument of type 'NoneType' is not
        iterable`` on the substring test).
    """
    database_url = os.getenv("DATABASE_URL")
    if not database_url:
        raise RuntimeError("DATABASE_URL environment variable is not set")
    use_ssl = 'localhost' not in database_url
    conn = psycopg2.connect(database_url, sslmode='require' if use_ssl else None)
    return conn
# Initialize the database
def init_db():
    """Create the food_items table if it does not already exist.

    Called at import time (below), so the schema is in place before the
    app serves requests. One row per *revision* of an item: `id` groups
    revisions of the same item, `pk` is the unique row key, and the row
    with the newest `update_time` is treated as current elsewhere.
    """
    conn = connect_to_db()
    cursor = conn.cursor()
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS food_items (
            pk UUID PRIMARY KEY,
            id UUID NOT NULL,
            food VARCHAR(255) NOT NULL,
            date_added DATE NOT NULL,
            expiration_date DATE NOT NULL,
            notes VARCHAR(255),
            update_time TIMESTAMP NOT NULL,
            date_consumed DATE,
            location VARCHAR(255)
        )
    """)
    conn.commit()
    cursor.close()
    conn.close()

# Ensure the schema exists as soon as the module is imported
init_db()
# Twilio credentials; os.environ[...] raises KeyError at import time when
# any of these are missing (fail fast on misconfiguration)
TWILIO_ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
TWILIO_AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
TWILIO_PHONE_NUMBER = os.environ['TWILIO_PHONE_NUMBER']
# Define the request model
class FoodItem(BaseModel):
    """One revision of a tracked food item (mirrors the food_items table)."""

    pk: Optional[str] = None  # unique row key, one per revision
    id: Optional[str] = None  # stable item id shared by all revisions
    food: str
    date_added: datetime.date
    expiration_date: datetime.date
    notes: Optional[str] = None
    days_old: Optional[int] = None   # derived: today - date_added
    days_left: Optional[int] = None  # derived: expiration_date - today
    update_time: Optional[datetime.datetime] = None
    date_consumed: Optional[datetime.date] = None  # set once eaten/discarded
    location: Optional[str] = None
async def get_food_items(query_string):
    """Fetch the latest revision of every unconsumed food item.

    :param query_string: SQL fragment appended verbatim to the base query
        (e.g. an ORDER BY clause ending with ';').
        NOTE(review): this is string-concatenated into the SQL - callers
        currently pass only fixed literals; never pass user input here.
    :return: list of FoodItem with days_left populated
    """
    conn = connect_to_db()
    cur = conn.cursor()
    # Join each id against its newest update_time to pick the current
    # revision, then keep only rows not yet consumed
    query = """
    SELECT fi.pk, fi.id, fi.food, fi.date_added, fi.expiration_date, fi.notes, fi.update_time, fi.date_consumed, fi.location
    FROM food_items fi
    INNER JOIN (
        SELECT id, MAX(update_time) AS max_update_time
        FROM food_items
        GROUP BY id
    ) AS mfi ON fi.id = mfi.id AND fi.update_time = mfi.max_update_time
    WHERE fi.date_consumed IS NULL
    """
    query = query + query_string
    cur.execute(query)
    rows = cur.fetchall()
    cur.close()
    conn.close()
    food_items = [FoodItem(pk=row[0], days_left=(row[4] - datetime.date.today()).days, id=row[1], food=row[2], date_added=row[3], expiration_date=row[4], notes=row[5], update_time=row[6], date_consumed=row[7], location=row[8]) for row in rows]
    return food_items
@app.get("/", response_class=HTMLResponse)
async def read_items(request: Request, sort_by_expiration_date: bool = False, sort_order: Optional[str] = None):
    """Render the main list of unconsumed food items, optionally sorted."""
    if sort_by_expiration_date:
        direction = "ASC" if sort_order == "asc" else "DESC"
        suffix = f" ORDER BY fi.expiration_date {direction};"
    else:
        suffix = ";"
    food_items = await get_food_items(suffix)
    return templates.TemplateResponse("index.html", {"request": request, "food_items": food_items})
@app.get("/favicon.ico")
def read_favicon():
    """Suppress favicon requests with an empty 204 response.

    NOTE(review): raising HTTPException with 204 is unconventional; the
    usual FastAPI way is returning Response(status_code=204) - confirm the
    desired behavior before changing.
    """
    raise HTTPException(status_code=204, detail="No content")
@app.get("/{item_id}/update/", response_class=HTMLResponse)
async def edit_food_item(
        request: Request,
        item_id: str):
    """Render the edit form for a food item.

    Also collects the distinct locations across all current items so the
    form can offer them as choices. If the id is unknown, the template
    receives an empty item dict.
    """
    food_item = {}
    location_list=[]
    query_string = ";"
    food_items = await get_food_items(query_string)
    for item in food_items:
        # Gather every distinct location for the location dropdown
        if item.location not in location_list:
            location_list.append(item.location)
        if item.id == item_id:
            food_item = {
                "id": item.id,
                "food": item.food,
                "date_added": item.date_added,
                "expiration_date": item.expiration_date,
                "notes": item.notes,
                "date_consumed": item.date_consumed,
                "location": item.location
            }
    return templates.TemplateResponse("edit.html", {"locations": location_list, "request": request, "item": food_item})
@app.post("/{item_id}/update/")
async def update_food_item(
        item_id: str,
        food: str = Form(...),
        expiration_date: datetime.date = Form(...),
        notes: Optional[str] = Form(None),
        date_consumed: Optional[datetime.date] = Form(None),
        location: Optional[str] = Form(None)):
    """Persist an edit as a *new revision* row for the item.

    Revisions are append-only: each edit inserts a fresh row sharing the
    item's `id`, and readers pick the row with the newest `update_time`.
    """
    conn = connect_to_db()
    cursor = conn.cursor()
    # create new entry for edit so needs a new PK
    item_pk = str(uuid4())
    # capture time of edit
    dt = datetime.datetime.now()
    # get date_added from original entry and add to updated entry
    cursor.execute("SELECT date_added FROM food_items WHERE id=%s", (item_id,))
    date_added_row = cursor.fetchone()
    # Brand-new ids (e.g. from a freshly scanned QR code) default to today
    date_added = date_added_row[0] if date_added_row is not None else datetime.date.today()
    cursor.execute(
        "INSERT INTO food_items (pk, id, food, date_added, expiration_date, notes, update_time, date_consumed, location) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        (item_pk, item_id, food, date_added, expiration_date, notes, dt, date_consumed, location),
    )
    conn.commit()
    cursor.close()
    conn.close()
    return {"success": True, "message": "Successfully updated the food item."}
@app.get("/{item_id}/view/", response_class=HTMLResponse)
async def view_food_item(request: Request, item_id: str):
    """Render the detail view for the latest revision of a food item.

    Row layout matches the food_items table:
    (0 pk, 1 id, 2 food, 3 date_added, 4 expiration_date, 5 notes,
     6 update_time, 7 date_consumed, 8 location)
    """
    conn = connect_to_db()
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM food_items WHERE id = %s ORDER BY update_time DESC LIMIT 1", (item_id,))
    item = cursor.fetchone()
    cursor.close()
    conn.close()
    if not item:
        raise HTTPException(status_code=404, detail="Food item not found")
    days_old = (datetime.date.today() - item[3]).days
    days_left = (item[4] - datetime.date.today()).days
    # BUGFIX: date_consumed is column 7 and location is column 8 (column 6 is
    # update_time); the old indices 6/7 passed update_time as date_consumed
    # and date_consumed as location (cf. handle_qr_scan's `item[7] is None`)
    food_item = FoodItem(id=item[1], food=item[2], date_added=item[3], days_old=days_old, days_left=days_left, expiration_date=item[4], notes=item[5], date_consumed=item[7], location=item[8])
    return templates.TemplateResponse("view.html", {"request": request, "item": food_item})
@app.get("/consumed_items/", response_class=HTMLResponse)
async def read_updated_items(request: Request, sort_by_expiration_date: bool = False):
    """Render the list of consumed food items (latest revision per item)."""
    conn = connect_to_db()
    cur = conn.cursor()
    # Same latest-revision join as get_food_items, but inverted on
    # date_consumed: only items that HAVE been consumed
    query = """
    SELECT fi.pk, fi.id, fi.food, fi.date_added, fi.expiration_date, fi.notes, fi.update_time, fi.date_consumed, fi.location
    FROM food_items fi
    INNER JOIN (
        SELECT id, MAX(update_time) AS max_update_time
        FROM food_items
        GROUP BY id
    ) AS mfi ON fi.id = mfi.id AND fi.update_time = mfi.max_update_time
    WHERE fi.date_consumed IS NOT NULL;
    """
    if sort_by_expiration_date:
        # NOTE(review): appended after the terminating ';' - the ORDER BY is
        # likely ignored/invalid here; confirm intended behavior
        query += " ORDER BY fi.expiration_date"
    cur.execute(query)
    rows = cur.fetchall()
    cur.close()
    conn.close()
    food_items = [FoodItem(pk=row[0], id=row[1], food=row[2], date_added=row[3], expiration_date=row[4], notes=row[5], update_time=row[6], date_consumed=row[7], location=row[8]) for row in rows]
    return templates.TemplateResponse("consumed.html", {"request": request, "food_items": food_items})
@app.get("/{item_id}/")
async def handle_qr_scan(item_id: str):
    """Route a QR-code scan to the right page for this item id.

    Existing, unconsumed items go to the read-only view; unknown or
    already-consumed ids go to the edit form (submitting it creates a new
    revision for the id).
    """
    conn = connect_to_db()
    cursor = conn.cursor()
    cursor.execute("""
        SELECT * FROM food_items
        WHERE id = %s
        ORDER BY update_time DESC
        LIMIT 1
    """, (item_id,))
    item = cursor.fetchone()
    cursor.close()
    conn.close()
    # item[7] is the date_consumed column; None means still in the pantry
    if item and item[7] is None:
        return RedirectResponse(url=f"/{item_id}/view/")
    else:
        # Unknown or consumed id: the update form's POST will insert the
        # (new) entry for this id
        return RedirectResponse(url=f"/{item_id}/update/")
@app.get("/{item_id}/consumed/")
async def add_consumed_date(item_id: str):
    """Mark a food item as consumed by appending a new revision row.

    Row layout matches the food_items table:
    (0 pk, 1 id, 2 food, 3 date_added, 4 expiration_date, 5 notes,
     6 update_time, 7 date_consumed, 8 location)
    """
    conn = connect_to_db()
    cursor = conn.cursor()
    # Find the latest entry based on the "update_time" column for the passed in item.id
    cursor.execute("""
        SELECT * FROM food_items
        WHERE id = %s
        ORDER BY update_time DESC
        LIMIT 1
    """, (item_id,))
    item = cursor.fetchone()
    # create new entry for edit so needs a new PK
    item_pk = str(uuid4())
    if not item:
        raise HTTPException(status_code=404, detail="Item not found")
    # Create a new entry with the same info, but add the current time to the "update_time" column and "date_consumed" column
    current_time = datetime.datetime.now()
    # BUGFIX: location is column 8, not 7 (column 7 is date_consumed, which
    # is None here) - the old code dropped the item's location on consume
    cursor.execute(
        "INSERT INTO food_items (pk, id, food, date_added, expiration_date, notes, update_time, date_consumed, location) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        (item_pk, item_id, item[2], item[3], item[4], item[5], current_time, current_time, item[8]),
    )
    conn.commit()
    cursor.close()
    conn.close()
    return RedirectResponse(url="/")
| ksindy/qrfood | qr_food_app/main.py | main.py | py | 9,400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "routers.background_tasks.router",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name"... |
6007494490 | import json
import unittest
from datetime import date
from first_config import Config
from first_data import FirstData
from first_distance import FirstDistance
from first_pace import FirstPace
from first_plan import FirstPlan
from first_race import FirstRaceType, FirstRace
from first_runner import FirstRunner
from first_step import FirstStepBody, FirstStepRepeat
from first_time import FirstTime
from first_workout import FirstWorkout
class TestFirstPlan(unittest.TestCase):
def test_to_string(self):
    """Verify FirstPlan rendering as str, TCX, JSON and HTML.

    Covers a plan built from name+schedule only, then a plan with a race
    and runner attached.
    """
    ws1 = [0, 2, 5]  # Mon, Wed, Sat
    ws2 = [1, 3, 6]  # Tue, Thu, Sun
    try: # name only
        p1 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws1)
        self.assertEqual('Training Plan:\nName - "My first marathon training plan"\nWorkout days: Mon, Wed, Sat\n',
                         str(p1))
        # TCX output is compared against a stored resource file
        file_name = 'cmp_plan1.tcx'
        with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
            cmp_string = from_file.read()
        self.assertEqual(cmp_string, p1.tcx())
        cmp_json = {"name": "My first marathon training plan",
                    "weekly_schedule": ["mon", "wed", "sat"],
                    "workouts": []}
        self.assertEqual(cmp_json, p1.to_json())
        cmp_html = ('<!DOCTYPE html>\n' +
                    '<html>\n' +
                    ' <head>\n' +
                    ' </head>\n' +
                    ' <body>\n' +
                    ' <h1>Training Plan: My first marathon training plan</h1>\n' +
                    ' <div>\n' +
                    ' <h2>\n' +
                    ' Schedule:\n' +
                    ' </h2>\n' +
                    ' </div>\n' +
                    ' </body>\n' +
                    '</html>')
        self.assertEqual(cmp_html, p1.to_html())
    except TypeError as tex:
        self.fail(str(tex))
    except ValueError as vex:
        self.fail(str(vex))
    # Plan with race and runner attached
    rt1 = FirstRaceType(name='Marathon', distance=FirstDistance.from_string('42.195 km'))
    rd1 = date(year=2017, month=7, day=29)
    r1 = FirstRace(name='SFM', race_type=rt1, race_date=rd1)
    rn1 = FirstRunner(name='DBD')
    try: # all
        p2 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws2, race=r1, runner=rn1)
        cmp_string = ('Training Plan:\nName - "My first marathon training plan"\nWorkout days: Tue, Thu, Sun\n' +
                      'Race:\n Name - "SFM" of type Marathon - 42.195 km\nRunner:\n Name - "DBD"\n')
        self.assertEqual(cmp_string, str(p2))
    except TypeError as tex:
        self.fail(str(tex))
    except ValueError as vex:
        self.fail(str(vex))
def test_add_workout(self):
ws1 = [0, 2, 5]
rt1 = FirstRaceType(name='Marathon', distance=FirstDistance.from_string('42.195 km'))
rd1 = date(year=2017, month=7, day=29)
r1 = FirstRace(name='SFM', race_type=rt1, race_date=rd1)
rn1 = FirstRunner(name='DBD')
p1 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws1, race=r1, runner=rn1)
t_warmup = FirstTime.from_string('0:15:00')
p_warmup = FirstPace.from_string('0:10:00 min per mile')
s_warmup = FirstStepBody(name='Warm up', pace=p_warmup, time=t_warmup)
s_repeat = FirstStepRepeat(name='repeat X 8', repeat=8)
d_interval = FirstDistance.from_string('400 m')
p_fast = FirstPace.from_string('0:08:00 min per mile')
s_fast = FirstStepBody(name='Fast', pace=p_fast, distance=d_interval)
s_repeat.add_step(s_fast)
s_slow = FirstStepBody(name='Rest', pace=p_warmup, distance=d_interval)
s_repeat.add_step(s_slow)
t_cooldown = FirstTime.from_string('0:10:00')
s_cooldown = FirstStepBody(name='Cool down', pace=p_warmup, time=t_cooldown)
wo = FirstWorkout(name='Week 1 Key-run 1', workout_date=date(year=2017, month=6, day=24))
wo.add_step(step=s_warmup)
wo.add_step(step=s_repeat)
wo.add_step(step=s_cooldown)
try: # first workout
p1.add_workout(workout=wo)
cmp_string = ('Training Plan:\nName - "My first marathon training plan"\n' +
'Workout days: Mon, Wed, Sat\nRace:\n' +
' Name - "SFM" of type Marathon - 42.195 km\nRunner:\n Name - "DBD"\nWorkouts:\n' +
' "Week 1 Key-run 1"\n Sat 2017-06-24\n scheduled\n' +
'Total 1 workouts\n')
self.assertEqual(cmp_string, str(p1))
file_name = 'cmp_plan2.tcx'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_string = from_file.read()
self.assertEqual(cmp_string, p1.tcx())
file_name = 'cmp_plan2.json'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_json = json.load(from_file)
self.assertEqual(cmp_json, p1.to_json())
file_name = 'cmp_plan2_km.json'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_json = json.load(from_file)
self.assertEqual(cmp_json, p1.to_json(output_unit='km'))
file_name = 'cmp_plan2.html'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_html = from_file.read()
self.assertEqual(cmp_html, p1.to_html())
file_name = 'cmp_plan2_km.html'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_html = from_file.read()
self.assertEqual(cmp_html, p1.to_html(output_unit='km'))
except TypeError as ex:
self.fail(str(ex))
def test_generate_workouts(self):
data = FirstData(json_path=Config.DATABASE_JSON)
ws1 = [0, 2, 5]
target_time = data.equivalent_time(time_from=FirstTime(minutes=30),
race_index_from=data.race_type_index_by_name('5K'),
race_index_to=data.race_type_index_by_name('Marathon'))
sf_marathon = FirstRace(race_type=data.get_race_type_by_name('Marathon'),
name='San Francisco Marathon',
race_date=date(year=2017, month=7, day=23),
target_time=target_time)
me = FirstRunner(name='Daniel BenDavid', age=56, gender='m', email='yossi@gmail.com')
p1 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws1, race=sf_marathon, runner=me)
try: # positive
p1.generate_workouts(data=data)
self.assertEqual(48, len(p1.workouts))
wo = p1.workouts[0]
self.assertEqual('Week 1 Keyrun 1', wo.name)
self.assertEqual(3, len(wo.steps))
step = wo.steps[0]
self.assertEqual('warmup', step.name)
self.assertEqual(0, step.step_id)
self.assertEqual('time', step.get_duration_type())
self.assertEqual('0:15:00', str(step.time))
self.assertEqual('0:11:31 min per mile', str(step.pace))
step = wo.steps[1]
self.assertEqual('repeat X 3', step.name)
self.assertEqual(1, step.step_id)
self.assertEqual(3, step.repeat) # repeat
self.assertEqual(2, len(step.steps))
substep = step.steps[0]
self.assertEqual('1600m', substep.name)
self.assertEqual(2, substep.step_id)
self.assertEqual('distance', substep.get_duration_type())
self.assertEqual('1600.0 m', str(substep.distance))
self.assertEqual('0:09:26 min per mile', str(substep.pace))
substep = step.steps[1]
self.assertEqual('200 m@RI', substep.name)
self.assertEqual(3, substep.step_id)
self.assertEqual('distance', substep.get_duration_type())
self.assertEqual('200.0 m', str(substep.distance))
self.assertEqual('0:11:31 min per mile', str(substep.pace))
step = wo.steps[2]
self.assertEqual('cooldown', step.name)
self.assertEqual(4, step.step_id)
self.assertEqual('time', step.get_duration_type())
self.assertEqual('0:10:00', str(step.time))
self.assertEqual('0:11:31 min per mile', str(step.pace))
file_name = 'cmp_plan_marathon.tcx'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_string = from_file.read()
self.assertEqual(cmp_string, p1.tcx())
file_name = 'cmp_plan_marathon.json'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_json = json.load(from_file)
self.assertEqual(cmp_json, p1.to_json())
file_name = 'cmp_plan_marathon.html'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_html = from_file.read()
self.assertEqual(cmp_html, p1.to_html())
except ValueError as vex:
self.fail(str(vex))
except TypeError as tex:
self.fail(str(tex))
ws1 = [0, 3, 6]
target_time = data.equivalent_time(time_from=FirstTime(minutes=22, seconds=36),
race_index_from=data.race_type_index_by_name('5K'),
race_index_to=data.race_type_index_by_name('HalfMarathon'))
sf_half_marathon = FirstRace(race_type=data.get_race_type_by_name('HalfMarathon'),
name='San Francisco Marathon',
race_date=date(year=2017, month=7, day=23),
target_time=target_time)
me = FirstRunner(name='Daniel BenDavid', age=56, gender='m', email='yossi@gmail.com')
p2 = FirstPlan(name='San Francisco half-marathon training plan', weekly_schedule=ws1,
race=sf_half_marathon, runner=me)
try: # positive
p2.generate_workouts(data=data)
file_name = 'cmp_plan_half_marathon.tcx'
from_file = open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name))
cmp_string = from_file.read()
from_file.close()
self.assertEqual(cmp_string, p2.tcx())
except ValueError as vex:
self.fail(str(vex))
except TypeError as tex:
self.fail(str(tex))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| bendaten/first_trainer | test/test_plan.py | test_plan.py | py | 10,937 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "first_plan.FirstPlan",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "first_config.Config.TEST_RESOURCE_DIR",
"line_number": 30,
"usage_type": "attribute"
},
{... |
import bitstring
import collections
import math
from array import array
import os
""" HGR = 280 * 192
C64 = 40*24 chars => 320 * 192 ; display 3min 20 (2000 images) sec instead of 3min 40 (2200)
Video = 192 * 160 (24*20)
"""
if os.name == 'nt':
IMG_PREFIX = r'c:/PORT-STC/PRIVATE/tmp'
FFMPEG = r'c:\PORT-STC\opt\ffmpeg-20181221-53d3a1c-win64-static\bin\ffmpeg' # -i bad_apple_original.mp4 -an -vf fps=10,scale=36:46 c:\port-stc\private\tmp\bad_apple%05d.png'
else:
IMG_PREFIX = '/tmp'
FFMPEG = 'ffmpeg'
DISK_SIZE = 143360
class SpecialTiles:
    """Holds the three tile codes with a special meaning for the compressor:
    solid black, solid white, and 'transparent' (unchanged since last frame)."""

    def __init__(self, black, white, transparent):
        self.black, self.white, self.transparent = black, white, transparent

    def all(self):
        """Return the three special codes as a (black, white, transparent) tuple."""
        return self.black, self.white, self.transparent
class Stripe:
    """A short run of tile codes plus its compressed form.

    On construction the stripe is immediately compressed, then decompressed
    again as a round-trip sanity check. `cycles` accumulates an estimate of
    the 6502 cycles the decompressor will spend on this stripe.
    """

    def __init__( self, data, special_tiles):
        # Raw tile codes of this stripe.
        self.data = data
        # Hash of the raw bytes; used to deduplicate stripes (see simplify_stripes).
        self._hash = hash(array('B',data).tobytes())
        self.cycles = None # Number of cycles needed to decompress the stripe
        self.stripe_id = None
        self.compressed = self._compress_stripe2( self.data, special_tiles)
        # Second id, assigned later by frequency order (see simple_huffman).
        self.stripe_id2 = None
        # Round-trip check: decompressing what we just compressed must give `data` back.
        decomp = self._decompress_stripe( self.compressed, special_tiles)
        assert data == decomp, "{} != {}, compressed={}".format( hex_byte(data), decomp, hex_byte(self.compressed))
        #self.compressed = self._compress_stripe2( self.data, transparent_tile)
        self.label = None
        self.frequency = 0

    def __str__(self):
        return "Stripe freq:{}x {} [hash:{}]".format( self.frequency, ",".join( ['$'+format(b,'02X') for b in self.data] ) ,self._hash)

    def __hash__(self):
        # NOTE: __eq__ is not overridden, so equality stays identity-based
        # while the hash reflects the content.
        return self._hash

    def has_no_count(self):
        # Top bit of the first compressed byte set => raw byte stream (no run count).
        return self.compressed[0] & 128 == 128

    def _decompress_stripe( self, data, special_tiles):
        """Decompress `data` (as produced by _compress_stripe2) back into a
        list of tile codes, accumulating the decoder cycle cost in self.cycles."""
        self.cycles = 80
        r = []
        if data[0] & 128 == 128:
            # Raw-byte encoding: top bit of a byte marks start/end of the stream.
            self.cycles += len(data) * (63+26)
            #print("decompress raw bytes : {}".format(data))
            r.append( data[0] & 127 )
            if data[1] == 255:
                # 255 terminator: single-byte stripe (see _compress_stripe2).
                return r
            i = 1
            while True:
                r.append( data[i])
                if data[i] & 128:
                    # End marker: strip the mark bit from the last byte.
                    r[-1] = r[-1] & 127
                    return r
                i += 1
            return r  # unreachable: the while above always returns
        else:
            # Run-length encoding: [cmd|count, extra_tile].
            self.cycles += 111
            #print("decompress byte run")
            cmd = data[0] >> 5
            cnt = (data[0] & 31) + 1
            # NOTE(review): cmd values other than 0/1/2 would leave `color`
            # unbound; presumably the compressor never emits them — verify.
            if cmd == 0:
                color = special_tiles.white
                self.cycles += cnt * 19
            elif cmd == 1:
                color = special_tiles.black
                self.cycles += cnt * 19
            elif cmd == 2:
                color = special_tiles.transparent
                self.cycles += cnt * 19
            r = []
            r.extend( [color] * cnt)
            # The run is always followed by one explicitly stored tile.
            r.append( data[1])
            return r

    def _compress_stripe2( self, values, special_tiles):
        """Compress `values` either as a marked raw-byte stream or, for runs of
        a special tile, as a 2-byte [cmd|count, extra_tile] record."""
        # Some calculations :
        # There are 2200 frames
        # There are 3760 different stripes.
        # If want to know how many stripes there are in a frame at run time, I can :
        # 1/ Have a map frame -> nb stripes; which optimistically is 2200 bytes
        # 2/ I can count the length of the stripes until a I reach a frame, but this is colstly => I need to be able to tell the size
        #    of a stripe => I add a length byte => 3288 stripes need one => cost 3288 bytes
        # 3/ I can add a special strip to mark the end of a frame, but if Huffman compression
        #    doesn't work well enough (less than 8 bits for that stripe), it might be bigger than 2200.
        if len(values) <= 2 or values[0] not in special_tiles.all():
            r = [v for v in values ]
            r[0] = r[0] | 128
            # There are two ways of marking the end of a stream of tiles (see below).
            # Optimizing it this way let me spare +/- 5 kb out of 29 kb in the
            # stripe dictionary.
            if len(values) == 1:
                r.append(255)
            else:
                r[-1] = r[-1] | 128 # BAsically : data byte | MARK, data_byte, ..., data_byte | MARK => I spare a counter byte.
            return r
        else:
            # We always encode : a repetition of one tile followed by a single tile.
            # I cannot use the topmost bit because it's used for stream of stripes
            # (cf above)
            if values[0] == special_tiles.white:
                cmd = 0b000 << 5 # 0x00 = 0
            elif values[0] == special_tiles.black:
                cmd = 0b001 << 5 # 100000 = 0x20
            elif values[0] == special_tiles.transparent:
                cmd = 0b010 << 5 # 1000000 = 0x40
            assert cmd & 128 == 0
            # Run over black or white bytes
            i = 0
            while i+1 < len(values) and values[i+1] == values[0]:
                i += 1
            assert len(values) > 2, "shorter run should be compressed differently"
            assert values[i] == values[0], "this is not a run of bytes that are all the same"
            assert i in (len(values)-1, len(values)-2), "{} not in {}".format( i, (len(values)-2, len(values)-1))
            assert len(values) - 1 - 1 < 2 ** 5
            # -1 because the last tile is put apart, -1 because the cnt of repetition is never 0 (so we save 1 increment)
            repeat_byte = cmd | (len(values) - 1 - 1)
            additional_tile_byte = values[ len(values) - 1]
            return [ repeat_byte, additional_tile_byte]
def ffmpeg(params):
    """Echo then run the configured FFMPEG binary with `params` via the shell."""
    print(params)
    command = "{} {}".format(FFMPEG, params)
    os.system(command)
def make_delta_frames_stream(frames, special_tiles, bytes_per_frame):
    """Turn a flat stream of full frames into delta frames.

    The first frame is kept verbatim; in every later frame, a tile equal to
    the tile at the same position of the previous frame is replaced by the
    'transparent' code. Prints the average (and mean absolute deviation of
    the) percentage of unchanged tiles, then returns the new flat stream.
    """
    assert len(frames) % bytes_per_frame == 0
    stats_change = []
    delta_frames_stream = list(frames[0:bytes_per_frame])
    for offset in range(bytes_per_frame, len(frames), bytes_per_frame):
        previous = frames[offset - bytes_per_frame:offset]
        current = frames[offset:offset + bytes_per_frame]
        delta_frame = [
            special_tiles.transparent if new == old else new
            for new, old in zip(current, previous)
        ]
        # Stats: how much of the frame did not change.
        unchanged = delta_frame.count(special_tiles.transparent)
        stats_change.append(100.0 * unchanged / len(delta_frame))
        delta_frames_stream.extend(delta_frame)
    avg = sum(stats_change) / len(stats_change)
    stddev = sum(math.fabs(v - avg) for v in stats_change) / len(stats_change)
    print("unchanged avg:{}, stddev:{}".format(avg, stddev))
    return delta_frames_stream
def peek(data, i, scan_value, strip_max_len):
    """Count consecutive entries equal to `scan_value` starting at index `i`,
    capped at `strip_max_len`, without consuming anything."""
    run = 0
    for value in data[i:i + strip_max_len]:
        if value != scan_value:
            break
        run += 1
    return run
def pack_line(data, i, predicate, max_len):
    """Collect up to `max_len` consecutive values satisfying `predicate`,
    starting at `i`. Returns (collected_values, next_index)."""
    stripe = []
    end = i
    while end < len(data) and end - i < max_len and predicate(data[end]):
        stripe.append(data[end])
        end += 1
    return stripe, end
def pack_line_one_pixel_stop(data, i, scan_values, stop_values, max_i, strip_max_len):
    """Collect a run of values equal to `scan_values`, then at most ONE extra value.

    Somehow, picking an additional, different, pixel after a long run
    is really efficient (ie a 8% increase in compression).
    Picking 1 more is 10% more efficient.
    Picking 2 more is 30% less efficient.
    Picking n more is totally not efficient (like 100% less efficient)

    Note: `stop_values` is currently unused — the trailing value is taken
    unconditionally. Returns (collected_values, next_index); both the run and
    the extra value respect `max_i` and `strip_max_len`.
    """
    stripe = []
    taken = 0
    # The run of identical values.
    while i < len(data) and i < max_i and taken < strip_max_len and data[i] == scan_values:
        stripe.append(data[i])
        i += 1
        taken += 1
    # One extra trailing value, if the limits still allow it.
    if i < len(data) and i < max_i and taken < strip_max_len:
        stripe.append(data[i])
        i += 1
    return stripe, i
def super_pack_line(data, i, scan_value, max_stripe_length):
    """Pack the next stripe starting at `i`, knowing data[i] == scan_value.

    Long runs of `scan_value` become a simple repetition (up to `bigger_len`
    tiles); short runs fall back to a run plus one trailing odd tile.
    Returns (stripe, next_index).
    """
    shorter_len = max_stripe_length // 2
    shorter_len = 4  # empirical override of the computed value above
    # 31 : gives a few bytes better compression than 32.
    bigger_len = min(max_stripe_length * 4, 31)
    assert shorter_len < bigger_len
    if peek(data, i, scan_value, bigger_len) > shorter_len:
        # Long enough: encode as a plain tile repetition.
        return pack_line(data, i, lambda d: d == scan_value, bigger_len)
    # Short run: take the run plus one different trailing tile.
    others = set(range(256))
    others.remove(scan_value)
    return pack_line_one_pixel_stop(data, i, scan_value, others, i + shorter_len, max_stripe_length)
def make_stripes(data_stream, special_tiles, bytes_per_frame, max_stripe_length):
    """Cut a flat stream of frames into Stripe objects.

    Runs starting on a special tile (black/white/transparent) go through
    super_pack_line; anything else becomes a short stripe (max 4 tiles) of
    arbitrary values. Stripes never cross a frame boundary.
    """
    assert len(data_stream) % bytes_per_frame == 0
    all_stripes_codes = []
    for frame_start in range(0, len(data_stream), bytes_per_frame):
        data = data_stream[frame_start:frame_start + bytes_per_frame]
        i = 0
        while i < len(data):
            tile = data[i]
            if tile in special_tiles.all():
                # Special tiles compress well as runs.
                stripe, i = super_pack_line(data, i, tile, max_stripe_length)
            else:
                # Arbitrary tiles: short raw stripe, stops at any special tile.
                stripe, i = pack_line(data, i, lambda d: d not in special_tiles.all(), 4)
            all_stripes_codes.append(Stripe(stripe, special_tiles))
    return all_stripes_codes
def simplify_stripes(all_stripes):
    """Deduplicate `all_stripes` in place by content hash.

    The first stripe seen for each hash becomes canonical and receives a
    1-based `stripe_id`; every list entry is then replaced by its canonical
    stripe. Returns the dict mapping hash -> canonical stripe.
    """
    unique_stripes = {}
    for stripe in all_stripes:
        key = hash(stripe)
        if key not in unique_stripes:
            stripe.stripe_id = len(unique_stripes) + 1
            unique_stripes[key] = stripe
    all_stripes[:] = [unique_stripes[hash(s)] for s in all_stripes]
    return unique_stripes
def compute_stripes_frequencies(all_stripes):
    """Annotate each distinct stripe with its occurrence count (`frequency`)
    and a sequential `label` in first-seen order."""
    counts = collections.Counter(all_stripes)
    for ndx, (stripe, freq) in enumerate(counts.items()):
        stripe.frequency = freq
        stripe.label = ndx
def hex_byte(b_or_list, prefix="!byte "):
    """Format a byte value or a sequence of byte values as assembler hex.

    :param b_or_list: a single int, or a sequence of ints. The original only
        accepted ``list``/``bytes`` (via ``type(...) in``); tuples and
        bytearrays fell through to the scalar branch and crashed inside
        ``format``. They are now handled as sequences too.
    :param prefix: text placed before a sequence (ignored for a scalar);
        defaults to the ACME ``!byte `` directive.
    :return: e.g. ``"!byte $01,$FF"`` for sequences, ``"$0A"`` for a scalar.
    """
    if isinstance(b_or_list, (list, tuple, bytes, bytearray)):
        return prefix + ",".join('$' + format(b, '02X') for b in b_or_list)
    else:
        return '$' + format(b_or_list, '02X')
def array_to_asm(fo, a, line_prefix, label=""):
    """Write `a` to file object `fo` as assembler data lines, 10 values per line.

    Strings are emitted verbatim; ints are formatted as `$xxxx` for '!word'
    or `$xx` for '!byte'. Raises for any other `line_prefix`.
    """
    if type(a[0]) == str:
        fmt = "{}"
    elif line_prefix == '!word':
        fmt = "${:04x}"
    elif line_prefix == '!byte':
        fmt = "${:02x}"
    else:
        raise Exception("Unknown format {}".format(line_prefix))
    header = "\t{}:".format(label) if label else ""
    fo.write("{}; {} values\n".format(header, len(a)))
    for start in range(0, len(a), 10):
        chunk = a[start:start + 10]
        fo.write("\t{} {}\n".format(line_prefix, ", ".join(fmt.format(x) for x in chunk)))
def stats_unique_stipes(unique_stripes):
    """Print size statistics about the unique stripes (note: name keeps the
    historical 'stipes' typo; callers depend on it)."""
    compressed_total = sum(len(s.compressed) for s in unique_stripes)
    print("{} unique stripes, stored with {} bytes, representing {} stripes".format(
        len(unique_stripes),
        compressed_total,
        sum(s.frequency for s in unique_stripes)))
    # Stripes used exactly once could be stored inline instead of in the dictionary.
    f1 = [s for s in unique_stripes if s.frequency == 1]
    f1_size = sum(len(s.data) for s in f1)
    others = compressed_total - f1_size + 2 * (len(unique_stripes) - len(f1))
    print("{} frequency-1 stripes, totalling {} bytes. Other stripes + index table = {} bytes => total {} bytes".format(
        len(f1), f1_size, others, f1_size + others))
    f2 = [s for s in unique_stripes if s.frequency == 2]
    print("{} frequency-2 stripes, totalling {} bytes".format(len(f2), sum(len(s.data) for s in f2)))
    # with open("stats.csv","w") as fo:
    #     for s in sorted( unique_stripes, key=lambda s:s.frequency * 100000 + len(s.compressed)):
    #         fo.write("{};{};{};{};\"{}\"\n".format( s.frequency, len(s.compressed), len(s.data), s.has_no_count(), (hex_byte(s.data))))
def simple_huffman( unique_stripes, all_stripes):
    """Encode the stripe stream with a fixed variable-length code and write it
    to "cstripes.data".

    Stripes are first renumbered (`stripe_id2`) by decreasing frequency, so
    frequent stripes get short codes: 4 bits for the 8 most frequent, then
    8 / 12 / 16 bits for the next 64 / 512 / 8192 ids.

    NOTE: everything after the bare `return` below is dead code — a disabled
    debug round-trip check of the bit stream.
    """
    sid = 1
    # Sort stripes, most frequent first
    for s in sorted( unique_stripes.values(), key=lambda s:s.frequency, reverse=True):
        s.stripe_id2 = sid
        sid += 1

    # for s in all_stripes[0:100]:
    #     print("({},{})".format( s.stripe_id, s.stripe_id2 ))

    stream = bitstring.BitArray()
    # Code-space boundaries (cumulative): 8 ids in 4 bits, next 64 in 8 bits,
    # next 512 in 12 bits, the rest in 16 bits.
    d1 = (2 ** 3)
    d2 = (2 ** 6) + d1
    d3 = (2 ** 9) + d2
    d1_count = d2_count = d3_count = d4_count = 0
    d1_len = d2_len = d3_len = d4_len = 0
    ndx = 0
    warn = False
    for s in all_stripes:
        sid = s.stripe_id2 - 1
        if sid < d1:
            # 0xxxb => 8 values
            bits = bitstring.BitArray(length=4, uint=sid)
            d1_count += 1
            d1_len += len(s.compressed)
        elif d1 <= sid < d2:
            # 10yy yyyy => 64 values
            bits = bitstring.BitArray(length=8, uint=0b10000000 + sid - d1)
            d2_count += 1
            d2_len += len(s.compressed)
        elif d2 <= sid < d3:
            # 110z zzzz zzzz 12 bits, 9 significant => 512 values
            bits = bitstring.BitArray(length=12, uint=0b110000000000 + sid - d2)
            d3_count += 1
            d3_len += len(s.compressed)
        elif d3 <= sid < 2 ** 13:
            # 111z zzzz zzzz zzzz 16 bits, 13 significant => 8192 values
            bits = bitstring.BitArray(length=16, uint=0b1110000000000000 + sid - d3)
            d4_count += 1
            d4_len += len(s.compressed)
        else:
            # Error !
            warn = True
            bits = bitstring.BitArray(length=16, uint=0b1111111111111111)
        # if ndx < 300:
        #     print("s# {} (b: {}) -> {} / {}".format( hex(ndx), len(stream.tobytes()), hex(bits.uint), sid))
        stream.append( bits)
        ndx += 1

    if warn:
        print("Too many stripes for the compressor ! (8192) {}".format( len(unique_stripes)))

    print("{} * 4 bits for {} bytes, {} * 8 bits for {} bytes, {} * 12 bits for {} bytes, {} * 16 bits for {} bytes".format(d1_count,d1_len,d2_count,d2_len,d3_count,d3_len,d4_count,d4_len))
    b = stream.tobytes()
    print("Bit stream simple huffman : {} stripes, {} bits, {} bytes".format( len( all_stripes), len( stream), len(b)))
    with open("cstripes.data","bw") as fo:
        fo.write( b)

    # # Allow some wrapping so that the ASM code is simpler
    # extra_bytes = 3
    # too_much = len(b) - DISK_SIZE
    # MAX = 4096
    # if too_much <= 0:
    #     too_much = MAX
    # if too_much > MAX:
    #     too_much = MAX
    # with open("compressed.a","w") as fo:
    #     array_to_asm( fo, b[0:too_much + extra_bytes], '!byte')
    # with open("cstripes.dsk","bw") as fo:
    #     fo.write( disk_2_dos( b[too_much:]))

    print("Some stripes:")
    for i in range(20):
        print( '{:04} '.format(i*16) + ' '.join([ "${:04x}".format(s.stripe_id2 - 1) for s in all_stripes[i*16:(i+1)*16]]))
    return

    # ---- dead code below: debug round-trip decompression of the bit stream ----
    # Test decompression
    #print( hex_word([s.stripe_id2 for s in all_stripes[0:500]]))
    #print( hex_byte( stream.tobytes()[0:1000]))
    decomp_stream = []
    max_l = len( stream)
    ndx = 0
    while ndx < max_l:
        half_byte = stream[ndx:ndx+4].uint
        if half_byte & 0b1000 == 0:
            s = half_byte
        elif half_byte & 0b1100 == 0b1000:
            s = (half_byte & 0b0011)
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            s += d1
        elif half_byte & 0b1110 == 0b1100:
            s = (half_byte & 0b0001)
            #print( hex(s))
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            #print( hex(s))
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            #print( hex(s))
            s += d2
            #print( hex(d2))
            #print( hex(s))
        elif half_byte & 0b1110 == 0b1110:
            s = (half_byte & 0b0001)
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            ndx += 4
            s = (s << 4) + stream[ndx:ndx+4].uint
            s += d3
        decomp_stream.append(s)
        ndx += 4

    a = [s.stripe_id2 for s in all_stripes]
    b = decomp_stream
    for i in range( len(a)):
        if a[i] != b[i]:
            print(i)
def unique_stripes_to_asm(fo, unique_stripes):
    """Write each unique stripe's compressed bytes, plus an index table of
    their addresses, to `fo` as assembler source (ordered by stripe_id2)."""
    sorted_stripes = sorted(unique_stripes.values(), key=lambda s: s.stripe_id2)
    fo.write('\n')
    for s in sorted_stripes:
        fo.write("stripe{}\t{}\t; [${:X}] {}\n".format(
            s.stripe_id2, hex_byte(s.compressed), s.stripe_id2 - 1, hex_byte(s.data, '')))
    fo.write('stripes_indices:\n')
    labels = ["stripe{}".format(s.stripe_id2) for s in sorted_stripes]
    array_to_asm(fo, labels, "!word")
def stripes_to_disk(stripes):
    """Write the stream of stripe ids (as 16-bit little-endian values, each id
    pre-multiplied by 2 for table indexing) to "stripes.dsk" in DOS order,
    terminated by $FFFF."""
    disk = bytearray()
    limit = min((len(stripes) // 2) - 1, (DISK_SIZE // 2) - 1)
    for s in stripes[0:limit]:
        sid = (s.stripe_id2 - 1) * 2
        assert sid < 65536
        disk += bytes((sid & 0xFF, sid >> 8))
    disk += b"\xFF\xFF"  # end-of-stream marker
    # disk.extend( bytearray( 143360 - len(disk)))
    with open("stripes.dsk", "bw") as fo:
        fo.write(disk_2_dos(disk))
def disk_2_dos(disk):
    """Reorder a linear disk image into DOS 3.3 physical sector order.

    The input is first padded or truncated to DISK_SIZE bytes, then each of
    the 35 tracks' 16 logical sectors is copied to its interleaved physical
    slot. Returns a new bytearray of exactly DISK_SIZE bytes.
    """
    disk = bytearray(disk)
    # Earlier experiments used [0,d,b,9,...] and a ProDOS table; only this
    # mapping is live now.
    # Dos order : https://en.wikipedia.org/wiki/Apple_DOS
    dos_sector = [0x0, 0x7, 0xe, 0x6, 0xd, 0x5, 0xc, 0x4,
                  0xb, 0x3, 0xa, 0x2, 0x9, 0x1, 0x8, 0xf]
    if len(disk) > DISK_SIZE:
        print("Disk image too big by {} bytes, truncating...".format(len(disk) - DISK_SIZE))
        disk = disk[0:DISK_SIZE]
    elif len(disk) < DISK_SIZE:
        print("Disk image too small ({}), extending to disk size...".format(len(disk)))
        disk.extend(bytearray(DISK_SIZE - len(disk)))
    else:
        print("disk_2_dos : putting {} bytes on a disk of {}".format(len(disk), DISK_SIZE))
    disk_dos = bytearray(DISK_SIZE)
    for track in range(35):
        track_offset = track * 16 * 256
        for sector in range(16):
            src = track_offset + sector * 256
            dst = track_offset + dos_sector[sector] * 256
            disk_dos[dst:dst + 256] = disk[src:src + 256]
    return disk_dos
| wiz21b/badapple | utils.py | utils.py | py | 20,203 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "os.name",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "array.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "math.fabs",
"line_number": 19... |
23216322875 | """Add season_bet table
Revision ID: 8076a0692fc3
Revises:
Create Date: 2023-03-12 10:53:35.538988
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8076a0692fc3'   # this migration's id
down_revision = None        # no parent: first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``season_bet`` table (integer PK, foreign key to ``user.id``)."""
    # ### commands auto generated by Alembic - please adjust! ###
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('value', sa.String(length=50), nullable=True),
        sa.Column('rank', sa.Integer(), nullable=True),
        sa.Column('type', sa.String(length=50), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
    ]
    constraints = [
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('season_bet', *columns, *constraints)
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping the ``season_bet`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('season_bet')
    # ### end Alembic commands ###
| rbikar/f1-guessing-game-app | migrations/versions/8076a0692fc3_add_season_bet_table.py | 8076a0692fc3_add_season_bet_table.py | py | 962 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.