max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
src/bookmaker/__init__.py | fossabot/BookMaker | 0 | 6620851 | <reponame>fossabot/BookMaker<filename>src/bookmaker/__init__.py<gh_stars>0
"""Python package using PEP 517 hooks
"""
import sys
import os
import subprocess
from pathlib import Path # if you haven't already done so
# Resolve this file's location so the version constant in `about` can be
# imported both from a source checkout (src/ layout) and from an installed
# package; the failing layout falls through to the other one.
file = Path(__file__).resolve()
try:
    # First try the src/ checkout layout: put the repo root on sys.path.
    sys.path.append(str(file.parents[1]))
    print(f'Allow the import to look in {str(file.parents[1])}')
    from src.bookmaker.about import VERSION as __version__
except ModuleNotFoundError:
    # Installed layout: the package parent directory goes on sys.path instead.
    sys.path.append(str(file.parents[2]))
    print(f'Failed; it needs to look in {str(file.parents[2])}')
    from bookmaker.about import VERSION as __version__
class my_class():
    """Thin launcher: runs BookMaker.py with the virtualenv interpreter
    that ships next to this package."""

    def main(self):
        base_dir = os.path.dirname(os.path.realpath(__file__))
        interpreter = os.path.join(base_dir, 'venv/bin/python')
        script = os.path.join(base_dir, 'BookMaker.py')
        print(f'Running subprocess {interpreter} {script}')
        subprocess.run([interpreter, script])
def main():
    """Console entry point: delegate to my_class.main()."""
    my_class().main()
# Run if called directly
if __name__ == '__main__':
main()
| """Python package using PEP 517 hooks
"""
import sys
import os
import subprocess
from pathlib import Path # if you haven't already done so
file = Path(__file__).resolve()
try:
sys.path.append(str(file.parents[1]))
print(f'Allow the import to look in {str(file.parents[1])}')
from src.bookmaker.about import VERSION as __version__
except ModuleNotFoundError:
sys.path.append(str(file.parents[2]))
print(f'Failed; it needs to look in {str(file.parents[2])}')
from bookmaker.about import VERSION as __version__
class my_class():
def main(self):
prefix = os.path.dirname(os.path.realpath(__file__))
python = os.path.join(prefix, 'venv/bin/python')
module = os.path.join(prefix, 'BookMaker.py')
print(f'Running subprocess {python} {module}')
completed = subprocess.run([python, module])
# print('returncode:', completed.returncode)
def main():
instance = my_class()
instance.main()
# Run if called directly
if __name__ == '__main__':
main() | en | 0.702258 | Python package using PEP 517 hooks # if you haven't already done so # print('returncode:', completed.returncode) # Run if called directly | 2.363304 | 2 |
src/test.py | dhruvramani/Space-Debris | 0 | 6620852 | <filename>src/test.py<gh_stars>0
import os
import gc
import torch
import argparse
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from models import *
from dataset import SpaceDataset
from utils import progress_bar
# Pick the GPU when one is visible, otherwise fall back to CPU, and build
# the model once at import time so test() can reuse it.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('==> Creating network..')
net = SpaceLSTM()
net = net.to(device)
def test():
    """Load the trained checkpoint and run the network on one batch.

    Restores weights into the module-level ``net``, pulls a single
    (sequences, predictions) batch from SpaceDataset, runs a forward
    pass and moves the results back to NumPy on the CPU.
    """
    global net
    # Fixed: DataLoader was used but never imported anywhere in this module,
    # so the original raised NameError on the dataloader line.
    from torch.utils.data import DataLoader
    net.load_state_dict(torch.load('../save/network.ckpt'))
    vdataset = SpaceDataset('/home/nevronas/dataset/', download=False)
    dataloader = DataLoader(vdataset, batch_size=1)
    sequences, predictions = next(iter(dataloader))
    out = net(sequences)
    out = out[0].detach().cpu().numpy()
    sequences = sequences[0].cpu().numpy()
    # Disabled visualization/audio-dump code kept for reference:
    '''
    matplotlib.image.imsave('../save/plots/input/sequences.png', sequences[0])
    matplotlib.image.imsave('../save/plots/output/stylized_sequences.png', out[0])
    aud_res = reconstruction(sequences[0], phase)
    out_res = reconstruction(out[0], phase[:, :-3])
    librosa.output.write_wav("../save/plots/input/raw_sequences.wav", aud_res, fs)
    librosa.output.write_wav("../save/plots/output/raw_output.wav", out_res, fs)
    '''
    print("Testing Finished")
import os
import gc
import torch
import argparse
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from models import *
from dataset import SpaceDataset
from utils import progress_bar
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('==> Creating network..')
net = SpaceLSTM()
net = net.to(device)
def test():
global net
net.load_state_dict(torch.load('../save/network.ckpt'))
vdataset = SpaceDataset('/home/nevronas/dataset/', download=False)
dataloader = DataLoader(vdataset, batch_size=1)
sequences, predictions = next(iter(dataloader))
out = net(sequences)
out = out[0].detach().cpu().numpy()
sequences = sequences[0].cpu().numpy()
'''
matplotlib.image.imsave('../save/plots/input/sequences.png', sequences[0])
matplotlib.image.imsave('../save/plots/output/stylized_sequences.png', out[0])
aud_res = reconstruction(sequences[0], phase)
out_res = reconstruction(out[0], phase[:, :-3])
librosa.output.write_wav("../save/plots/input/raw_sequences.wav", aud_res, fs)
librosa.output.write_wav("../save/plots/output/raw_output.wav", out_res, fs)
'''
print("Testing Finished") | en | 0.451212 | matplotlib.image.imsave('../save/plots/input/sequences.png', sequences[0]) matplotlib.image.imsave('../save/plots/output/stylized_sequences.png', out[0]) aud_res = reconstruction(sequences[0], phase) out_res = reconstruction(out[0], phase[:, :-3]) librosa.output.write_wav("../save/plots/input/raw_sequences.wav", aud_res, fs) librosa.output.write_wav("../save/plots/output/raw_output.wav", out_res, fs) | 2.173452 | 2 |
{{ cookiecutter.repository_name }}/{{ cookiecutter.project_name }}/auth/resources.py | cyrilrbt/cookiecutter-flask-mongorest | 1 | 6620853 | from flask_mongorest.resources import Resource
from {{ cookiecutter.project_name }}.auth.documents import User, UserSchema
from {{ cookiecutter.project_name }}.auth import jwt_for
class SimpleUserResource(Resource):
document = User
fields = ['id', 'email']
class RegistrationUserResource(Resource):
document = User
fields = ['id', 'email', 'token']
def token(self, obj):
return jwt_for(obj)
class UserResource(Resource):
document = User
schema = UserSchema
fields = ['id', 'email', 'roles']
allowed_ordering = ['email']
| from flask_mongorest.resources import Resource
from {{ cookiecutter.project_name }}.auth.documents import User, UserSchema
from {{ cookiecutter.project_name }}.auth import jwt_for
class SimpleUserResource(Resource):
document = User
fields = ['id', 'email']
class RegistrationUserResource(Resource):
document = User
fields = ['id', 'email', 'token']
def token(self, obj):
return jwt_for(obj)
class UserResource(Resource):
document = User
schema = UserSchema
fields = ['id', 'email', 'roles']
allowed_ordering = ['email']
| none | 1 | 2.264137 | 2 | |
src/api_ai.py | coders-creed/botathon | 1 | 6620854 | <reponame>coders-creed/botathon
# -*- coding: utf-8 -*-
# @Author: karthik
# @Date: 2016-12-11 08:41:49
# @Last Modified by: karthik
# @Last Modified time: 2016-12-11 09:02:22
from api.ai import Agent

# SECURITY NOTE(review): API token hard-coded in source control — rotate it
# and load from an environment variable or config file instead.
DEVELOPER_ACCESS_TOKEN = "3f4bc676dfb04ecba1d42f3f8a6ffe54"

ai = agent = Agent(
    '',
    '',
    DEVELOPER_ACCESS_TOKEN,
)

# Smoke-test query against the api.ai agent.
response = agent.query("Hello there")
# Fixed: the bare `print response` statement is Python-2-only syntax and is a
# SyntaxError on Python 3; the call form works on both.
print(response)
# @Author: karthik
# @Date: 2016-12-11 08:41:49
# @Last Modified by: karthik
# @Last Modified time: 2016-12-11 09:02:22
from api.ai import Agent
DEVELOPER_ACCESS_TOKEN = "3f4bc676dfb04ecba1d42f3f8a6ffe54"
ai = agent = Agent(
'',
'',
DEVELOPER_ACCESS_TOKEN,
)
response = agent.query("Hello there")
print response | en | 0.653183 | # -*- coding: utf-8 -*- # @Author: karthik # @Date: 2016-12-11 08:41:49 # @Last Modified by: karthik # @Last Modified time: 2016-12-11 09:02:22 | 1.934541 | 2 |
icbd/compiler/benchmarks/modified/hax.py | kmod/icbd | 7 | 6620855 | <reponame>kmod/icbd<filename>icbd/compiler/benchmarks/modified/hax.py
fmin = min
max = max
min = min
abs = abs
def ftoi(d):
    """Truncate a float toward zero, mimicking a C-style float->int cast."""
    truncated = int(d)
    return truncated
def itof(n):
    """Convert an int to a float, mimicking a C-style int->float cast."""
    return float(n)
| fmin = min
max = max
min = min
abs = abs
def ftoi(d):
return int(d)
def itof(n):
return 1.0 * n | none | 1 | 2.383971 | 2 | |
core/config.py | DCZYewen/NullDCZHFS | 1 | 6620856 | import os
import json
conf_path = "config.json"
base_config = {
"server": {
"request_timeout": 10,
"daemon": True,
"loop_debug": False,
"handler": {"*": ["core.urls"]}
},
"http": {
"host": "",
"port": 80,
"is_enable": True,
"rewrite_only": False
},
"https": {
"host": "",
"port": 443,
"is_enable": False,
"support_ciphers": "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128",
"cert_path": "",
"key_path": ""
},
"database": {
"database_url": "sqlite:///database.db",
"use_memcached": False,
"memcached_url": "",
"debug": False
},
"logger": {
"level": 20,
"formatter": "$(asctime)s [$(levelname)s]:$(message)s",
"time_format": "$Y/$m/$d $H:$M:$S",
"save_log": True,
"save_path": "log/"
},
"template": {
"template_path": "template/",
"use_fs_cache": True,
"cache_path": "__pycache__/"
}
}
class JsonConfigParser:
    """Mutable accessor over a two-level (segment -> block -> value) JSON config dict."""

    def __init__(self, config: dict):
        self.config = config

    def _dict_sync(self, source: dict, target: dict):
        """Recursively copy leaf values from *source* into *target* in place."""
        for k, v in source.items():
            if isinstance(v, dict):
                self._dict_sync(v, target[k])
            else:
                target[k] = v

    def update(self, path):
        """Merge the JSON file at *path* into this parser's configuration.

        Raises FileNotFoundError when the file does not exist; exits the
        process on malformed JSON.
        """
        if not os.path.exists(path):
            raise FileNotFoundError
        with open(path, "r") as raw:
            data = raw.read()
        try:
            # Fixed: previously synced into the module-global base_config,
            # which broke parsers wrapping any other dict.
            self._dict_sync(json.loads(data), self.config)
        except json.decoder.JSONDecodeError as e:
            print("Error: ConfigFile is not load")
            print("reason:", e)
            # Fixed: was exit(0), which reported success on a parse failure.
            exit(1)

    def get(self, segment: str, block=None):
        """Return config[segment], or config[segment][block] when *block* is given."""
        if segment in self.config:
            result = self.config[segment]
            # Fixed: `if not block` wrongly returned the whole segment for
            # falsy block keys such as "" or 0.
            if block is None:
                return result
            elif block in result:
                return result[block]
            raise KeyError(f"block {block} is not exist")
        raise KeyError(f"segment {segment} is not exist")

    def sets(self, segment: str, data: dict):
        """Merge every key/value of *data* into config[segment]."""
        if segment not in self.config:
            raise KeyError(f"segment {segment} is not exist")
        for k, v in data.items():
            self.config[segment][k] = v

    def set(self, segment: str, block, data):
        """Set config[segment][block] = data."""
        if segment in self.config:
            self.config[segment][block] = data
        else:
            # Fixed: the message wrongly blamed the block when the segment
            # was the missing key.
            raise KeyError(f"segment {segment} is not exist")

    def save(self, path=conf_path):
        """Write the current configuration to *path* as indented JSON."""
        with open(path, "w") as f:
            f.write(
                json.dumps(self.config, indent=2)
            )
# Module-level singleton: wrap the defaults, then either write them out on
# first run or merge the on-disk file over them.
conf = JsonConfigParser(base_config)
if not os.path.exists(conf_path):
    # First run: persist the defaults so the user has a file to edit.
    print(f"Warning: {conf_path} not found, regenerating...")
    conf.save()
else:
    conf.update(conf_path)
| import os
import json
conf_path = "config.json"
base_config = {
"server": {
"request_timeout": 10,
"daemon": True,
"loop_debug": False,
"handler": {"*": ["core.urls"]}
},
"http": {
"host": "",
"port": 80,
"is_enable": True,
"rewrite_only": False
},
"https": {
"host": "",
"port": 443,
"is_enable": False,
"support_ciphers": "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128",
"cert_path": "",
"key_path": ""
},
"database": {
"database_url": "sqlite:///database.db",
"use_memcached": False,
"memcached_url": "",
"debug": False
},
"logger": {
"level": 20,
"formatter": "$(asctime)s [$(levelname)s]:$(message)s",
"time_format": "$Y/$m/$d $H:$M:$S",
"save_log": True,
"save_path": "log/"
},
"template": {
"template_path": "template/",
"use_fs_cache": True,
"cache_path": "__pycache__/"
}
}
class JsonConfigParser:
def __init__(self, config: dict):
self.config = config
def _dict_sync(self, source: dict, target: dict):
for k, v in source.items():
if isinstance(v, dict):
self._dict_sync(v, target[k])
else:
target[k] = v
def update(self, path):
if not os.path.exists(path):
raise FileNotFoundError
with open(path, "r") as raw:
data = raw.read()
try:
self._dict_sync(
json.loads(data),
base_config
)
except json.decoder.JSONDecodeError as e:
print("Error: ConfigFile is not load")
print("reason:", e)
exit(0)
def get(self, segment: str, block=None):
if segment in self.config:
result = self.config[segment]
if not block:
return result
elif block in result:
return result[block]
raise KeyError(f"block {block} is not exist")
raise KeyError(f"segment {segment} is not exist")
def sets(self, segment: str, data: dict):
if segment not in self.config:
raise KeyError(f"segment {segment} is not exist")
for k, v in data.items():
self.config[segment][k] = v
def set(self, segment: str, block, data):
if segment in self.config:
self.config[segment][block] = data
else:
raise KeyError(f"block {block} is not exist")
def save(self, path=conf_path):
with open(path, "w") as f:
f.write(
json.dumps(self.config, indent=2)
)
conf = JsonConfigParser(base_config)
if not os.path.exists(conf_path):
print(f"Warning: {conf_path} not found, regenerating...")
conf.save()
else:
conf.update(conf_path)
| none | 1 | 2.236655 | 2 | |
src/clearskies/column_types/string_test.py | cmancone/clearskies | 4 | 6620857 | import unittest
from .string import String
class StringTest(unittest.TestCase):
    """Unit tests for the String column type."""

    def test_is_allowed_operator(self):
        string = String()
        for operator in ['=', 'LIKE']:
            self.assertTrue(string.is_allowed_operator(operator))
        for operator in ['==', '<=>']:
            self.assertFalse(string.is_allowed_operator(operator))

    def test_build_condition(self):
        string = String()
        string.configure('name', {}, int)
        # Fixed: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual('name=sup', string.build_condition('sup', operator='='))
        self.assertEqual("name LIKE '%asdf%'", string.build_condition('asdf', operator='like'))
        self.assertEqual("name LIKE '%asdf%'", string.build_condition('asdf'))

    def test_check_search_value(self):
        string = String()
        self.assertEqual('', string.check_search_value('sup'))
        self.assertEqual('value should be a string', string.check_search_value(10))
| import unittest
from .string import String
class StringTest(unittest.TestCase):
def test_is_allowed_operator(self):
string = String()
for operator in ['=', 'LIKE']:
self.assertTrue(string.is_allowed_operator(operator))
for operator in ['==', '<=>']:
self.assertFalse(string.is_allowed_operator(operator))
def test_build_condition(self):
string = String()
string.configure('name', {}, int)
self.assertEquals('name=sup', string.build_condition('sup', operator='='))
self.assertEquals("name LIKE '%asdf%'", string.build_condition('asdf', operator='like'))
self.assertEquals("name LIKE '%asdf%'", string.build_condition('asdf'))
def test_check_search_value(self):
string = String()
self.assertEquals('', string.check_search_value('sup'))
self.assertEquals('value should be a string', string.check_search_value(10))
| none | 1 | 3.70321 | 4 | |
remimi/sensors/paseudo_camera.py | xiong-jie-y/remimi | 23 | 6620858 | import os
import glob
from remimi.monodepth.ken3d.depthestim import Ken3DDepthEstimator
from remimi.monodepth.dpt import DPTDepthEstimator
# from remimi.monodepth.adabin import InferenceHelper
from remimi.segmentation.rgb_segmentation import SemanticSegmenter
import torch
import cv2
import argparse
import numpy as np
import util.io
from torchvision.transforms import Compose
from dpt.models import DPTDepthModel
from dpt.midas_net import MidasNet_large
from dpt.transforms import Resize, NormalizeImage, PrepareForNet
from util.misc import visualize_attention
import enum
class ImageType(enum.Enum):
    """Channel ordering of the color frames returned by the camera wrapper."""
    RGB = "rgb"
    BGR = "bgr"
class DPTPaseudoDepthCamera:
    """Pairs an RGB sensor with a monocular depth estimator to produce
    (color, depth) frames, optionally zeroing depth along segment boundaries."""

    def __init__(self, sensor, depth_estimator, model_name: str, debug=False,
                 output_type=ImageType.RGB, boundary_depth_removal=False,
                 semantic_segmenter=None):
        # model_name/debug are kept for interface compatibility; the estimator
        # instance is injected directly by the caller.
        self.depth_estimator = depth_estimator
        self.sensor = sensor
        self.output_type = output_type
        self.boundary_depth_removal = boundary_depth_removal
        # Fixed: this attribute was referenced but never assigned, so
        # boundary_depth_removal=True raised AttributeError. It is now an
        # optional, backward-compatible constructor argument.
        self.semantic_segmentater = semantic_segmenter

    def get_color_and_depth(self):
        """Grab one frame and its estimated depth map.

        Returns (color, depth); color channel order follows self.output_type.
        """
        color = self.sensor.get_color()  # assumed BGR (OpenCV convention) — TODO confirm
        depth = self.depth_estimator.estimate_depth(color)
        if self.boundary_depth_removal:
            if self.semantic_segmentater is None:
                raise RuntimeError(
                    "boundary_depth_removal requires a semantic_segmenter")
            seg = self.semantic_segmentater.convert_to_semantic_image(color)
            cv2.imshow("semantic", seg)
            # Binarize the segmentation, then dilate its edges and knock out
            # the depth values there — depth is unreliable on object borders.
            mask = cv2.cvtColor(seg, cv2.COLOR_RGB2GRAY)
            mask[mask > 0] = 255
            canny_img = cv2.Canny(mask, 50, 110)
            kernel = np.ones((5, 5), np.uint8)
            canny_img = cv2.dilate(canny_img, kernel, iterations=1)
            cv2.imshow("test2222", canny_img)
            depth[canny_img == 255] = 0
        if self.output_type == ImageType.BGR:
            # Fixed: both branches previously returned the RGB-converted frame,
            # making output_type=BGR behave identically to RGB.
            return color, depth
        elif self.output_type == ImageType.RGB:
            return cv2.cvtColor(color, cv2.COLOR_BGR2RGB), depth
        else:
            raise RuntimeError(f"no such image type {self.output_type}.")
import glob
from remimi.monodepth.ken3d.depthestim import Ken3DDepthEstimator
from remimi.monodepth.dpt import DPTDepthEstimator
# from remimi.monodepth.adabin import InferenceHelper
from remimi.segmentation.rgb_segmentation import SemanticSegmenter
import torch
import cv2
import argparse
import numpy as np
import util.io
from torchvision.transforms import Compose
from dpt.models import DPTDepthModel
from dpt.midas_net import MidasNet_large
from dpt.transforms import Resize, NormalizeImage, PrepareForNet
from util.misc import visualize_attention
import enum
class ImageType(enum.Enum):
RGB = "rgb"
BGR = "bgr"
class DPTPaseudoDepthCamera:
def __init__(self, sensor, depth_estimator, model_name: str, debug=False, output_type=ImageType.RGB, boundary_depth_removal=False):
# if model_name == "dpt":
# self.depth_estimator = DPTDepthEstimator(debug)
# elif model_name == "ken3d":
# self.depth_estimator = Ken3DDepthEstimator(debug=debug)
self.depth_estimator = depth_estimator
# self.depth_estimator = InferenceHelper()
self.sensor = sensor
self.output_type = output_type
self.boundary_depth_removal = boundary_depth_removal
# self.semantic_segmentater = semantic_segmenter
def get_color_and_depth(self):
color = self.sensor.get_color()
# color = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = util.io.read_image(img_name)
depth = self.depth_estimator.estimate_depth(color)
# import IPython; IPython.embed()
if self.boundary_depth_removal:
color2 = self.semantic_segmentater.convert_to_semantic_image(color)
color_rgb = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
cv2.imshow("semantic", color2)
# color2 = np.copy(smimg)
# show_image_blocking(color_rgb)
color2 = cv2.cvtColor(color2, cv2.COLOR_RGB2GRAY)
color2[color2 > 0] = 255
# show_image_blocking(color2, cmap = "gray")
canny_img = cv2.Canny(color2, 50, 110)
kernel = np.ones((5,5),np.uint8)
canny_img = cv2.dilate(canny_img,kernel,iterations = 1)
cv2.imshow("test2222", canny_img)
depth[canny_img == 255] = 0
color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
if self.output_type == ImageType.BGR:
return color, depth
elif self.output_type == ImageType.RGB:
return color, depth
else:
raise RuntimeError(f"no such image type {self.output_type}.") | en | 0.263209 | # from remimi.monodepth.adabin import InferenceHelper # if model_name == "dpt": # self.depth_estimator = DPTDepthEstimator(debug) # elif model_name == "ken3d": # self.depth_estimator = Ken3DDepthEstimator(debug=debug) # self.depth_estimator = InferenceHelper() # self.semantic_segmentater = semantic_segmenter # color = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # img = util.io.read_image(img_name) # import IPython; IPython.embed() # color2 = np.copy(smimg) # show_image_blocking(color_rgb) # show_image_blocking(color2, cmap = "gray") | 2.125669 | 2 |
2015/08_01/nips15.py | pschulam/Notebook | 0 | 6620859 | <filename>2015/08_01/nips15.py
import numpy as np
import os
from scipy.stats import multivariate_normal
from scipy.misc import logsumexp
from mypy.bsplines import universal_basis
from mypy.models import softmax
from mypy.util import as_row, as_col
class NipsModel:
    """Mixture-of-subtypes Gaussian-process trajectory model.

    Each subtype z has a mean trajectory composed of a population effect
    (b applied to covariates x1) plus a subtype-specific spline curve B[z];
    residuals follow a GP with the module-level `kernel`. W parameterizes a
    softmax prior over subtypes driven by covariates x2.
    """

    def __init__(self, b, B, W, basis_param, kernel_param):
        self.b = b                      # population-level regression coefficients
        self.B = B                      # subtype spline coefficients, one row per subtype
        self.W = W                      # softmax weights of the subtype prior
        self.k = B.shape[0]             # number of subtypes
        self.basis_param = basis_param
        self.basis = universal_basis(*self.basis_param.values())
        self.kernel_param = kernel_param

    @classmethod
    def from_directory(cls, directory):
        """Load a fitted model from the .dat parameter files in *directory*."""
        param_files = ['basis.dat', 'kernel.dat', 'pop.dat', 'subpop.dat', 'marginal.dat']
        param_paths = [os.path.join(directory, f) for f in param_files]
        basis = np.loadtxt(param_paths[0])
        bparam = BasisParam(tuple(basis[:2]), basis[2], basis[3])
        kernel = np.loadtxt(param_paths[1])
        kparam = KernelParam(*tuple(kernel))
        b = np.loadtxt(param_paths[2])
        B = np.loadtxt(param_paths[3])
        W = np.loadtxt(param_paths[4])
        # Prepend zero weights for the reference subtype.
        W = np.r_[np.zeros((1, W.shape[1])), W]
        return cls(b, B, W, bparam, kparam)

    @property
    def num_subtypes(self):
        return self.k

    def phi1(self, x):
        """Design matrix of the population effect (intercept column only)."""
        return np.ones((x.size, 1))

    def phi2(self, x):
        """Spline basis expansion of the observation times."""
        return self.basis.eval(x)

    def covariance(self, x1, x2=None):
        """Residual GP covariance between time vectors x1 and x2."""
        return kernel(x1, x2, *self.kernel_param.values())

    def trajectory_means(self, t, x):
        """Mean trajectory at times *t* for covariates *x*; one row per subtype."""
        from numpy import dot
        b, B = self.b, self.B
        P1 = self.phi1(t)
        P2 = self.phi2(t)
        m1 = dot(P1, dot(b, x)).ravel()
        m2 = dot(B, P2.T)
        return m1 + m2

    def trajectory_logl(self, t, x, y, z):
        """Gaussian log-likelihood of trajectory *y* under subtype *z*."""
        if t.size < 1:
            return 0.0  # no observations carry no information
        m = self.trajectory_means(t, x)[z]
        S = self.covariance(t)
        return multivariate_normal.logpdf(y, m, S)

    def prior(self, t, x1, x2, y):
        """Log prior over subtypes from the softmax regression on x2."""
        return softmax.regression_log_proba(x2, self.W)

    def likelihood(self, t, x1, x2, y):
        """Per-subtype log-likelihoods of the observed trajectory."""
        subtypes = range(self.k)
        return np.array([self.trajectory_logl(t, x1, y, z) for z in subtypes])

    def joint(self, t, x1, x2, y):
        """Unnormalized log joint over subtypes: log prior + log likelihood."""
        prior = self.prior(t, x1, x2, y)
        likel = self.likelihood(t, x1, x2, y)
        return prior + likel

    def posterior(self, t, x1, x2, y):
        """Posterior probabilities over subtypes (prior alone when no data)."""
        if len(t) == 0:
            return np.exp(self.prior(t, x1, x2, y))
        else:
            j = self.joint(t, x1, x2, y)
            return np.exp(j - logsumexp(j))

    def evidence(self, t, x1, x2, y):
        """Log marginal likelihood of the observations."""
        j = self.joint(t, x1, x2, y)
        return logsumexp(j)

    def predict(self, tnew, t, x1, x2, y):
        """Posterior-predictive means at *tnew* (one row per subtype) and covariance.

        Fixed: the original called the unqualified name `trajectory_means`,
        which does not exist at module level and raised NameError; it must be
        self.trajectory_means.
        """
        if len(t) == 0:
            Y = self.trajectory_means(tnew, x1)
            K = self.covariance(tnew)
        else:
            R = y - self.trajectory_means(t, x1)
            Y = self.trajectory_means(tnew, x1)
            K = None
            for i, r in enumerate(R):
                yhat, Khat = _gp_posterior(tnew, t, r, self.covariance)
                Y[i] += yhat
                K = Khat
        return Y, K
class BasisParam:
    """Spline-basis settings: boundary knots, polynomial degree, feature count."""

    def __init__(self, boundaries, degree, num_features):
        self.boundaries = boundaries
        self.degree = degree
        self.num_features = num_features

    def values(self):
        """Return the settings as a (boundaries, degree, num_features) tuple."""
        return (self.boundaries, self.degree, self.num_features)
class KernelParam:
    """Hyperparameters of the constant + OU (+ noise) covariance kernel."""

    def __init__(self, a_const=1.0, a_ou=1.0, l_ou=1.0, a_noise=1.0):
        self.a_const = a_const    # constant-kernel amplitude
        self.a_ou = a_ou          # OU amplitude
        self.l_ou = l_ou          # OU length scale
        self.a_noise = a_noise    # iid noise variance

    def values(self):
        """Return (a_const, a_ou, l_ou, a_noise)."""
        return (self.a_const, self.a_ou, self.l_ou, self.a_noise)
class PatientData:
    """Observations for one patient: times t, marker values y, covariates x1/x2."""

    def __init__(self, ptid, t, y, x1, x2):
        self.ptid = ptid
        # An all-NaN series is treated as "no observations at all".
        self.t = t.copy() if not np.all(np.isnan(t)) else np.array([])
        self.y = y.copy() if not np.all(np.isnan(y)) else np.array([])
        self.x1 = x1
        self.x2 = x2

    @classmethod
    def from_tbl(cls, tbl, t, y, x1, x2):
        """Build a PatientData from a single-patient pandas table.

        t/y/x1/x2 name the columns holding times, marker values, and the two
        covariate groups; x2 gets an intercept term prepended.
        """
        fields = {}
        fields['ptid'] = int(tbl['ptid'].values[0])
        fields['t'] = tbl[t].values
        fields['y'] = tbl[y].values
        fields['x1'] = np.asarray(tbl.loc[:, x1].drop_duplicates()).ravel()
        x2_vals = np.asarray(tbl.loc[:, x2].drop_duplicates()).ravel()
        fields['x2'] = np.r_[1.0, x2_vals]
        return cls(**fields)

    def unpack(self):
        """Return (t, x1, x2, y)."""
        return self.t, self.x1, self.x2, self.y

    def truncate(self, censor_time, after=False):
        """Keep observations at or before *censor_time* (strictly after when after=True)."""
        keep = self.t > censor_time if after else self.t <= censor_time
        return self.__class__(self.ptid, self.t[keep], self.y[keep], self.x1, self.x2)
def kernel(x1, x2=None, a_const=1.0, a_ou=1.0, l_ou=1.0, a_noise=1.0):
    """Constant + OU covariance between x1 and x2.

    When x2 is omitted the matrix is the symmetric self-covariance of x1
    and iid noise is added on the diagonal.
    """
    symmetric = x2 is None
    d = _differences(x1, x1 if symmetric else x2)
    K = a_const * np.ones_like(d) + _ou_kernel(d, a_ou, l_ou)
    if symmetric:
        K = K + a_noise * np.eye(x1.size)
    return K
def _ou_kernel(d, a, l):
return a * np.exp( - np.abs(d) / l )
def _differences(x1, x2):
    # Pairwise difference matrix: entry (i, j) = x1[i] - x2[j].
    # NOTE(review): as_col/as_row come from mypy.util — presumably they reshape
    # to column/row vectors so broadcasting yields the full matrix; confirm.
    return as_col(x1) - as_row(x2)
def _gp_posterior(tnew, t, y, kern):
from numpy import dot
from scipy.linalg import inv, solve
K11 = kern(tnew)
K12 = kern(tnew, t)
K22 = kern(t)
m = dot(K12, solve(K22, y))
K = K11 - dot(K12, solve(K22, K12.T))
return m, K
| <filename>2015/08_01/nips15.py
import numpy as np
import os
from scipy.stats import multivariate_normal
from scipy.misc import logsumexp
from mypy.bsplines import universal_basis
from mypy.models import softmax
from mypy.util import as_row, as_col
class NipsModel:
def __init__(self, b, B, W, basis_param, kernel_param):
self.b = b
self.B = B
self.W = W
self.k = B.shape[0]
self.basis_param = basis_param
self.basis = universal_basis(*self.basis_param.values())
self.kernel_param = kernel_param
@classmethod
def from_directory(cls, directory):
param_files = ['basis.dat', 'kernel.dat', 'pop.dat', 'subpop.dat', 'marginal.dat']
param_paths = [os.path.join(directory, f) for f in param_files]
basis = np.loadtxt(param_paths[0])
bparam = BasisParam(tuple(basis[:2]), basis[2], basis[3])
kernel = np.loadtxt(param_paths[1])
kparam = KernelParam(*tuple(kernel))
b = np.loadtxt(param_paths[2])
B = np.loadtxt(param_paths[3])
W = np.loadtxt(param_paths[4])
W = np.r_[ np.zeros((1, W.shape[1])), W ]
return cls(b, B, W, bparam, kparam)
@property
def num_subtypes(self):
return self.k
def phi1(self, x):
return np.ones((x.size, 1))
def phi2(self, x):
return self.basis.eval(x)
def covariance(self, x1, x2=None):
return kernel(x1, x2, *self.kernel_param.values())
def trajectory_means(self, t, x):
from numpy import dot
b, B = self.b, self.B
P1 = self.phi1(t)
P2 = self.phi2(t)
m1 = dot(P1, dot(b, x)).ravel()
m2 = dot(B, P2.T)
return m1 + m2
def trajectory_logl(self, t, x, y, z):
if t.size < 1:
return 0.0
m = self.trajectory_means(t, x)[z]
S = self.covariance(t)
return multivariate_normal.logpdf(y, m, S)
def prior(self, t, x1, x2, y):
return softmax.regression_log_proba(x2, self.W)
def likelihood(self, t, x1, x2, y):
subtypes = range(self.k)
return np.array([self.trajectory_logl(t, x1, y, z) for z in subtypes])
def joint(self, t, x1, x2, y):
prior = self.prior(t, x1, x2, y)
likel = self.likelihood(t, x1, x2, y)
return prior + likel
def posterior(self, t, x1, x2, y):
if len(t) == 0:
return np.exp(self.prior(t, x1, x2, y))
else:
j = self.joint(t, x1, x2, y)
return np.exp(j - logsumexp(j))
def evidence(self, t, x1, x2, y):
j = self.joint(t, x1, x2, y)
return logsumexp(j)
def predict(self, tnew, t, x1, x2, y):
if len(t) == 0:
Y = trajectory_means(tnew, x1)
K = self.covariance(tnew)
else:
R = y - trajectory_means(t, x1)
Y = trajectory_means(tnew, x1)
K = None
for i, r in enumerate(R):
yhat, Khat = _gp_posterior(tnew, t, r, self.covariance)
Y[i] += yhat
K = Khat
return Y, K
class BasisParam:
def __init__(self, boundaries, degree, num_features):
self.boundaries = boundaries
self.degree = degree
self.num_features = num_features
def values(self):
return self.boundaries, self.degree, self.num_features
class KernelParam:
def __init__(self, a_const=1.0, a_ou=1.0, l_ou=1.0, a_noise=1.0):
self.a_const = a_const
self.a_ou = a_ou
self.l_ou = l_ou
self.a_noise = a_noise
def values(self):
return self.a_const, self.a_ou, self.l_ou, self.a_noise
class PatientData:
def __init__(self, ptid, t, y, x1, x2):
self.ptid = ptid
self.t = np.array([]) if np.all(np.isnan(t)) else t.copy()
self.y = np.array([]) if np.all(np.isnan(y)) else y.copy()
self.x1 = x1
self.x2 = x2
@classmethod
def from_tbl(cls, tbl, t, y, x1, x2):
pd = {}
pd['ptid'] = int(tbl['ptid'].values[0])
pd['t'] = tbl[t].values
pd['y'] = tbl[y].values
pd['x1'] = np.asarray(tbl.loc[:, x1].drop_duplicates()).ravel()
pd['x2'] = np.asarray(tbl.loc[:, x2].drop_duplicates()).ravel()
pd['x2'] = np.r_[1.0, pd['x2']]
return cls(**pd)
def unpack(self):
return self.t, self.x1, self.x2, self.y
def truncate(self, censor_time, after=False):
if after:
obs = self.t > censor_time
else:
obs = self.t <= censor_time
return self.__class__(self.ptid, self.t[obs], self.y[obs], self.x1, self.x2)
def kernel(x1, x2=None, a_const=1.0, a_ou=1.0, l_ou=1.0, a_noise=1.0):
symmetric = x2 is None
d = _differences(x1, x1) if symmetric else _differences(x1, x2)
K = a_const * np.ones_like(d)
K += _ou_kernel(d, a_ou, l_ou)
if symmetric:
K += a_noise * np.eye(x1.size)
return K
def _ou_kernel(d, a, l):
return a * np.exp( - np.abs(d) / l )
def _differences(x1, x2):
return as_col(x1) - as_row(x2)
def _gp_posterior(tnew, t, y, kern):
from numpy import dot
from scipy.linalg import inv, solve
K11 = kern(tnew)
K12 = kern(tnew, t)
K22 = kern(t)
m = dot(K12, solve(K22, y))
K = K11 - dot(K12, solve(K22, K12.T))
return m, K
| none | 1 | 1.941129 | 2 | |
generators/campaigns_generator.py | caroljunq/create-retail-mock-data | 0 | 6620860 | <reponame>caroljunq/create-retail-mock-data
# Generates the campaigns dataset. The default campaigns come from the config,
# where they can be changed, added, or removed.
import json
import pandas as pd
import random

# read the generator configuration
with open('../config.json') as data:
    config = json.load(data)

# pull the settings this generator needs
out_path = config["output_path_files"]
outfile = config["campaigns"]["outfile"]
language = config["language"]
campaigns = config[language]["campaigns"]

camps = []
for campaign_id, title in enumerate(campaigns, start=1):
    print(campaign_id, "processed")
    # each campaign gets a random discount between 10% and 50%
    discount = round(random.uniform(0.1, 0.5), 2)
    camps.append((campaign_id, title, discount))

# sentinel row meaning "no campaign"
camps.append((0, "None", 0))

# assemble the final table and persist it as CSV
df = pd.DataFrame(camps)
columns_names = ["campaign_id", "title", "discount"]
print("Saving file...")
f = df.to_csv(out_path + outfile, header=columns_names, sep=",", index=False)
print("File was saved at path {}".format(out_path + outfile))
import json
import pandas as pd
import random
# reading config
with open('../config.json') as data:
config = json.load(data)
# setting up variables
out_path = config["output_path_files"]
outfile = config["campaigns"]["outfile"]
language = config["language"]
campaigns = config[language]["campaigns"]
outsize = len(campaigns)
camps = []
for i in range(outsize):
print(i + 1,"processed")
campaign_id = i + 1
discount = round(random.uniform(0.1, 0.5),2)
camps.append((campaign_id,campaigns[i],discount))
# adding "no campaign"
camps.append((0,"None",0))
# creating a data frame with the final results
df = pd.DataFrame(camps)
# columns names aka header
columns_names = ["campaign_id","title","discount"]
print("Saving file...")
# writing file
f = df.to_csv(out_path + outfile,header=columns_names,sep=",",index=False)
print("File was saved at path {}".format(out_path + outfile)) | en | 0.856008 | # This is dataset is composed by default campaigns, but you can change or add/remove them in the config. # reading config # setting up variables # adding "no campaign" # creating a data frame with the final results # columns names aka header # writing file | 2.977647 | 3 |
exercicios/Lista2/Q16.py | AlexandrePeBrito/CursoUdemyPython | 0 | 6620861 | #Usando switch, escreva um programa que leia um inteiro
#entre 1 e 12 e imprima o mês correspondente a este numero.
#Isto é, janeiro se 1, fevereiro se 2, e assim por diante.
num=int(input("Informe um numero inteiro(entre 1 e 12): "))
if(num==1):
print("Janeiro")
elif(num==2):
print("Fevereiro")
elif(num==3):
print("Março")
elif(num==4):
print("Abril")
elif(num==5):
print("Maio")
elif(num==6):
print("Junho")
elif(num==7):
print("Julho")
elif(num==8):
print("Agosto")
elif(num==9):
print("Setembro")
elif(num==10):
print("Outubro")
elif(num==11):
print("Novembro")
elif(num==12):
print("Dezembro")
| #Usando switch, escreva um programa que leia um inteiro
#entre 1 e 12 e imprima o mês correspondente a este numero.
#Isto é, janeiro se 1, fevereiro se 2, e assim por diante.
num=int(input("Informe um numero inteiro(entre 1 e 12): "))
if(num==1):
print("Janeiro")
elif(num==2):
print("Fevereiro")
elif(num==3):
print("Março")
elif(num==4):
print("Abril")
elif(num==5):
print("Maio")
elif(num==6):
print("Junho")
elif(num==7):
print("Julho")
elif(num==8):
print("Agosto")
elif(num==9):
print("Setembro")
elif(num==10):
print("Outubro")
elif(num==11):
print("Novembro")
elif(num==12):
print("Dezembro")
| pt | 0.966794 | #Usando switch, escreva um programa que leia um inteiro #entre 1 e 12 e imprima o mês correspondente a este numero. #Isto é, janeiro se 1, fevereiro se 2, e assim por diante. | 3.87355 | 4 |
project/vision_backend/tests/tasks/test_classify.py | beijbom/coralnet | 31 | 6620862 | <filename>project/vision_backend/tests/tasks/test_classify.py
from unittest import mock
from django.core.cache import cache
from django.db import IntegrityError
from django.test import override_settings
from django.test.utils import patch_logger
import numpy as np
import spacer.config as spacer_config
from accounts.utils import get_robot_user, is_robot_user
from annotations.models import Annotation
from images.models import Point
from vision_backend.models import Score
from vision_backend.tasks import (
classify_image, collect_all_jobs, submit_classifier)
from .utils import BaseTaskTest
class ClassifyImageTest(BaseTaskTest):
    """Tests for the vision-backend image-classification task, covering
    unannotated, partially confirmed, fully confirmed, and re-classified
    images, plus legacy feature formats and duplicate points."""

    def test_classify_unannotated_image(self):
        """Classify an image where all points are unannotated."""
        self.upload_data_and_train_classifier()

        # Image without annotations
        img = self.upload_image(self.user, self.source)
        # Process feature extraction results + classify image
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            try:
                point.annotation
            except Annotation.DoesNotExist:
                self.fail("New image's points should be classified")
            self.assertTrue(
                is_robot_user(point.annotation.user),
                "Image should have robot annotations")
            # Score count per point should be label count or 5,
            # whichever is less. (In this case it's label count)
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

    def test_more_than_5_labels(self):
        """
        When there are more than 5 labels, score count should be capped to 5.
        """
        # Increase label count from 2 to 8.
        labels = self.create_labels(
            self.user, ['C', 'D', 'E', 'F', 'G', 'H'], "Group2")
        self.create_labelset(self.user, self.source, labels | self.labels)

        # Use each label, so that they all have enough training
        # data to be considered during classification.
        img = self.upload_image(self.user, self.source)
        self.add_annotations(
            self.user, img, {1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E'})
        img = self.upload_image(self.user, self.source)
        self.add_annotations(
            self.user, img, {1: 'F', 2: 'G', 3: 'H', 4: 'B', 5: 'C'})
        img = self.upload_image(self.user, self.source)
        self.add_annotations(
            self.user, img, {1: 'D', 2: 'E', 3: 'F', 4: 'G', 5: 'H'})

        # This uploads a bunch of images using nothing but A/B, then runs
        # tasks needed to train a classifier.
        self.upload_data_and_train_classifier()

        # Upload, extract features, classify
        img = self.upload_image(self.user, self.source)
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            # Score count per point should be label count or 5,
            # whichever is less. (In this case 5)
            # Or apparently in rare cases there may be less than 5, possibly
            # since the scores are integers?
            # But the point is that there shouldn't be 8 scores.
            # BUG FIX: the original call was assertLessEqual(5, count), which
            # asserts count >= 5 — the opposite of the stated intent. The
            # score count is the value that must be <= 5.
            self.assertLessEqual(
                point.score_set.count(), 5,
                "Each point should have <= 5 scores")

    def test_classify_unconfirmed_image(self):
        """
        Classify an image which has already been machine-classified
        previously.
        """
        def mock_classify_msg_1(
                self_, runtime, scores, classes, valid_rowcol):
            self_.runtime = runtime
            self_.classes = classes
            self_.valid_rowcol = valid_rowcol

            # 1 list per point; 1 float score per label per point.
            # This would classify as all A.
            scores_simple = [
                [0.8, 0.2], [0.8, 0.2], [0.8, 0.2], [0.8, 0.2], [0.8, 0.2],
            ]
            self_.scores = []
            for i, score in enumerate(scores):
                self_.scores.append((score[0], score[1], scores_simple[i]))

        def mock_classify_msg_2(
                self_, runtime, scores, classes, valid_rowcol):
            self_.runtime = runtime
            self_.classes = classes
            self_.valid_rowcol = valid_rowcol

            # This would classify as 3 A's, 2 B's.
            # We'll just check the count of each label later to check
            # correctness of results, since assigning specific scores to
            # specific points is trickier to keep track of.
            scores_simple = [
                [0.6, 0.4], [0.4, 0.6], [0.4, 0.6], [0.6, 0.4], [0.6, 0.4],
            ]
            self_.scores = []
            for i, score in enumerate(scores):
                self_.scores.append((score[0], score[1], scores_simple[i]))

        self.upload_data_and_train_classifier()
        clf_1 = self.source.get_latest_robot()

        # Upload
        img = self.upload_image(self.user, self.source)
        # Extract features + classify with a particular set of scores
        with mock.patch(
                'spacer.messages.ClassifyReturnMsg.__init__',
                mock_classify_msg_1):
            collect_all_jobs()

        # Accept another classifier. Override settings so that 1) we
        # don't need more images to train a new classifier, and 2) we don't
        # need improvement to mark a new classifier as accepted.
        with override_settings(
                NEW_CLASSIFIER_TRAIN_TH=0.0001,
                NEW_CLASSIFIER_IMPROVEMENT_TH=0.0001):
            submit_classifier(self.source.pk)
            # 1) Save classifier. 2) re-classify with a different set of
            # scores so that specific points get their labels changed (and
            # other points don't).
            with mock.patch(
                    'spacer.messages.ClassifyReturnMsg.__init__',
                    mock_classify_msg_2):
                collect_all_jobs()

        clf_2 = self.source.get_latest_robot()
        all_classifiers = self.source.classifier_set.all()
        # Debug string included in the failure message below so a flaky
        # training run can be diagnosed from the test output alone.
        message = (
            f"clf 1 and 2 IDs: {clf_1.pk}, {clf_2.pk}"
            + " | All classifier IDs: {}".format(
                list(all_classifiers.values_list('pk', flat=True)))
            + "".join([
                f" | pk {clf.pk} details: status={clf.status},"
                f" accuracy={clf.accuracy}, images={clf.nbr_train_images}"
                for clf in all_classifiers])
        )
        self.assertNotEqual(
            clf_1.pk, clf_2.pk,
            f"Should have a new accepted classifier. Debug info: {message}")

        for point in Point.objects.filter(image=img):
            self.assertTrue(
                is_robot_user(point.annotation.user),
                "Should still have robot annotations")
        self.assertEqual(
            3,
            Point.objects.filter(
                image=img, annotation__label__name='A').count(),
            "3 points should be labeled A")
        self.assertEqual(
            2,
            Point.objects.filter(
                image=img, annotation__label__name='B').count(),
            "2 points should be labeled B")
        self.assertEqual(
            3,
            Point.objects.filter(
                image=img, annotation__robot_version=clf_1).count(),
            "3 points should still be under classifier 1")
        self.assertEqual(
            2,
            Point.objects.filter(
                image=img, annotation__robot_version=clf_2).count(),
            "2 points should have been updated by classifier 2")

    def test_classify_partially_confirmed_image(self):
        """
        Classify an image where some, but not all points have confirmed
        annotations.
        """
        self.upload_data_and_train_classifier()

        # Image without annotations
        img = self.upload_image(self.user, self.source)
        # Add partial annotations
        self.add_annotations(self.user, img, {1: 'A'})
        # Process feature extraction results + classify image
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            if point.point_number == 1:
                self.assertFalse(
                    is_robot_user(point.annotation.user),
                    "The confirmed annotation should still be confirmed")
            else:
                self.assertTrue(
                    is_robot_user(point.annotation.user),
                    "The other annotations should be unconfirmed")
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

    def test_classify_confirmed_image(self):
        """Attempt to classify an image where all points are confirmed."""
        self.upload_data_and_train_classifier()

        # Image with annotations
        img = self.upload_image_with_annotations('confirmed.png')
        # Process feature extraction results
        collect_all_jobs()

        # Try to classify
        classify_image(img.id)

        for point in Point.objects.filter(image__id=img.id):
            self.assertFalse(
                is_robot_user(point.annotation.user),
                "Image should still have confirmed annotations")
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

    def test_classify_scores_and_labels_match(self):
        """
        Check that the Scores and the labels assigned by classification are
        consistent with each other.
        """
        self.upload_data_and_train_classifier()

        # Upload, extract features, classify
        img = self.upload_image(self.user, self.source)
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            ann = point.annotation
            scores = Score.objects.filter(point=point)
            posteriors = [score.score for score in scores]
            self.assertEqual(
                scores[int(np.argmax(posteriors))].label, ann.label,
                "Max score label should match the annotation label."
                " Posteriors: {}".format(posteriors))

    def test_with_dupe_points(self):
        """
        The image to be classified has two points with the same row/column.
        """
        # Provide enough data for training
        self.upload_images_for_training(
            train_image_count=spacer_config.MIN_TRAINIMAGES, val_image_count=1)
        # Add one image without annotations, including a duplicate point
        img = self.upload_image_with_dupe_points('has_dupe.png')
        # Extract features
        collect_all_jobs()
        # Train classifier + classify image
        submit_classifier(self.source.pk)
        collect_all_jobs()

        self.assertEqual(
            len(self.rowcols_with_dupes_included),
            Annotation.objects.filter(image__id=img.id).count(),
            "New image should be classified, including dupe points")

    def test_legacy_features(self):
        """Classify an image which has features saved in the legacy format."""
        def mock_classify_msg(
                self_, runtime, scores, classes, valid_rowcol):
            self_.runtime = runtime
            # valid_rowcol=False marks the legacy format: scores carry dummy
            # (0, 0) row/col pairs and are matched to points by order.
            self_.scores = [
                (0, 0, [0.2, 0.8]),
                (0, 0, [0.8, 0.2]),
                (0, 0, [0.2, 0.8]),
                (0, 0, [0.2, 0.8]),
                (0, 0, [0.8, 0.2]),
            ]
            self_.classes = classes
            self_.valid_rowcol = False

        self.upload_data_and_train_classifier()

        # Image without annotations
        img = self.upload_image(self.user, self.source)
        # Process feature extraction results + classify image
        with mock.patch(
                'spacer.messages.ClassifyReturnMsg.__init__',
                mock_classify_msg):
            collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            try:
                point.annotation
            except Annotation.DoesNotExist:
                self.fail("New image's points should be classified")
            self.assertTrue(
                is_robot_user(point.annotation.user),
                "Image should have robot annotations")
            # Score count per point should be label count or 5,
            # whichever is less. (In this case it's label count)
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

        # Check the labels to make sure the mock was actually applied. For
        # legacy features, the scores are assumed to be in order of point pk.
        actual_labels = Point.objects.filter(image__id=img.id) \
            .order_by('pk').values_list('annotation__label__name', flat=True)
        self.assertListEqual(
            ['B', 'A', 'B', 'B', 'A'], list(actual_labels),
            "Applied labels match the given scores")
class AbortCasesTest(BaseTaskTest):
    """Test cases where the task would abort before reaching the end."""

    def test_classify_nonexistent_image(self):
        """Try to classify a nonexistent image ID."""
        # To get a nonexistent image ID, upload an image, get its ID, then
        # delete the image.
        img = self.upload_image(self.user, self.source)
        image_id = img.pk
        img.delete()

        # patch_logger is an undocumented Django test utility. It lets us check
        # logged messages.
        # https://stackoverflow.com/a/54055056
        with patch_logger('vision_backend.tasks', 'info') as log_messages:
            classify_image(image_id)

            # The task should log an abort message rather than raise.
            log_message = "Image {} does not exist.".format(image_id)
            self.assertIn(
                log_message, log_messages,
                "Should log the appropriate message")

    def test_classify_without_features(self):
        """Try to classify an image without features extracted."""
        self.upload_data_and_train_classifier()

        img = self.upload_image(self.user, self.source)

        # collect_all_jobs() is deliberately NOT run here, so this image has
        # no extracted features when classification is attempted.
        classify_image(img.pk)

        self.assertFalse(
            Annotation.objects.filter(image=img).exists(),
            "Image shouldn't have been classified")

    def test_classify_without_classifier(self):
        """Try to classify an image without a classifier for the source."""
        img = self.upload_image(self.user, self.source)
        # Extract features
        collect_all_jobs()

        # Try to classify
        classify_image(img.pk)

        self.assertFalse(
            Annotation.objects.filter(image=img).exists(),
            "Image shouldn't have been classified")

    def test_integrity_error_when_saving_annotations(self):
        """Get an IntegrityError when saving annotations."""
        self.upload_data_and_train_classifier()
        classifier = self.source.get_latest_robot()
        img = self.upload_image(self.user, self.source)

        # Uses the Django cache as cross-call state so the mock can behave
        # differently on its first and subsequent invocations.
        def mock_update_annotation(
                point, label, now_confirmed, user_or_robot_version):
            # Raise an IntegrityError on the FIRST call only. We want to get an
            # IntegrityError the first time and then do fine the second time.
            # Due to auto-retries and CELERY_ALWAYS_EAGER, if we always raised
            # the error, we'd infinite-loop.
            if not cache.get('raised_integrity_error'):
                cache.set('raised_integrity_error', True)
                raise IntegrityError

            # This is a simple saving case (for brevity) which works for this
            # particular test.
            new_annotation = Annotation(
                point=point, image=point.image,
                source=point.image.source, label=label,
                user=get_robot_user(),
                robot_version=user_or_robot_version)
            new_annotation.save()

        # Extract features + classify
        with patch_logger('vision_backend.tasks', 'info') as log_messages:
            with mock.patch(
                    'annotations.models.Annotation.objects'
                    '.update_point_annotation_if_applicable',
                    mock_update_annotation):
                collect_all_jobs()

            log_message = (
                "Failed to classify Image {} [Source: {} [{}] with "
                "classifier {}. There might have been a race condition "
                "when trying to save annotations. Will try again later."
                .format(
                    img.pk, img.source, img.source_id, classifier.pk
                )
            )
            self.assertIn(
                log_message, log_messages,
                "Should log the appropriate message")

            self.assertTrue(
                Annotation.objects.filter(image=img).exists(),
                "Image should have been classified after retry")
| <filename>project/vision_backend/tests/tasks/test_classify.py
from unittest import mock
from django.core.cache import cache
from django.db import IntegrityError
from django.test import override_settings
from django.test.utils import patch_logger
import numpy as np
import spacer.config as spacer_config
from accounts.utils import get_robot_user, is_robot_user
from annotations.models import Annotation
from images.models import Point
from vision_backend.models import Score
from vision_backend.tasks import (
classify_image, collect_all_jobs, submit_classifier)
from .utils import BaseTaskTest
class ClassifyImageTest(BaseTaskTest):
    """Tests for the vision-backend image-classification task, covering
    unannotated, partially confirmed, fully confirmed, and re-classified
    images, plus legacy feature formats and duplicate points."""

    def test_classify_unannotated_image(self):
        """Classify an image where all points are unannotated."""
        self.upload_data_and_train_classifier()

        # Image without annotations
        img = self.upload_image(self.user, self.source)
        # Process feature extraction results + classify image
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            try:
                point.annotation
            except Annotation.DoesNotExist:
                self.fail("New image's points should be classified")
            self.assertTrue(
                is_robot_user(point.annotation.user),
                "Image should have robot annotations")
            # Score count per point should be label count or 5,
            # whichever is less. (In this case it's label count)
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

    def test_more_than_5_labels(self):
        """
        When there are more than 5 labels, score count should be capped to 5.
        """
        # Increase label count from 2 to 8.
        labels = self.create_labels(
            self.user, ['C', 'D', 'E', 'F', 'G', 'H'], "Group2")
        self.create_labelset(self.user, self.source, labels | self.labels)

        # Use each label, so that they all have enough training
        # data to be considered during classification.
        img = self.upload_image(self.user, self.source)
        self.add_annotations(
            self.user, img, {1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E'})
        img = self.upload_image(self.user, self.source)
        self.add_annotations(
            self.user, img, {1: 'F', 2: 'G', 3: 'H', 4: 'B', 5: 'C'})
        img = self.upload_image(self.user, self.source)
        self.add_annotations(
            self.user, img, {1: 'D', 2: 'E', 3: 'F', 4: 'G', 5: 'H'})

        # This uploads a bunch of images using nothing but A/B, then runs
        # tasks needed to train a classifier.
        self.upload_data_and_train_classifier()

        # Upload, extract features, classify
        img = self.upload_image(self.user, self.source)
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            # Score count per point should be label count or 5,
            # whichever is less. (In this case 5)
            # Or apparently in rare cases there may be less than 5, possibly
            # since the scores are integers?
            # But the point is that there shouldn't be 8 scores.
            # BUG FIX: the original call was assertLessEqual(5, count), which
            # asserts count >= 5 — the opposite of the stated intent. The
            # score count is the value that must be <= 5.
            self.assertLessEqual(
                point.score_set.count(), 5,
                "Each point should have <= 5 scores")

    def test_classify_unconfirmed_image(self):
        """
        Classify an image which has already been machine-classified
        previously.
        """
        def mock_classify_msg_1(
                self_, runtime, scores, classes, valid_rowcol):
            self_.runtime = runtime
            self_.classes = classes
            self_.valid_rowcol = valid_rowcol

            # 1 list per point; 1 float score per label per point.
            # This would classify as all A.
            scores_simple = [
                [0.8, 0.2], [0.8, 0.2], [0.8, 0.2], [0.8, 0.2], [0.8, 0.2],
            ]
            self_.scores = []
            for i, score in enumerate(scores):
                self_.scores.append((score[0], score[1], scores_simple[i]))

        def mock_classify_msg_2(
                self_, runtime, scores, classes, valid_rowcol):
            self_.runtime = runtime
            self_.classes = classes
            self_.valid_rowcol = valid_rowcol

            # This would classify as 3 A's, 2 B's.
            # We'll just check the count of each label later to check
            # correctness of results, since assigning specific scores to
            # specific points is trickier to keep track of.
            scores_simple = [
                [0.6, 0.4], [0.4, 0.6], [0.4, 0.6], [0.6, 0.4], [0.6, 0.4],
            ]
            self_.scores = []
            for i, score in enumerate(scores):
                self_.scores.append((score[0], score[1], scores_simple[i]))

        self.upload_data_and_train_classifier()
        clf_1 = self.source.get_latest_robot()

        # Upload
        img = self.upload_image(self.user, self.source)
        # Extract features + classify with a particular set of scores
        with mock.patch(
                'spacer.messages.ClassifyReturnMsg.__init__',
                mock_classify_msg_1):
            collect_all_jobs()

        # Accept another classifier. Override settings so that 1) we
        # don't need more images to train a new classifier, and 2) we don't
        # need improvement to mark a new classifier as accepted.
        with override_settings(
                NEW_CLASSIFIER_TRAIN_TH=0.0001,
                NEW_CLASSIFIER_IMPROVEMENT_TH=0.0001):
            submit_classifier(self.source.pk)
            # 1) Save classifier. 2) re-classify with a different set of
            # scores so that specific points get their labels changed (and
            # other points don't).
            with mock.patch(
                    'spacer.messages.ClassifyReturnMsg.__init__',
                    mock_classify_msg_2):
                collect_all_jobs()

        clf_2 = self.source.get_latest_robot()
        all_classifiers = self.source.classifier_set.all()
        # Debug string included in the failure message below so a flaky
        # training run can be diagnosed from the test output alone.
        message = (
            f"clf 1 and 2 IDs: {clf_1.pk}, {clf_2.pk}"
            + " | All classifier IDs: {}".format(
                list(all_classifiers.values_list('pk', flat=True)))
            + "".join([
                f" | pk {clf.pk} details: status={clf.status},"
                f" accuracy={clf.accuracy}, images={clf.nbr_train_images}"
                for clf in all_classifiers])
        )
        self.assertNotEqual(
            clf_1.pk, clf_2.pk,
            f"Should have a new accepted classifier. Debug info: {message}")

        for point in Point.objects.filter(image=img):
            self.assertTrue(
                is_robot_user(point.annotation.user),
                "Should still have robot annotations")
        self.assertEqual(
            3,
            Point.objects.filter(
                image=img, annotation__label__name='A').count(),
            "3 points should be labeled A")
        self.assertEqual(
            2,
            Point.objects.filter(
                image=img, annotation__label__name='B').count(),
            "2 points should be labeled B")
        self.assertEqual(
            3,
            Point.objects.filter(
                image=img, annotation__robot_version=clf_1).count(),
            "3 points should still be under classifier 1")
        self.assertEqual(
            2,
            Point.objects.filter(
                image=img, annotation__robot_version=clf_2).count(),
            "2 points should have been updated by classifier 2")

    def test_classify_partially_confirmed_image(self):
        """
        Classify an image where some, but not all points have confirmed
        annotations.
        """
        self.upload_data_and_train_classifier()

        # Image without annotations
        img = self.upload_image(self.user, self.source)
        # Add partial annotations
        self.add_annotations(self.user, img, {1: 'A'})
        # Process feature extraction results + classify image
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            if point.point_number == 1:
                self.assertFalse(
                    is_robot_user(point.annotation.user),
                    "The confirmed annotation should still be confirmed")
            else:
                self.assertTrue(
                    is_robot_user(point.annotation.user),
                    "The other annotations should be unconfirmed")
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

    def test_classify_confirmed_image(self):
        """Attempt to classify an image where all points are confirmed."""
        self.upload_data_and_train_classifier()

        # Image with annotations
        img = self.upload_image_with_annotations('confirmed.png')
        # Process feature extraction results
        collect_all_jobs()

        # Try to classify
        classify_image(img.id)

        for point in Point.objects.filter(image__id=img.id):
            self.assertFalse(
                is_robot_user(point.annotation.user),
                "Image should still have confirmed annotations")
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

    def test_classify_scores_and_labels_match(self):
        """
        Check that the Scores and the labels assigned by classification are
        consistent with each other.
        """
        self.upload_data_and_train_classifier()

        # Upload, extract features, classify
        img = self.upload_image(self.user, self.source)
        collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            ann = point.annotation
            scores = Score.objects.filter(point=point)
            posteriors = [score.score for score in scores]
            self.assertEqual(
                scores[int(np.argmax(posteriors))].label, ann.label,
                "Max score label should match the annotation label."
                " Posteriors: {}".format(posteriors))

    def test_with_dupe_points(self):
        """
        The image to be classified has two points with the same row/column.
        """
        # Provide enough data for training
        self.upload_images_for_training(
            train_image_count=spacer_config.MIN_TRAINIMAGES, val_image_count=1)
        # Add one image without annotations, including a duplicate point
        img = self.upload_image_with_dupe_points('has_dupe.png')
        # Extract features
        collect_all_jobs()
        # Train classifier + classify image
        submit_classifier(self.source.pk)
        collect_all_jobs()

        self.assertEqual(
            len(self.rowcols_with_dupes_included),
            Annotation.objects.filter(image__id=img.id).count(),
            "New image should be classified, including dupe points")

    def test_legacy_features(self):
        """Classify an image which has features saved in the legacy format."""
        def mock_classify_msg(
                self_, runtime, scores, classes, valid_rowcol):
            self_.runtime = runtime
            # valid_rowcol=False marks the legacy format: scores carry dummy
            # (0, 0) row/col pairs and are matched to points by order.
            self_.scores = [
                (0, 0, [0.2, 0.8]),
                (0, 0, [0.8, 0.2]),
                (0, 0, [0.2, 0.8]),
                (0, 0, [0.2, 0.8]),
                (0, 0, [0.8, 0.2]),
            ]
            self_.classes = classes
            self_.valid_rowcol = False

        self.upload_data_and_train_classifier()

        # Image without annotations
        img = self.upload_image(self.user, self.source)
        # Process feature extraction results + classify image
        with mock.patch(
                'spacer.messages.ClassifyReturnMsg.__init__',
                mock_classify_msg):
            collect_all_jobs()

        for point in Point.objects.filter(image__id=img.id):
            try:
                point.annotation
            except Annotation.DoesNotExist:
                self.fail("New image's points should be classified")
            self.assertTrue(
                is_robot_user(point.annotation.user),
                "Image should have robot annotations")
            # Score count per point should be label count or 5,
            # whichever is less. (In this case it's label count)
            self.assertEqual(
                2, point.score_set.count(), "Each point should have scores")

        # Check the labels to make sure the mock was actually applied. For
        # legacy features, the scores are assumed to be in order of point pk.
        actual_labels = Point.objects.filter(image__id=img.id) \
            .order_by('pk').values_list('annotation__label__name', flat=True)
        self.assertListEqual(
            ['B', 'A', 'B', 'B', 'A'], list(actual_labels),
            "Applied labels match the given scores")
class AbortCasesTest(BaseTaskTest):
    """Test cases where the task would abort before reaching the end."""

    def test_classify_nonexistent_image(self):
        """Try to classify a nonexistent image ID."""
        # To get a nonexistent image ID, upload an image, get its ID, then
        # delete the image.
        img = self.upload_image(self.user, self.source)
        image_id = img.pk
        img.delete()

        # patch_logger is an undocumented Django test utility. It lets us check
        # logged messages.
        # https://stackoverflow.com/a/54055056
        with patch_logger('vision_backend.tasks', 'info') as log_messages:
            classify_image(image_id)

            # The task should log an abort message rather than raise.
            log_message = "Image {} does not exist.".format(image_id)
            self.assertIn(
                log_message, log_messages,
                "Should log the appropriate message")

    def test_classify_without_features(self):
        """Try to classify an image without features extracted."""
        self.upload_data_and_train_classifier()

        img = self.upload_image(self.user, self.source)

        # collect_all_jobs() is deliberately NOT run here, so this image has
        # no extracted features when classification is attempted.
        classify_image(img.pk)

        self.assertFalse(
            Annotation.objects.filter(image=img).exists(),
            "Image shouldn't have been classified")

    def test_classify_without_classifier(self):
        """Try to classify an image without a classifier for the source."""
        img = self.upload_image(self.user, self.source)
        # Extract features
        collect_all_jobs()

        # Try to classify
        classify_image(img.pk)

        self.assertFalse(
            Annotation.objects.filter(image=img).exists(),
            "Image shouldn't have been classified")

    def test_integrity_error_when_saving_annotations(self):
        """Get an IntegrityError when saving annotations."""
        self.upload_data_and_train_classifier()
        classifier = self.source.get_latest_robot()
        img = self.upload_image(self.user, self.source)

        # Uses the Django cache as cross-call state so the mock can behave
        # differently on its first and subsequent invocations.
        def mock_update_annotation(
                point, label, now_confirmed, user_or_robot_version):
            # Raise an IntegrityError on the FIRST call only. We want to get an
            # IntegrityError the first time and then do fine the second time.
            # Due to auto-retries and CELERY_ALWAYS_EAGER, if we always raised
            # the error, we'd infinite-loop.
            if not cache.get('raised_integrity_error'):
                cache.set('raised_integrity_error', True)
                raise IntegrityError

            # This is a simple saving case (for brevity) which works for this
            # particular test.
            new_annotation = Annotation(
                point=point, image=point.image,
                source=point.image.source, label=label,
                user=get_robot_user(),
                robot_version=user_or_robot_version)
            new_annotation.save()

        # Extract features + classify
        with patch_logger('vision_backend.tasks', 'info') as log_messages:
            with mock.patch(
                    'annotations.models.Annotation.objects'
                    '.update_point_annotation_if_applicable',
                    mock_update_annotation):
                collect_all_jobs()

            log_message = (
                "Failed to classify Image {} [Source: {} [{}] with "
                "classifier {}. There might have been a race condition "
                "when trying to save annotations. Will try again later."
                .format(
                    img.pk, img.source, img.source_id, classifier.pk
                )
            )
            self.assertIn(
                log_message, log_messages,
                "Should log the appropriate message")

            self.assertTrue(
                Annotation.objects.filter(image=img).exists(),
                "Image should have been classified after retry")
| en | 0.910465 | Classify an image where all points are unannotated. # Image without annotations # Process feature extraction results + classify image # Score count per point should be label count or 5, # whichever is less. (In this case it's label count) When there are more than 5 labels, score count should be capped to 5. # Increase label count from 2 to 8. # Use each label, so that they all have enough training # data to be considered during classification. # This uploads a bunch of images using nothing but A/B, then runs # tasks needed to train a classifier. # Upload, extract features, classify # Score count per point should be label count or 5, # whichever is less. (In this case 5) # Or apparently in rare cases there may be less than 5, possibly # since the scores are integers? # But the point is that there shouldn't be 8 scores. Classify an image which has already been machine-classified previously. # 1 list per point; 1 float score per label per point. # This would classify as all A. # This would classify as 3 A's, 2 B's. # We'll just check the count of each label later to check # correctness of results, since assigning specific scores to # specific points is trickier to keep track of. # Upload # Extract features + classify with a particular set of scores # Accept another classifier. Override settings so that 1) we # don't need more images to train a new classifier, and 2) we don't # need improvement to mark a new classifier as accepted. # 1) Save classifier. 2) re-classify with a different set of # scores so that specific points get their labels changed (and # other points don't). Classify an image where some, but not all points have confirmed annotations. # Image without annotations # Add partial annotations # Process feature extraction results + classify image Attempt to classify an image where all points are confirmed. 
# Image with annotations # Process feature extraction results # Try to classify Check that the Scores and the labels assigned by classification are consistent with each other. # Upload, extract features, classify The image to be classified has two points with the same row/column. # Provide enough data for training # Add one image without annotations, including a duplicate point # Extract features # Train classifier + classify image Classify an image which has features saved in the legacy format. # Image without annotations # Process feature extraction results + classify image # Score count per point should be label count or 5, # whichever is less. (In this case it's label count) # Check the labels to make sure the mock was actually applied. For # legacy features, the scores are assumed to be in order of point pk. Test cases where the task would abort before reaching the end. Try to classify a nonexistent image ID. # To get a nonexistent image ID, upload an image, get its ID, then # delete the image. # patch_logger is an undocumented Django test utility. It lets us check # logged messages. # https://stackoverflow.com/a/54055056 Try to classify an image without features extracted. Try to classify an image without a classifier for the source. # Extract features # Try to classify Get an IntegrityError when saving annotations. # Raise an IntegrityError on the FIRST call only. We want to get an # IntegrityError the first time and then do fine the second time. # Due to auto-retries and CELERY_ALWAYS_EAGER, if we always raised # the error, we'd infinite-loop. # This is a simple saving case (for brevity) which works for this # particular test. # Extract features + classify | 2.487229 | 2 |
train_las.py | HappyBall/asr_guided_tacotron | 5 | 6620863 | '''
modified from:
https://www.github.com/kyubyong/tacotron
'''
import os
import sys
import numpy as np
from hyperparams import Hyperparams as hp
import tensorflow as tf
from tqdm import tqdm
from utils import *
from graph import Graph
# Command-line flags. keep_train is a *string* flag ("True"/"False"), so it
# is compared against the literal "True" below rather than treated as a bool.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    "keep_train", "False", "keep training from existed model or not")

if __name__ == '__main__':
    keep_train = FLAGS.keep_train

    g = Graph(mode='train_las')
    print("Training Graph loaded")

    if not os.path.isdir(hp.las_logdir):
        os.makedirs(hp.las_logdir)

    # Savers: `saver` checkpoints the full graph every 10 epochs;
    # `saver_las` restores only the LAS variables when resuming.
    saver = tf.train.Saver(max_to_keep=10)
    saver_las = tf.train.Saver(var_list=g.las_variable)
    init = tf.global_variables_initializer()

    # Keep the log file open only for the duration of training. The original
    # opened it and never closed it, so buffered lines could be lost on exit.
    with open(hp.las_logfile, "a") as logfile, tf.Session() as sess:
        writer = tf.summary.FileWriter(hp.las_logdir, graph=sess.graph)
        sess.run(init)
        print('finish init model')

        if keep_train == "True":
            saver_las.restore(sess, tf.train.latest_checkpoint(hp.las_logdir))
            print("Continue training from existed latest model...")

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        for epoch in range(1, hp.las_num_epochs + 1):
            total_loss = 0.0
            for _ in tqdm(range(g.num_batch), total=g.num_batch,
                          ncols=70, leave=False, unit='b'):
                _, gs, l = sess.run([g.train_op, g.global_step, g.loss])
                total_loss += l

                # Every 1000 global steps, plot the first attention
                # alignment of the batch for logging.
                if gs % 1000 == 0:
                    al = sess.run(g.alignments_las)
                    plot_alignment(al[0], gs, hp.las_logdir, name='las')

            avg_loss = total_loss / float(g.num_batch)
            print("Epoch " + str(epoch) + " average loss: "
                  + str(avg_loss) + "\n")
            sys.stdout.flush()
            logfile.write("Epoch " + str(epoch) + " average loss: "
                          + str(avg_loss) + "\n")
            logfile.flush()  # make progress visible while training runs

            # Write checkpoint files every 10 epochs.
            if epoch % 10 == 0:
                saver.save(
                    sess, hp.las_logdir + '/model_epoch_{}.ckpt'.format(epoch))

            # NOTE(review): summaries are written once per epoch here; the
            # source's indentation was ambiguous about this — confirm.
            result = sess.run(g.merged)
            writer.add_summary(result, epoch)

        coord.request_stop()
        coord.join(threads)
        writer.close()
        print("Done")

# TODO: add dropout
# TODO: use diff attention
| '''
modified from:
https://www.github.com/kyubyong/tacotron
'''
import os
import sys
import numpy as np
from hyperparams import Hyperparams as hp
import tensorflow as tf
from tqdm import tqdm
from utils import *
from graph import Graph
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("keep_train", "False", "keep training from existed model or not")
# Entry point: trains the LAS (listen-attend-spell) branch of the model.
if __name__ == '__main__':
    keep_train = FLAGS.keep_train  # "True"/"False" string flag: resume from latest checkpoint
    g = Graph(mode='train_las'); print("Training Graph loaded")
    if not os.path.isdir(hp.las_logdir):
        os.makedirs(hp.las_logdir)
    logfile = open(hp.las_logfile, "a")
    # Full-model saver keeps the last 10 epoch checkpoints; a second saver
    # restores only the LAS variables when resuming.
    saver = tf.train.Saver(max_to_keep=10)
    saver_las = tf.train.Saver(var_list=g.las_variable)
    init = tf.global_variables_initializer()
    #sv = tf.train.Supervisor(las_logdir=hp.las_logdir, save_summaries_secs=60, save_model_secs=0)
    with tf.Session() as sess:
        #while 1:
        writer = tf.summary.FileWriter(hp.las_logdir, graph = sess.graph)
        sess.run(init)
        print('finish init model')
        if keep_train == "True":
            # Resume: restore LAS weights from the newest checkpoint in the log dir.
            saver_las.restore(sess, tf.train.latest_checkpoint(hp.las_logdir))
            print("Continue training from existed latest model...")
        # Input pipeline uses TF1 queue runners; coordinator shuts them down cleanly.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        #lr = hp.las_lr
        previous_total_loss = np.inf
        for epoch in range(1, hp.las_num_epochs + 1):
            total_loss = 0.0
            for _ in tqdm(range(g.num_batch), total=g.num_batch, ncols=70, leave=False, unit='b'):
                #_, gs = sess.run([g.train_op, g.global_step])
                #_, gs, l = sess.run([g.train_op, g.global_step, g.loss], feed_dict={g.lr:lr})
                _, gs, l = sess.run([g.train_op, g.global_step, g.loss])
                total_loss += l
                # Write checkpoint files
                if gs % 1000 == 0:
                    #sv.saver.save(sess, hp.las_logdir + '/model_gs_{}k'.format(gs//1000))
                    # plot the first alignment for logging
                    al = sess.run(g.alignments_las)
                    plot_alignment(al[0], gs, hp.las_logdir, name='las')
            #if total_loss > previous_total_loss:
            #    lr = lr*hp.las_lr_decay
            #    print('decay learning rate by:', hp.las_lr_decay, 'now lr:', lr)
            #previous_total_loss = total_loss
            # Per-epoch average loss goes to stdout and the append-mode log file.
            print("Epoch " + str(epoch) + " average loss: " + str(total_loss/float(g.num_batch)) + "\n")
            sys.stdout.flush()
            logfile.write("Epoch " + str(epoch) + " average loss: " + str(total_loss/float(g.num_batch)) + "\n")
            # Write checkpoint files
            if epoch % 10 == 0:
                #sv.saver.save(sess, hp.las_logdir + '/model_gs_{}k'.format(gs//1000))
                saver.save(sess, hp.las_logdir + '/model_epoch_{}.ckpt'.format(epoch))
            #result = sess.run(g.merged, feed_dict={g.lr:lr})
            result = sess.run(g.merged)
            writer.add_summary(result, epoch)
        coord.request_stop()
        coord.join(threads)
        print("Done")
# add dropout
# use diff attention
| en | 0.442212 | modified from: https://www.github.com/kyubyong/tacotron #sv = tf.train.Supervisor(las_logdir=hp.las_logdir, save_summaries_secs=60, save_model_secs=0) #while 1: #lr = hp.las_lr #_, gs = sess.run([g.train_op, g.global_step]) #_, gs, l = sess.run([g.train_op, g.global_step, g.loss], feed_dict={g.lr:lr}) # Write checkpoint files #sv.saver.save(sess, hp.las_logdir + '/model_gs_{}k'.format(gs//1000)) # plot the first alignment for logging #if total_loss > previous_total_loss: # lr = lr*hp.las_lr_decay # print('decay learning rate by:', hp.las_lr_decay, 'now lr:', lr) #previous_total_loss = total_loss # Write checkpoint files #sv.saver.save(sess, hp.las_logdir + '/model_gs_{}k'.format(gs//1000)) #result = sess.run(g.merged, feed_dict={g.lr:lr}) # add dropout # use diff attention | 2.033066 | 2 |
pipe/tools/houtools/rollback/rollback.py | htinney/byupipe | 5 | 6620864 | <filename>pipe/tools/houtools/rollback/rollback.py
import hou
import os
# from PySide2 import QtGui, QtWidgets, QtCore
import pipe.gui.quick_dialogs as qd
import pipe.gui.select_from_list as sfl
from pipe.tools.houtools.utils.utils import *
from pipe.tools.houtools.importer.importer import Importer
from pipe.am.project import Project
from pipe.am.body import Body
from pipe.am.element import Element
from pipe.am.environment import Department
from pipe.am.environment import Environment
class Rollback:
    """Roll a department element back to a previously published HDA version.

    Shows a picker of prior publishes for the element; when the user selects
    one, the current Houdini node is destroyed, the chosen publish file is
    installed, and a fresh node is created and re-wired into the network.
    """

    def __init__(self):
        self.node = None  # the Houdini node being rolled back
        self.publishes = None  # raw publish tuples: (user, timestamp, comment, path)
        self.sanitized_publish_list = None  # display labels shown in the picker
        self.item_gui = None  # keep a reference so the dialog is not garbage-collected

    def rollback_element(self, node, department, name):
        """Collect HDA publishes for body *name* / *department* and show the picker.

        The picker result is delivered asynchronously to publish_selection_results.
        """
        self.node = node
        self.department = department

        project = Project()
        body = project.get_body(name)
        element = body.get_element(department)
        self.publishes = element.list_publishes()
        print("publishes: ", self.publishes)

        if not self.publishes:
            qd.error("There have been no publishes for this department. Rollback failed.")
            return

        # Make the list a list of strings, not tuples; only HDA publishes qualify.
        self.sanitized_publish_list = []
        for publish in self.publishes:
            path = publish[3]
            file_ext = path.split('.')[-1]
            if not file_ext == "hda" and not file_ext == "hdanc":
                continue
            label = publish[0] + " " + publish[1] + " " + publish[2]
            self.sanitized_publish_list.append(label)

        self.item_gui = sfl.SelectFromList(l=self.sanitized_publish_list, parent=houdini_main_window(), title="Select publish to clone")
        self.item_gui.submitted.connect(self.publish_selection_results)

    def get_definition_by_department(self, source_path):
        """Return the HDA definition in *source_path* for the current department.

        Returns None for departments without a dcc_* asset type.
        NOTE(review): currently unused by publish_selection_results, which takes
        the first definition in the file instead.
        """
        definition = None
        print("dept: ", self.department)
        if self.department == Department.MATERIAL:
            definition = hou.hdaDefinition(hou.sopNodeTypeCategory(), "dcc_material", source_path)
        elif self.department == Department.MODIFY:
            definition = hou.hdaDefinition(hou.sopNodeTypeCategory(), "dcc_modify", source_path)
        elif self.department == Department.HAIR:
            definition = hou.hdaDefinition(hou.objNodeTypeCategory(), "dcc_hair", source_path)
        elif self.department == Department.CLOTH:
            definition = hou.hdaDefinition(hou.objNodeTypeCategory(), "dcc_cloth", source_path)
        return definition

    def publish_selection_results(self, value):
        """Picker callback: replace self.node with the publish the user chose."""
        selected_publish = None
        for item in self.sanitized_publish_list:
            if value[0] == item:
                selected_publish = item

        # Map the chosen label back to the publish's file path.
        selected_file = None
        for publish in self.publishes:
            label = publish[0] + " " + publish[1] + " " + publish[2]
            if label == selected_publish:
                selected_file = publish[3]
        print("selected file: ", selected_file)

        definitions = hou.hda.definitionsInFile(selected_file)
        definition = definitions[0]

        # Remember where the node lived and what type it was, then remove it.
        parent = self.node.parent()
        print("node: ", self.node, str(self.node))
        type = self.node.type().name()
        print("type: ", str(type))
        self.node.destroy()

        hou.hda.installFile(selected_file)
        hou.hda.reloadFile(selected_file)
        print("def: ", definition)
        definition.setPreferred(True)

        new_node = parent.createNode(str(type), node_name=self.department)
        print("new node: ", new_node)
        new_node.allowEditingOfContents()

        geo = parent.node("geo")
        # FIX: shot_modeling was referenced below without ever being defined,
        # which raised a NameError in the MODIFY/MATERIAL branches. Look it up
        # the same way the sibling nodes (geo/material/modify) are looked up.
        shot_modeling = parent.node("shot_modeling")
        if geo is None:
            qd.error("There should be a geo network. Something went wrong, so you'll need to place the node manually.")
            parent.layoutChildren()
            return

        if self.department == Department.HAIR or self.department == Department.CLOTH:
            new_node.setInput(0, geo)
        elif self.department == Department.MODIFY:
            # If there is a material node, put the modify node in between material and geo.
            material = parent.node("material")
            if material is not None:
                new_node.setInput(0, geo)
                material.setInput(0, new_node)
            else:  # Else, stick it between geo and shot_modeling.
                new_node.setInput(0, geo)
                shot_modeling.setInput(0, new_node)
        elif self.department == Department.MATERIAL:
            # If there is a modify node, put the material node in between modify and shot_modeling.
            modify = parent.node("modify")
            if modify is not None:
                new_node.setInput(0, modify)
                shot_modeling.setInput(0, new_node)
            else:  # Else, stick it between geo and shot_modeling.
                new_node.setInput(0, geo)
                shot_modeling.setInput(0, new_node)

        parent.layoutChildren()

    def rollback_asset(self, node=None):
        """Not implemented yet."""
        pass

    def rollback_tool(self, node=None):
        """Not implemented yet."""
        pass

    def rollback_shot(self):
        """Not implemented yet."""
        pass
| <filename>pipe/tools/houtools/rollback/rollback.py
import hou
import os
# from PySide2 import QtGui, QtWidgets, QtCore
import pipe.gui.quick_dialogs as qd
import pipe.gui.select_from_list as sfl
from pipe.tools.houtools.utils.utils import *
from pipe.tools.houtools.importer.importer import Importer
from pipe.am.project import Project
from pipe.am.body import Body
from pipe.am.element import Element
from pipe.am.environment import Department
from pipe.am.environment import Environment
class Rollback:
    """Roll a department element back to a previously published HDA version.

    Shows a picker of prior publishes for the element; when the user selects
    one, the current Houdini node is destroyed, the chosen publish file is
    installed, and a fresh node is created and re-wired into the network.
    """

    def __init__(self):
        self.node = None  # the Houdini node being rolled back
        self.publishes = None  # raw publish tuples: (user, timestamp, comment, path)
        self.sanitized_publish_list = None  # display labels shown in the picker
        self.item_gui = None  # keep a reference so the dialog is not garbage-collected

    def rollback_element(self, node, department, name):
        """Collect HDA publishes for body *name* / *department* and show the picker.

        The picker result is delivered asynchronously to publish_selection_results.
        """
        self.node = node
        self.department = department

        project = Project()
        body = project.get_body(name)
        element = body.get_element(department)
        self.publishes = element.list_publishes()
        print("publishes: ", self.publishes)

        if not self.publishes:
            qd.error("There have been no publishes for this department. Rollback failed.")
            return

        # Make the list a list of strings, not tuples; only HDA publishes qualify.
        self.sanitized_publish_list = []
        for publish in self.publishes:
            path = publish[3]
            file_ext = path.split('.')[-1]
            if not file_ext == "hda" and not file_ext == "hdanc":
                continue
            label = publish[0] + " " + publish[1] + " " + publish[2]
            self.sanitized_publish_list.append(label)

        self.item_gui = sfl.SelectFromList(l=self.sanitized_publish_list, parent=houdini_main_window(), title="Select publish to clone")
        self.item_gui.submitted.connect(self.publish_selection_results)

    def get_definition_by_department(self, source_path):
        """Return the HDA definition in *source_path* for the current department.

        Returns None for departments without a dcc_* asset type.
        NOTE(review): currently unused by publish_selection_results, which takes
        the first definition in the file instead.
        """
        definition = None
        print("dept: ", self.department)
        if self.department == Department.MATERIAL:
            definition = hou.hdaDefinition(hou.sopNodeTypeCategory(), "dcc_material", source_path)
        elif self.department == Department.MODIFY:
            definition = hou.hdaDefinition(hou.sopNodeTypeCategory(), "dcc_modify", source_path)
        elif self.department == Department.HAIR:
            definition = hou.hdaDefinition(hou.objNodeTypeCategory(), "dcc_hair", source_path)
        elif self.department == Department.CLOTH:
            definition = hou.hdaDefinition(hou.objNodeTypeCategory(), "dcc_cloth", source_path)
        return definition

    def publish_selection_results(self, value):
        """Picker callback: replace self.node with the publish the user chose."""
        selected_publish = None
        for item in self.sanitized_publish_list:
            if value[0] == item:
                selected_publish = item

        # Map the chosen label back to the publish's file path.
        selected_file = None
        for publish in self.publishes:
            label = publish[0] + " " + publish[1] + " " + publish[2]
            if label == selected_publish:
                selected_file = publish[3]
        print("selected file: ", selected_file)

        definitions = hou.hda.definitionsInFile(selected_file)
        definition = definitions[0]

        # Remember where the node lived and what type it was, then remove it.
        parent = self.node.parent()
        print("node: ", self.node, str(self.node))
        type = self.node.type().name()
        print("type: ", str(type))
        self.node.destroy()

        hou.hda.installFile(selected_file)
        hou.hda.reloadFile(selected_file)
        print("def: ", definition)
        definition.setPreferred(True)

        new_node = parent.createNode(str(type), node_name=self.department)
        print("new node: ", new_node)
        new_node.allowEditingOfContents()

        geo = parent.node("geo")
        # FIX: shot_modeling was referenced below without ever being defined,
        # which raised a NameError in the MODIFY/MATERIAL branches. Look it up
        # the same way the sibling nodes (geo/material/modify) are looked up.
        shot_modeling = parent.node("shot_modeling")
        if geo is None:
            qd.error("There should be a geo network. Something went wrong, so you'll need to place the node manually.")
            parent.layoutChildren()
            return

        if self.department == Department.HAIR or self.department == Department.CLOTH:
            new_node.setInput(0, geo)
        elif self.department == Department.MODIFY:
            # If there is a material node, put the modify node in between material and geo.
            material = parent.node("material")
            if material is not None:
                new_node.setInput(0, geo)
                material.setInput(0, new_node)
            else:  # Else, stick it between geo and shot_modeling.
                new_node.setInput(0, geo)
                shot_modeling.setInput(0, new_node)
        elif self.department == Department.MATERIAL:
            # If there is a modify node, put the material node in between modify and shot_modeling.
            modify = parent.node("modify")
            if modify is not None:
                new_node.setInput(0, modify)
                shot_modeling.setInput(0, new_node)
            else:  # Else, stick it between geo and shot_modeling.
                new_node.setInput(0, geo)
                shot_modeling.setInput(0, new_node)

        parent.layoutChildren()

    def rollback_asset(self, node=None):
        """Not implemented yet."""
        pass

    def rollback_tool(self, node=None):
        """Not implemented yet."""
        pass

    def rollback_shot(self):
        """Not implemented yet."""
        pass
| en | 0.7796 | # from PySide2 import QtGui, QtWidgets, QtCore # make the list a list of strings, not tuples # source_path = self.node.type().sourcePath() # print("source path: ", source_path) # definition = self.get_definition_by_department(selected_file) # If there is a material node, put the modify node in between material and geo. # Else, stick it between geo and shot_modeling. # If there is a modify node, put the material node in between modify and shot_modeling. # Else, stick it between geo and shot_modeling. | 2.436257 | 2 |
afm/scripts/modeling/output/__init__.py | nerdneilsfield/2D-3D-pose-tracking | 288 | 6620865 | <reponame>nerdneilsfield/2D-3D-pose-tracking
from .output import build_output_method | from .output import build_output_method | none | 1 | 1.097726 | 1 | |
musicbotv2/plugins/play.py | dabolink/MusicBot | 0 | 6620866 | <gh_stars>0
import asyncio
import logging
import re
import time
import traceback
from musicbot import _func_, _get_variable, exceptions, factory
from musicbot.bot import MusicBot
from musicbot.constructs import Response
from musicbot.opus_loader import load_opus_lib
from musicbot.utils import fixg, ftimedelta
load_opus_lib()
log = logging.getLogger(__name__)
class PlayCmd():
    """Implements the {prefix}play command: queue a song, playlist, or search result.

    Fixes vs. the previous revision:
    - ``discord`` is not imported in this module, so the eager annotation
      ``message: discord.Message`` raised NameError at class-definition time;
      it is now a string (lazy) annotation.
    - Two bare ``except:`` clauses narrowed to ``except Exception:`` so
      KeyboardInterrupt/SystemExit are no longer swallowed.
    - Regex patterns with backslashes are now raw strings.
    """

    async def Run(self, bot: MusicBot, message: "discord.Message", player, channel, author, permissions, leftover_args, song_url):
        """
        Usage:
            {command_prefix}play song_link
            {command_prefix}play text to search for
            {command_prefix}play spotify_uri

        Adds the song to the playlist.  If a link is not provided, the first
        result from a youtube search is added to the queue.

        If enabled in the config, the bot will also support Spotify URIs, however
        it will use the metadata (e.g song name and artist) to find a YouTube
        equivalent of the song. Streaming from Spotify is not possible.
        """
        song_url = song_url.strip('<>')

        await bot.send_typing(channel)

        if leftover_args:
            song_url = ' '.join([song_url, *leftover_args])
        leftover_args = None  # prevent some crazy shit happening down the line

        # Make sure forward slashes work properly in search queries
        linksRegex = '((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)'
        pattern = re.compile(linksRegex)
        matchUrl = pattern.match(song_url)
        song_url = song_url.replace('/', '%2F') if matchUrl is None else song_url

        # Rewrite YouTube playlist URLs if the wrong URL type is given
        playlistRegex = r'watch\?v=.+&(list=[^&]+)'
        matches = re.search(playlistRegex, song_url)
        groups = matches.groups() if matches is not None else []
        song_url = "https://www.youtube.com/playlist?" + groups[0] if len(groups) > 0 else song_url

        if bot.config._spotify:
            if 'open.spotify.com' in song_url:
                # Convert an open.spotify.com URL into a spotify: URI.
                song_url = 'spotify:' + re.sub(r'(http[s]?:\/\/)?(open.spotify.com)\/', '', song_url).replace('/', ':')
                # remove session id (and other query stuff)
                song_url = re.sub(r'\?.*', '', song_url)
            if song_url.startswith('spotify:'):
                parts = song_url.split(":")
                try:
                    if 'track' in parts:
                        # Single track: search YouTube for "<artist> <title>".
                        res = await bot.spotify.get_track(parts[-1])
                        song_url = res['artists'][0]['name'] + ' ' + res['name']

                    elif 'album' in parts:
                        # Album: recursively enqueue each track by name search.
                        res = await bot.spotify.get_album(parts[-1])
                        await bot._do_playlist_checks(permissions, player, author, res['tracks']['items'])
                        procmesg = await bot.safe_send_message(channel, bot.str.get('cmd-play-spotify-album-process', 'Processing album `{0}` (`{1}`)').format(res['name'], song_url))
                        for i in res['tracks']['items']:
                            song_url = i['name'] + ' ' + i['artists'][0]['name']
                            log.debug('Processing {0}'.format(song_url))
                            await bot.cmd_play(message, player, channel, author, permissions, leftover_args, song_url)
                        await bot.safe_delete_message(procmesg)
                        return Response(bot.str.get('cmd-play-spotify-album-queued', "Enqueued `{0}` with **{1}** songs.").format(res['name'], len(res['tracks']['items'])))

                    elif 'playlist' in parts:
                        # Playlist: page through the Spotify API, then enqueue each track.
                        res = []
                        r = await bot.spotify.get_playlist_tracks(parts[-1])
                        while True:
                            res.extend(r['items'])
                            if r['next'] is not None:
                                r = await bot.spotify.make_spotify_req(r['next'])
                                continue
                            else:
                                break
                        await bot._do_playlist_checks(permissions, player, author, res)
                        procmesg = await bot.safe_send_message(channel, bot.str.get('cmd-play-spotify-playlist-process', 'Processing playlist `{0}` (`{1}`)').format(parts[-1], song_url))
                        for i in res:
                            song_url = i['track']['name'] + ' ' + i['track']['artists'][0]['name']
                            log.debug('Processing {0}'.format(song_url))
                            await bot.cmd_play(message, player, channel, author, permissions, leftover_args, song_url)
                        await bot.safe_delete_message(procmesg)
                        return Response(bot.str.get('cmd-play-spotify-playlist-queued', "Enqueued `{0}` with **{1}** songs.").format(parts[-1], len(res)))

                    else:
                        raise exceptions.CommandError(bot.str.get('cmd-play-spotify-unsupported', 'That is not a supported Spotify URI.'), expire_in=30)
                except exceptions.SpotifyError:
                    raise exceptions.CommandError(bot.str.get('cmd-play-spotify-invalid', 'You either provided an invalid URI, or there was a problem.'))

        # This lock prevent spamming play command to add entries that exceeds time limit/ maximum song limit
        async with bot.aiolocks[_func_() + ':' + str(author.id)]:
            if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
                raise exceptions.PermissionsError(
                    bot.str.get('cmd-play-limit', "You have reached your enqueued song limit ({0})").format(permissions.max_songs), expire_in=30
                )

            if player.karaoke_mode and not permissions.bypass_karaoke_mode:
                raise exceptions.PermissionsError(
                    bot.str.get('karaoke-enabled', "Karaoke mode is enabled, please try again when its disabled!"), expire_in=30
                )

            # Try to determine entry type, if _type is playlist then there should be entries
            while True:
                try:
                    info = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
                    # If there is an exception arise when processing we go on and let extract_info down the line report it
                    # because info might be a playlist and thing that's broke it might be individual entry
                    try:
                        info_process = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False)
                    except Exception:  # narrowed from bare except: don't eat KeyboardInterrupt
                        info_process = None

                    log.debug(info)

                    if info_process and info and info_process.get('_type', None) == 'playlist' and 'entries' not in info and not info.get('url', '').startswith('ytsearch'):
                        use_url = info_process.get('webpage_url', None) or info_process.get('url', None)
                        if use_url == song_url:
                            log.warning("Determined incorrect entry type, but suggested url is the same. Help.")
                            break  # If we break here it will break things down the line and give "This is a playlist" exception as a result

                        log.debug("Assumed url \"%s\" was a single entry, was actually a playlist" % song_url)
                        log.debug("Using \"%s\" instead" % use_url)
                        song_url = use_url
                    else:
                        break

                except Exception as e:
                    if 'unknown url type' in str(e):
                        # it's probably not actually an extractor
                        song_url = song_url.replace(':', '')
                        info = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
                    else:
                        raise exceptions.CommandError(e, expire_in=30)

            if not info:
                raise exceptions.CommandError(
                    bot.str.get('cmd-play-noinfo', "That video cannot be played. Try using the {0}stream command.").format(bot.config.command_prefix),
                    expire_in=30
                )

            if info.get('extractor', '') not in permissions.extractors and permissions.extractors:
                raise exceptions.PermissionsError(
                    bot.str.get('cmd-play-badextractor', "You do not have permission to play media from this service."), expire_in=30
                )

            # abstract the search handling away from the user
            # our ytdl options allow us to use search strings as input urls
            if info.get('url', '').startswith('ytsearch'):
                # print("[Command:play] Searching for \"%s\"" % song_url)
                info = await bot.downloader.extract_info(
                    player.playlist.loop,
                    song_url,
                    download=False,
                    process=True,  # ASYNC LAMBDAS WHEN
                    on_error=lambda e: asyncio.ensure_future(
                        bot.safe_send_message(channel, "```\n%s\n```" % e, expire_in=120), loop=bot.loop),
                    retry_on_error=True
                )

                if not info:
                    raise exceptions.CommandError(
                        bot.str.get('cmd-play-nodata', "Error extracting info from search string, youtubedl returned no data. "
                                                       "You may need to restart the bot if this continues to happen."), expire_in=30
                    )

                if not all(info.get('entries', [])):
                    # empty list, no data
                    log.debug("Got empty list, no data")
                    return

                # TODO: handle 'webpage_url' being 'ytsearch:...' or extractor type
                song_url = info['entries'][0]['webpage_url']
                info = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
                # Now I could just do: return await bot.cmd_play(player, channel, author, song_url)
                # But this is probably fine

            # If it's playlist
            if 'entries' in info:
                await bot._do_playlist_checks(permissions, player, author, info['entries'])

                num_songs = sum(1 for _ in info['entries'])

                if info['extractor'].lower() in ['youtube:playlist', 'soundcloud:set', 'bandcamp:album']:
                    try:
                        return await bot._cmd_play_playlist_async(player, channel, author, permissions, song_url, info['extractor'])
                    except exceptions.CommandError:
                        raise
                    except Exception as e:
                        log.error("Error queuing playlist", exc_info=True)
                        raise exceptions.CommandError(bot.str.get('cmd-play-playlist-error', "Error queuing playlist:\n`{0}`").format(e), expire_in=30)

                t0 = time.time()

                # My test was 1.2 seconds per song, but we maybe should fudge it a bit, unless we can
                # monitor it and edit the message with the estimated time, but that's some ADVANCED SHIT
                # I don't think we can hook into it anyways, so this will have to do.
                # It would probably be a thread to check a few playlists and get the speed from that
                # Different playlists might download at different speeds though
                wait_per_song = 1.2

                procmesg = await bot.safe_send_message(
                    channel,
                    bot.str.get('cmd-play-playlist-gathering-1', 'Gathering playlist information for {0} songs{1}').format(
                        num_songs,
                        bot.str.get('cmd-play-playlist-gathering-2', ', ETA: {0} seconds').format(fixg(
                            num_songs * wait_per_song)) if num_songs >= 10 else '.'))

                # We don't have a pretty way of doing this yet.  We need either a loop
                # that sends these every 10 seconds or a nice context manager.
                await bot.send_typing(channel)

                # TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified
                # Also have a "verify_entry" hook with the entry as an arg and returns the entry if its ok
                entry_list, position = await player.playlist.import_from(song_url, channel=channel, author=author)

                tnow = time.time()
                ttime = tnow - t0
                listlen = len(entry_list)
                drop_count = 0

                if permissions.max_song_length:
                    for e in entry_list.copy():
                        if e.duration > permissions.max_song_length:
                            player.playlist.entries.remove(e)
                            entry_list.remove(e)
                            drop_count += 1
                            # Im pretty sure there's no situation where this would ever break
                            # Unless the first entry starts being played, which would make this a race condition
                    if drop_count:
                        print("Dropped %s songs" % drop_count)

                log.info("Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
                    listlen,
                    fixg(ttime),
                    ttime / listlen if listlen else 0,
                    ttime / listlen - wait_per_song if listlen - wait_per_song else 0,
                    fixg(wait_per_song * num_songs))
                )

                await bot.safe_delete_message(procmesg)

                if not listlen - drop_count:
                    raise exceptions.CommandError(
                        bot.str.get('cmd-play-playlist-maxduration', "No songs were added, all songs were over max duration (%ss)") % permissions.max_song_length,
                        expire_in=30
                    )

                reply_text = bot.str.get('cmd-play-playlist-reply', "Enqueued **%s** songs to be played. Position in queue: %s")
                btext = str(listlen - drop_count)

            # If it's an entry
            else:
                # youtube:playlist extractor but it's actually an entry
                if info.get('extractor', '').startswith('youtube:playlist'):
                    try:
                        info = await bot.downloader.extract_info(player.playlist.loop, 'https://www.youtube.com/watch?v=%s' % info.get('url', ''), download=False, process=False)
                    except Exception as e:
                        raise exceptions.CommandError(e, expire_in=30)

                if permissions.max_song_length and info.get('duration', 0) > permissions.max_song_length:
                    raise exceptions.PermissionsError(
                        bot.str.get('cmd-play-song-limit', "Song duration exceeds limit ({0} > {1})").format(info['duration'], permissions.max_song_length),
                        expire_in=30
                    )

                entry, position = await player.playlist.add_entry(song_url, channel=channel, author=author)

                reply_text = bot.str.get('cmd-play-song-reply', "Enqueued `%s` to be played. Position in queue: %s")
                btext = entry.title

            if position == 1 and player.is_stopped:
                position = bot.str.get('cmd-play-next', 'Up next!')
                reply_text %= (btext, position)
            else:
                try:
                    time_until = await player.playlist.estimate_time_until(position, player)
                    reply_text += bot.str.get('cmd-play-eta', ' - estimated time until playing: %s')
                except Exception:  # narrowed from bare except: don't eat KeyboardInterrupt
                    traceback.print_exc()
                    time_until = ''

                reply_text %= (btext, position, ftimedelta(time_until))

        return Response(reply_text, delete_after=30)
def register() -> None:
    """Register the play command (alias: p) with the plugin factory."""
    aliases = ["p"]
    factory.register("play", PlayCmd, aliases)
| import asyncio
import logging
import re
import time
import traceback
from musicbot import _func_, _get_variable, exceptions, factory
from musicbot.bot import MusicBot
from musicbot.constructs import Response
from musicbot.opus_loader import load_opus_lib
from musicbot.utils import fixg, ftimedelta
load_opus_lib()
log = logging.getLogger(__name__)
class PlayCmd():
async def Run(self, bot: MusicBot, message: discord.Message, player, channel, author, permissions, leftover_args, song_url):
"""
Usage:
{command_prefix}play song_link
{command_prefix}play text to search for
{command_prefix}play spotify_uri
Adds the song to the playlist. If a link is not provided, the first
result from a youtube search is added to the queue.
If enabled in the config, the bot will also support Spotify URIs, however
it will use the metadata (e.g song name and artist) to find a YouTube
equivalent of the song. Streaming from Spotify is not possible.
"""
song_url = song_url.strip('<>')
await bot.send_typing(channel)
if leftover_args:
song_url = ' '.join([song_url, *leftover_args])
leftover_args = None # prevent some crazy shit happening down the line
# Make sure forward slashes work properly in search queries
linksRegex = '((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)'
pattern = re.compile(linksRegex)
matchUrl = pattern.match(song_url)
song_url = song_url.replace(
'/', '%2F') if matchUrl is None else song_url
# Rewrite YouTube playlist URLs if the wrong URL type is given
playlistRegex = r'watch\?v=.+&(list=[^&]+)'
matches = re.search(playlistRegex, song_url)
groups = matches.groups() if matches is not None else []
song_url = "https://www.youtube.com/playlist?" + \
groups[0] if len(groups) > 0 else song_url
if bot.config._spotify:
if 'open.spotify.com' in song_url:
song_url = 'spotify:' + \
re.sub('(http[s]?:\/\/)?(open.spotify.com)\/',
'', song_url).replace('/', ':')
# remove session id (and other query stuff)
song_url = re.sub('\?.*', '', song_url)
if song_url.startswith('spotify:'):
parts = song_url.split(":")
try:
if 'track' in parts:
res = await bot.spotify.get_track(parts[-1])
song_url = res['artists'][0]['name'] + \
' ' + res['name']
elif 'album' in parts:
res = await bot.spotify.get_album(parts[-1])
await bot._do_playlist_checks(permissions, player, author, res['tracks']['items'])
procmesg = await bot.safe_send_message(channel, bot.str.get('cmd-play-spotify-album-process', 'Processing album `{0}` (`{1}`)').format(res['name'], song_url))
for i in res['tracks']['items']:
song_url = i['name'] + ' ' + \
i['artists'][0]['name']
log.debug('Processing {0}'.format(song_url))
await bot.cmd_play(message, player, channel, author, permissions, leftover_args, song_url)
await bot.safe_delete_message(procmesg)
return Response(bot.str.get('cmd-play-spotify-album-queued', "Enqueued `{0}` with **{1}** songs.").format(res['name'], len(res['tracks']['items'])))
elif 'playlist' in parts:
res = []
r = await bot.spotify.get_playlist_tracks(parts[-1])
while True:
res.extend(r['items'])
if r['next'] is not None:
r = await bot.spotify.make_spotify_req(r['next'])
continue
else:
break
await bot._do_playlist_checks(permissions, player, author, res)
procmesg = await bot.safe_send_message(channel, bot.str.get('cmd-play-spotify-playlist-process', 'Processing playlist `{0}` (`{1}`)').format(parts[-1], song_url))
for i in res:
song_url = i['track']['name'] + ' ' + \
i['track']['artists'][0]['name']
log.debug('Processing {0}'.format(song_url))
await bot.cmd_play(message, player, channel, author, permissions, leftover_args, song_url)
await bot.safe_delete_message(procmesg)
return Response(bot.str.get('cmd-play-spotify-playlist-queued', "Enqueued `{0}` with **{1}** songs.").format(parts[-1], len(res)))
else:
raise exceptions.CommandError(bot.str.get(
'cmd-play-spotify-unsupported', 'That is not a supported Spotify URI.'), expire_in=30)
except exceptions.SpotifyError:
raise exceptions.CommandError(bot.str.get(
'cmd-play-spotify-invalid', 'You either provided an invalid URI, or there was a problem.'))
# This lock prevent spamming play command to add entries that exceeds time limit/ maximum song limit
async with bot.aiolocks[_func_() + ':' + str(author.id)]:
if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
raise exceptions.PermissionsError(
bot.str.get('cmd-play-limit', "You have reached your enqueued song limit ({0})").format(permissions.max_songs), expire_in=30
)
if player.karaoke_mode and not permissions.bypass_karaoke_mode:
raise exceptions.PermissionsError(
bot.str.get('karaoke-enabled', "Karaoke mode is enabled, please try again when its disabled!"), expire_in=30
)
# Try to determine entry type, if _type is playlist then there should be entries
while True:
try:
info = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
# If an exception arises during processing, carry on and let extract_info down the line report it,
# because info might be a playlist and the thing that broke might be an individual entry
try:
info_process = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False)
except:
info_process = None
log.debug(info)
if info_process and info and info_process.get('_type', None) == 'playlist' and 'entries' not in info and not info.get('url', '').startswith('ytsearch'):
use_url = info_process.get(
'webpage_url', None) or info_process.get('url', None)
if use_url == song_url:
log.warning(
"Determined incorrect entry type, but suggested url is the same. Help.")
break # If we break here it will break things down the line and give "This is a playlist" exception as a result
log.debug(
"Assumed url \"%s\" was a single entry, was actually a playlist" % song_url)
log.debug("Using \"%s\" instead" % use_url)
song_url = use_url
else:
break
except Exception as e:
if 'unknown url type' in str(e):
# it's probably not actually an extractor
song_url = song_url.replace(':', '')
info = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
else:
raise exceptions.CommandError(e, expire_in=30)
if not info:
raise exceptions.CommandError(
bot.str.get(
'cmd-play-noinfo', "That video cannot be played. Try using the {0}stream command.").format(bot.config.command_prefix),
expire_in=30
)
if info.get('extractor', '') not in permissions.extractors and permissions.extractors:
raise exceptions.PermissionsError(
bot.str.get('cmd-play-badextractor', "You do not have permission to play media from this service."), expire_in=30
)
# abstract the search handling away from the user
# our ytdl options allow us to use search strings as input urls
if info.get('url', '').startswith('ytsearch'):
# print("[Command:play] Searching for \"%s\"" % song_url)
info = await bot.downloader.extract_info(
player.playlist.loop,
song_url,
download=False,
process=True, # ASYNC LAMBDAS WHEN
on_error=lambda e: asyncio.ensure_future(
bot.safe_send_message(channel, "```\n%s\n```" % e, expire_in=120), loop=bot.loop),
retry_on_error=True
)
if not info:
raise exceptions.CommandError(
bot.str.get('cmd-play-nodata', "Error extracting info from search string, youtubedl returned no data. "
"You may need to restart the bot if this continues to happen."), expire_in=30
)
if not all(info.get('entries', [])):
# empty list, no data
log.debug("Got empty list, no data")
return
# TODO: handle 'webpage_url' being 'ytsearch:...' or extractor type
song_url = info['entries'][0]['webpage_url']
info = await bot.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
# Now I could just do: return await bot.cmd_play(player, channel, author, song_url)
# But this is probably fine
# If it's playlist
if 'entries' in info:
await bot._do_playlist_checks(permissions, player, author, info['entries'])
num_songs = sum(1 for _ in info['entries'])
if info['extractor'].lower() in ['youtube:playlist', 'soundcloud:set', 'bandcamp:album']:
try:
return await bot._cmd_play_playlist_async(player, channel, author, permissions, song_url, info['extractor'])
except exceptions.CommandError:
raise
except Exception as e:
log.error("Error queuing playlist", exc_info=True)
raise exceptions.CommandError(bot.str.get(
'cmd-play-playlist-error', "Error queuing playlist:\n`{0}`").format(e), expire_in=30)
t0 = time.time()
# My test was 1.2 seconds per song, but we maybe should fudge it a bit, unless we can
# monitor it and edit the message with the estimated time, but that's some ADVANCED SHIT
# I don't think we can hook into it anyways, so this will have to do.
# It would probably be a thread to check a few playlists and get the speed from that
# Different playlists might download at different speeds though
wait_per_song = 1.2
procmesg = await bot.safe_send_message(
channel,
bot.str.get('cmd-play-playlist-gathering-1', 'Gathering playlist information for {0} songs{1}').format(
num_songs,
bot.str.get('cmd-play-playlist-gathering-2', ', ETA: {0} seconds').format(fixg(
num_songs * wait_per_song)) if num_songs >= 10 else '.'))
# We don't have a pretty way of doing this yet. We need either a loop
# that sends these every 10 seconds or a nice context manager.
await bot.send_typing(channel)
# TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified
# Also have a "verify_entry" hook with the entry as an arg and returns the entry if its ok
entry_list, position = await player.playlist.import_from(song_url, channel=channel, author=author)
tnow = time.time()
ttime = tnow - t0
listlen = len(entry_list)
drop_count = 0
if permissions.max_song_length:
for e in entry_list.copy():
if e.duration > permissions.max_song_length:
player.playlist.entries.remove(e)
entry_list.remove(e)
drop_count += 1
# I'm pretty sure there's no situation where this would ever break,
# unless the first entry starts being played, which would make this a race condition
if drop_count:
print("Dropped %s songs" % drop_count)
log.info("Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
listlen,
fixg(ttime),
ttime / listlen if listlen else 0,
ttime / listlen - wait_per_song if listlen - wait_per_song else 0,
fixg(wait_per_song * num_songs))
)
await bot.safe_delete_message(procmesg)
if not listlen - drop_count:
raise exceptions.CommandError(
bot.str.get('cmd-play-playlist-maxduration',
"No songs were added, all songs were over max duration (%ss)") % permissions.max_song_length,
expire_in=30
)
reply_text = bot.str.get(
'cmd-play-playlist-reply', "Enqueued **%s** songs to be played. Position in queue: %s")
btext = str(listlen - drop_count)
# If it's an entry
else:
# youtube:playlist extractor but it's actually an entry
if info.get('extractor', '').startswith('youtube:playlist'):
try:
info = await bot.downloader.extract_info(player.playlist.loop, 'https://www.youtube.com/watch?v=%s' % info.get('url', ''), download=False, process=False)
except Exception as e:
raise exceptions.CommandError(e, expire_in=30)
if permissions.max_song_length and info.get('duration', 0) > permissions.max_song_length:
raise exceptions.PermissionsError(
bot.str.get('cmd-play-song-limit', "Song duration exceeds limit ({0} > {1})").format(
info['duration'], permissions.max_song_length),
expire_in=30
)
entry, position = await player.playlist.add_entry(song_url, channel=channel, author=author)
reply_text = bot.str.get(
'cmd-play-song-reply', "Enqueued `%s` to be played. Position in queue: %s")
btext = entry.title
if position == 1 and player.is_stopped:
position = bot.str.get('cmd-play-next', 'Up next!')
reply_text %= (btext, position)
else:
try:
time_until = await player.playlist.estimate_time_until(position, player)
reply_text += bot.str.get('cmd-play-eta',
' - estimated time until playing: %s')
except:
traceback.print_exc()
time_until = ''
reply_text %= (btext, position, ftimedelta(time_until))
return Response(reply_text, delete_after=30)
def register() -> None:
    """Register the play command with the command factory.

    Makes ``PlayCmd`` available under the primary name ``"play"``,
    with ``"p"`` as a short alias.
    """
    aliases = ["p"]
    factory.register("play", PlayCmd, aliases)
# It would probably be a thread to check a few playlists and get the speed from that # Different playlists might download at different speeds though # We don't have a pretty way of doing this yet. We need either a loop # that sends these every 10 seconds or a nice context manager. # TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified # Also have a "verify_entry" hook with the entry as an arg and returns the entry if its ok # Im pretty sure there's no situation where this would ever break # Unless the first entry starts being played, which would make this a race condition # If it's an entry # youtube:playlist extractor but it's actually an entry | 2.541734 | 3 |
scratch/trial.py | JackBurdick/calcatrix | 0 | 6620867 | <gh_stars>0
from calcatrix.devices.multiview import MultiView # pylint: disable=import-error
from calcatrix.functions.photo import take_photo # pylint: disable=import-error
DIR_PIN = 27
STEP_PIN = 17
LOC_PIN = 26
ENABLE_PIN = 22
BOUND_A_PIN = 23
BOUND_B_PIN = 25
ROTATE_PINS = [21, 20, 16, 12]
init_config = {
"rotate": {"pins": ROTATE_PINS},
"linear": {
"stepper": {"dir": DIR_PIN, "step": STEP_PIN, "enable": ENABLE_PIN},
"location": {
"marker": {"pin": LOC_PIN},
"bound_a": {"pin": BOUND_A_PIN},
"bound_b": {"pin": BOUND_B_PIN},
},
"positions": {
# filepath is the location to store, init, if true will initialize the cart
# from the saved filepath, if present
# data = {"marker_positions": [], "current_position": 0}
"file_path": "/home/pi/dev/saved_positions/trial_0.pickle",
"init_from_file": True,
},
},
}
mv = MultiView(init_config=init_config)
mv.initialize()
print(mv)
mv.follow_all_instructions(func=take_photo)
| from calcatrix.devices.multiview import MultiView # pylint: disable=import-error
from calcatrix.functions.photo import take_photo # pylint: disable=import-error
DIR_PIN = 27
STEP_PIN = 17
LOC_PIN = 26
ENABLE_PIN = 22
BOUND_A_PIN = 23
BOUND_B_PIN = 25
ROTATE_PINS = [21, 20, 16, 12]
init_config = {
"rotate": {"pins": ROTATE_PINS},
"linear": {
"stepper": {"dir": DIR_PIN, "step": STEP_PIN, "enable": ENABLE_PIN},
"location": {
"marker": {"pin": LOC_PIN},
"bound_a": {"pin": BOUND_A_PIN},
"bound_b": {"pin": BOUND_B_PIN},
},
"positions": {
# filepath is the location to store, init, if true will initialize the cart
# from the saved filepath, if present
# data = {"marker_positions": [], "current_position": 0}
"file_path": "/home/pi/dev/saved_positions/trial_0.pickle",
"init_from_file": True,
},
},
}
mv = MultiView(init_config=init_config)
mv.initialize()
print(mv)
mv.follow_all_instructions(func=take_photo) | en | 0.608543 | # pylint: disable=import-error # pylint: disable=import-error # filepath is the location to store, init, if true will initialize the cart # from the saved filepath, if present # data = {"marker_positions": [], "current_position": 0} | 2.27896 | 2 |
tests/test_utils/test_kernel.py | schmidtjonathan/probfindiff | 3 | 6620868 | <gh_stars>1-10
"""Test for kernel functionality."""
import functools
import jax
import jax.numpy as jnp
import pytest
import pytest_cases
from probfindiff.utils import autodiff, kernel, kernel_zoo
def case_exponentiated_quadratic():
k = lambda x, y: jnp.exp(-(x - y).dot(x - y))
return kernel.batch_gram(k)[0]
def case_exponentiated_quadratic_builtin():
return kernel.batch_gram(kernel_zoo.exponentiated_quadratic)[0]
def case_differentiate_0():
k = lambda x, y: (x - y).dot(x - y)
return kernel.differentiate(k, L=autodiff.derivative)[0]
def case_differentiate_1():
k = lambda x, y: (x - y).dot(x - y)
return kernel.differentiate(k, L=autodiff.derivative)[1]
def case_differentiate_2():
k = lambda x, y: (x - y).dot(x - y)
return kernel.differentiate(k, L=autodiff.derivative)[2]
def case_polynomial_builtin():
k = functools.partial(kernel_zoo.polynomial, p=jnp.ones((3,)))
return kernel.batch_gram(k)[0]
@pytest_cases.parametrize_with_cases("k", cases=".")
def test_vectorize_gram_shapes(k):
xs = jnp.arange(8.0).reshape((4, 2))
ys = jnp.arange(12.0).reshape((6, 2))
assert k(xs, ys.T).shape == (4, 6)
@pytest.mark.parametrize("L, d, diffop_shape", ([jax.jacfwd, 2, (2,)],))
def test_kernel_batch_shape(L, d, diffop_shape):
k = kernel_zoo.exponentiated_quadratic
k_batch, lk_batch, llk_batch = kernel.differentiate(k, L=L)
num_xs, num_ys = 4, 3
xs = jnp.arange(1, 1 + d * num_xs, dtype=float).reshape((num_xs, d))
ys = jnp.arange(1, 1 + d * num_ys, dtype=float).reshape((num_ys, d))
k_shape = (num_xs, num_ys)
assert k_batch(xs, ys.T).shape == k_shape
assert lk_batch(xs, ys.T).shape == diffop_shape + k_shape
assert llk_batch(xs, ys.T).shape == diffop_shape + diffop_shape + k_shape
| """Test for kernel functionality."""
import functools
import jax
import jax.numpy as jnp
import pytest
import pytest_cases
from probfindiff.utils import autodiff, kernel, kernel_zoo
def case_exponentiated_quadratic():
k = lambda x, y: jnp.exp(-(x - y).dot(x - y))
return kernel.batch_gram(k)[0]
def case_exponentiated_quadratic_builtin():
return kernel.batch_gram(kernel_zoo.exponentiated_quadratic)[0]
def case_differentiate_0():
k = lambda x, y: (x - y).dot(x - y)
return kernel.differentiate(k, L=autodiff.derivative)[0]
def case_differentiate_1():
k = lambda x, y: (x - y).dot(x - y)
return kernel.differentiate(k, L=autodiff.derivative)[1]
def case_differentiate_2():
k = lambda x, y: (x - y).dot(x - y)
return kernel.differentiate(k, L=autodiff.derivative)[2]
def case_polynomial_builtin():
k = functools.partial(kernel_zoo.polynomial, p=jnp.ones((3,)))
return kernel.batch_gram(k)[0]
@pytest_cases.parametrize_with_cases("k", cases=".")
def test_vectorize_gram_shapes(k):
xs = jnp.arange(8.0).reshape((4, 2))
ys = jnp.arange(12.0).reshape((6, 2))
assert k(xs, ys.T).shape == (4, 6)
@pytest.mark.parametrize("L, d, diffop_shape", ([jax.jacfwd, 2, (2,)],))
def test_kernel_batch_shape(L, d, diffop_shape):
k = kernel_zoo.exponentiated_quadratic
k_batch, lk_batch, llk_batch = kernel.differentiate(k, L=L)
num_xs, num_ys = 4, 3
xs = jnp.arange(1, 1 + d * num_xs, dtype=float).reshape((num_xs, d))
ys = jnp.arange(1, 1 + d * num_ys, dtype=float).reshape((num_ys, d))
k_shape = (num_xs, num_ys)
assert k_batch(xs, ys.T).shape == k_shape
assert lk_batch(xs, ys.T).shape == diffop_shape + k_shape
assert llk_batch(xs, ys.T).shape == diffop_shape + diffop_shape + k_shape | en | 0.872861 | Test for kernel functionality. | 2.278377 | 2 |
R2.py | glosophy/Time-Series | 0 | 6620869 | <gh_stars>0
def R2(y_hat, y_true):
'''Calculates R^2'''
mean = np.mean(y_true)
numerator = []
denominator = []
for i in y_hat:
num = (i - mean) ** 2
numerator.append(num)
for j in y_true:
den = (j - mean) ** 2
denominator.append(den)
R2 = np.sum(numerator) / np.sum(denominator)
return R2
| def R2(y_hat, y_true):
'''Calculates R^2'''
mean = np.mean(y_true)
numerator = []
denominator = []
for i in y_hat:
num = (i - mean) ** 2
numerator.append(num)
for j in y_true:
den = (j - mean) ** 2
denominator.append(den)
R2 = np.sum(numerator) / np.sum(denominator)
return R2 | en | 0.684595 | Calculates R^2 | 3.224249 | 3 |
jaxopt/_src/objectives.py | mblondel/jaxopt | 0 | 6620870 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common objective functions."""
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt._src import loss
class CompositeLinearFunction:
"""A base class to represent composite linear functions.
These are functions of the form::
fun(params, *args, **kwargs) =
subfun(linop(params), *args, **kwargs) + vdot(params, b(*args, **kwargs))
where ``linop = make_linop(*args, **kwargs)``.
"""
def b(self, *args, **kwargs):
"""Linear term in the function."""
return None
def lipschitz_const(self, hyperparams):
"""Lipschitz-constant of subfun."""
raise NotImplementedError
def subfun(self, predictions, *args, **kwargs):
"""To be implemented by the child class."""
raise NotImplementedError
def __call__(self, params, *args, **kwargs):
linop = self.make_linop(*args, **kwargs)
predictions = linop.matvec(params)
ret = self.subfun(predictions, *args, **kwargs)
b = self.b(*args, **kwargs)
if b is not None:
ret += jnp.vdot(params, b)
return ret
class LeastSquaresFunction(CompositeLinearFunction):
"""Least squares objective class."""
def subfun(self, predictions, data):
y = data[1]
residuals = predictions - y
return 0.5 * jnp.mean(residuals ** 2)
def make_linop(self, data):
"""Creates linear operator."""
return base.LinearOperator(data[0])
def columnwise_lipschitz_const(self, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(data)
return linop.column_l2_norms(squared=True) * 1.0
least_squares = LeastSquaresFunction()
_logloss_vmap = jax.vmap(loss.multiclass_logistic_loss)
class MulticlassLogregFunction(CompositeLinearFunction):
"""Multiclass logistic regression objective class."""
def subfun(self, predictions, data):
y = data[1]
return jnp.mean(_logloss_vmap(y, predictions))
def make_linop(self, data):
"""Creates linear operator."""
return base.LinearOperator(data[0])
def columnwise_lipschitz_const(self, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(data)
return linop.column_l2_norms(squared=True) * 0.5
multiclass_logreg = MulticlassLogregFunction()
def multiclass_logreg_with_intercept(params, data):
X, y = data
W, b = params
y_pred = jnp.dot(X, W) + b
return jnp.mean(_logloss_vmap(y, y_pred))
def l2_multiclass_logreg(W, l2reg, data):
X, y = data
y_pred = jnp.dot(X, W)
return jnp.mean(_logloss_vmap(y, y_pred)) + 0.5 * l2reg * jnp.sum(W ** 2)
def l2_multiclass_logreg_with_intercept(params, l2reg, data):
X, y = data
W, b = params
y_pred = jnp.dot(X, W) + b
return jnp.mean(_logloss_vmap(y, y_pred)) + 0.5 * l2reg * jnp.sum(W ** 2)
_binary_logloss_vmap = jax.vmap(loss.binary_logistic_loss)
class BinaryLogregFunction(CompositeLinearFunction):
"""Binary logistic regression objective class."""
def subfun(self, predictions, data):
y = data[1]
return jnp.mean(_binary_logloss_vmap(y, predictions))
def make_linop(self, data):
"""Creates linear operator."""
return base.LinearOperator(data[0])
def columnwise_lipschitz_const(self, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(data)
return linop.column_l2_norms(squared=True) * 0.25
binary_logreg = BinaryLogregFunction()
class MulticlassLinearSvmDual(CompositeLinearFunction):
"""Dual objective function of multiclass linear SVMs."""
def subfun(self, Xbeta, l2reg, data):
X, Y = data
XY = jnp.dot(X.T, Y) # todo: avoid storing / computing this matrix.
# The dual objective is:
# fun(beta) = vdot(beta, 1 - Y) - 0.5 / l2reg * ||V(beta)||^2
# where V(beta) = dot(X.T, Y) - dot(X.T, beta).
V = XY - Xbeta
# With opposite sign, as we want to maximize.
return 0.5 / l2reg * jnp.vdot(V, V)
def make_linop(self, l2reg, data):
"""Creates linear operator."""
return base.LinearOperator(data[0].T)
def columnwise_lipschitz_const(self, l2reg, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(l2reg, data)
return linop.column_l2_norms(squared=True)
def b(self, l2reg, data):
return data[1] - 1
multiclass_linear_svm_dual = MulticlassLinearSvmDual()
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common objective functions."""
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt._src import loss
class CompositeLinearFunction:
"""A base class to represent composite linear functions.
These are functions of the form::
fun(params, *args, **kwargs) =
subfun(linop(params), *args, **kwargs) + vdot(params, b(*args, **kwargs))
where ``linop = make_linop(*args, **kwargs)``.
"""
def b(self, *args, **kwargs):
"""Linear term in the function."""
return None
def lipschitz_const(self, hyperparams):
"""Lipschitz-constant of subfun."""
raise NotImplementedError
def subfun(self, predictions, *args, **kwargs):
"""To be implemented by the child class."""
raise NotImplementedError
def __call__(self, params, *args, **kwargs):
linop = self.make_linop(*args, **kwargs)
predictions = linop.matvec(params)
ret = self.subfun(predictions, *args, **kwargs)
b = self.b(*args, **kwargs)
if b is not None:
ret += jnp.vdot(params, b)
return ret
class LeastSquaresFunction(CompositeLinearFunction):
"""Least squares objective class."""
def subfun(self, predictions, data):
y = data[1]
residuals = predictions - y
return 0.5 * jnp.mean(residuals ** 2)
def make_linop(self, data):
"""Creates linear operator."""
return base.LinearOperator(data[0])
def columnwise_lipschitz_const(self, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(data)
return linop.column_l2_norms(squared=True) * 1.0
least_squares = LeastSquaresFunction()
_logloss_vmap = jax.vmap(loss.multiclass_logistic_loss)
class MulticlassLogregFunction(CompositeLinearFunction):
"""Multiclass logistic regression objective class."""
def subfun(self, predictions, data):
y = data[1]
return jnp.mean(_logloss_vmap(y, predictions))
def make_linop(self, data):
"""Creates linear operator."""
return base.LinearOperator(data[0])
def columnwise_lipschitz_const(self, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(data)
return linop.column_l2_norms(squared=True) * 0.5
multiclass_logreg = MulticlassLogregFunction()
def multiclass_logreg_with_intercept(params, data):
X, y = data
W, b = params
y_pred = jnp.dot(X, W) + b
return jnp.mean(_logloss_vmap(y, y_pred))
def l2_multiclass_logreg(W, l2reg, data):
X, y = data
y_pred = jnp.dot(X, W)
return jnp.mean(_logloss_vmap(y, y_pred)) + 0.5 * l2reg * jnp.sum(W ** 2)
def l2_multiclass_logreg_with_intercept(params, l2reg, data):
X, y = data
W, b = params
y_pred = jnp.dot(X, W) + b
return jnp.mean(_logloss_vmap(y, y_pred)) + 0.5 * l2reg * jnp.sum(W ** 2)
_binary_logloss_vmap = jax.vmap(loss.binary_logistic_loss)
class BinaryLogregFunction(CompositeLinearFunction):
"""Binary logistic regression objective class."""
def subfun(self, predictions, data):
y = data[1]
return jnp.mean(_binary_logloss_vmap(y, predictions))
def make_linop(self, data):
"""Creates linear operator."""
return base.LinearOperator(data[0])
def columnwise_lipschitz_const(self, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(data)
return linop.column_l2_norms(squared=True) * 0.25
binary_logreg = BinaryLogregFunction()
class MulticlassLinearSvmDual(CompositeLinearFunction):
"""Dual objective function of multiclass linear SVMs."""
def subfun(self, Xbeta, l2reg, data):
X, Y = data
XY = jnp.dot(X.T, Y) # todo: avoid storing / computing this matrix.
# The dual objective is:
# fun(beta) = vdot(beta, 1 - Y) - 0.5 / l2reg * ||V(beta)||^2
# where V(beta) = dot(X.T, Y) - dot(X.T, beta).
V = XY - Xbeta
# With opposite sign, as we want to maximize.
return 0.5 / l2reg * jnp.vdot(V, V)
def make_linop(self, l2reg, data):
"""Creates linear operator."""
return base.LinearOperator(data[0].T)
def columnwise_lipschitz_const(self, l2reg, data):
"""Column-wise Lipschitz constants."""
linop = self.make_linop(l2reg, data)
return linop.column_l2_norms(squared=True)
def b(self, l2reg, data):
return data[1] - 1
multiclass_linear_svm_dual = MulticlassLinearSvmDual()
| en | 0.737061 | # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Common objective functions. A base class to represent composite linear functions. These are functions of the form:: fun(params, *args, **kwargs) = subfun(linop(params), *args, **kwargs) + vdot(params, b(*args, **kwargs)) where ``linop = make_linop(*args, **kwargs)``. Linear term in the function. Lipschitz-constant of subfun. To be implemented by the child class. Least squares objective class. Creates linear operator. Column-wise Lipschitz constants. Multiclass logistic regression objective class. Creates linear operator. Column-wise Lipschitz constants. Binary logistic regression objective class. Creates linear operator. Column-wise Lipschitz constants. Dual objective function of multiclass linear SVMs. # todo: avoid storing / computing this matrix. # The dual objective is: # fun(beta) = vdot(beta, 1 - Y) - 0.5 / l2reg * ||V(beta)||^2 # where V(beta) = dot(X.T, Y) - dot(X.T, beta). # With opposite sign, as we want to maximize. Creates linear operator. Column-wise Lipschitz constants. | 2.224108 | 2 |
template_problem_[code]/solution.py | alexandrustoica/leetcode | 0 | 6620871 | from typing import List
class Foo:
def foo(self):
pass
if __name__ == '__main__':
pass
| from typing import List
class Foo:
def foo(self):
pass
if __name__ == '__main__':
pass
| none | 1 | 1.735726 | 2 | |
message_recall/user_retriever.py | google/googleapps-message-recall | 12 | 6620872 | <gh_stars>10-100
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to search users using the Google Admin SDK API."""
import httplib
from apiclient.discovery import build
from apiclient.errors import HttpError
import credentials_utils
import log_utils
_LOG = log_utils.GetLogger('messagerecall.user_retriever')
_MAX_RESULT_PAGE_SIZE = 500 # Default is 100.
class DomainUserRetriever(object):
"""Class to organize large, multi-page user searches.
Uses http_utils to add error handling and retry using backoff.
"""
def __init__(self, owner_email, user_domain, email_query_prefix,
use_glob=False):
"""Initialize the search class.
Build the items needed to page through domain user lists which are expected
to be >100k users at times. Need a users collection object from the ADMIN
SDK to reference the search api and an authenticated http connection to
invoke it.
Args:
owner_email: String email address of the user who owns the task.
user_domain: String domain for our apps domain.
email_query_prefix: Admin SDK search query prefix (used in email:%s*).
use_glob: True if need to add * to the end of the search query.
"""
self._http = credentials_utils.GetAuthorizedHttp(owner_email)
self._user_domain = user_domain
self._email_query_prefix = email_query_prefix
self._search_query = 'email:%s' % email_query_prefix
if use_glob:
self._search_query += '*'
# Have seen the following error from build():
# 'DeadlineExceededError: The API call urlfetch.Fetch() took too long '
# 'to respond and was cancelled.'
directory_service = build('admin', 'directory_v1', http=self._http)
self._users_collection = directory_service.users()
def _FetchUserListPage(self, next_page_token=None):
"""Helper that handles exceptions retrieving pages of users.
Args:
next_page_token: Used for ongoing paging of users.
Returns:
List of users retrieved (one page with default page size: 100 users).
"""
# 'deleted' users are not examined.
# https://developers.google.com/admin-sdk/directory/v1/reference/users/list
request = self._users_collection.list(domain=self._user_domain,
maxResults=_MAX_RESULT_PAGE_SIZE,
query=self._search_query,
pageToken=next_page_token)
# Not infrequently seeing:
# 'HTTPException: Deadline exceeded while waiting for HTTP response '
# 'from URL: https://www.googleapis.com/admin/directory/v1/users'
# '?query=email%3A6%2A&domain=capgsfishing.com&alt=json&maxResults=500'
# Default socket timeout seems to be 5s so increasing it to 10s
# in GetAuthorizedHttp() seems to have helped.
return request.execute(http=self._http)
def GetUserAttributes(self, user_email):
"""Helper to retrieve user attributes from the Admin SDK API.
Args:
user_email: String email address of the form user@domain.com.
Returns:
Dictionary of user_attributes discovered.
Raises:
MessageRecallError: If unable to execute the API call.
"""
request = self._users_collection.get(userKey=user_email)
try:
return request.execute(
http=credentials_utils.GetAuthorizedHttp(user_email))
except (HttpError, httplib.HTTPException) as e:
if e.resp.status == 403: # If user is not an admin...
return {}
raise
def GetUserAttribute(self, user_email, attribute_tag):
"""Helper to retrieve one user attribute using the Admin SDK API.
Args:
user_email: String email address of the form <EMAIL>.
attribute_tag: String tag of the attribute to retrieve.
Returns:
Dictionary value of the user_attribute or None.
"""
return self.GetUserAttributes(user_email).get(attribute_tag)
def RetrieveDomainUsers(self):
"""Retrieves domain user list page by page and allows iteration of users.
Yields:
List of Strings: user emails of the next page of users or [].
"""
next_page_token = None
while True:
users_list = self._FetchUserListPage(next_page_token=next_page_token)
yield [(user['primaryEmail'], user['suspended'])
for user in users_list.get('users', [])
if user['primaryEmail'] and user['primaryEmail'].startswith(
self._email_query_prefix)]
next_page_token = users_list.get('nextPageToken')
if not next_page_token:
break
| # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to search users using the Google Admin SDK API."""
import httplib
from apiclient.discovery import build
from apiclient.errors import HttpError
import credentials_utils
import log_utils
_LOG = log_utils.GetLogger('messagerecall.user_retriever')
_MAX_RESULT_PAGE_SIZE = 500 # Default is 100.
class DomainUserRetriever(object):
"""Class to organize large, multi-page user searches.
Uses http_utils to add error handling and retry using backoff.
"""
def __init__(self, owner_email, user_domain, email_query_prefix,
use_glob=False):
"""Initialize the search class.
Build the items needed to page through domain user lists which are expected
to be >100k users at times. Need a users collection object from the ADMIN
SDK to reference the search api and an authenticated http connection to
invoke it.
Args:
owner_email: String email address of the user who owns the task.
user_domain: String domain for our apps domain.
email_query_prefix: Admin SDK search query prefix (used in email:%s*).
use_glob: True if need to add * to the end of the search query.
"""
self._http = credentials_utils.GetAuthorizedHttp(owner_email)
self._user_domain = user_domain
self._email_query_prefix = email_query_prefix
self._search_query = 'email:%s' % email_query_prefix
if use_glob:
self._search_query += '*'
# Have seen the following error from build():
# 'DeadlineExceededError: The API call urlfetch.Fetch() took too long '
# 'to respond and was cancelled.'
directory_service = build('admin', 'directory_v1', http=self._http)
self._users_collection = directory_service.users()
  def _FetchUserListPage(self, next_page_token=None):
    """Helper that handles exceptions retrieving pages of users.

    Args:
      next_page_token: Used for ongoing paging of users; None fetches the
        first page.

    Returns:
      The decoded users.list API response for one page of users (page size
      is _MAX_RESULT_PAGE_SIZE).
    """
    # 'deleted' users are not examined.
    # https://developers.google.com/admin-sdk/directory/v1/reference/users/list
    request = self._users_collection.list(domain=self._user_domain,
                                          maxResults=_MAX_RESULT_PAGE_SIZE,
                                          query=self._search_query,
                                          pageToken=next_page_token)
    # Not infrequently seeing:
    # 'HTTPException: Deadline exceeded while waiting for HTTP response '
    # 'from URL: https://www.googleapis.com/admin/directory/v1/users'
    # '?query=email%3A6%2A&domain=capgsfishing.com&alt=json&maxResults=500'
    # Default socket timeout seems to be 5s so increasing it to 10s
    # in GetAuthorizedHttp() seems to have helped.
    return request.execute(http=self._http)
def GetUserAttributes(self, user_email):
"""Helper to retrieve user attributes from the Admin SDK API.
Args:
user_email: String email address of the form user@domain.com.
Returns:
Dictionary of user_attributes discovered.
Raises:
MessageRecallError: If unable to execute the API call.
"""
request = self._users_collection.get(userKey=user_email)
try:
return request.execute(
http=credentials_utils.GetAuthorizedHttp(user_email))
except (HttpError, httplib.HTTPException) as e:
if e.resp.status == 403: # If user is not an admin...
return {}
raise
  def GetUserAttribute(self, user_email, attribute_tag):
    """Helper to retrieve one user attribute using the Admin SDK API.

    Args:
      user_email: String email address of the form user@domain.com.
      attribute_tag: String tag of the attribute to retrieve.

    Returns:
      Dictionary value of the user attribute, or None if the tag is absent.
    """
    return self.GetUserAttributes(user_email).get(attribute_tag)
def RetrieveDomainUsers(self):
"""Retrieves domain user list page by page and allows iteration of users.
Yields:
List of Strings: user emails of the next page of users or [].
"""
next_page_token = None
while True:
users_list = self._FetchUserListPage(next_page_token=next_page_token)
yield [(user['primaryEmail'], user['suspended'])
for user in users_list.get('users', [])
if user['primaryEmail'] and user['primaryEmail'].startswith(
self._email_query_prefix)]
next_page_token = users_list.get('nextPageToken')
if not next_page_token:
break | en | 0.782797 | # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Functions to search users using the Google Admin SDK API. # Default is 100. Class to organize large, multi-page user searches. Uses http_utils to add error handling and retry using backoff. Initialize the search class. Build the items needed to page through domain user lists which are expected to be >100k users at times. Need a users collection object from the ADMIN SDK to reference the search api and an authenticated http connection to invoke it. Args: owner_email: String email address of the user who owns the task. user_domain: String domain for our apps domain. email_query_prefix: Admin SDK search query prefix (used in email:%s*). use_glob: True if need to add * to the end of the search query. # Have seen the following error from build(): # 'DeadlineExceededError: The API call urlfetch.Fetch() took too long ' # 'to respond and was cancelled.' Helper that handles exceptions retrieving pages of users. Args: next_page_token: Used for ongoing paging of users. Returns: List of users retrieved (one page with default page size: 100 users). # 'deleted' users are not examined. 
# https://developers.google.com/admin-sdk/directory/v1/reference/users/list # Not infrequently seeing: # 'HTTPException: Deadline exceeded while waiting for HTTP response ' # 'from URL: https://www.googleapis.com/admin/directory/v1/users' # '?query=email%3A6%2A&domain=capgsfishing.com&alt=json&maxResults=500' # Default socket timeout seems to be 5s so increasing it to 10s # in GetAuthorizedHttp() seems to have helped. Helper to retrieve user attributes from the Admin SDK API. Args: user_email: String email address of the form user@domain.com. Returns: Dictionary of user_attributes discovered. Raises: MessageRecallError: If unable to execute the API call. # If user is not an admin... Helper to retrieve one user attribute using the Admin SDK API. Args: user_email: String email address of the form <EMAIL>. attribute_tag: String tag of the attribute to retrieve. Returns: Dictionary value of the user_attribute or None. Retrieves domain user list page by page and allows iteration of users. Yields: List of Strings: user emails of the next page of users or []. | 2.279559 | 2 |
agtc/code_generation/type_engine.py | PetarMihalj/AGT | 10 | 6620873 | <reponame>PetarMihalj/AGT<filename>agtc/code_generation/type_engine.py
import sys
from enum import Enum
from typing import Dict, List, Set, Tuple

from . import inference_errors as ierr
from . import type_system as ts
from .recursive_logger import RecursiveLogger
from .scope_manager import GlobalScopeManager
class TypingContext:
    """Shared state for the type-inference pass.

    Resolution results are memoized in ``function_type_container`` and
    ``concrete_type_container``.  ``visited_resolve_methods`` records every
    descriptor whose resolution has been started, so a second visit (i.e.
    unresolvable recursion) fails fast instead of looping forever.
    """

    def __init__(self, func_defs, struct_defs):
        self.func_defs = func_defs
        self.struct_defs = struct_defs
        self.scope_man = GlobalScopeManager()
        # Memoization caches, keyed by (name, type_args[, args]) descriptors.
        # NOTE: attribute annotations ARE evaluated at runtime (PEP 526), so
        # every name used here (``Set`` included) must be imported.
        self.function_type_container: Dict[Tuple, ts.FunctionType] = dict()
        self.concrete_type_container: Dict[Tuple, ts.Type] = dict()
        self.visited_resolve_methods: Set[Tuple] = set()
        self.code_blocks: List = []
        self.recursive_logger: RecursiveLogger = RecursiveLogger()

    def resolve_function(self,
                         name: str,
                         type_argument_types: Tuple[ts.Type],
                         argument_types: Tuple[ts.Type]
                         ) -> ts.FunctionType:
        """Resolve a function by name, type arguments and argument types.

        Candidates come from the registered function generators
        (``type_gens.func_methods``) and from matching user function
        definitions; exactly one candidate must remain.

        Raises:
            ierr.InferenceError: if this descriptor was already attempted, or
                zero / more than one candidate is found.
        """
        from . import type_gens  # local import to avoid a circular dependency
        desc = (
            name,
            type_argument_types,
            argument_types,
        )
        self.recursive_logger.go_in()
        self.recursive_logger.log(f"{desc} ???")

        # Cache inspection and modification.
        if desc in self.function_type_container:
            f = self.function_type_container[desc]
            self.recursive_logger.log(f"{desc} -> {f}")
            self.recursive_logger.go_out()
            return f
        elif desc in self.visited_resolve_methods:
            self.recursive_logger.go_out()
            raise ierr.InferenceError(f"Can not infer function {desc} (already tried)!")
        self.visited_resolve_methods.add(desc)

        # Function generators invocation.  A generator returning a tuple
        # marks its result as a lower-priority (default) candidate.
        candidates = []
        candidates_lower_prio = []
        for fm in type_gens.func_methods:
            try:
                res = fm(self, name, type_argument_types, argument_types)
                if isinstance(res, tuple):
                    candidates_lower_prio.append(res[0])
                else:
                    candidates.append(res)
            except ierr.TypeGenError:
                pass

        # Function definition resolution: only definitions whose name and
        # both arities match are tried.
        for fd in self.func_defs:
            if all([
                fd.name == name,
                len(fd.type_parameter_names) == len(type_argument_types),
                len(fd.parameter_names) == len(argument_types),
            ]):
                try:
                    r = fd.te_visit(self, type_argument_types, argument_types)
                    candidates.append(r)
                except ierr.ChoiceSkipError:
                    pass

        # Decision: a unique normal candidate wins; otherwise fall back to a
        # unique default candidate; anything else is an inference error.
        if len(candidates) == 1:
            self.function_type_container[desc] = candidates[0]
            self.recursive_logger.log(f"{desc} -> {candidates[0]}")
            self.recursive_logger.go_out()
            return candidates[0]
        elif len(candidates) == 0:
            if len(candidates_lower_prio) == 1:
                self.function_type_container[desc] = candidates_lower_prio[0]
                self.recursive_logger.log(f"{desc} -> {candidates_lower_prio[0]}")
                self.recursive_logger.go_out()
                return candidates_lower_prio[0]
            elif len(candidates_lower_prio) == 0:
                self.recursive_logger.go_out()
                raise ierr.InferenceError(f"Can not infer function {desc} (no candidates)!")
            else:
                self.recursive_logger.go_out()
                raise ierr.InferenceError(f"Can not infer function {desc} (too many DEFAULT candidates)!")
        else:
            self.recursive_logger.go_out()
            raise ierr.InferenceError(f"Can not infer function {desc} (too many candidates)!")

    def resolve_concrete(self,
                         name: str,
                         type_argument_types: Tuple[ts.Type],
                         ) -> ts.ConcreteType:
        """Resolve a concrete (struct) type by name and type arguments.

        Candidates come from the registered concrete-type generators
        (``type_gens.concrete_methods``) and from matching struct
        definitions; exactly one candidate must remain.

        Raises:
            ierr.InferenceError: if this descriptor was already attempted, or
                zero / more than one candidate is found.
        """
        from . import type_gens  # local import to avoid a circular dependency
        desc = (
            name,
            type_argument_types,
        )
        self.recursive_logger.go_in()
        self.recursive_logger.log(f"{desc} ???")

        # Cache inspection and modification.
        if desc in self.concrete_type_container:
            s = self.concrete_type_container[desc]
            self.recursive_logger.log(f"{desc} -> {s}")
            self.recursive_logger.go_out()
            return s
        elif desc in self.visited_resolve_methods:
            self.recursive_logger.go_out()
            raise ierr.InferenceError(f"Can not infer concrete type {desc} (already tried)!")
        self.visited_resolve_methods.add(desc)

        # Concrete generators invocation.
        candidates = []
        for sm in type_gens.concrete_methods:
            try:
                res = sm(self, name, type_argument_types)
                candidates.append(res)
            except ierr.TypeGenError:
                pass

        # Struct definition resolution: only definitions whose name and type
        # arity match are tried.
        for sd in self.struct_defs:
            if all([
                sd.name == name,
                len(sd.type_parameter_names) == len(type_argument_types),
            ]):
                try:
                    r = sd.te_visit(self, type_argument_types)
                    candidates.append(r)
                except ierr.ChoiceSkipError:
                    pass

        # Decision: exactly one candidate must remain.
        if len(candidates) == 1:
            self.concrete_type_container[desc] = candidates[0]
            self.recursive_logger.log(f"{desc} -> {candidates[0]}")
            self.recursive_logger.go_out()
            return candidates[0]
        elif len(candidates) == 0:
            self.recursive_logger.go_out()
            raise ierr.InferenceError(f"Can not infer concrete type {desc} (no candidates)!")
        else:
            self.recursive_logger.go_out()
            raise ierr.InferenceError(f"Can not infer concrete type {desc} (too many candidates)!")
class TypingResult:
    """Final outcome of a typing run: code blocks, log, and main's name."""

    def __init__(self, tc: TypingContext):
        """Capture the relevant outputs of *tc*.

        ``main_name`` is the mangled name of the resolved no-argument
        ``main`` function, or None if it was never resolved.
        """
        self.code_blocks = tc.code_blocks
        self.recursive_logger = tc.recursive_logger
        main_desc = ("main", (), ())
        resolved_main = tc.function_type_container.get(main_desc)
        self.main_name = resolved_main.mangled_name if resolved_main is not None else None
def run(func_defs, struct_defs):
    """Type-check a program, starting from its no-argument ``main``."""
    context = TypingContext(func_defs, struct_defs)
    context.resolve_function("main", (), ())
    return TypingResult(context)
| import sys
from typing import Dict, List, Tuple
from enum import Enum
from . import type_system as ts
from .recursive_logger import RecursiveLogger
from . import inference_errors as ierr
from .scope_manager import GlobalScopeManager
class TypingContext:
def __init__(self, func_defs, struct_defs):
self.func_defs = func_defs
self.struct_defs = struct_defs
self.scope_man = GlobalScopeManager()
self.function_type_container: Dict[Tuple, ts.FunctionType] = dict()
self.concrete_type_container: Dict[Tuple, ts.Type] = dict()
self.visited_resolve_methods: Set[Tuple] = set()
self.code_blocks: List = []
self.recursive_logger: RecursiveLogger = RecursiveLogger()
def resolve_function(self,
name: str,
type_argument_types: Tuple[ts.Type],
argument_types: Tuple[ts.Type]
) -> ts.FunctionType:
from . import type_gens
desc = (
name,
type_argument_types,
argument_types,
)
self.recursive_logger.go_in()
self.recursive_logger.log(f"{desc} ???")
# cache inspection and modificaiton
if desc in self.function_type_container:
f = self.function_type_container[desc]
self.recursive_logger.log(f"{desc} -> {f}")
self.recursive_logger.go_out()
return f
elif desc in self.visited_resolve_methods:
self.recursive_logger.go_out()
raise ierr.InferenceError(f"Can not infer function {desc} (already tried)!")
self.visited_resolve_methods.add(desc)
# function generators invocation
candidates = []
candidates_lower_prio = []
for fm in type_gens.func_methods:
try:
res = fm(self, name, type_argument_types, argument_types)
if isinstance(res, Tuple):
candidates_lower_prio.append(res[0])
else:
candidates.append(res)
except ierr.TypeGenError as e:
pass
# func definition resolution
for fd in self.func_defs:
fd: sa.FunctionDefinition
if all([
fd.name == name,
len(fd.type_parameter_names) == len(type_argument_types),
len(fd.parameter_names) == len(argument_types),
]):
try:
r = fd.te_visit(self, type_argument_types, argument_types)
candidates.append(r)
except ierr.ChoiceSkipError as cse:
pass
# decision
if len(candidates)==1:
self.function_type_container[desc] = candidates[0]
self.recursive_logger.log(f"{desc} -> {candidates[0]}")
self.recursive_logger.go_out()
return candidates[0]
elif len(candidates)==0:
if len(candidates_lower_prio)==1:
self.function_type_container[desc] = candidates_lower_prio[0]
self.recursive_logger.log(f"{desc} -> {candidates_lower_prio[0]}")
self.recursive_logger.go_out()
return candidates_lower_prio[0]
elif len(candidates_lower_prio)==0:
self.recursive_logger.go_out()
raise ierr.InferenceError(f"Can not infer function {desc} (no candidates)!")
else:
self.recursive_logger.go_out()
raise ierr.InferenceError(f"Can not infer function {desc} (too many DEFAULT candidates)!")
else:
self.recursive_logger.go_out()
raise ierr.InferenceError(f"Can not infer function {desc} (too many candidates)!")
def resolve_concrete(self,
name: str,
type_argument_types: Tuple[ts.Type],
) -> ts.ConcreteType:
from . import type_gens
desc = (
name,
type_argument_types,
)
self.recursive_logger.go_in()
self.recursive_logger.log(f"{desc} ???")
# cache inspection and modificaiton
if desc in self.concrete_type_container:
s = self.concrete_type_container[desc]
self.recursive_logger.log(f"{desc} -> {s}")
self.recursive_logger.go_out()
return s
elif desc in self.visited_resolve_methods:
self.recursive_logger.go_out()
raise ierr.InferenceError(f"Can not infer concrete type {desc} (already tried)!")
self.visited_resolve_methods.add(desc)
# concrete generators invocation
candidates = []
for sm in type_gens.concrete_methods:
try:
res = sm(self, name, type_argument_types)
candidates.append(res)
except ierr.TypeGenError as e:
pass
# struct definition resolution
for sd in self.struct_defs:
sd: sa.StructDefinition
if all([
sd.name == name,
len(sd.type_parameter_names) == len(type_argument_types),
]):
try:
r = sd.te_visit(self, type_argument_types)
candidates.append(r)
except ierr.ChoiceSkipError as cse:
pass
# decision
if len(candidates)==1:
self.concrete_type_container[desc] = candidates[0]
self.recursive_logger.log(f"{desc} -> {candidates[0]}")
self.recursive_logger.go_out()
return candidates[0]
elif len(candidates)==0:
self.recursive_logger.go_out()
raise ierr.InferenceError(f"Can not infer concrete type {desc} (no candidates)!")
else:
self.recursive_logger.go_out()
raise ierr.InferenceError(f"Can not infer concrete type {desc} (too many candidates)!")
class TypingResult:
    """Final outcome of a typing run: code blocks, log, and main's name."""

    def __init__(self, tc: TypingContext):
        """Capture the relevant outputs of *tc*.

        ``main_name`` is the mangled name of the resolved no-argument
        ``main`` function, or None if it was never resolved.
        """
        self.code_blocks = tc.code_blocks
        self.recursive_logger = tc.recursive_logger
        main_desc = ("main", (), ())
        if main_desc in tc.function_type_container:
            self.main_name = tc.function_type_container[main_desc].mangled_name
        else:
            self.main_name = None
def run(func_defs, struct_defs):
tc = TypingContext(func_defs, struct_defs)
m = tc.resolve_function("main", (), ())
return TypingResult(tc) | en | 0.608905 | # cache inspection and modificaiton # function generators invocation # func definition resolution # decision # cache inspection and modificaiton # concrete generators invocation # struct definition resolution # decision | 2.050972 | 2 |
kirsche/utils/semanticscholar.py | kausalflow/kirsche | 4 | 6620874 | <gh_stars>1-10
import json
from kirsche.utils.web import get_page_content
from loguru import logger
def get_paper_info(paper_id: str, API_BASE=None) -> dict:
    """
    Get paper info from the Semantic Scholar API.

    :param paper_id: id of the paper to look up (a single paper id string)
    :param API_BASE: base url for the API; defaults to the Semantic Scholar v1 endpoint
    :return: dict of the paper's metadata as returned by the API
    :raises Exception: if the API does not answer with HTTP status 200
    """
    if API_BASE is None:
        API_BASE = "https://api.semanticscholar.org/v1/paper/"
    logger.debug(f"Getting paper info for paper id {paper_id}")
    # Get paper info from Semantic Scholar API
    url = API_BASE + paper_id
    response = get_page_content(url)
    if response["status"] != 200:
        raise Exception(
            f"Error: Semantic Scholar API returned status code {response['status']}"
        )
    return json.loads(response["content"].text)
| import json
from kirsche.utils.web import get_page_content
from loguru import logger
def get_paper_info(paper_id: list, API_BASE=None) -> list:
"""
Get paper info from Semantic Scholar API
:param paper_id: list of paper ids
:param API_BASE: base url for the API, default is semanticscholar
"""
if API_BASE is None:
API_BASE = "https://api.semanticscholar.org/v1/paper/"
logger.debug(f"Getting paper info using base URL {paper_id}")
# Get paper info from Semantic Scholar API
url = API_BASE + paper_id
test_content = get_page_content(url)
if test_content["status"] != 200:
raise Exception(
f"Error: Semantic Scholar API returned status code {test_content['status']}"
)
else:
paper_info = json.loads(test_content["content"].text)
return paper_info | en | 0.696112 | Get paper info from Semantic Scholar API :param paper_id: list of paper ids :param API_BASE: base url for the API, default is semanticscholar # Get paper info from Semantic Scholar API | 2.491072 | 2 |
francis/tasks/genshin.py | trantinan2512/Francis | 0 | 6620875 | import json
import re
from datetime import datetime
import bs4
import discord
from pytz import timezone
from .webspiders import WebSpider
class GenshinCrawler():
    """Crawls the official Genshin Impact news API and posts new items to Discord.

    Already-posted items are tracked through the spider's backing sheet
    (``form_checking_data``) so each article is announced only once.
    """

    # Discord channel that receives the news embeds.
    POSTING_CHANNEL_ID = 754706712358944799

    def __init__(self, bot):
        self.bot = bot
        self.news_spider = WebSpider(self.bot, 'site_genshin')
        self.site_url = 'https://genshin.mihoyo.com/content/yuanshen/getContentList?pageSize=5&pageNum=1&channelId=10'
        self.content_url = 'https://genshin.mihoyo.com/content/yuanshen/getContent?contentId='
        self.webpage_url = 'https://genshin.mihoyo.com/en/news/detail/'
        # Collapses runs of 2+ newlines in scraped article text to exactly 2.
        self.newline_re = re.compile('\n{2,}')

    async def do_crawl(self):
        """Entry point: fetch the news list and post anything new."""
        await self.parse_data()

    @staticmethod
    def _extract_banner_url(data):
        """Return the banner image URL from a news item's 'ext' list, or ''."""
        image_url = ''
        for ext in data['ext']:
            # 'arrtName' is the (misspelled) key actually used by the API.
            if ext['arrtName'] == 'banner':
                if not ext['value']:
                    continue
                if 'url' not in ext['value'][0]:
                    continue
                image_url = ext['value'][0]['url']
        return image_url

    @staticmethod
    def _classify_channels(channel_ids):
        """Map an item's channel ids to an (embed footer, embed color) pair."""
        if '11' in channel_ids:
            return 'Info', discord.Color.dark_blue()
        if '12' in channel_ids:
            return 'Update', discord.Color.dark_magenta()
        if '13' in channel_ids:
            return 'Event', discord.Color.dark_purple()
        return '', discord.Color.dark_teal()

    async def parse_data(self):
        """Fetch the latest news list and post any unseen items to Discord."""
        checking_data = self.news_spider.form_checking_data()
        site_datas = self.fetch_news_data()
        if not site_datas or checking_data is None:
            return
        for data in site_datas:
            title = data['title']
            intro = data['intro']
            content_id = data['contentId']
            # Skip anything that has already been posted.
            if (content_id, title) in checking_data:
                continue
            # fetch content data
            content_data = self.fetch_news_content_data(content_id)
            if not content_data:
                continue
            image_url = self._extract_banner_url(data)
            _type, _color = self._classify_channels(data['channelId'])
            intro = f'*{intro}*\n-----\n' if intro else ''
            # Strip the HTML body to plain text and normalize whitespace.
            content_html = content_data['content']
            content_text = bs4.BeautifulSoup(content_html, 'html.parser').get_text()
            content_text = content_text.replace(u'\xa0', '')
            content_text = self.newline_re.sub('\n\n', content_text)
            # Build the embed description, truncating near Discord's limit.
            embed_desc = intro
            post_url = f'{self.webpage_url}{content_id}'
            for line in content_text.split('\n'):
                if len(embed_desc + f'{line}\n') > 1900:
                    embed_desc += f'...\n***[Read more]({post_url})***'
                    break
                embed_desc += f'{line}\n'
            embed = discord.Embed(
                title=title,
                description=embed_desc,
                timestamp=datetime.utcnow(),
                url=post_url,
                color=_color,
            )
            if _type:
                embed.set_footer(text=_type)
            if image_url:
                embed.set_image(url=image_url)
            posting_channel = self.bot.get_channel(id=self.POSTING_CHANNEL_ID)
            message = await posting_channel.send(embed=embed)
            # try to auto-publish the message (only allowed in news channels)
            try:
                await message.publish()
            except discord.Forbidden:
                pass
            now = datetime.now()
            vn_tz = now.astimezone(timezone('Asia/Ho_Chi_Minh'))
            fetched_data = {
                'id': content_id,
                'fetch_date': vn_tz.strftime('%d/%m/%Y'),
                'fetch_time': vn_tz.strftime('%H:%M:%S'),
                'title': title,
                'description': embed_desc,
                'image': image_url
            }
            # save to drive and refresh the dedup data for the next item
            self.news_spider.sheet.insert_row(
                [value for value in fetched_data.values()], index=2)
            checking_data = self.news_spider.form_checking_data()

    def _fetch_json_payload(self, url):
        """GET *url* and return the decoded JSON 'data' payload, or None."""
        data_text = self.news_spider.get_content_by_url(url)
        if not data_text:
            return None
        try:
            return json.loads(data_text)['data']
        except json.JSONDecodeError:
            return None

    def fetch_news_data(self):
        """Return the list of latest news items, or None on failure."""
        payload = self._fetch_json_payload(self.site_url)
        if payload is None:
            return None
        return payload['list']

    def fetch_news_content_data(self, content_id):
        """Return the full content payload for *content_id*, or None."""
        return self._fetch_json_payload(self.content_url + content_id)
| import json
import re
from datetime import datetime
import bs4
import discord
from pytz import timezone
from .webspiders import WebSpider
class GenshinCrawler():
def __init__(self, bot):
self.bot = bot
self.news_spider = WebSpider(self.bot, 'site_genshin')
self.site_url = 'https://genshin.mihoyo.com/content/yuanshen/getContentList?pageSize=5&pageNum=1&channelId=10'
self.content_url = 'https://genshin.mihoyo.com/content/yuanshen/getContent?contentId='
self.webpage_url = 'https://genshin.mihoyo.com/en/news/detail/'
self.newline_re = re.compile('\n{2,}')
async def do_crawl(self):
await self.parse_data()
async def parse_data(self):
checking_data = self.news_spider.form_checking_data()
site_datas = self.fetch_news_data()
if not site_datas or checking_data is None:
return
for data in site_datas:
title = data['title']
intro = data['intro']
content_id = data['contentId']
if (content_id, title) in checking_data:
continue
# fetch content data
content_data = self.fetch_news_content_data(content_id)
if not content_data:
continue
image_url = ''
for ext in data['ext']:
if ext['arrtName'] == 'banner':
if not ext['value']:
continue
if 'url' not in ext['value'][0]:
continue
image_url = ext['value'][0]['url']
channel_ids = data['channelId']
_type = ''
_color = discord.Color.dark_teal()
if '11' in channel_ids:
_type = 'Info'
_color = discord.Color.dark_blue()
elif '12' in channel_ids:
_type = 'Update'
_color = discord.Color.dark_magenta()
elif '13' in channel_ids:
_type = 'Event'
_color = discord.Color.dark_purple()
intro = f'*{intro}*\n-----\n' if intro else ''
content_html = content_data['content']
content_text = bs4.BeautifulSoup(content_html, 'html.parser').get_text()
content_text = content_text.replace(u'\xa0', '')
content_text = self.newline_re.sub('\n\n', content_text)
embed_desc = intro
post_url = f'{self.webpage_url}{content_id}'
for line in content_text.split('\n'):
if len(embed_desc + f'{line}\n') > 1900:
embed_desc += f'...\n***[Read more]({post_url})***'
break
embed_desc += f'{line}\n'
embed = discord.Embed(
title=title,
description=embed_desc,
timestamp=datetime.utcnow(),
url=post_url,
color=_color,
)
if _type:
embed.set_footer(text=_type)
if image_url:
embed.set_image(url=image_url)
posting_channel = self.bot.get_channel(id=754706712358944799)
message = await posting_channel.send(embed=embed)
# try to auto-publish the message
try:
await message.publish()
except discord.Forbidden:
pass
now = datetime.now()
vn_tz = now.astimezone(timezone('Asia/Ho_Chi_Minh'))
fetched_data = {
'id': content_id,
'fetch_date': vn_tz.strftime('%d/%m/%Y'),
'fetch_time': vn_tz.strftime('%H:%M:%S'),
'title': title,
'description': embed_desc,
'image': image_url
}
# save to drive and print the result title
self.news_spider.sheet.insert_row([value for value in fetched_data.values()], index=2)
checking_data = self.news_spider.form_checking_data()
def fetch_news_data(self):
data_text = self.news_spider.get_content_by_url(self.site_url)
if not data_text:
return
try:
data = json.loads(data_text)
return data['data']['list']
except json.JSONDecodeError:
return
def fetch_news_content_data(self, content_id):
data_text = self.news_spider.get_content_by_url(self.content_url + content_id)
if not data_text:
return
try:
data = json.loads(data_text)
return data['data']
except json.JSONDecodeError:
return
| en | 0.680792 | # fetch content data # try to auto-publish the message # save to drive and print the result title | 2.538759 | 3 |
tests/rules/eicar_rule_test.py | twaldear/binaryalert | 1,324 | 6620876 | <reponame>twaldear/binaryalert<gh_stars>1000+
"""Test the correctness of the EICAR YARA rule."""
import os
import unittest
import yara
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) # Directory containing this file.
EICAR_RULE_FILE = os.path.join(THIS_DIRECTORY, '..', '..', 'rules', 'public', 'eicar.yara')
EICAR_TXT_FILE = os.path.join(THIS_DIRECTORY, '..', 'files', 'eicar.txt')
class EicarRuleTest(unittest.TestCase):
    """Verify that the EICAR rules file matches only the expected string."""

    def setUp(self):
        """Read the EICAR test string and compile the YARA rule file."""
        with open(EICAR_TXT_FILE, 'r') as eicar_file:
            self.eicar_string = eicar_file.read()
        self.eicar_rule = yara.compile(EICAR_RULE_FILE)

    def _matching_rule_names(self, data):
        """Return the names of all YARA rules that match *data*."""
        return [match.rule for match in self.eicar_rule.match(data=data)]

    def test_match_eicar_string(self):
        """Should match the exact EICAR string."""
        self.assertEqual(
            ['eicar_av_test', 'eicar_substring_test'],
            self._matching_rule_names(self.eicar_string)
        )

    def test_match_eicar_with_trailing_spaces(self):
        """Trailing whitespace is allowed after the EICAR string."""
        self.assertEqual(
            ['eicar_av_test', 'eicar_substring_test'],
            self._matching_rule_names('{} \n\t'.format(self.eicar_string))
        )

    def test_no_match_if_eicar_is_not_beginning(self):
        """No match for eicar_av_test if EICAR string is not the beginning of the file."""
        self.assertEqual(
            ['eicar_substring_test'],
            self._matching_rule_names('other-text {}'.format(self.eicar_string))
        )

    def test_no_match_if_eicar_is_not_end(self):
        """No match for eicar_av_test if non-whitespace comes after the EICAR string."""
        self.assertEqual(
            ['eicar_substring_test'],
            self._matching_rule_names('{} other-text'.format(self.eicar_string))
        )
| """Test the correctness of the EICAR YARA rule."""
import os
import unittest
import yara
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) # Directory containing this file.
EICAR_RULE_FILE = os.path.join(THIS_DIRECTORY, '..', '..', 'rules', 'public', 'eicar.yara')
EICAR_TXT_FILE = os.path.join(THIS_DIRECTORY, '..', 'files', 'eicar.txt')
class EicarRuleTest(unittest.TestCase):
"""Verify that the EICAR rules file matches only the expected string."""
def setUp(self):
"""Compile the EICAR YARA rule."""
with open(EICAR_TXT_FILE, 'r') as f:
self.eicar_string = f.read()
self.eicar_rule = yara.compile(EICAR_RULE_FILE)
def test_match_eicar_string(self):
"""Should match the exact EICAR string."""
matches = self.eicar_rule.match(data=self.eicar_string)
self.assertEqual(
['eicar_av_test', 'eicar_substring_test'],
[match.rule for match in matches]
)
def test_match_eicar_with_trailing_spaces(self):
"""Trailing whitespace is allowed after the EICAR string."""
matches = self.eicar_rule.match(data='{} \n\t'.format(self.eicar_string))
self.assertEqual(
['eicar_av_test', 'eicar_substring_test'],
[match.rule for match in matches]
)
def test_no_match_if_eicar_is_not_beginning(self):
"""No match for eicar_av_test if EICAR string is not the beginning of the file."""
matches = self.eicar_rule.match(data='other-text {}'.format(self.eicar_string))
self.assertEqual(
['eicar_substring_test'],
[match.rule for match in matches]
)
def test_no_match_if_eicar_is_not_end(self):
"""No match for eicar_av_test if non-whitespace comes after the EICAR string."""
matches = self.eicar_rule.match(data='{} other-text'.format(self.eicar_string))
self.assertEqual(
['eicar_substring_test'],
[match.rule for match in matches]
) | en | 0.756195 | Test the correctness of the EICAR YARA rule. # Directory containing this file. Verify that the EICAR rules file matches only the expected string. Compile the EICAR YARA rule. Should match the exact EICAR string. Trailing whitespace is allowed after the EICAR string. No match for eicar_av_test if EICAR string is not the beginning of the file. No match for eicar_av_test if non-whitespace comes after the EICAR string. | 3.049201 | 3 |
tests/datasource/test_new_datasource_with_runtime_data_connector_pandas_execution_engine.py | andyjessen/great_expectations | 0 | 6620877 | <gh_stars>0
from typing import Any, Dict, List, Union
import pandas as pd
import pytest
from great_expectations.execution_engine.pandas_batch_data import PandasBatchData
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
IDDict,
RuntimeBatchRequest,
)
from great_expectations.core.yaml_handler import YAMLHandler
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource.new_datasource import Datasource
yaml = YAMLHandler()
@pytest.fixture
def datasource_with_runtime_data_connector_and_pandas_execution_engine():
    """Datasource backed by PandasExecutionEngine with one RuntimeDataConnector.

    The connector declares connector-level batch identifiers plus two named
    assets (asset_a, asset_b) with their own identifier sets.
    """
    # The YAML below is the datasource config; it must stay in sync with the
    # expected self_check() report asserted in the tests.
    basic_datasource: Datasource = instantiate_class_from_config(
        yaml.load(
            """
    class_name: Datasource
    execution_engine:
        class_name: PandasExecutionEngine
    data_connectors:
        test_runtime_data_connector:
            class_name: RuntimeDataConnector
            batch_identifiers:
                - pipeline_stage_name
                - airflow_run_id
                - custom_key_0
            assets:
                asset_a:
                    batch_identifiers:
                        - day
                        - month
                asset_b:
                    batch_identifiers:
                        - day
                        - month
                        - year
        """,
        ),
        runtime_environment={"name": "my_datasource"},
        config_defaults={"module_name": "great_expectations.datasource"},
    )
    return basic_datasource
#########################################
# Tests with data passed in as batch_data
#########################################
# Tests with PandasExecutionEngine : batch_data
def test_pandas_execution_engine_self_check(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """self_check() reports both configured assets and the execution-engine config."""
    report = (
        datasource_with_runtime_data_connector_and_pandas_execution_engine.self_check()
    )
    # Exact-match on the whole report: both assets present, no batches yet,
    # and the PandasExecutionEngine defaults echoed back.
    assert report == {
        "data_connectors": {
            "count": 1,
            "test_runtime_data_connector": {
                "class_name": "RuntimeDataConnector",
                "data_asset_count": 2,
                "data_assets": {
                    "asset_a": {
                        "batch_definition_count": 0,
                        "example_data_references": [],
                    },
                    "asset_b": {
                        "batch_definition_count": 0,
                        "example_data_references": [],
                    },
                },
                "example_data_asset_names": ["asset_a", "asset_b"],
                "example_unmatched_data_references": [],
                "unmatched_data_reference_count": 0,
            },
        },
        "execution_engine": {
            "boto3_options": {},
            "azure_options": {},
            "gcs_options": {},
            "caching": True,
            "class_name": "PandasExecutionEngine",
            "discard_subset_failing_expectations": False,
            "module_name": "great_expectations.execution_engine.pandas_execution_engine",
        },
    }
def test_batch_data_pandas_execution_engine_unknown_datasource(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A batch request naming a nonexistent datasource is rejected with ValueError."""
    in_memory_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # Constructing the request itself succeeds; the mismatched datasource name is
    # caught later, by _validate_batch_request() in Datasource.
    unknown_datasource_request = RuntimeBatchRequest(
        datasource_name="non_existent_datasource",
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"batch_data": in_memory_df},
        batch_identifiers={"default_identifier_name": "identifier_name"},
    )
    with pytest.raises(ValueError):
        datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=unknown_datasource_request
        )
def test_batch_data_pandas_execution_engine_unknown_data_connector(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A batch request naming a nonexistent data connector is rejected with ValueError."""
    in_memory_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # Constructing the request itself succeeds; the unknown connector name is
    # caught later, by _validate_batch_request() in Datasource.
    unknown_connector_request = RuntimeBatchRequest(
        datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        data_connector_name="non_existent_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"batch_data": in_memory_df},
        batch_identifiers={"default_identifier_name": "identifier_name"},
    )
    with pytest.raises(ValueError):
        datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=unknown_connector_request
        )
def test_batch_data_pandas_execution_engine_no_batch_identifiers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Omitting batch_identifiers (None or absent) raises TypeError at request construction."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
    with pytest.raises(TypeError):
        # batch_identifiers missing (set to None)
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers=None,
            )
        )
    # raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
    with pytest.raises(TypeError):
        # batch_identifiers missing entirely (keyword not passed at all)
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
            )
        )
def test_batch_data_pandas_execution_engine_incorrect_batch_identifiers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """batch_identifiers keys not declared in the connector config raise DataConnectorError."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # raised by _validate_batch_identifiers_configuration() in RuntimeDataConnector
    with pytest.raises(ge_exceptions.DataConnectorError):
        # batch_identifiers keys are not configured in the DataConnector
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers={"i_dont_exist": "i_dont_either"},
            )
        )
def test_batch_data_pandas_execution_engine_all_keys_present_for_batch_identifiers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Using exactly the declared identifier keys yields a single batch."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, Union[str, int]] = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": "custom_value_0",
    }
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "IN_MEMORY_DATA_ASSET",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
def test_batch_data_pandas_execution_engine_batch_identifiers_error_mostly_legal_keys(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """All declared identifier keys plus one undeclared key raise DataConnectorError."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # values are mixed str/int, so the annotation is Dict[str, Union[str, int]]
    batch_identifiers: Dict[str, Union[str, int]] = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": "custom_value_0",
        "i_am_illegal_key": "i_am_illegal_value",
    }
    # Ensure that keys in batch_identifiers that are not among batch_identifiers declared in
    # configuration are not accepted. In this test, all legal keys plus a single illegal key are present.
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "IN_MEMORY_DATA_ASSET",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_batch_data_pandas_execution_engine_batch_identifiers_error_one_illegal_key(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A batch_identifiers dict whose only key is undeclared raises DataConnectorError."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, str] = {"unknown_key": "some_value"}
    # Ensure that keys in batch_identifiers that are not among batch_identifiers declared in
    # configuration are not accepted. In this test, a single illegal key is present.
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "IN_MEMORY_DATA_ASSET",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_batch_data_pandas_execution_engine_set_data_asset_name_for_runtime_data(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """The data_asset_name supplied in the request is carried into the batch definition."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, Union[str, int]] = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": "custom_value_0",
    }
    # set : my_runtime_data_asset
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_runtime_data_asset",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert batch_list[0].batch_definition.data_asset_name == "my_runtime_data_asset"
def test_pandas_execution_engine_get_available_data_asset_names(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """The configured assets are reported per data connector."""
    datasource = datasource_with_runtime_data_connector_and_pandas_execution_engine
    # Both named assets from the fixture config appear under the single connector.
    assert datasource.get_available_data_asset_names() == {
        "test_runtime_data_connector": ["asset_a", "asset_b"]
    }
def test_batch_data_pandas_execution_engine_get_batch_definition_list_from_batch_request_length_one(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A single in-memory batch request produces exactly one fully-populated Batch."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    # batches are a little bit more difficult to test because of batch_markers
    # they are ones that uniquely identify the data
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (2, 2)
    assert my_batch_1.data.dataframe["col2"].values[1] == 4
    # fingerprint marker is an md5-style hash of the data; pins exact contents
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
    )
def test_batch_data_pandas_execution_engine_get_batch_definitions_and_get_batch_basics(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Basics of get_available_batch_definitions() and get_batch_from_batch_definition()."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    data_connector_name: str = "test_runtime_data_connector"
    data_asset_name: str = "test_asset_1"
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": data_connector_name,
        "data_asset_name": data_asset_name,
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": {
            "airflow_run_id": 1234567890,
        },
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # one batch definition is available for the runtime request
    assert (
        len(
            datasource_with_runtime_data_connector_and_pandas_execution_engine.get_available_batch_definitions(
                batch_request=batch_request
            )
        )
        == 1
    )
    my_df: pd.DataFrame = pd.DataFrame({"x": range(10), "y": range(10)})
    batch: Batch = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_from_batch_definition(
        batch_definition=BatchDefinition(
            "my_datasource",
            "_pipeline",
            "_pipeline",
            batch_identifiers=IDDict({"some_random_id": 1}),
        ),
        batch_data=my_df,
    )
    # a batch built directly from a batch definition carries no originating request
    assert batch.batch_request == {}
def test_batch_data_pandas_execution_engine_get_batch_list_with_named_asset(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """An in-memory batch requested against named asset ``asset_a`` yields one batch.

    Fix: the original repeated ``assert len(batch_list) == 1`` twice in a row;
    the redundant duplicate is removed.
    """
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, int] = {"day": 1, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (2, 2)
    assert my_batch_1.data.dataframe["col2"].values[1] == 4
    # fingerprint marker pins the exact data contents
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
    )
def test_batch_data_pandas_execution_engine_get_batch_list_with_named_asset_two_batch_requests(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Two successive in-memory requests against ``asset_a`` each yield one distinct batch.

    Fix: the original repeated ``assert len(batch_list) == 1`` twice for the
    first request; the redundant duplicate is removed.
    """
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, int] = {"day": 1, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    # batches are a little bit more difficult to test because of batch_markers
    # they are ones that uniquely identify the data
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (2, 2)
    assert my_batch_1.batch_definition.batch_identifiers == batch_identifiers
    assert my_batch_1.data.dataframe["col2"].values[1] == 4
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
    )
    # second batch request: different data and identifiers => different batch/fingerprint
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]})
    batch_identifiers = {"day": 2, "month": 12}
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_2 = batch_list[0]
    assert my_batch_2.batch_spec is not None
    assert my_batch_2.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_2.data.dataframe, pd.DataFrame)
    assert my_batch_2.data.dataframe.shape == (2, 2)
    assert my_batch_2.batch_definition.batch_identifiers == batch_identifiers
    assert my_batch_2.data.dataframe["col2"].values[1] == 8
    assert (
        my_batch_2.batch_markers["pandas_data_fingerprint"]
        == "548e148ff5a8e932a3a2a1d0d8ff7f84"
    )
###################################
# Tests with data passed in as path
###################################
# Tests with Pandas Execution Engine
def test_file_path_pandas_execution_engine_batch_list_from_batch_request_success(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """A CSV path passed via runtime_parameters loads into a PandasBatchData batch."""
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(my_batch_1.data, PandasBatchData)
    # taxi_test_file fixture: 10000 data rows, 18 columns (header consumed)
    assert len(my_batch_1.data.dataframe) == 10000
    assert len(my_batch_1.data.dataframe.columns) == 18
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_success_no_headers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """batch_spec_passthrough reader_options reach pandas: header=None keeps the header row as data."""
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
        "batch_spec_passthrough": {"reader_options": {"header": None}},
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(my_batch_1.data, PandasBatchData)
    assert (
        len(my_batch_1.data.dataframe) == 10001
    )  # one more line because of header being set to None
    assert len(my_batch_1.data.dataframe.columns) == 18
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_not_supported_file_directory(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
    taxi_test_file_directory,
):
    """Passing a directory (not a file) as the path fails when pandas tries to read it."""
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "path": taxi_test_file_directory,
        },
        "batch_identifiers": batch_identifiers,
        "batch_spec_passthrough": {
            "reader_method": "read_csv",
        },
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # error type is platform-dependent, hence the tuple of acceptable exceptions
    with pytest.raises((IsADirectoryError, pd.errors.ParserError)):
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_failed_wrong_file_path(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A path with no recognizable extension raises ExecutionEngineError."""
    batch_identifiers = {
        "airflow_run_id": 1234567890,
    }
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "path": "I_dont_exist",
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # raised by guess_reader_method_from_path() in ExecutionEngine
    with pytest.raises(ge_exceptions.ExecutionEngineError):
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_failed_wrong_reader_method(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """An unrecognized reader_method in batch_spec_passthrough raises ExecutionEngineError."""
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
        "batch_spec_passthrough": {"reader_method": "i_am_not_valid"},
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # raised by _get_reader_fn() in ExecutionEngine
    with pytest.raises(ge_exceptions.ExecutionEngineError):
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_file_path_pandas_execution_engine_get_batch_list_with_named_asset(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """A path-based request against named asset ``asset_a`` yields one batch with the asset's identifiers."""
    batch_identifiers: Dict[str, int] = {"day": 1, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (10000, 18)
    # fingerprint marker pins the exact file contents
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "c4f929e6d4fab001fedc9e075bf4b612"
    )
    assert my_batch_1.batch_definition.batch_identifiers == batch_identifiers
def test_file_path_pandas_execution_engine_get_batch_list_with_named_asset_two_batch_requests(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """Two successive path-based requests against ``asset_a`` each yield one batch.

    Fix: the original repeated ``assert len(batch_list) == 1`` twice for the
    first request; the redundant duplicate is removed.
    """
    batch_identifiers: dict = {"day": 1, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    # batches are a little bit more difficult to test because of batch_markers
    # they are ones that uniquely identify the data
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (10000, 18)
    assert my_batch_1.batch_definition.batch_identifiers == batch_identifiers
    # NOTE(review): "<KEY>" is a redaction artifact — the original fingerprint
    # hash was scrubbed from this copy; restore the real md5 fingerprint
    # (cf. "c4f929e6d4fab001fedc9e075bf4b612" in the single-request test above)
    # before running this assertion.
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "<KEY>"
    )
    # second batch request
    batch_identifiers = {"day": 2, "month": 12}
    batch_request = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_2 = batch_list[0]
    assert my_batch_2.batch_spec is not None
    assert my_batch_2.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_2.data.dataframe, pd.DataFrame)
    assert my_batch_2.data.dataframe.shape == (10000, 18)
    assert my_batch_2.batch_definition.batch_identifiers == batch_identifiers
    # NOTE(review): "<KEY>" redaction artifact — see note above.
    assert (
        my_batch_2.batch_markers["pandas_data_fingerprint"]
        == "<KEY>"
    )
| from typing import Any, Dict, List, Union
import pandas as pd
import pytest
from great_expectations.execution_engine.pandas_batch_data import PandasBatchData
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
IDDict,
RuntimeBatchRequest,
)
from great_expectations.core.yaml_handler import YAMLHandler
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource.new_datasource import Datasource
yaml = YAMLHandler()
@pytest.fixture
def datasource_with_runtime_data_connector_and_pandas_execution_engine():
    """Datasource backed by a PandasExecutionEngine with one RuntimeDataConnector.

    The connector declares three datasource-level batch identifiers
    (pipeline_stage_name, airflow_run_id, custom_key_0) plus two named assets,
    ``asset_a`` and ``asset_b``, each with its own batch-identifier set.
    """
    basic_datasource: Datasource = instantiate_class_from_config(
        yaml.load(
            """
    class_name: Datasource
    execution_engine:
        class_name: PandasExecutionEngine
    data_connectors:
        test_runtime_data_connector:
            class_name: RuntimeDataConnector
            batch_identifiers:
                - pipeline_stage_name
                - airflow_run_id
                - custom_key_0
            assets:
                asset_a:
                    batch_identifiers:
                        - day
                        - month
                asset_b:
                    batch_identifiers:
                        - day
                        - month
                        - year
    """,
        ),
        runtime_environment={"name": "my_datasource"},
        config_defaults={"module_name": "great_expectations.datasource"},
    )
    return basic_datasource
#########################################
# Tests with data passed in as batch_data
#########################################
# Tests with PandasExecutionEngine : batch_data
def test_pandas_execution_engine_self_check(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """self_check() reports both configured assets and the execution-engine config."""
    report = (
        datasource_with_runtime_data_connector_and_pandas_execution_engine.self_check()
    )
    # Exact-match on the whole report: both assets present, no batches yet,
    # and the PandasExecutionEngine defaults echoed back.
    assert report == {
        "data_connectors": {
            "count": 1,
            "test_runtime_data_connector": {
                "class_name": "RuntimeDataConnector",
                "data_asset_count": 2,
                "data_assets": {
                    "asset_a": {
                        "batch_definition_count": 0,
                        "example_data_references": [],
                    },
                    "asset_b": {
                        "batch_definition_count": 0,
                        "example_data_references": [],
                    },
                },
                "example_data_asset_names": ["asset_a", "asset_b"],
                "example_unmatched_data_references": [],
                "unmatched_data_reference_count": 0,
            },
        },
        "execution_engine": {
            "boto3_options": {},
            "azure_options": {},
            "gcs_options": {},
            "caching": True,
            "class_name": "PandasExecutionEngine",
            "discard_subset_failing_expectations": False,
            "module_name": "great_expectations.execution_engine.pandas_execution_engine",
        },
    }
def test_batch_data_pandas_execution_engine_unknown_datasource(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A batch request naming a nonexistent datasource is rejected with ValueError."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # raised by _validate_batch_request() in Datasource
    with pytest.raises(ValueError):
        # Test for an unknown datasource
        # noinspection PyUnusedLocal
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name="non_existent_datasource",
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers={"default_identifier_name": "identifier_name"},
            )
        )
def test_batch_data_pandas_execution_engine_unknown_data_connector(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A batch request naming a nonexistent data connector is rejected with ValueError."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # raised by _validate_batch_request() in Datasource
    with pytest.raises(ValueError):
        # Test for an unknown data_connector
        # noinspection PyUnusedLocal
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
                data_connector_name="non_existent_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers={"default_identifier_name": "identifier_name"},
            )
        )
def test_batch_data_pandas_execution_engine_no_batch_identifiers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Omitting batch_identifiers (None or absent) raises TypeError at request construction."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
    with pytest.raises(TypeError):
        # batch_identifiers missing (set to None)
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers=None,
            )
        )
    # raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
    with pytest.raises(TypeError):
        # batch_identifiers missing entirely (keyword not passed at all)
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
            )
        )
def test_batch_data_pandas_execution_engine_incorrect_batch_identifiers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """batch_identifiers keys not declared in the connector config raise DataConnectorError."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # raised by _validate_batch_identifiers_configuration() in RuntimeDataConnector
    with pytest.raises(ge_exceptions.DataConnectorError):
        # batch_identifiers keys are not configured in the DataConnector
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=RuntimeBatchRequest(
                datasource_name=datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
                data_connector_name="test_runtime_data_connector",
                data_asset_name="my_data_asset",
                runtime_parameters={"batch_data": test_df},
                batch_identifiers={"i_dont_exist": "i_dont_either"},
            )
        )
def test_batch_data_pandas_execution_engine_all_keys_present_for_batch_identifiers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Every batch_identifier key declared in the connector config is accepted.

    Acceptance is verified via the batch count: a fully legal request yields
    exactly one batch.
    """
    datasource = datasource_with_runtime_data_connector_and_pandas_execution_engine
    df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="IN_MEMORY_DATA_ASSET",
        runtime_parameters={"batch_data": df},
        batch_identifiers={
            "pipeline_stage_name": "core_processing",
            "airflow_run_id": 1234567890,
            "custom_key_0": "custom_value_0",
        },
    )
    batches: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert len(batches) == 1
def test_batch_data_pandas_execution_engine_batch_identifiers_error_mostly_legal_keys(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A single undeclared batch_identifier key poisons an otherwise legal request."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # Values mix str and int, hence Union (was mis-annotated Dict[str, int]).
    batch_identifiers: Dict[str, Union[str, int]] = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": "custom_value_0",
        "i_am_illegal_key": "i_am_illegal_value",
    }
    # Ensure that keys in batch_identifiers that are not among batch_identifiers declared in
    # configuration are not accepted. In this test, all legal keys plus a single illegal key are present.
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "IN_MEMORY_DATA_ASSET",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # The RuntimeDataConnector rejects the request because of the one illegal key.
    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_batch_data_pandas_execution_engine_batch_identifiers_error_one_illegal_key(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A request whose only batch_identifier key is undeclared must be rejected."""
    datasource = datasource_with_runtime_data_connector_and_pandas_execution_engine
    df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    # Constructing the request itself succeeds; rejection happens on retrieval.
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="IN_MEMORY_DATA_ASSET",
        runtime_parameters={"batch_data": df},
        batch_identifiers={"unknown_key": "some_value"},
    )
    # "unknown_key" is not declared in the connector configuration.
    with pytest.raises(ge_exceptions.DataConnectorError):
        # noinspection PyUnusedLocal
        datasource.get_batch_list_from_batch_request(
            batch_request=runtime_batch_request
        )
def test_batch_data_pandas_execution_engine_set_data_asset_name_for_runtime_data(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """The data_asset_name in the request is carried through to the batch_definition."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, Union[str, int]] = {
        "pipeline_stage_name": "core_processing",
        "airflow_run_id": 1234567890,
        "custom_key_0": "custom_value_0",
    }
    # set : my_runtime_data_asset  (the asset name we expect echoed back below)
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_runtime_data_asset",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    # The runtime batch is registered under the requested asset name.
    assert batch_list[0].batch_definition.data_asset_name == "my_runtime_data_asset"
def test_pandas_execution_engine_get_available_data_asset_names(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """The datasource reports exactly the assets configured on its runtime connector."""
    datasource = datasource_with_runtime_data_connector_and_pandas_execution_engine
    actual: Dict[str, List[str]] = datasource.get_available_data_asset_names()
    # Only the named assets declared under test_runtime_data_connector appear.
    assert actual == {"test_runtime_data_connector": ["asset_a", "asset_b"]}
def test_batch_data_pandas_execution_engine_get_batch_definition_list_from_batch_request_length_one(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A minimal in-memory request yields exactly one fully populated Batch."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    # batches are a little bit more difficult to test because of batch_markers
    # they are ones that uniquely identify the data
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (2, 2)
    assert my_batch_1.data.dataframe["col2"].values[1] == 4
    # Expected fingerprint pinned from a known-good run over this exact 2x2
    # frame -- presumably a hash of the serialized data; TODO confirm.
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
    )
def test_batch_data_pandas_execution_engine_get_batch_definitions_and_get_batch_basics(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Basic round-trips: batch-definition lookup and direct batch construction."""
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    data_connector_name: str = "test_runtime_data_connector"
    data_asset_name: str = "test_asset_1"
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": data_connector_name,
        "data_asset_name": data_asset_name,
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": {
            "airflow_run_id": 1234567890,
        },
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # A valid runtime request resolves to exactly one available batch definition.
    assert (
        len(
            datasource_with_runtime_data_connector_and_pandas_execution_engine.get_available_batch_definitions(
                batch_request=batch_request
            )
        )
        == 1
    )
    my_df: pd.DataFrame = pd.DataFrame({"x": range(10), "y": range(10)})
    # BatchDefinition positional args look like datasource / data connector /
    # data asset names -- confirm against BatchDefinition's signature.
    batch: Batch = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_from_batch_definition(
        batch_definition=BatchDefinition(
            "my_datasource",
            "_pipeline",
            "_pipeline",
            batch_identifiers=IDDict({"some_random_id": 1}),
        ),
        batch_data=my_df,
    )
    # A batch built directly from a batch_definition carries no originating
    # batch_request (empty dict).
    assert batch.batch_request == {}
def test_batch_data_pandas_execution_engine_get_batch_list_with_named_asset(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A named asset ("asset_a") accepts its configured day/month identifiers
    and returns a single, fully populated in-memory batch.

    Fixed: `assert len(batch_list) == 1` was written twice back to back; the
    redundant duplicate has been removed.
    """
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers = {"day": 1, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (2, 2)
    assert my_batch_1.data.dataframe["col2"].values[1] == 4
    # Expected fingerprint pinned from a known-good run over this exact 2x2 frame.
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
    )
def test_batch_data_pandas_execution_engine_get_batch_list_with_named_asset_two_batch_requests(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """Two successive in-memory requests against the same named asset each
    yield their own, correctly identified batch.

    Fixed: `assert len(batch_list) == 1` was duplicated after the first
    request; the redundant assertion has been removed.
    """
    # --- first batch request -------------------------------------------------
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
    batch_identifiers = {"day": 1, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    # batch_markers uniquely fingerprint the data, which makes whole-batch
    # comparison awkward; individual fields are asserted instead.
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (2, 2)
    assert my_batch_1.batch_definition.batch_identifiers == batch_identifiers
    assert my_batch_1.data.dataframe["col2"].values[1] == 4
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "1e461a0df5fe0a6db2c3bc4ef88ef1f0"
    )
    # --- second batch request (new data, new identifiers) --------------------
    test_df: pd.DataFrame = pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]})
    batch_identifiers = {"day": 2, "month": 12}
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "batch_data": test_df,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_2 = batch_list[0]
    assert my_batch_2.batch_spec is not None
    assert my_batch_2.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_2.data.dataframe, pd.DataFrame)
    assert my_batch_2.data.dataframe.shape == (2, 2)
    assert my_batch_2.batch_definition.batch_identifiers == batch_identifiers
    assert my_batch_2.data.dataframe["col2"].values[1] == 8
    assert (
        my_batch_2.batch_markers["pandas_data_fingerprint"]
        == "548e148ff5a8e932a3a2a1d0d8ff7f84"
    )
###################################
# Tests with data passed in as path
###################################
# Tests with Pandas Execution Engine
def test_file_path_pandas_execution_engine_batch_list_from_batch_request_success(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """A runtime "path" parameter loads the taxi CSV into a single pandas batch."""
    datasource = datasource_with_runtime_data_connector_and_pandas_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"path": taxi_test_file},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    batches: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert len(batches) == 1
    batch = batches[0]
    assert batch.batch_spec is not None
    assert batch.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(batch.data, PandasBatchData)
    # The taxi fixture file holds 10000 data rows across 18 columns.
    assert len(batch.data.dataframe) == 10000
    assert len(batch.data.dataframe.columns) == 18
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_success_no_headers(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """batch_spec_passthrough reader_options reach the pandas reader (header=None)."""
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
        # header=None makes pandas treat the header row as data.
        "batch_spec_passthrough": {"reader_options": {"header": None}},
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "my_data_asset"
    assert isinstance(my_batch_1.data, PandasBatchData)
    assert (
        len(my_batch_1.data.dataframe) == 10001
    )  # one more line because of header being set to None
    assert len(my_batch_1.data.dataframe.columns) == 18
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_not_supported_file_directory(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
    taxi_test_file_directory,
):
    """Pointing the pandas reader at a directory instead of a file fails at read time."""
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            # A directory, not a readable CSV file.
            "path": taxi_test_file_directory,
        },
        "batch_identifiers": batch_identifiers,
        "batch_spec_passthrough": {
            "reader_method": "read_csv",
        },
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # Either error may surface depending on environment; the test accepts both.
    with pytest.raises((IsADirectoryError, pd.errors.ParserError)):
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_failed_wrong_file_path(
    datasource_with_runtime_data_connector_and_pandas_execution_engine,
):
    """A nonexistent, extension-less path fails reader-method detection."""
    datasource = datasource_with_runtime_data_connector_and_pandas_execution_engine
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="my_data_asset",
        runtime_parameters={"path": "I_dont_exist"},
        batch_identifiers={"airflow_run_id": 1234567890},
    )
    # guess_reader_method_from_path() in the ExecutionEngine cannot map the
    # path to a pandas reader and raises.
    with pytest.raises(ge_exceptions.ExecutionEngineError):
        datasource.get_batch_list_from_batch_request(
            batch_request=runtime_batch_request
        )
def test_file_path_pandas_execution_engine_batch_definition_list_from_batch_request_failed_wrong_reader_method(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """An unknown batch_spec_passthrough reader_method is rejected by the engine."""
    batch_identifiers: Dict[str, int] = {
        "airflow_run_id": 1234567890,
    }
    batch_request: Dict[str, Any] = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "my_data_asset",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
        # Not a method that exists on the pandas reader.
        "batch_spec_passthrough": {"reader_method": "i_am_not_valid"},
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # raised by _get_reader_fn() in ExecutionEngine
    with pytest.raises(ge_exceptions.ExecutionEngineError):
        batch_list: List[
            Batch
        ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
            batch_request=batch_request
        )
def test_file_path_pandas_execution_engine_get_batch_list_with_named_asset(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """A named asset ("asset_a") accepts its configured day/month identifiers
    for a path-based batch, and the identifiers round-trip to the definition."""
    datasource = datasource_with_runtime_data_connector_and_pandas_execution_engine
    identifiers = {"day": 1, "month": 12}
    runtime_batch_request = RuntimeBatchRequest(
        datasource_name=datasource.name,
        data_connector_name="test_runtime_data_connector",
        data_asset_name="asset_a",
        runtime_parameters={"path": taxi_test_file},
        batch_identifiers=identifiers,
    )
    batches: List[Batch] = datasource.get_batch_list_from_batch_request(
        batch_request=runtime_batch_request
    )
    assert len(batches) == 1
    batch = batches[0]
    assert batch.batch_spec is not None
    assert batch.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(batch.data.dataframe, pd.DataFrame)
    assert batch.data.dataframe.shape == (10000, 18)
    # Expected fingerprint pinned from a known-good run over the taxi fixture.
    assert (
        batch.batch_markers["pandas_data_fingerprint"]
        == "c4f929e6d4fab001fedc9e075bf4b612"
    )
    assert batch.batch_definition.batch_identifiers == identifiers
def test_file_path_pandas_execution_engine_get_batch_list_with_named_asset_two_batch_requests(
    datasource_with_runtime_data_connector_and_pandas_execution_engine, taxi_test_file
):
    """Two successive path-based requests against the same named asset each
    yield one batch carrying their own identifiers."""
    batch_identifiers: dict = {"day": 1, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    # batches are a little bit more difficult to test because of batch_markers
    # they are ones that uniquely identify the data
    # NOTE(review): the assertion below duplicates the one above -- one of the
    # two could be removed.
    assert len(batch_list) == 1
    my_batch_1 = batch_list[0]
    assert my_batch_1.batch_spec is not None
    assert my_batch_1.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_1.data.dataframe, pd.DataFrame)
    assert my_batch_1.data.dataframe.shape == (10000, 18)
    assert my_batch_1.batch_definition.batch_identifiers == batch_identifiers
    # NOTE(review): "<KEY>" looks like a redacted fingerprint placeholder --
    # restore the real pinned md5-style hash for this assertion to be meaningful.
    assert (
        my_batch_1.batch_markers["pandas_data_fingerprint"]
        == "<KEY>"
    )
    # second batch request
    batch_identifiers: dict = {"day": 2, "month": 12}
    # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
    batch_request: dict = {
        "datasource_name": datasource_with_runtime_data_connector_and_pandas_execution_engine.name,
        "data_connector_name": "test_runtime_data_connector",
        "data_asset_name": "asset_a",
        "runtime_parameters": {
            "path": taxi_test_file,
        },
        "batch_identifiers": batch_identifiers,
    }
    batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    batch_list: List[
        Batch
    ] = datasource_with_runtime_data_connector_and_pandas_execution_engine.get_batch_list_from_batch_request(
        batch_request=batch_request
    )
    assert len(batch_list) == 1
    my_batch_2 = batch_list[0]
    assert my_batch_2.batch_spec is not None
    assert my_batch_2.batch_definition["data_asset_name"] == "asset_a"
    assert isinstance(my_batch_2.data.dataframe, pd.DataFrame)
    assert my_batch_2.data.dataframe.shape == (10000, 18)
    assert my_batch_2.batch_definition.batch_identifiers == batch_identifiers
    # NOTE(review): redacted fingerprint placeholder here as well.
    assert (
        my_batch_2.batch_markers["pandas_data_fingerprint"]
        == "<KEY>"
) | en | 0.799695 | class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: test_runtime_data_connector: class_name: RuntimeDataConnector batch_identifiers: - pipeline_stage_name - airflow_run_id - custom_key_0 assets: asset_a: batch_identifiers: - day - month asset_b: batch_identifiers: - day - month - year ######################################### # Tests with data passed in as batch_data ######################################### # Tests with PandasExecutionEngine : batch_data # raised by _validate_batch_request() in Datasource # Test for an unknown datasource # noinspection PyUnusedLocal # raised by _validate_batch_request() in Datasource # Test for an unknown data_connector # noinspection PyUnusedLocal # raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__() # batch_identifiers missing (set to None) # raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__() # batch_identifiers missing # raised by _validate_batch_identifiers_configuration() in RuntimeDataConnector # runtime_parameters are not configured in the DataConnector # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count). # Ensure that keys in batch_identifiers that are not among batch_identifiers declared in # configuration are not accepted. In this test, all legal keys plus a single illegal key are present. # noinspection PyUnusedLocal # Ensure that keys in batch_identifiers that are not among batch_identifiers declared in # configuration are not accepted. In this test, a single illegal key is present. # noinspection PyUnusedLocal # set : my_runtime_data_asset # batches are a little bit more difficult to test because of batch_markers # they are ones that uniquely identify the data # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count). 
# Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count). # batches are a little bit more difficult to test because of batch_markers # they are ones that uniquely identify the data # second batch request # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count). ################################### # Tests with data passed in as path ################################### # Tests with Pandas Execution Engine # one more line because of header being set to None # raised by guess_reader_method_from_path() in ExecutionEngine # raised by _get_reader_fn() in ExecutionEngine # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count). # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count). # batches are a little bit more difficult to test because of batch_markers # they are ones that uniquely identify the data # second batch request # Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count). | 2.299028 | 2 |
socfaker/products.py | priamai/soc-faker | 122 | 6620878 | class Products(object):
"""The Products class is the main entrypoint for all product related data within soc-faker
Returns:
Products: A class which contains properties about different security products
"""
    @property
    def azure(self):
        """Microsoft Azure fake-data generator.

        The sub-module is imported locally, so it is only loaded when the
        property is first accessed.

        Returns:
            Azure: object exposing properties and methods for generating data
                about Microsoft Azure products and services
        """
        from .azure import Azure
        return Azure()
    @property
    def alienvault(self):
        """AlienVault fake-data generator (fixes "AlienVauilt" typo).

        Returns:
            AlienVault: object exposing properties and methods for generating
                data about AlienVault products and services
        """
        from .alienvault import AlienVault
        return AlienVault()
    @property
    def elastic(self):
        """Elastic fake-data generator.

        Returns:
            Elastic: object exposing properties and methods for generating
                data about Elastic products and services
        """
        from .elastic import Elastic
        return Elastic()
    @property
    def servicenow(self):
        """ServiceNow fake-data generator.

        Returns:
            ServiceNow: object exposing properties and methods for generating
                data about ServiceNow products and services
        """
        from .servicenow import ServiceNow
        return ServiceNow()
    @property
    def qualysguard(self):
        """QualysGuard fake-data generator (docstring previously said "Azure").

        Returns:
            QualysGuard: object exposing properties and methods for generating
                data about QualysGuard
        """
        from .qualysguard import QualysGuard
        return QualysGuard()
| class Products(object):
"""The Products class is the main entrypoint for all product related data within soc-faker
Returns:
Products: A class which contains properties about different security products
"""
@property
def azure(self):
"""Azure class contains properties related to Azure products
Returns:
Azure: Microsoft Azure object containing properties and methods for generating data about Microsoft Azure products and services
"""
from .azure import Azure
return Azure()
@property
def alienvault(self):
"""AlienVault class contains properties related to AlienVault products
Returns:
AlienVauilt: AlienVault object containing properties and methods for generating data about AlienVault products and services
"""
from .alienvault import AlienVault
return AlienVault()
@property
def elastic(self):
"""Elastic class contains properties related to Elastic products
Returns:
Elastic: Elastic object containing properties and methods for generating data about Elastic products and services
"""
from .elastic import Elastic
return Elastic()
@property
def servicenow(self):
"""ServiceNow class contains properties related to ServiceNow products
Returns:
ServiceNow: ServiceNow object containing properties and methods for generating data about ServiceNow products and services
"""
from .servicenow import ServiceNow
return ServiceNow()
@property
def qualysguard(self):
"""QualysGuard class contains properties related to Azure products
Returns:
QualysGuard: QualysGuard object containing properties and methods for generating data about QualysGuard
"""
from .qualysguard import QualysGuard
return QualysGuard()
| en | 0.842968 | The Products class is the main entrypoint for all product related data within soc-faker Returns: Products: A class which contains properties about different security products Azure class contains properties related to Azure products Returns: Azure: Microsoft Azure object containing properties and methods for generating data about Microsoft Azure products and services AlienVault class contains properties related to AlienVault products Returns: AlienVauilt: AlienVault object containing properties and methods for generating data about AlienVault products and services Elastic class contains properties related to Elastic products Returns: Elastic: Elastic object containing properties and methods for generating data about Elastic products and services ServiceNow class contains properties related to ServiceNow products Returns: ServiceNow: ServiceNow object containing properties and methods for generating data about ServiceNow products and services QualysGuard class contains properties related to Azure products Returns: QualysGuard: QualysGuard object containing properties and methods for generating data about QualysGuard | 2.736951 | 3 |
tests/unit/test_version_utils.py | medmunds/tox-gh-matrix | 0 | 6620879 | <gh_stars>0
import re
import sys
import pytest
from tox.interpreters import InterpreterInfo
from tox_gh_matrix.version_utils import (
basepython_to_gh_python_version,
format_version_info,
interpreter_info_to_version,
python_version_to_prerelease_spec,
)
@pytest.mark.parametrize(
    "basepython,expected",
    [
        ("python3.8", "3.8"),
        ("python3.10", "3.10"),
        ("python2.7", "2.7"),
        ("pypy3.6", "pypy-3.6"),
        ("jython3.5", "jython-3.5"),
        ("python", ""),
        ("pypy", "pypy"),
        ("pypy3", "pypy-3"),
        (sys.executable, ""),  # tox default basepython
    ],
)
def test_basepython_to_python_version(basepython, expected):
    """tox basepython strings map to GitHub Actions python-version strings.

    Per the table above: CPython drops the "python" prefix, other
    implementations keep their name with a hyphen, and a bare interpreter
    (or sys.executable) maps to "".
    """
    assert basepython_to_gh_python_version(basepython) == expected
@pytest.mark.parametrize(
    "basepython",
    [
        "python3.5.1",  # only N.M version supported
        "pypy3.7.10",  # only N.M version supported
        "/my/custom/python",  # non-sys.executable absolute path
    ],
)
def test_basepython_to_python_version_invalid(basepython):
    """Unsupported basepython spellings raise ValueError naming the offender."""
    message_pattern = "Unexpected basepython format " + re.escape(repr(basepython))
    with pytest.raises(ValueError, match=message_pattern):
        basepython_to_gh_python_version(basepython)
@pytest.mark.parametrize(
    "python,expected",
    [
        ("3.7", "3.7.0-alpha - 3.7"),
        # Unclear how to specify pypy range, so don't:
        ("pypy-3.7", "pypy-3.7"),
        # These forms aren't currently handled:
        ("", ""),
        ("3", "3"),
        ("3.7.3", "3.7.3"),
    ],
)
def test_python_version_to_prerelease_spec(python, expected):
    """Bare CPython major.minor widens to a prerelease range; other forms pass through."""
    assert python_version_to_prerelease_spec(python) == expected
@pytest.mark.parametrize(
    "implementation,version_info,extra_version_info,expected",
    [
        ("CPython", (2, 7, 12, "final", 0), None, "2.7.12"),
        ("CPython", (3, 11, 0, "alpha", 0), None, "3.11.0-alpha.0"),
        ("PyPy", (3, 8, 6, "final", 0), (3, 7, 0, "final", 0), "pypy-3.8.6-3.7.0"),
        ("Jython", (3, 4, 8, "final", 0), None, "jython-3.4.8"),
    ],
)
def test_interpreter_info_to_version(
    implementation, version_info, extra_version_info, expected, ignore_extra_kwargs
):
    """A tox InterpreterInfo renders as a single version string.

    Per the table above: CPython uses the bare version, other implementations
    are prefixed with their lowercased name, and PyPy appends the version from
    extra_version_info.
    """
    # (InterpreterInfo constructor has changed required kwargs
    # over time, in ways which aren't relevant to this plugin.)
    interpreter_info = ignore_extra_kwargs(
        InterpreterInfo,
        implementation=implementation,
        executable="n/a for this test",
        version_info=version_info,
        sysplatform="n/a for this test",
        is_64=False,  # n/a for this test
        os_sep="/",  # n/a for this test
        extra_version_info=extra_version_info,
    )
    assert interpreter_info_to_version(interpreter_info) == expected
@pytest.mark.parametrize(
    "info,expected",
    [
        ((2, 7, 12, "final", 0), "2.7.12"),
        ((3, 10, 4, "alpha", 0), "3.10.4-alpha.0"),
        ((3, 10, 4, "final", 3), "3.10.4-final.3"),
        ((3, 11, 0, "final", 0), "3.11.0"),
    ],
)
def test_format_version_info(info, expected):
    """Per the table: a "final"/serial-0 release renders bare; others keep a suffix."""
    formatted = format_version_info(info)
    assert formatted == expected
| import re
import sys
import pytest
from tox.interpreters import InterpreterInfo
from tox_gh_matrix.version_utils import (
basepython_to_gh_python_version,
format_version_info,
interpreter_info_to_version,
python_version_to_prerelease_spec,
)
@pytest.mark.parametrize(
"basepython,expected",
[
("python3.8", "3.8"),
("python3.10", "3.10"),
("python2.7", "2.7"),
("pypy3.6", "pypy-3.6"),
("jython3.5", "jython-3.5"),
("python", ""),
("pypy", "pypy"),
("pypy3", "pypy-3"),
(sys.executable, ""), # tox default basepython
],
)
def test_basepython_to_python_version(basepython, expected):
assert basepython_to_gh_python_version(basepython) == expected
@pytest.mark.parametrize(
    "basepython",
    [
        "python3.5.1",  # only N.M version supported
        "pypy3.7.10",  # only N.M version supported
        "/my/custom/python",  # non-sys.executable absolute path
    ],
)
def test_basepython_to_python_version_invalid(basepython):
    """Unsupported basepython formats raise ValueError naming the input."""
    expected_error = rf"Unexpected basepython format {re.escape(repr(basepython))}"
    with pytest.raises(ValueError, match=expected_error):
        basepython_to_gh_python_version(basepython)
@pytest.mark.parametrize(
    "python,expected",
    [
        ("3.7", "3.7.0-alpha - 3.7"),
        # Unclear how to specify pypy range, so don't:
        ("pypy-3.7", "pypy-3.7"),
        # These forms aren't currently handled:
        ("", ""),
        ("3", "3"),
        ("3.7.3", "3.7.3"),
    ],
)
def test_python_version_to_prerelease_spec(python, expected):
    """Only plain N.M CPython versions expand to a prerelease range spec;
    every other form is passed through unchanged."""
    assert python_version_to_prerelease_spec(python) == expected
@pytest.mark.parametrize(
    "implementation,version_info,extra_version_info,expected",
    [
        ("CPython", (2, 7, 12, "final", 0), None, "2.7.12"),
        ("CPython", (3, 11, 0, "alpha", 0), None, "3.11.0-alpha.0"),
        ("PyPy", (3, 8, 6, "final", 0), (3, 7, 0, "final", 0), "pypy-3.8.6-3.7.0"),
        ("Jython", (3, 4, 8, "final", 0), None, "jython-3.4.8"),
    ],
)
def test_interpreter_info_to_version(
    implementation, version_info, extra_version_info, expected, ignore_extra_kwargs
):
    """Non-CPython implementations get a lowercase prefix; PyPy also appends
    its extra (interpreter) version after the language version."""
    # (InterpreterInfo constructor has changed required kwargs
    # over time, in ways which aren't relevant to this plugin.)
    interpreter_info = ignore_extra_kwargs(
        InterpreterInfo,
        implementation=implementation,
        executable="n/a for this test",
        version_info=version_info,
        sysplatform="n/a for this test",
        is_64=False,  # n/a for this test
        os_sep="/",  # n/a for this test
        extra_version_info=extra_version_info,
    )
    assert interpreter_info_to_version(interpreter_info) == expected
@pytest.mark.parametrize(
    "info,expected",
    [
        ((2, 7, 12, "final", 0), "2.7.12"),
        ((3, 10, 4, "alpha", 0), "3.10.4-alpha.0"),
        ((3, 10, 4, "final", 3), "3.10.4-final.3"),
        ((3, 11, 0, "final", 0), "3.11.0"),
    ],
)
def test_format_version_info(info, expected):
    """sys.version_info-style tuples format as dotted versions; a non-final
    releaselevel or non-zero serial is appended as '-level.serial'."""
    assert format_version_info(info) == expected
build/lib/Xethru_radar/test_lib.py | Justgo13/sensor_library | 0 | 6620880 | import unittest
import Xethru_radar.X4_parser as parser
class TestParser(unittest.TestCase):
    """Tests for converting Xethru X4 radar .dat captures to .csv files."""

    def test_iq(self):
        """
        Method to test if .dat binary file was converted successfully to .csv file with in-phase and quadrature
        components together.
        :return:
            1
        """
        file_iq = parser.iq_data('X4data.dat')
        # Fixed typo: `asserEqual` raised AttributeError at runtime instead
        # of checking the parser's result.
        self.assertEqual(file_iq, 1)

    def test_raw(self):
        """
        Method to test if .dat binary file was converted successfully to .csv file with in-phase and quadrature
        component separated.
        :return:
            1
        """
        file_raw = parser.raw_data('X4data.dat')
        self.assertEqual(file_raw, 1)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
import Xethru_radar.X4_parser as parser
class TestParser(unittest.TestCase):
def test_iq(self):
"""
Method to test if .dat binary file was converted successfully to .csv file with in-phase and quadrature
components together.
:return:
1
"""
file_iq = parser.iq_data('X4data.dat')
self.asserEqual(file_iq,1)
def test_raw(self):
"""
Method to test if .dat binary file was converted successfully to .csv file with in-phase and quadrature
component separated.
:return:
1
"""
file_raw = parser.raw_data('X4data.dat')
self.asserEqual(file_raw,1)
if __name__ == '__main__':
unittest.main() | en | 0.940952 | Method to test if .dat binary file was converted successfully to .csv file with in-phase and quadrature components together. :return: 1 Method to test if .dat binary file was converted successfully to .csv file with in-phase and quadrature component separated. :return: 1 | 2.956188 | 3 |
karma/bot/management/commands/start_bot.py | justinas/upkarma | 2 | 6620881 | <filename>karma/bot/management/commands/start_bot.py
from django.core.management.base import BaseCommand, CommandError
import logging
import traceback
import sys
import time
from karma.bot.core import Bot
from karma.utils import get_redis_client
class Command(BaseCommand):
    """Management command that starts the karma Twitter bot."""

    def handle(self, *args, **options):
        """Run the bot from the `max_id` checkpoint stored in redis.

        Exits with status 0 on Ctrl-C, and with status 1 (after writing
        the traceback to stderr) on any other error.
        """
        log = logging.getLogger('karma.bot')
        red = get_redis_client()
        max_id = red.get('max_id')
        if not max_id:
            # TODO: assume 0 instead? dangerous
            raise CommandError('max_id is not set')
        max_id = int(max_id)
        try:
            bot = Bot()
            log.debug('start_bot starting')
            bot.run(max_id)
        except KeyboardInterrupt:
            sys.exit(0)
        except BaseException:
            # Fixed: print_exc() writes the traceback itself and returns
            # None; the old `print(traceback.print_exc(), ...)` printed a
            # spurious "None" and ignored the file= argument.
            traceback.print_exc(file=sys.stderr)
            sys.exit(1)
| <filename>karma/bot/management/commands/start_bot.py
from django.core.management.base import BaseCommand, CommandError
import logging
import traceback
import sys
import time
from karma.bot.core import Bot
from karma.utils import get_redis_client
class Command(BaseCommand):
def handle(self, *args, **options):
log = logging.getLogger('karma.bot')
red = get_redis_client()
max_id = red.get('max_id')
if not max_id:
# TODO: assume 0 instead? dangerous
raise CommandError('max_id is not set')
max_id = int(max_id)
try:
bot = Bot()
log.debug('start_bot starting')
bot.run(max_id)
except KeyboardInterrupt:
sys.exit(0)
except BaseException as e:
print(traceback.print_exc(), file=sys.stderr)
sys.exit(1)
| en | 0.396919 | # TODO: assume 0 instead? dangerous | 2.009487 | 2 |
Chapter 06 - Files and Exceptions/Assignments/6.1 Introduction to File Input and Output/51223.py | EllisBarnes00/COP-1000 | 0 | 6620882 | <reponame>EllisBarnes00/COP-1000
# Read a single integer datum from the raw data file.
# Rewritten to use a context manager (the file is always closed, even on a
# parse error) and to stop shadowing the builtin `input`; the garbled
# duplicated read-after-close lines are removed.
with open("rawdata", "r") as data_file:
    datum = int(data_file.read())
pylogging/log_levels.py | ansrivas/pylogging | 13 | 6620883 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module to set log level."""
import logging
from future.utils import raise_with_traceback as rwt
class LogLevel(object):
    """Maps log level names to the stdlib ``logging`` level constants."""

    # Supported level names (keys are upper-case).
    levels = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL
    }

    @staticmethod
    def get_level(level):
        """Return the logging constant for *level* (case-insensitive).

        Raises AttributeError (re-raised with traceback via ``rwt``) for
        unknown level names.
        """
        # Normalize once instead of recomputing .upper() three times, and
        # test membership on the dict directly rather than on .keys().
        key = level.upper()
        if key not in LogLevel.levels:
            rwt(AttributeError('Log level not found: {0}'.format(key)))
        return LogLevel.levels[key]
| # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module to set log level."""
import logging
from future.utils import raise_with_traceback as rwt
class LogLevel(object):
"""Log levels definition."""
levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
@staticmethod
def get_level(level):
"""Get log level from a string."""
if level.upper() not in LogLevel.levels.keys():
rwt(AttributeError('Log level not found: {0}'.format(level.upper())))
return LogLevel.levels[level.upper()]
| en | 0.707826 | # !/usr/bin/env python # -*- coding: utf-8 -*- Module to set log level. Log levels definition. Get log level from a string. | 2.751249 | 3 |
src/gam/utils.py | GAM-team/GAM | 102 | 6620884 | import datetime
import re
import sys
import time
from hashlib import md5
from html.entities import name2codepoint
from html.parser import HTMLParser
import importlib
import json
import dateutil.parser
import types
from gam import controlflow
from gam import fileutils
from gam import transport
from gam.var import *
class LazyLoader(types.ModuleType):
    """Defer importing a module until one of its attributes is first used.

    Large, rarely-needed modules (`contrib` and `ffmpeg` are the motivating
    examples) can be wrapped in a LazyLoader so their import cost is only
    paid when they are actually touched.
    """

    def __init__(self, local_name, parent_module_globals, name):  # pylint: disable=super-on-old-class
        self._local_name = local_name
        self._parent_module_globals = parent_module_globals
        super().__init__(name)

    def _load(self):
        """Perform the real import and splice the module into place."""
        real_module = importlib.import_module(self.__name__)
        # Replace this proxy with the real module in the parent namespace.
        self._parent_module_globals[self._local_name] = real_module
        # Copy the module namespace onto this proxy so anyone still holding
        # a reference gets fast lookups (__getattr__ only fires on misses).
        self.__dict__.update(real_module.__dict__)
        return real_module

    def __getattr__(self, item):
        return getattr(self._load(), item)

    def __dir__(self):
        return dir(self._load())
class _DeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__text = []
def handle_data(self, data):
self.__text.append(data)
def handle_charref(self, name):
self.__text.append(
chr(int(name[1:], 16)) if name.startswith('x') else chr(int(name)))
def handle_entityref(self, name):
cp = name2codepoint.get(name)
if cp:
self.__text.append(chr(cp))
else:
self.__text.append('&' + name)
def handle_starttag(self, tag, attrs):
if tag == 'p':
self.__text.append('\n\n')
elif tag == 'br':
self.__text.append('\n')
elif tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.__text.append(f'({attr[1]}) ')
break
elif tag == 'div':
if not attrs:
self.__text.append('\n')
elif tag in {'http:', 'https'}:
self.__text.append(f' ({tag}//{attrs[0][0]}) ')
def handle_startendtag(self, tag, attrs):
if tag == 'br':
self.__text.append('\n\n')
def text(self):
return re.sub(r'\n{2}\n+', '\n\n',
re.sub(r'\n +', '\n', ''.join(self.__text))).strip()
def commonprefix(m):
    """Return the longest string that is a prefix of every string in *m*."""
    # The lexicographic min and max differ the most; any prefix they share
    # is shared by every string in between.
    lo, hi = min(m), max(m)
    for idx, ch in enumerate(lo):
        if ch != hi[idx]:
            return lo[:idx]
    return lo
def dehtml(text):
    """Convert HTML markup to plain text; fall back to the input on error."""
    try:
        stripper = _DeHTMLParser()
        stripper.feed(str(text))
        stripper.close()
        return stripper.text()
    except:
        # Best-effort: report the failure but keep going with the raw text.
        from traceback import print_exc
        print_exc(file=sys.stderr)
        return text
def indentMultiLineText(message, n=0):
    """Indent every continuation line of *message* by *n* spaces."""
    continuation = '\n' + ' ' * n
    return message.replace('\n', continuation).rstrip()
def flatten_json(structure, key='', path='', flattened=None, listLimit=None):
    """Flatten nested dicts/lists into one dict with dotted keys.

    List elements use their index as the key component; when *listLimit*
    is set, at most that many elements per list are emitted.  The metadata
    keys 'kind', 'etag' and '@type' are dropped, and NEVER_TIME values are
    rewritten as 'Never'.
    """
    if flattened is None:
        flattened = {}
    child_path = '.'.join(part for part in [path, key] if part)
    if isinstance(structure, list):
        for index, element in enumerate(structure):
            if listLimit and (index >= listLimit):
                break
            flatten_json(element, f'{index}', child_path,
                         flattened=flattened, listLimit=listLimit)
    elif isinstance(structure, dict):
        for child_key, child_value in list(structure.items()):
            if child_key in ('kind', 'etag', '@type'):
                continue
            if child_value == NEVER_TIME:
                child_value = 'Never'
            flatten_json(child_value, child_key, child_path,
                         flattened=flattened, listLimit=listLimit)
    else:
        flattened[((path + '.') if path else '') + key] = structure
    return flattened
def formatTimestampYMD(timestamp):
    """Format a millisecond epoch timestamp as a local YYYY-MM-DD date."""
    moment = datetime.datetime.fromtimestamp(int(timestamp) / 1000)
    return moment.strftime('%Y-%m-%d')
def formatTimestampYMDHMS(timestamp):
    """Format a millisecond epoch timestamp as local 'YYYY-MM-DD HH:MM:SS'."""
    moment = datetime.datetime.fromtimestamp(int(timestamp) / 1000)
    return moment.strftime('%Y-%m-%d %H:%M:%S')
def formatTimestampYMDHMSF(timestamp):
    """Format a millisecond epoch timestamp via str(), keeping sub-seconds."""
    moment = datetime.datetime.fromtimestamp(int(timestamp) / 1000)
    return str(moment)
def formatFileSize(fileSize):
    """Render a byte count as a short human-readable size (kb/mb/gb)."""
    if fileSize == 0:
        return '0kb'
    if fileSize < ONE_KILO_BYTES:
        # Anything under 1kb is reported as 1kb rather than in bytes.
        return '1kb'
    for upper_bound, divisor, suffix in ((ONE_MEGA_BYTES, ONE_KILO_BYTES, 'kb'),
                                         (ONE_GIGA_BYTES, ONE_MEGA_BYTES, 'mb')):
        if fileSize < upper_bound:
            return f'{fileSize // divisor}{suffix}'
    return f'{fileSize // ONE_GIGA_BYTES}gb'
def formatMilliSeconds(millis):
    """Render a millisecond duration as 'HH:MM:SS' (milliseconds dropped)."""
    total_seconds = millis // 1000
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f'{hours:02d}:{minutes:02d}:{seconds:02d}'
def integerLimits(minVal, maxVal, item='integer'):
    """Describe an integer range as a human-readable constraint string."""
    has_min = minVal is not None
    has_max = maxVal is not None
    if has_min and has_max:
        return f'{item} {minVal}<=x<={maxVal}'
    if has_min:
        return f'{item} x>={minVal}'
    if has_max:
        return f'{item} x<={maxVal}'
    return f'{item} x'
def get_string(i, item, optional=False, minLen=1, maxLen=None):
    """Return sys.argv[i] when it satisfies the length constraints.

    Exits with an error message when the argument is missing, empty, or out
    of bounds — unless *optional* (or a zero *minLen* for empty values)
    allows returning ''.
    """
    if i >= len(sys.argv):
        if optional:
            return ''
        controlflow.system_error_exit(2, f'expected a <{item}>')
    argstr = sys.argv[i]
    if not argstr:
        if optional or (minLen == 0):
            return ''
        controlflow.system_error_exit(2, f'expected a Non-empty <{item}>')
    if len(argstr) >= minLen and (maxLen is None or len(argstr) <= maxLen):
        return argstr
    controlflow.system_error_exit(
        2,
        f'expected <{integerLimits(minLen, maxLen, "string length")} for {item}>'
    )
def get_delta(argstr, pattern):
    """Parse a signed offset like '+3d' into a timedelta.

    Returns None when *pattern* does not match.  Units: y(ears, as 365
    days), w(eeks), d(ays), h(ours), m(inutes).
    """
    match = pattern.match(argstr.lower())
    if match is None:
        return None
    sign = match.group(1)
    amount = int(match.group(2))
    unit = match.group(3)
    if unit == 'y':
        span = datetime.timedelta(days=365 * amount)
    elif unit == 'w':
        span = datetime.timedelta(weeks=amount)
    elif unit == 'd':
        span = datetime.timedelta(days=amount)
    elif unit == 'h':
        span = datetime.timedelta(hours=amount)
    elif unit == 'm':
        span = datetime.timedelta(minutes=amount)
    return -span if sign == '-' else span
def get_delta_date(argstr):
    """Parse a date offset (e.g. '+3d'); exit with an error on bad input."""
    delta = get_delta(argstr, DELTA_DATE_PATTERN)
    if delta is None:
        controlflow.system_error_exit(
            2, f'expected a <{DELTA_DATE_FORMAT_REQUIRED}>; got {argstr}')
    return delta
def get_delta_time(argstr):
    """Parse a time offset (e.g. '-4h'); exit with an error on bad input."""
    delta = get_delta(argstr, DELTA_TIME_PATTERN)
    if delta is None:
        controlflow.system_error_exit(
            2, f'expected a <{DELTA_TIME_FORMAT_REQUIRED}>; got {argstr}')
    return delta
def get_hhmm(argstr):
    """Validate and return an HH:MM time string; exit on bad/empty input."""
    argstr = argstr.strip()
    if not argstr:
        controlflow.system_error_exit(2, f'expected a <{HHMM_FORMAT_REQUIRED}>')
    try:
        # Parsing is only for validation; the original text is returned.
        datetime.datetime.strptime(argstr, HHMM_FORMAT)
        return argstr
    except ValueError:
        controlflow.system_error_exit(
            2, f'expected a <{HHMM_FORMAT_REQUIRED}>; got {argstr}')
def get_yyyymmdd(argstr, minLen=1, returnTimeStamp=False, returnDateTime=False):
    """Validate a YYYYMMDD-format date (or a +/- offset from today).

    Returns the normalized string by default, a millisecond timestamp when
    *returnTimeStamp* is set, or a datetime when *returnDateTime* is set;
    exits with an error on invalid input.
    """
    argstr = argstr.strip()
    if not argstr:
        if minLen == 0:
            return ''
        controlflow.system_error_exit(
            2, f'expected a <{YYYYMMDD_FORMAT_REQUIRED}>')
    if argstr[0] in ('+', '-'):
        # Relative offsets are resolved against today's midnight.
        today = datetime.date.today()
        base = datetime.datetime(today.year, today.month, today.day)
        argstr = (base + get_delta_date(argstr)).strftime(YYYYMMDD_FORMAT)
    try:
        parsed = datetime.datetime.strptime(argstr, YYYYMMDD_FORMAT)
    except ValueError:
        controlflow.system_error_exit(
            2, f'expected a <{YYYYMMDD_FORMAT_REQUIRED}>; got {argstr}')
    if returnTimeStamp:
        return time.mktime(parsed.timetuple()) * 1000
    if returnDateTime:
        return parsed
    return argstr
def get_time_or_delta_from_now(time_string):
    """Get an ISO 8601 time, 'never', or a +/- delta applied to utcnow.

    Args:
      time_string (string): e.g. '2017-09-01T12:34:56Z', '-4h' or 'never'.

    Returns:
      string: iso8601 formatted datetime in UTC (or the NEVER_TIME marker);
      exits with an error on empty input.
    """
    time_string = time_string.strip().upper()
    if not time_string:
        controlflow.system_error_exit(
            2, f'expected a <{YYYYMMDDTHHMMSS_FORMAT_REQUIRED}>')
    if time_string == 'NEVER':
        return NEVER_TIME
    if time_string[0] in ('+', '-'):
        return (datetime.datetime.utcnow() +
                get_delta_time(time_string)).isoformat() + 'Z'
    return time_string
def get_row_filter_date_or_delta_from_now(date_string):
    """Get an ISO 8601 date or a positive/negative delta applied to now.

    Args:
      date_string (string): The date or delta (e.g. '2017-09-01' or '-4y')

    Returns:
      tuple: (True, iso8601 UTC datetime string) on success,
             (False, required-format description) on failure.
    """
    date_string = date_string.strip().upper()
    if date_string:
        if date_string[0] in ('+', '-'):
            offset = get_delta(date_string, DELTA_DATE_PATTERN)
            if offset is None:
                return (False, DELTA_DATE_FORMAT_REQUIRED)
            today = datetime.date.today()
            midnight = datetime.datetime(today.year, today.month, today.day)
            return (True, (midnight + offset).isoformat() + 'Z')
        try:
            parsed = dateutil.parser.parse(date_string, ignoretz=True)
            # Keep only the date portion, normalized to midnight.
            normalized = datetime.datetime(parsed.year, parsed.month,
                                           parsed.day)
            return (True, normalized.isoformat() + 'Z')
        except ValueError:
            pass
    return (False, YYYYMMDD_FORMAT_REQUIRED)
def get_row_filter_time_or_delta_from_now(time_string):
    """Get an ISO 8601 time or a positive/negative delta applied to now.

    Args:
      time_string (string): e.g. '2017-09-01T12:34:56Z' or '-4h'

    Returns:
      tuple: (True, iso8601 UTC datetime string) on success,
             (False, required-format description) on failure.
    """
    time_string = time_string.strip().upper()
    if time_string:
        if time_string[0] in ('+', '-'):
            offset = get_delta(time_string, DELTA_TIME_PATTERN)
            if offset is None:
                return (False, DELTA_TIME_FORMAT_REQUIRED)
            return (True,
                    (datetime.datetime.utcnow() + offset).isoformat() + 'Z')
        try:
            parsed = dateutil.parser.parse(time_string, ignoretz=True)
            return (True, parsed.isoformat() + 'Z')
        except ValueError:
            pass
    return (False, YYYYMMDDTHHMMSS_FORMAT_REQUIRED)
def get_date_zero_time_or_full_time(time_string):
    """Accept a bare date (expanded to midnight UTC) or a full timestamp."""
    time_string = time_string.strip()
    if not time_string:
        controlflow.system_error_exit(
            2, f'expected a <{YYYYMMDDTHHMMSS_FORMAT_REQUIRED}>')
    if YYYYMMDD_PATTERN.match(time_string):
        return get_yyyymmdd(time_string) + 'T00:00:00.000Z'
    return get_time_or_delta_from_now(time_string)
def md5_matches_file(local_file, expected_md5, exitOnError):
    """Check a file's MD5 digest against *expected_md5*.

    Exits with an error when *exitOnError* is set and the digests differ;
    otherwise returns whether they match.
    """
    digest = md5()
    handle = fileutils.open_file(local_file, 'rb')
    # Stream the file in 4 KiB chunks to keep memory use flat.
    for block in iter(lambda: handle.read(4096), b''):
        digest.update(block)
    actual_hash = digest.hexdigest()
    if exitOnError and actual_hash != expected_md5:
        controlflow.system_error_exit(
            6, f'actual hash was {actual_hash}. Exiting on corrupt file.')
    return actual_hash == expected_md5
URL_SHORTENER_ENDPOINT = 'https://gam-shortn.appspot.com/create'


def shorten_url(long_url, httpc=None):
    """Shorten *long_url* via the GAM shortener service.

    Any transport failure, non-200 response, or unparseable body simply
    falls back to returning the original URL; shortening is also skipped
    entirely when the no-short-URLs option is set.
    """
    if GC_Defaults[GC_NO_SHORT_URLS]:
        return long_url
    if not httpc:
        httpc = transport.create_http(timeout=10)
    request_headers = {'Content-Type': 'application/json',
                       'User-Agent': GAM_INFO}
    try:
        request_body = json.dumps({'long_url': long_url})
        resp, content = httpc.request(URL_SHORTENER_ENDPOINT, 'POST',
                                      request_body, headers=request_headers)
    except:
        return long_url
    if resp.status != 200:
        return long_url
    try:
        if isinstance(content, bytes):
            content = content.decode()
        return json.loads(content).get('short_url', long_url)
    except:
        return long_url
| import datetime
import re
import sys
import time
from hashlib import md5
from html.entities import name2codepoint
from html.parser import HTMLParser
import importlib
import json
import dateutil.parser
import types
from gam import controlflow
from gam import fileutils
from gam import transport
from gam.var import *
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies.
`contrib`, and `ffmpeg` are examples of modules that are large and not always
needed, and this allows them to only be loaded when they are used.
"""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super().__init__(name)
def _load(self):
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
class _DeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__text = []
def handle_data(self, data):
self.__text.append(data)
def handle_charref(self, name):
self.__text.append(
chr(int(name[1:], 16)) if name.startswith('x') else chr(int(name)))
def handle_entityref(self, name):
cp = name2codepoint.get(name)
if cp:
self.__text.append(chr(cp))
else:
self.__text.append('&' + name)
def handle_starttag(self, tag, attrs):
if tag == 'p':
self.__text.append('\n\n')
elif tag == 'br':
self.__text.append('\n')
elif tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.__text.append(f'({attr[1]}) ')
break
elif tag == 'div':
if not attrs:
self.__text.append('\n')
elif tag in {'http:', 'https'}:
self.__text.append(f' ({tag}//{attrs[0][0]}) ')
def handle_startendtag(self, tag, attrs):
if tag == 'br':
self.__text.append('\n\n')
def text(self):
return re.sub(r'\n{2}\n+', '\n\n',
re.sub(r'\n +', '\n', ''.join(self.__text))).strip()
def commonprefix(m):
'''Given a list of strings m, return string which is prefix common to all'''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def dehtml(text):
try:
parser = _DeHTMLParser()
parser.feed(str(text))
parser.close()
return parser.text()
except:
from traceback import print_exc
print_exc(file=sys.stderr)
return text
def indentMultiLineText(message, n=0):
return message.replace('\n', f"\n{' ' * n}").rstrip()
def flatten_json(structure, key='', path='', flattened=None, listLimit=None):
if flattened is None:
flattened = {}
if not isinstance(structure, (dict, list)):
flattened[((path + '.') if path else '') + key] = structure
elif isinstance(structure, list):
for i, item in enumerate(structure):
if listLimit and (i >= listLimit):
break
flatten_json(item,
f'{i}',
'.'.join([item for item in [path, key] if item]),
flattened=flattened,
listLimit=listLimit)
else:
for new_key, value in list(structure.items()):
if new_key in ['kind', 'etag', '@type']:
continue
if value == NEVER_TIME:
value = 'Never'
flatten_json(value,
new_key,
'.'.join([item for item in [path, key] if item]),
flattened=flattened,
listLimit=listLimit)
return flattened
def formatTimestampYMD(timestamp):
return datetime.datetime.fromtimestamp(int(timestamp) /
1000).strftime('%Y-%m-%d')
def formatTimestampYMDHMS(timestamp):
return datetime.datetime.fromtimestamp(int(timestamp) /
1000).strftime('%Y-%m-%d %H:%M:%S')
def formatTimestampYMDHMSF(timestamp):
return str(datetime.datetime.fromtimestamp(int(timestamp) / 1000))
def formatFileSize(fileSize):
if fileSize == 0:
return '0kb'
if fileSize < ONE_KILO_BYTES:
return '1kb'
if fileSize < ONE_MEGA_BYTES:
return f'{fileSize // ONE_KILO_BYTES}kb'
if fileSize < ONE_GIGA_BYTES:
return f'{fileSize // ONE_MEGA_BYTES}mb'
return f'{fileSize // ONE_GIGA_BYTES}gb'
def formatMilliSeconds(millis):
seconds, millis = divmod(millis, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'{hours:02d}:{minutes:02d}:{seconds:02d}'
def integerLimits(minVal, maxVal, item='integer'):
if (minVal is not None) and (maxVal is not None):
return f'{item} {minVal}<=x<={maxVal}'
if minVal is not None:
return f'{item} x>={minVal}'
if maxVal is not None:
return f'{item} x<={maxVal}'
return f'{item} x'
def get_string(i, item, optional=False, minLen=1, maxLen=None):
if i < len(sys.argv):
argstr = sys.argv[i]
if argstr:
if (len(argstr) >= minLen) and ((maxLen is None) or
(len(argstr) <= maxLen)):
return argstr
controlflow.system_error_exit(
2,
f'expected <{integerLimits(minLen, maxLen, "string length")} for {item}>'
)
if optional or (minLen == 0):
return ''
controlflow.system_error_exit(2, f'expected a Non-empty <{item}>')
elif optional:
return ''
controlflow.system_error_exit(2, f'expected a <{item}>')
def get_delta(argstr, pattern):
tg = pattern.match(argstr.lower())
if tg is None:
return None
sign = tg.group(1)
delta = int(tg.group(2))
unit = tg.group(3)
if unit == 'y':
deltaTime = datetime.timedelta(days=delta * 365)
elif unit == 'w':
deltaTime = datetime.timedelta(weeks=delta)
elif unit == 'd':
deltaTime = datetime.timedelta(days=delta)
elif unit == 'h':
deltaTime = datetime.timedelta(hours=delta)
elif unit == 'm':
deltaTime = datetime.timedelta(minutes=delta)
if sign == '-':
return -deltaTime
return deltaTime
def get_delta_date(argstr):
deltaDate = get_delta(argstr, DELTA_DATE_PATTERN)
if deltaDate is None:
controlflow.system_error_exit(
2, f'expected a <{DELTA_DATE_FORMAT_REQUIRED}>; got {argstr}')
return deltaDate
def get_delta_time(argstr):
deltaTime = get_delta(argstr, DELTA_TIME_PATTERN)
if deltaTime is None:
controlflow.system_error_exit(
2, f'expected a <{DELTA_TIME_FORMAT_REQUIRED}>; got {argstr}')
return deltaTime
def get_hhmm(argstr):
argstr = argstr.strip()
if argstr:
try:
dateTime = datetime.datetime.strptime(argstr, HHMM_FORMAT)
return argstr
except ValueError:
controlflow.system_error_exit(
2, f'expected a <{HHMM_FORMAT_REQUIRED}>; got {argstr}')
controlflow.system_error_exit(2, f'expected a <{HHMM_FORMAT_REQUIRED}>')
def get_yyyymmdd(argstr, minLen=1, returnTimeStamp=False, returnDateTime=False):
argstr = argstr.strip()
if argstr:
if argstr[0] in ['+', '-']:
today = datetime.date.today()
argstr = (datetime.datetime(today.year, today.month, today.day) +
get_delta_date(argstr)).strftime(YYYYMMDD_FORMAT)
try:
dateTime = datetime.datetime.strptime(argstr, YYYYMMDD_FORMAT)
if returnTimeStamp:
return time.mktime(dateTime.timetuple()) * 1000
if returnDateTime:
return dateTime
return argstr
except ValueError:
controlflow.system_error_exit(
2, f'expected a <{YYYYMMDD_FORMAT_REQUIRED}>; got {argstr}')
elif minLen == 0:
return ''
controlflow.system_error_exit(2, f'expected a <{YYYYMMDD_FORMAT_REQUIRED}>')
def get_time_or_delta_from_now(time_string):
"""Get an ISO 8601 time or a positive/negative delta applied to now.
Args:
time_string (string): The time or delta (e.g. '2017-09-01T12:34:56Z' or '-4h') or never
Returns:
string: iso8601 formatted datetime in UTC.
"""
time_string = time_string.strip().upper()
if time_string:
if time_string == 'NEVER':
return NEVER_TIME
if time_string[0] not in ['+', '-']:
return time_string
return (datetime.datetime.utcnow() +
get_delta_time(time_string)).isoformat() + 'Z'
controlflow.system_error_exit(
2, f'expected a <{YYYYMMDDTHHMMSS_FORMAT_REQUIRED}>')
def get_row_filter_date_or_delta_from_now(date_string):
"""Get an ISO 8601 date or a positive/negative delta applied to now.
Args:
date_string (string): The time or delta (e.g. '2017-09-01' or '-4y')
Returns:
string: iso8601 formatted datetime in UTC.
"""
date_string = date_string.strip().upper()
if date_string:
if date_string[0] in ['+', '-']:
deltaDate = get_delta(date_string, DELTA_DATE_PATTERN)
if deltaDate is None:
return (False, DELTA_DATE_FORMAT_REQUIRED)
today = datetime.date.today()
return (True,
(datetime.datetime(today.year, today.month, today.day) +
deltaDate).isoformat() + 'Z')
try:
deltaDate = dateutil.parser.parse(date_string, ignoretz=True)
return (True,
datetime.datetime(deltaDate.year, deltaDate.month,
deltaDate.day).isoformat() + 'Z')
except ValueError:
pass
return (False, YYYYMMDD_FORMAT_REQUIRED)
def get_row_filter_time_or_delta_from_now(time_string):
"""Get an ISO 8601 time or a positive/negative delta applied to now.
Args:
time_string (string): The time or delta (e.g. '2017-09-01T12:34:56Z' or '-4h')
Returns:
string: iso8601 formatted datetime in UTC.
Exits:
2: Not a valid delta.
"""
time_string = time_string.strip().upper()
if time_string:
if time_string[0] in ['+', '-']:
deltaTime = get_delta(time_string, DELTA_TIME_PATTERN)
if deltaTime is None:
return (False, DELTA_TIME_FORMAT_REQUIRED)
return (True,
(datetime.datetime.utcnow() + deltaTime).isoformat() + 'Z')
try:
deltaTime = dateutil.parser.parse(time_string, ignoretz=True)
return (True, deltaTime.isoformat() + 'Z')
except ValueError:
pass
return (False, YYYYMMDDTHHMMSS_FORMAT_REQUIRED)
def get_date_zero_time_or_full_time(time_string):
time_string = time_string.strip()
if time_string:
if YYYYMMDD_PATTERN.match(time_string):
return get_yyyymmdd(time_string) + 'T00:00:00.000Z'
return get_time_or_delta_from_now(time_string)
controlflow.system_error_exit(
2, f'expected a <{YYYYMMDDTHHMMSS_FORMAT_REQUIRED}>')
def md5_matches_file(local_file, expected_md5, exitOnError):
f = fileutils.open_file(local_file, 'rb')
hash_md5 = md5()
for chunk in iter(lambda: f.read(4096), b''):
hash_md5.update(chunk)
actual_hash = hash_md5.hexdigest()
if exitOnError and actual_hash != expected_md5:
controlflow.system_error_exit(
6, f'actual hash was {actual_hash}. Exiting on corrupt file.')
return actual_hash == expected_md5
URL_SHORTENER_ENDPOINT = 'https://gam-shortn.appspot.com/create'
def shorten_url(long_url, httpc=None):
if GC_Defaults[GC_NO_SHORT_URLS]:
return long_url
if not httpc:
httpc = transport.create_http(timeout=10)
headers = {'Content-Type': 'application/json', 'User-Agent': GAM_INFO}
try:
payload = json.dumps({'long_url': long_url})
resp, content = httpc.request(URL_SHORTENER_ENDPOINT,
'POST',
payload,
headers=headers)
except:
return long_url
if resp.status != 200:
return long_url
try:
if isinstance(content, bytes):
content = content.decode()
return json.loads(content).get('short_url', long_url)
except:
return long_url
| en | 0.724253 | Lazily import a module, mainly to avoid pulling in large dependencies. `contrib`, and `ffmpeg` are examples of modules that are large and not always needed, and this allows them to only be loaded when they are used. # The lint error here is incorrect. # pylint: disable=super-on-old-class # Import the target module and insert it into the parent's namespace # Update this object's dict so that if someone keeps a reference to the # LazyLoader, lookups are efficient (__getattr__ is only called on lookups # that fail). Given a list of strings m, return string which is prefix common to all Get an ISO 8601 time or a positive/negative delta applied to now. Args: time_string (string): The time or delta (e.g. '2017-09-01T12:34:56Z' or '-4h') or never Returns: string: iso8601 formatted datetime in UTC. Get an ISO 8601 date or a positive/negative delta applied to now. Args: date_string (string): The time or delta (e.g. '2017-09-01' or '-4y') Returns: string: iso8601 formatted datetime in UTC. Get an ISO 8601 time or a positive/negative delta applied to now. Args: time_string (string): The time or delta (e.g. '2017-09-01T12:34:56Z' or '-4h') Returns: string: iso8601 formatted datetime in UTC. Exits: 2: Not a valid delta. | 2.490111 | 2 |
Lib/site-packages/qutebrowser/qutebrowser.py | fochoao/cpython | 0 | 6620885 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 <NAME> (The Compiler) <<EMAIL>>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Early initialization and main entry point.
qutebrowser's initialization process roughly looks like this:
- This file gets imported, either via the setuptools entry point or
__main__.py.
- At import time, we check for the correct Python version and show an error if
it's too old.
- The main() function in this file gets invoked
- Argument parsing takes place
- earlyinit.early_init() gets invoked to do various low-level initialization
and checks whether all dependencies are met.
- app.run() gets called, which takes over.
See the docstring of app.py for details.
"""
import sys
import json
import qutebrowser
# Resolve check_python_version regardless of how the program was started:
# regular package import first, then a relative import fallback.
try:
    from qutebrowser.misc.checkpyver import check_python_version
except ImportError:
    try:
        # python2
        from .misc.checkpyver import check_python_version
    except (SystemError, ValueError):
        # Import without module - SystemError on Python3, ValueError (?!?) on
        # Python2
        sys.stderr.write("Please don't run this script directly, do something "
                         "like python3 -m qutebrowser instead.\n")
        sys.stderr.flush()
        sys.exit(100)
# Bail out before any further imports if the interpreter is too old.
check_python_version()
import argparse # pylint: disable=wrong-import-order
from qutebrowser.misc import earlyinit
def get_argparser():
    """Build and return the argparse parser for the qutebrowser CLI.

    Returns:
        An argparse.ArgumentParser with all supported options, including
        hidden internal options (help=argparse.SUPPRESS) used when
        qutebrowser restarts itself.
    """
    parser = argparse.ArgumentParser(prog='qutebrowser',
                                     description=qutebrowser.__description__)
    parser.add_argument('-B', '--basedir', help="Base directory for all "
                        "storage.")
    parser.add_argument('-C', '--config-py', help="Path to config.py.",
                        metavar='CONFIG')
    parser.add_argument('-V', '--version', help="Show version and quit.",
                        action='store_true')
    parser.add_argument('-s', '--set', help="Set a temporary setting for "
                        "this session.", nargs=2, action='append',
                        dest='temp_settings', default=[],
                        metavar=('OPTION', 'VALUE'))
    parser.add_argument('-r', '--restore', help="Restore a named session.",
                        dest='session')
    parser.add_argument('-R', '--override-restore', help="Don't restore a "
                        "session even if one would be restored.",
                        action='store_true')
    parser.add_argument('--target', choices=['auto', 'tab', 'tab-bg',
                                             'tab-silent', 'tab-bg-silent',
                                             'window', 'private-window'],
                        help="How URLs should be opened if there is already a "
                        "qutebrowser instance running.")
    parser.add_argument('--backend', choices=['webkit', 'webengine'],
                        help="Which backend to use.")
    parser.add_argument('--desktop-file-name',
                        default="org.qutebrowser.qutebrowser",
                        help="Set the base name of the desktop entry for this "
                        "application. Used to set the app_id under Wayland. See "
                        "https://doc.qt.io/qt-5/qguiapplication.html#desktopFileName-prop")
    # --json-args is internal: on restart, qutebrowser passes its serialized
    # argparse namespace back to itself through this hidden option.
    parser.add_argument('--json-args', help=argparse.SUPPRESS)
    parser.add_argument('--temp-basedir-restarted',
                        help=argparse.SUPPRESS,
                        action='store_true')
    # WORKAROUND to be able to restart from older qutebrowser versions into this one.
    # Should be removed at some point.
    parser.add_argument('--enable-webengine-inspector',
                        help=argparse.SUPPRESS,
                        action='store_true')
    debug = parser.add_argument_group('debug arguments')
    debug.add_argument('-l', '--loglevel', dest='loglevel',
                       help="Override the configured console loglevel",
                       choices=['critical', 'error', 'warning', 'info',
                                'debug', 'vdebug'])
    debug.add_argument('--logfilter', type=logfilter_error,
                       help="Comma-separated list of things to be logged "
                       "to the debug log on stdout.")
    debug.add_argument('--loglines',
                       help="How many lines of the debug log to keep in RAM "
                       "(-1: unlimited).",
                       default=2000, type=int)
    debug.add_argument('-d', '--debug', help="Turn on debugging options.",
                       action='store_true')
    debug.add_argument('--json-logging', action='store_true', help="Output log"
                       " lines in JSON format (one object per line).")
    debug.add_argument('--nocolor', help="Turn off colored logging.",
                       action='store_false', dest='color')
    debug.add_argument('--force-color', help="Force colored logging",
                       action='store_true')
    debug.add_argument('--nowindow', action='store_true', help="Don't show "
                       "the main window.")
    debug.add_argument('-T', '--temp-basedir', action='store_true', help="Use "
                       "a temporary basedir.")
    debug.add_argument('--no-err-windows', action='store_true', help="Don't "
                       "show any error windows (used for tests/smoke.py).")
    debug.add_argument('--qt-arg', help="Pass an argument with a value to Qt. "
                       "For example, you can do "
                       "`--qt-arg geometry 650x555+200+300` to set the window "
                       "geometry.", nargs=2, metavar=('NAME', 'VALUE'),
                       action='append')
    debug.add_argument('--qt-flag', help="Pass an argument to Qt as flag.",
                       nargs=1, action='append')
    debug.add_argument('-D', '--debug-flag', type=debug_flag_error,
                       default=[], help="Pass name of debugging feature to be"
                       " turned on.", action='append', dest='debug_flags')
    parser.add_argument('command', nargs='*', help="Commands to execute on "
                        "startup.", metavar=':command')
    # URLs will actually be in command
    parser.add_argument('url', nargs='*', help="URLs to open on startup "
                        "(empty as a window separator).")
    return parser
def directory(arg):
    """Argparse type callback validating a non-empty directory argument.

    Args:
        arg: The raw command line value.

    Returns:
        The validated value, unchanged.

    Raises:
        argparse.ArgumentTypeError: If the value is empty.
    """
    if not arg:
        raise argparse.ArgumentTypeError("Invalid empty value")
    # Bug fix: an argparse 'type' callable must return the converted value.
    # Previously this fell off the end and returned None, silently
    # discarding any non-empty argument.
    return arg
def logfilter_error(logfilter):
    """Argparse type callback for --logfilter.

    Args:
        logfilter: A comma separated list of logger names.

    Returns:
        The unchanged filter string, if it parses successfully.

    Raises:
        argparse.ArgumentTypeError: If the filter specification is invalid.
    """
    from qutebrowser.utils import log
    try:
        log.LogFilter.parse(logfilter)
    except log.InvalidLogFilterError as err:
        raise argparse.ArgumentTypeError(err)
    else:
        return logfilter
def debug_flag_error(flag):
    """Argparse type callback validating a --debug-flag value.

    Available flags:
        debug-exit: Turn on debugging of late exit.
        pdb-postmortem: Drop into pdb on exceptions.
        no-sql-history: Don't store history items.
        no-scroll-filtering: Process all scrolling updates.
        log-requests: Log all network requests.
        log-cookies: Log cookies in cookie filter.
        log-scroll-pos: Log all scrolling changes.
        log-sensitive-keys: Log keypresses in passthrough modes.
        stack: Enable Chromium stack logging.
        chromium: Enable Chromium logging.
        wait-renderer-process: Wait for debugger in renderer process.
        avoid-chromium-init: Enable `--version` without initializing Chromium.
        werror: Turn Python warnings into errors.
        test-notification-service: Use the testing libnotify service.
    """
    # Order matters for the error message below, so keep a sequence.
    valid_flags = ('debug-exit', 'pdb-postmortem', 'no-sql-history',
                   'no-scroll-filtering', 'log-requests', 'log-cookies',
                   'log-scroll-pos', 'log-sensitive-keys', 'stack', 'chromium',
                   'wait-renderer-process', 'avoid-chromium-init', 'werror',
                   'test-notification-service')
    # Guard clause: reject unknown flags up front, then return the flag as-is.
    if flag not in valid_flags:
        raise argparse.ArgumentTypeError("Invalid debug flag - valid flags: {}"
                                         .format(', '.join(valid_flags)))
    return flag
def _unpack_json_args(args):
"""Restore arguments from --json-args after a restart.
When restarting, we serialize the argparse namespace into json, and
construct a "fake" argparse.Namespace here based on the data loaded
from json.
"""
new_args = vars(args)
data = json.loads(args.json_args)
new_args.update(data)
return argparse.Namespace(**new_args)
def main():
    """Parse the command line, run early init and hand over to app.run().

    Returns:
        The application's exit status, as returned by app.run().
    """
    parser = get_argparser()
    argv = sys.argv[1:]
    args = parser.parse_args(argv)
    if args.json_args is not None:
        # We were restarted by an earlier qutebrowser process: restore the
        # original argparse namespace from the JSON blob in --json-args.
        args = _unpack_json_args(args)
    earlyinit.early_init(args)
    # We do this imports late as earlyinit needs to be run first (because of
    # version checking and other early initialization)
    from qutebrowser import app
    return app.run(args)
| # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 <NAME> (The Compiler) <<EMAIL>>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Early initialization and main entry point.
qutebrowser's initialization process roughly looks like this:
- This file gets imported, either via the setuptools entry point or
__main__.py.
- At import time, we check for the correct Python version and show an error if
it's too old.
- The main() function in this file gets invoked
- Argument parsing takes place
- earlyinit.early_init() gets invoked to do various low-level initialization
and checks whether all dependencies are met.
- app.run() gets called, which takes over.
See the docstring of app.py for details.
"""
import sys
import json
import qutebrowser
try:
from qutebrowser.misc.checkpyver import check_python_version
except ImportError:
try:
# python2
from .misc.checkpyver import check_python_version
except (SystemError, ValueError):
# Import without module - SystemError on Python3, ValueError (?!?) on
# Python2
sys.stderr.write("Please don't run this script directly, do something "
"like python3 -m qutebrowser instead.\n")
sys.stderr.flush()
sys.exit(100)
check_python_version()
import argparse # pylint: disable=wrong-import-order
from qutebrowser.misc import earlyinit
def get_argparser():
    """Build and return the argparse parser for the qutebrowser CLI.

    Returns:
        An argparse.ArgumentParser with all supported options, including
        hidden internal options (help=argparse.SUPPRESS) used when
        qutebrowser restarts itself.
    """
    parser = argparse.ArgumentParser(prog='qutebrowser',
                                     description=qutebrowser.__description__)
    parser.add_argument('-B', '--basedir', help="Base directory for all "
                        "storage.")
    parser.add_argument('-C', '--config-py', help="Path to config.py.",
                        metavar='CONFIG')
    parser.add_argument('-V', '--version', help="Show version and quit.",
                        action='store_true')
    parser.add_argument('-s', '--set', help="Set a temporary setting for "
                        "this session.", nargs=2, action='append',
                        dest='temp_settings', default=[],
                        metavar=('OPTION', 'VALUE'))
    parser.add_argument('-r', '--restore', help="Restore a named session.",
                        dest='session')
    parser.add_argument('-R', '--override-restore', help="Don't restore a "
                        "session even if one would be restored.",
                        action='store_true')
    parser.add_argument('--target', choices=['auto', 'tab', 'tab-bg',
                                             'tab-silent', 'tab-bg-silent',
                                             'window', 'private-window'],
                        help="How URLs should be opened if there is already a "
                        "qutebrowser instance running.")
    parser.add_argument('--backend', choices=['webkit', 'webengine'],
                        help="Which backend to use.")
    parser.add_argument('--desktop-file-name',
                        default="org.qutebrowser.qutebrowser",
                        help="Set the base name of the desktop entry for this "
                        "application. Used to set the app_id under Wayland. See "
                        "https://doc.qt.io/qt-5/qguiapplication.html#desktopFileName-prop")
    # --json-args is internal: on restart, qutebrowser passes its serialized
    # argparse namespace back to itself through this hidden option.
    parser.add_argument('--json-args', help=argparse.SUPPRESS)
    parser.add_argument('--temp-basedir-restarted',
                        help=argparse.SUPPRESS,
                        action='store_true')
    # WORKAROUND to be able to restart from older qutebrowser versions into this one.
    # Should be removed at some point.
    parser.add_argument('--enable-webengine-inspector',
                        help=argparse.SUPPRESS,
                        action='store_true')
    debug = parser.add_argument_group('debug arguments')
    debug.add_argument('-l', '--loglevel', dest='loglevel',
                       help="Override the configured console loglevel",
                       choices=['critical', 'error', 'warning', 'info',
                                'debug', 'vdebug'])
    debug.add_argument('--logfilter', type=logfilter_error,
                       help="Comma-separated list of things to be logged "
                       "to the debug log on stdout.")
    debug.add_argument('--loglines',
                       help="How many lines of the debug log to keep in RAM "
                       "(-1: unlimited).",
                       default=2000, type=int)
    debug.add_argument('-d', '--debug', help="Turn on debugging options.",
                       action='store_true')
    debug.add_argument('--json-logging', action='store_true', help="Output log"
                       " lines in JSON format (one object per line).")
    debug.add_argument('--nocolor', help="Turn off colored logging.",
                       action='store_false', dest='color')
    debug.add_argument('--force-color', help="Force colored logging",
                       action='store_true')
    debug.add_argument('--nowindow', action='store_true', help="Don't show "
                       "the main window.")
    debug.add_argument('-T', '--temp-basedir', action='store_true', help="Use "
                       "a temporary basedir.")
    debug.add_argument('--no-err-windows', action='store_true', help="Don't "
                       "show any error windows (used for tests/smoke.py).")
    debug.add_argument('--qt-arg', help="Pass an argument with a value to Qt. "
                       "For example, you can do "
                       "`--qt-arg geometry 650x555+200+300` to set the window "
                       "geometry.", nargs=2, metavar=('NAME', 'VALUE'),
                       action='append')
    debug.add_argument('--qt-flag', help="Pass an argument to Qt as flag.",
                       nargs=1, action='append')
    debug.add_argument('-D', '--debug-flag', type=debug_flag_error,
                       default=[], help="Pass name of debugging feature to be"
                       " turned on.", action='append', dest='debug_flags')
    parser.add_argument('command', nargs='*', help="Commands to execute on "
                        "startup.", metavar=':command')
    # URLs will actually be in command
    parser.add_argument('url', nargs='*', help="URLs to open on startup "
                        "(empty as a window separator).")
    return parser
def directory(arg):
    """Argparse type callback validating a non-empty directory argument.

    Args:
        arg: The raw command line value.

    Returns:
        The validated value, unchanged.

    Raises:
        argparse.ArgumentTypeError: If the value is empty.
    """
    if not arg:
        raise argparse.ArgumentTypeError("Invalid empty value")
    # Bug fix: an argparse 'type' callable must return the converted value.
    # Previously this fell off the end and returned None, silently
    # discarding any non-empty argument.
    return arg
def logfilter_error(logfilter):
    """Argparse type callback for --logfilter.

    Args:
        logfilter: A comma separated list of logger names.

    Returns:
        The unchanged filter string, if it parses successfully.

    Raises:
        argparse.ArgumentTypeError: If the filter specification is invalid.
    """
    from qutebrowser.utils import log
    try:
        log.LogFilter.parse(logfilter)
    except log.InvalidLogFilterError as err:
        raise argparse.ArgumentTypeError(err)
    else:
        return logfilter
def debug_flag_error(flag):
    """Argparse type callback validating a --debug-flag value.

    Available flags:
        debug-exit: Turn on debugging of late exit.
        pdb-postmortem: Drop into pdb on exceptions.
        no-sql-history: Don't store history items.
        no-scroll-filtering: Process all scrolling updates.
        log-requests: Log all network requests.
        log-cookies: Log cookies in cookie filter.
        log-scroll-pos: Log all scrolling changes.
        log-sensitive-keys: Log keypresses in passthrough modes.
        stack: Enable Chromium stack logging.
        chromium: Enable Chromium logging.
        wait-renderer-process: Wait for debugger in renderer process.
        avoid-chromium-init: Enable `--version` without initializing Chromium.
        werror: Turn Python warnings into errors.
        test-notification-service: Use the testing libnotify service.
    """
    # Order matters for the error message below, so keep a sequence.
    valid_flags = ('debug-exit', 'pdb-postmortem', 'no-sql-history',
                   'no-scroll-filtering', 'log-requests', 'log-cookies',
                   'log-scroll-pos', 'log-sensitive-keys', 'stack', 'chromium',
                   'wait-renderer-process', 'avoid-chromium-init', 'werror',
                   'test-notification-service')
    # Guard clause: reject unknown flags up front, then return the flag as-is.
    if flag not in valid_flags:
        raise argparse.ArgumentTypeError("Invalid debug flag - valid flags: {}"
                                         .format(', '.join(valid_flags)))
    return flag
def _unpack_json_args(args):
"""Restore arguments from --json-args after a restart.
When restarting, we serialize the argparse namespace into json, and
construct a "fake" argparse.Namespace here based on the data loaded
from json.
"""
new_args = vars(args)
data = json.loads(args.json_args)
new_args.update(data)
return argparse.Namespace(**new_args)
def main():
    """Parse the command line, run early init and hand over to app.run().

    Returns:
        The application's exit status, as returned by app.run().
    """
    parser = get_argparser()
    argv = sys.argv[1:]
    args = parser.parse_args(argv)
    if args.json_args is not None:
        # We were restarted by an earlier qutebrowser process: restore the
        # original argparse namespace from the JSON blob in --json-args.
        args = _unpack_json_args(args)
    earlyinit.early_init(args)
    # We do this imports late as earlyinit needs to be run first (because of
    # version checking and other early initialization)
    from qutebrowser import app
    return app.run(args)
| en | 0.76171 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2021 <NAME> (The Compiler) <<EMAIL>> # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <https://www.gnu.org/licenses/>. Early initialization and main entry point. qutebrowser's initialization process roughly looks like this: - This file gets imported, either via the setuptools entry point or __main__.py. - At import time, we check for the correct Python version and show an error if it's too old. - The main() function in this file gets invoked - Argument parsing takes place - earlyinit.early_init() gets invoked to do various low-level initialization and checks whether all dependencies are met. - app.run() gets called, which takes over. See the docstring of app.py for details. # python2 # Import without module - SystemError on Python3, ValueError (?!?) on # Python2 # pylint: disable=wrong-import-order Get the argparse parser. #desktopFileName-prop") # WORKAROUND to be able to restart from older qutebrowser versions into this one. # Should be removed at some point. # URLs will actually be in command Validate logger names passed to --logfilter. Args: logfilter: A comma separated list of logger names. Validate flags passed to --debug-flag. Available flags: debug-exit: Turn on debugging of late exit. pdb-postmortem: Drop into pdb on exceptions. no-sql-history: Don't store history items. 
no-scroll-filtering: Process all scrolling updates. log-requests: Log all network requests. log-cookies: Log cookies in cookie filter. log-scroll-pos: Log all scrolling changes. log-sensitive-keys: Log keypresses in passthrough modes. stack: Enable Chromium stack logging. chromium: Enable Chromium logging. wait-renderer-process: Wait for debugger in renderer process. avoid-chromium-init: Enable `--version` without initializing Chromium. werror: Turn Python warnings into errors. test-notification-service: Use the testing libnotify service. Restore arguments from --json-args after a restart. When restarting, we serialize the argparse namespace into json, and construct a "fake" argparse.Namespace here based on the data loaded from json. # We do this imports late as earlyinit needs to be run first (because of # version checking and other early initialization) | 2.016862 | 2 |
deeplearning EX/Ex2.py | deliciousYSH/Misc.Code | 0 | 6620886 | import torch as t
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
from torch import nn
import torch as t
from torch.nn import functional as F
from torch import optim
show = ToPILImage() # 可以把Tensor转成Image,方便可视化
t.set_num_threads(8)
# 第一次运行程序torchvision会自动下载CIFAR-10数据集,
# 大约100M,需花费一定的时间,
# 如果已经下载有CIFAR-10,可通过root参数指定
# 定义对数据的预处理
transform = transforms.Compose([
transforms.ToTensor(), # 转为Tensor
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # 归一化
])
# 训练集
trainset = tv.datasets.CIFAR10(
root='./home/cy/tmp/data/',
train=True,
download=True,
transform=transform)
trainloader = t.utils.data.DataLoader(
trainset,
batch_size=4,
shuffle=True,
num_workers=2)
# 测试集
testset = tv.datasets.CIFAR10(
'./home/cy/tmp/data/',
train=False,
download=True,
transform=transform)
testloader = t.utils.data.DataLoader(
testset,
batch_size=4,
shuffle=False,
num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
(data, label) = trainset[100]
print(classes[label])
# (data + 1) / 2是为了还原被归一化的数据
show((data + 1) / 2).resize((100, 100))
class ResidualBlock(nn.Module):
    """Residual block submodule.

    The left branch applies conv3x3 -> BN -> ReLU -> conv3x3 -> BN; the
    right branch is an optional shortcut module (identity when ``shortcut``
    is None). Their sum is passed through a final ReLU.
    """

    def __init__(self, inchannel, outchannel, stride=1, shortcut=None):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 3, stride, 1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, 3, 1, 1, bias=False),
            nn.BatchNorm2d(outchannel))
        self.right = shortcut

    def forward(self, x):
        main_path = self.left(x)
        # Identity shortcut unless an explicit projection module was given.
        skip = x if self.right is None else self.right(x)
        main_path += skip
        return F.relu(main_path)
class ResNet(nn.Module):
    """ResNet-34-style main module.

    The network consists of several layers, each of which contains multiple
    residual blocks; ``ResidualBlock`` implements the block and
    ``_make_layer`` assembles a layer out of them.
    """

    def __init__(self, num_classes=1000):
        super(ResNet, self).__init__()
        # Stem: initial image transformation before the residual layers.
        self.pre = nn.Sequential(
            nn.Conv2d(3, 16, 3, 1, 1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1))
        # Repeated layers with 3, 4, 6 and 3 residual blocks respectively.
        self.layer1 = self._make_layer(16, 16, 3)
        self.layer2 = self._make_layer(16, 32, 4, stride=1)
        self.layer3 = self._make_layer(32, 64, 6, stride=1)
        self.layer4 = self._make_layer(64, 64, 3, stride=1)
        # Fully connected classification head.
        self.fc = nn.Linear(256, num_classes)

    def _make_layer(self, inchannel, outchannel, block_num, stride=1):
        """Assemble one layer out of ``block_num`` residual blocks."""
        # The first block projects via a 1x1 conv shortcut; the rest keep
        # the channel count and use identity shortcuts.
        shortcut = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
            nn.BatchNorm2d(outchannel))
        blocks = [ResidualBlock(inchannel, outchannel, stride, shortcut)]
        blocks.extend(ResidualBlock(outchannel, outchannel)
                      for _ in range(1, block_num))
        return nn.Sequential(*blocks)

    def forward(self, x):
        x = self.pre(x)
        for layer in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = layer(x)
        x = F.avg_pool2d(x, 7)
        return self.fc(x.view(x.size(0), -1))
model = ResNet()
# print(model)
criterion = nn.CrossEntropyLoss()  # Cross-entropy loss function
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
a=0  # running batch counter printed for progress
for epoch in range(1):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
        # Input data
        inputs, labels = data
        model.to(device)
        inputs = inputs.to(device)
        labels = labels.to(device)
        # Zero the gradients
        optimizer.zero_grad()
        # forward + backward
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Print log information
        # loss is a scalar; use loss.item() to get its value, not loss[0]
        running_loss += loss.item()
        print(a)
        a+=1
        if i % 500 == 499:  # Print the training status every 500 batches
            print('[%d, %5d] loss: %.3f' \
                  % (epoch + 1, i + 1, running_loss / 500))
            running_loss = 0.0
print('Finished Training')
correct = 0  # Number of correctly predicted images
total = 0  # Total number of images
device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
# Testing requires no gradients, so temporarily disable autograd to speed
# things up and save memory
with t.no_grad():
    for data in testloader:
        images, labels = data
        labels = labels.to(device)
        images = images.to(device)
        outputs = model(images)
        _, predicted = t.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()
print('10000张测试集中的准确率为: %d %%' % (100 * correct / total))
import torch as t
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
from torch import nn
import torch as t
from torch.nn import functional as F
from torch import optim

show = ToPILImage()  # Converts a Tensor back to a PIL Image for visualization
t.set_num_threads(8)
# On the first run, torchvision automatically downloads the CIFAR-10 dataset
# (about 100MB), which takes some time.
# If CIFAR-10 is already downloaded, its location can be given via ``root``.
# Define the preprocessing applied to the data
transform = transforms.Compose([
    transforms.ToTensor(),  # Convert to Tensor
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # Normalize
])
# Training set
trainset = tv.datasets.CIFAR10(
    root='./home/cy/tmp/data/',
    train=True,
    download=True,
    transform=transform)
trainloader = t.utils.data.DataLoader(
    trainset,
    batch_size=4,
    shuffle=True,
    num_workers=2)
# Test set
testset = tv.datasets.CIFAR10(
    './home/cy/tmp/data/',
    train=False,
    download=True,
    transform=transform)
testloader = t.utils.data.DataLoader(
    testset,
    batch_size=4,
    shuffle=False,
    num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
(data, label) = trainset[100]
print(classes[label])
# (data + 1) / 2 undoes the normalization applied above
show((data + 1) / 2).resize((100, 100))
class ResidualBlock(nn.Module):
    """Residual block submodule.

    The left branch applies conv3x3 -> BN -> ReLU -> conv3x3 -> BN; the
    right branch is an optional shortcut module (identity when ``shortcut``
    is None). Their sum is passed through a final ReLU.
    """

    def __init__(self, inchannel, outchannel, stride=1, shortcut=None):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 3, stride, 1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, 3, 1, 1, bias=False),
            nn.BatchNorm2d(outchannel))
        self.right = shortcut

    def forward(self, x):
        main_path = self.left(x)
        # Identity shortcut unless an explicit projection module was given.
        skip = x if self.right is None else self.right(x)
        main_path += skip
        return F.relu(main_path)
class ResNet(nn.Module):
    """ResNet-34-style main module.

    The network consists of several layers, each of which contains multiple
    residual blocks; ``ResidualBlock`` implements the block and
    ``_make_layer`` assembles a layer out of them.
    """

    def __init__(self, num_classes=1000):
        super(ResNet, self).__init__()
        # Stem: initial image transformation before the residual layers.
        self.pre = nn.Sequential(
            nn.Conv2d(3, 16, 3, 1, 1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1))
        # Repeated layers with 3, 4, 6 and 3 residual blocks respectively.
        self.layer1 = self._make_layer(16, 16, 3)
        self.layer2 = self._make_layer(16, 32, 4, stride=1)
        self.layer3 = self._make_layer(32, 64, 6, stride=1)
        self.layer4 = self._make_layer(64, 64, 3, stride=1)
        # Fully connected classification head.
        self.fc = nn.Linear(256, num_classes)

    def _make_layer(self, inchannel, outchannel, block_num, stride=1):
        """Assemble one layer out of ``block_num`` residual blocks."""
        # The first block projects via a 1x1 conv shortcut; the rest keep
        # the channel count and use identity shortcuts.
        shortcut = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
            nn.BatchNorm2d(outchannel))
        blocks = [ResidualBlock(inchannel, outchannel, stride, shortcut)]
        blocks.extend(ResidualBlock(outchannel, outchannel)
                      for _ in range(1, block_num))
        return nn.Sequential(*blocks)

    def forward(self, x):
        x = self.pre(x)
        for layer in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = layer(x)
        x = F.avg_pool2d(x, 7)
        return self.fc(x.view(x.size(0), -1))
model = ResNet()
# print(model)
criterion = nn.CrossEntropyLoss()  # Cross-entropy loss function
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
a=0  # running batch counter printed for progress
for epoch in range(1):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
        # Input data
        inputs, labels = data
        model.to(device)
        inputs = inputs.to(device)
        labels = labels.to(device)
        # Zero the gradients
        optimizer.zero_grad()
        # forward + backward
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Print log information
        # loss is a scalar; use loss.item() to get its value, not loss[0]
        running_loss += loss.item()
        print(a)
        a+=1
        if i % 500 == 499:  # Print the training status every 500 batches
            print('[%d, %5d] loss: %.3f' \
                  % (epoch + 1, i + 1, running_loss / 500))
            running_loss = 0.0
print('Finished Training')
correct = 0  # Number of correctly predicted images
total = 0  # Total number of images
device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
# Testing requires no gradients, so temporarily disable autograd to speed
# things up and save memory
with t.no_grad():
    for data in testloader:
        images, labels = data
        labels = labels.to(device)
        images = images.to(device)
        outputs = model(images)
        _, predicted = t.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()
print('10000张测试集中的准确率为: %d %%' % (100 * correct / total))
| zh | 0.916323 | # 可以把Tensor转成Image,方便可视化 # 第一次运行程序torchvision会自动下载CIFAR-10数据集, # 大约100M,需花费一定的时间, # 如果已经下载有CIFAR-10,可通过root参数指定 # 定义对数据的预处理 # 转为Tensor # 归一化 # 训练集 # 测试集 # (data + 1) / 2是为了还原被归一化的数据 实现子module: Residual Block 实现主module:ResNet34 ResNet34 包含多个layer,每个layer又包含多个residual block 用子module来实现residual block,用_make_layer函数来实现layer # 前几层图像转换 # 重复的layer,分别有3,4,6,3个residual block # 分类用的全连接 构建layer,包含多个residual block # print(model) # 交叉熵损失函数 # 输入数据 # 梯度清零 # forward + backward # 更新参数 # 打印log信息 # loss 是一个scalar,需要使用loss.item()来获取数值,不能使用loss[0] # 每2000个batch打印一下训练状态 # 预测正确的图片数 # 总共的图片数 # 由于测试的时候不需要求导,可以暂时关闭autograd,提高速度,节约内存 | 2.672476 | 3 |
acs_test_suites/OTC/libs/testlib/scripts/relay/relay_steps.py | wangji1/test-framework-and-suites-for-android | 0 | 6620887 | <filename>acs_test_suites/OTC/libs/testlib/scripts/relay/relay_steps.py<gh_stars>0
#!/usr/bin/env python
"""
Copyright (C) 2018 Intel Corporation
?
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
?
http://www.apache.org/licenses/LICENSE-2.0
?
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
?
SPDX-License-Identifier: Apache-2.0
"""
from testlib.scripts.relay.relay_step import relay_step
from testlib.scripts.android.adb.adb_step import step as adb_step
from testlib.scripts.android.android_step import step as android_step
from testlib.scripts.relay import relay_utils
from testlib.scripts.connections.local import local_utils
from testlib.scripts.connections.local import local_steps
from testlib.scripts.android.adb import adb_steps
from testlib.scripts.android.adb import adb_utils
from testlib.scripts.android.ui import ui_steps
from testlib.base.ParallelSteps import ParallelSteps
from testlib.scripts.android.fastboot import fastboot_utils
from testlib.scripts.android.fastboot import fastboot_steps
from testlib.utils.statics.android import statics
import datetime
import re
import time
class power_off_device(relay_step):
    """ description:
            Shuts down the device by toggling the relay power port.

        usage:
            relay_steps.power_off_device(serial=serial,
                                         relay_type=relay_type,
                                         relay_port=relay_port,
                                         power_port=power_port)()

        tags:
            shutdown, relay
    """

    def __init__(self, serial, except_charging=False, timeout=120, device_info="", **kwargs):
        # serial: adb serial of the device under test
        # except_charging: treat a charging-mode device as disconnected
        # timeout: seconds to wait for the device to disappear from adb
        # device_info: platform name; "broxtonp" gets special power handling
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.timeout = timeout
        self.except_charging = except_charging
        self.device_info = device_info

    def do(self):
        if self.device_info != "broxtonp":
            # NOTE(review): power_on before power_off presumably puts the
            # relay into a known state before cutting power -- confirm
            # against the relay hardware behavior.
            self.relay.power_on()
        self.relay.power_off()

    def check_condition(self):
        # Poll every 2 seconds until the device disconnects or the timeout
        # expires; the step passes when the device is no longer connected.
        wait_time = 0
        while local_utils.is_device_connected(self.serial, self.except_charging) and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return not local_utils.is_device_connected(self.serial, self.except_charging)
class long_press_power_shutdown(relay_step):
    """ description:
            Performs a long press on the power key via the relay (hardware
            shutdown/reset) and then verifies the device is back in Android.

        usage:
            relay_steps.long_press_power_shutdown(serial=serial,
                                                  relay_type=relay_type,
                                                  relay_port=relay_port,
                                                  power_port=power_port)()

        tags:
            shutdown, power, relay
    """

    def __init__(self, serial, except_charging=False, timeout=120,
                 wait_ui=True, long_press_time=15, device_info="", **kwargs):
        # long_press_time: how many seconds the power key is held down
        # wait_ui: after boot, also wait for the Android UI processes
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.except_charging = except_charging
        self.timeout = timeout
        self.wait_ui = wait_ui
        self.long_press_time = long_press_time
        self.device_info = device_info

    def do(self):
        self.relay.long_press_power_shutdown(
            long_press_time=self.long_press_time)

    def check_condition(self):
        if self.device_info == "broxtonp":
            # On broxtonp, poll until the device shows up again as an
            # Android device (or the timeout expires) before checking UI.
            wait_time = 0
            while self.serial not in local_utils.get_connected_android_devices()['android'] and\
                    wait_time < self.timeout:
                time.sleep(2)
                wait_time += 2
        if self.wait_ui:
            return adb_steps.wait_for_ui_processes(serial=self.serial)()
        return self.serial in local_utils.get_connected_android_devices()['android']
class gracefully_power_off_device(relay_step):
    """ description:
            Gracefully shuts down the device via the UI: long-presses the
            power key and taps the "Power off" entry in the power menu.

        usage:
            relay_steps.gracefully_power_off_device(serial=serial,
                                                    relay_type=relay_type,
                                                    relay_port=relay_port,
                                                    power_port=power_port)()

        tags:
            shutdown, ui, relay
    """

    def __init__(self, serial, timeout=120, except_charging=False, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.timeout = timeout
        self.except_charging = except_charging

    def do(self):
        # Bring up the power menu via the relay, then confirm through the UI.
        self.relay.long_press_power()
        ui_steps.click_button(serial=self.serial, view_to_find={
            "text": "Power off"}, wait_for_event_occurs=False)()

    def check_condition(self):
        # Poll every 2 seconds until the device disconnects or the timeout
        # expires; the step passes when the device is no longer connected.
        wait_time = 0
        while local_utils.is_device_connected(self.serial, self.except_charging) and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return not local_utils.is_device_connected(self.serial, self.except_charging)
class power_on_device(relay_step):
    """ description:
            Powers up the device via the relay power port.

        usage:
            relay_steps.power_on_device(serial=serial,
                                        relay_type=relay_type,
                                        relay_port=relay_port,
                                        power_port=power_port)()

        tags:
            startup, relay
    """

    def __init__(self, serial, timeout=120, except_charging=False, not_check_result=False, **kwargs):
        # not_check_result: skip the connectivity verification entirely
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.timeout = timeout
        self.except_charging = except_charging
        self.not_check_result = not_check_result

    def do(self):
        self.relay.power_on()

    def check_condition(self):
        if self.not_check_result:
            return True
        # Poll every 2 seconds until the device is seen over adb or the
        # timeout expires.
        wait_time = 0
        while not local_utils.is_device_connected(self.serial, self.except_charging) and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        # Extra settle time after the device first appears before the
        # final connectivity check.
        time.sleep(10)
        return local_utils.is_device_connected(self.serial, self.except_charging)
class reboot_fastboot(relay_step):
    """ description:
            Hard-reboots the device into fastboot via the relay.

        usage:
            relay_steps.reboot_fastboot(serial=serial,
                                        relay_type=relay_type,
                                        relay_port=relay_port,
                                        power_port=power_port,
                                        v_up_port=v_up_port,
                                        v_down_port=v_down_port,
                                        USB_VC_cut_port=USB_VC_cut_port)()

        tags:
            fastboot, android, reboot, relay
    """

    def __init__(self, serial, timeout=10, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.timeout = timeout

    def do(self):
        # hard boot to fastboot
        self.relay.power_off()
        self.relay.enter_fastboot()

    def check_condition(self):
        # Poll until the serial appears in `fastboot devices` or timeout.
        wait_time = 0
        while self.serial not in local_utils.get_fastboot_devices() and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_fastboot_devices()
class reboot_main_os(relay_step):
    """ description:
            Hard-reboots the device to the main (Android) OS by power
            cycling it through the relay.

        usage:
            relay_steps.reboot_main_os(serial=serial,
                                       relay_type=relay_type,
                                       relay_port=relay_port,
                                       power_port=power_port,
                                       force_reboot=True)()

            - force_reboot: if True, the device is power cycled even if it
              is already booted into the main OS

        tags:
            reboot, android, relay
    """

    def __init__(self, serial, timeout=30, force_reboot=False,
                 wait_ui=True, delay_power_on=0, device_info="", **kwargs):
        # delay_power_on: seconds to keep the device off before powering on
        # wait_ui: additionally wait for the Android UI processes after boot
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        self.timeout = timeout
        self.force_reboot = force_reboot
        self.wait_ui = wait_ui
        self.delay_power_on = delay_power_on
        self.device_info = device_info

    def do(self):
        # hard boot to main OS
        # Power cycle only when not already in Android, unless forced.
        if self.serial not in local_utils.get_connected_android_devices()['android'] or self.force_reboot:
            power_off_device(serial=self.serial, except_charging=True,
                             device_info=self.device_info, **self.kwargs)()
            if self.delay_power_on > 0:
                time.sleep(self.delay_power_on)
            power_on_device(serial=self.serial,
                            except_charging=True, **self.kwargs)()

    def check_condition(self):
        # Poll until the serial shows up as an Android device or timeout.
        wait_time = 0
        while self.serial not in local_utils.get_connected_android_devices()['android'] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        if self.wait_ui:
            return adb_steps.wait_for_ui_processes(serial=self.serial)()
        return self.serial in local_utils.get_connected_android_devices()['android']
class recovery_reboot(relay_step):
    """ description:
            Reboots from recovery into the selected mode.
        usage:
            relay_steps.recovery_reboot(serial = serial,
                                        mode = "fastboot",
                                        relay_type = relay_type,
                                        relay_port=relay_port,
                                        power_port=power_port,
                                        v_up_port = v_up_port,
                                        v_down_port = v_down_port)()
            - mode: "fastboot" or "android"
        tags:
            reboot, recovery, android, relay
    """

    def __init__(self, serial, mode="android", menu_position=0, timeout=30, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        self.timeout = timeout
        # device state to wait for after the reboot ("android"/"fastboot")
        self.mode = mode
        # index of the recovery (ROS) menu entry to select
        self.menu_position = menu_position

    def do(self):
        # hard boot from recovery to main OS
        # navigate to the menu entry, then confirm with the power key
        relay_utils.select_ros_menu_item(self.relay, mode=self.menu_position)
        self.relay.press_power()

    def check_condition(self):
        # poll until the device appears in the requested state or we time out
        wait_time = 0
        while self.serial not in local_utils.get_connected_android_devices()[self.mode] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_connected_android_devices()[self.mode]
class recovery_factory_reset(relay_step):
    """ description:
            Reset to factory defaults from recovery OS. At the end the system
            remains in ROS.
        usage:
            relay_steps.recovery_factory_reset(serial = serial,
                                               relay_type = relay_type,
                                               relay_port=relay_port,
                                               power_port=power_port)()
        tags:
            reboot, recovery, android, relay
    """

    def __init__(self, serial, timeout=600, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        # a data wipe can be slow, hence the generous default timeout
        self.timeout = timeout
        self.mode = "factory_reset"

    def do(self):
        # navigate the recovery (ROS) menu to the factory-reset entry
        # bug fix: the original referenced the undefined names `serial` and
        # `option` (NameError at runtime); use the instance attributes
        relay_utils.select_ros_menu_item(
            self.relay,
            mode=statics.Device(serial=self.serial).ros_menu_entry[self.mode])
        self.relay.press_power()
        time.sleep(1)
        # accept the factory reset confirmation prompt
        self.relay.press_volume_down()
        self.relay.press_power()

    def check_condition(self):
        # the device should come back up in recovery once the wipe is done
        wait_time = 0
        while self.serial not in local_utils.get_connected_android_devices()["recovery"] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_connected_android_devices()["recovery"]
class reboot_safe_mode(relay_step):
    """ description:
            Reboots into safe mode.
        usage:
            relay_steps.reboot_safe_mode(serial = serial,
                                         mode = "android",
                                         relay_type = relay_type,
                                         relay_port=relay_port,
                                         power_port=power_port)()
            - mode: "fastboot" or "android"
        tags:
            reboot, recovery, android, relay
    """

    def __init__(self, serial, app_to_find, mode="android", timeout=30, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.mode = mode
        self.timeout = timeout
        # third-party app expected to be hidden while in safe mode
        self.app_to_find = app_to_find
        self.kwargs = kwargs
        self.set_errorm(
            "", "Cannot reboot in safe mode - device {0}".format(self.serial))
        self.set_passm(
            "Rebooted in safe mode - device {0}".format(self.serial))

    def do(self):
        # Long press on power button
        self.relay.long_press_power()
        # Long press on power to enable reboot safe mode prompt and select OK
        ui_steps.long_click(serial=self.serial,
                            view_to_find={
                                "resourceId": "android:id/contentPanel"},
                            view_to_check={"text": "Reboot to safe mode"})()
        ui_steps.click_button(serial=self.serial,
                              view_to_find={"text": "OK"})()
        # Wait for the device to reboot
        local_steps.wait_until_disconnected(serial=self.serial)()
        local_steps.wait_for_adb(serial=self.serial)()
        adb_steps.wait_for_ui(serial=self.serial)()

    def check_condition(self):
        # safe mode hides third-party apps, so the app must NOT be present
        self.step_data = ui_steps.find_app_from_allapps(serial=self.serial,
                                                        view_to_find={
                                                            "text": self.app_to_find},
                                                        presence=False)()
        return self.step_data
class reboot_safe_mode_magic_key(relay_step):
    """ description:
            Enters safe mode when booting by pressing volume down button.
        usage:
            relay_steps.reboot_safe_mode_magic_key(serial = serial,
                                                   relay_type = relay_type,
                                                   relay_port=relay_port,
                                                   power_port=power_port,
                                                   v_up_port=v_up_port
                                                   v_down_port=v_down_port)()
        tags:
            reboot, android, relay, safe_mode, power_off, power_on
    """

    def __init__(self, serial, app_to_find, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        # third-party app expected to be hidden while in safe mode
        self.app_to_find = app_to_find
        self.kwargs = kwargs
        self.set_errorm(
            "", "Cannot reboot in safe mode - device {0}".format(self.serial))
        self.set_passm(
            "Rebooted in safe mode - device {0}".format(self.serial))

    def do(self):
        # Turn off the device
        power_off_device(serial=self.serial,
                         except_charging=True,
                         **self.kwargs)()
        # Power on the device
        power_on_device(serial=self.serial,
                        **self.kwargs)()
        # Press volume down
        # hold volume-down for the whole boot so Android enters safe mode
        self.relay.relay.on(self.relay.v_down_port)
        # Wait for UI processes
        adb_steps.wait_for_ui_processes(serial=self.serial)()
        # Release volume down button
        self.relay.relay.off(self.relay.v_down_port)

    def check_condition(self):
        # Unlock device
        ui_steps.wake_up_device(serial=self.serial)()
        ui_steps.unlock_device(serial=self.serial)()
        # Check if the app is NOT displayed in normal mode
        ui_steps.find_app_from_allapps(serial=self.serial,
                                       view_to_find={"text": self.app_to_find},
                                       presence=False)()
        return True
class connect_disconnect_usb(relay_step):
    """ description:
            Cuts or restores the device USB connection through the relay.
        usage:
            relay_steps.disconnect_usb(serial=serial,
                                       connect=True,
                                       USB_VC_cut_port = USB_VC_cut_port)()
        tags:
            usb, relay
    """

    def __init__(self, serial, connect, timeout=30, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.timeout = timeout
        # True -> restore USB, False -> cut USB
        self.connect = connect

    def do(self):
        # Drive the USB cut relay in the requested direction.
        if self.connect:
            self.relay.uncut_usb_vc()
        else:
            self.relay.cut_usb_vc()

    def check_condition(self):
        # Poll until the adb connection state matches the requested one.
        elapsed = 0
        reached = False
        while elapsed < self.timeout:
            if self.connect:
                # Re-enumeration takes a while; allow extra settling time
                # before each connectivity probe.
                time.sleep(10)
                reached = local_utils.is_device_connected(self.serial)
            else:
                reached = not local_utils.is_device_connected(self.serial)
            if reached:
                break
            time.sleep(2)
            elapsed += 2
        return reached
class take_screenshot(relay_step, adb_step):
    """ description:
            Takes screenshot using power button and volume button.
        usage:
            relay_steps.take_screenshot(serial = serial,
                                        relay_type = relay_type,
                                        relay_port=relay_port,
                                        power_port=power_port,
                                        v_up_port=v_up_port
                                        v_down_port=v_down_port)()
        tags:
            screenshot, android, relay
    """

    def __init__(self, serial, screenshots_folder, timeout=30, **kwargs):
        adb_step.__init__(self, serial=serial, **kwargs)
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        # on-device folder where screenshots land (e.g. /sdcard/Pictures/Screenshots)
        self.screenshots_folder = screenshots_folder
        self.timeout = timeout
        self.kwargs = kwargs
        self.result = True
        # filled by do() with the screenshot file size in bytes
        self.step_data = None
        self.set_errorm(
            "", "Cannot take screenshot - device {0}".format(self.serial))
        self.set_passm(
            "Screenshot taken successfully - device {0}".format(self.serial))

    def do(self):
        # Delete the contents of the screenshots folder on the DUT so the only
        # file found afterwards is the one created by this step
        if adb_utils.folder_exists(serial=self.serial, folder=self.screenshots_folder):
            adb_steps.delete_folder_content(
                serial=self.serial, folder=self.screenshots_folder)()
        # Take screenshot via the relay key combination
        self.relay.take_screenshot()
        # Screenshot file names embed the current date (YYYYmmdd)
        screenshot_date = datetime.datetime.strftime(
            datetime.datetime.now(), "%Y%m%d")
        # Wait for a file containing today's date to appear in the folder
        output = adb_utils.wait_for_file_with_text(text_contained=screenshot_date,
                                                   dir_to_search=self.screenshots_folder,
                                                   serial=self.serial)
        if output is None:
            self.result = False
            self.set_errorm("", "Failed when waiting for file with text {0} - device {1}".format(screenshot_date,
                                                                                                 self.serial))
            return
        # Check if the file has the correct name (screenshot_date in name)
        # (fix: raw strings -- "\w"/"\s" are invalid escape sequences and
        # deprecated in modern Python)
        if not re.search(r"\w+{0}".format(screenshot_date), output):
            self.result = False
        # Extract the file size from the directory listing; guard against a
        # listing without a size field instead of raising IndexError
        sizes = re.findall(r"\s(\d+)\s", output)
        self.step_data = int(sizes[0]) if sizes else 0

    def check_condition(self):
        # The correct file should exist and the size should not be 0
        return self.result and self.step_data > 0
class choose_fastboot_menu(relay_step):
    """ description:
            Selects the provided menu option from fastboot.
        usage:
            relay_steps.choose_fastboot_menu(serial = serial,
                                             option = "normal_boot",
                                             relay_type = relay_type,
                                             relay_port=relay_port,
                                             power_port=power_port,
                                             v_up_port = v_up_port,
                                             v_down_port = v_down_port)()
            - option:
                - normal_boot
                - power_off
                - bootloader
                - recovery
                - reboot
        tags:
            reboot, fastboot, android, relay
    """

    def __init__(self, serial, option=None, menu_position=None, timeout=60, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        self.timeout = timeout
        self.option = option
        if not menu_position:
            # derive the menu index for this option from the device statics
            self.menu_position = statics.Device(
                serial=self.serial).fastboot_menu_entry[self.option]
        else:
            self.menu_position = menu_position

    def do(self):
        # choose the option from bootloader
        # sleep to give time the getvar from statics.Device to finish
        time.sleep(1)
        # the animation
        relay_utils.select_fastboot_menu_item(self.relay, self.menu_position)
        time.sleep(0.5)
        self.relay.press_power()

    def check_condition(self):
        # map the chosen menu option onto the device state to wait for
        wait_time = 0
        if self.option in ["normal_boot", "reboot"]:
            device_state = "android"
        elif self.option == "power_off":
            device_state = "charge_os"
            # a powered-off device cannot be polled, so pass immediately
            # (the assignment above is therefore never used)
            return True
        elif self.option == "bootloader":
            device_state = "fastboot"
            time.sleep(3)
        elif self.option == "recovery":
            device_state = "recovery"
        else:
            self.set_errorm("", "Invalid menu option {0}".format(self.option))
            return False
        while self.serial not in local_utils.get_connected_android_devices()[device_state] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_connected_android_devices()[device_state]
class choose_crashmode_menu(choose_fastboot_menu):
    """ description:
            Selects the provided menu option from crashmode.
        usage:
            relay_steps.choose_crashmode_menu(serial = serial,
                                              menu_position=3,
                                              relay_type = relay_type,
                                              relay_port=relay_port,
                                              power_port=power_port,
                                              v_up_port = v_up_port,
                                              v_down_port = v_down_port)()
            - option:
                - normal_boot
                - power_off
                - bootloader
                - recovery
                - reboot
        tags:
            reboot, crashmode, android, relay
    """

    def __init__(self, serial, option=None, menu_position=None, timeout=60, **kwargs):
        # bug fix: forward menu_position and the caller's timeout to the base
        # class; previously menu_position was dropped (so it was always
        # re-derived from statics, breaking the documented menu_position
        # usage) and timeout was hardcoded to 60
        choose_fastboot_menu.__init__(
            self, serial, option=option, menu_position=menu_position,
            timeout=timeout, **kwargs)

    def do(self):
        # overwrite the do method: crashmode navigation differs from fastboot
        relay_utils.select_crashmode_menu_item(self.relay, self.menu_position)
        self.relay.press_power()

    # check method is the one from inherited class
    # def check_condition(self):
class change_state(relay_step, android_step):
    """ description:
            Changes bootloader state if the oem_unlock_enabled is set to yes.
            Else it will attempt to unlock and check for message in output.
        usage:
            relay_steps.change_state(serial=serial,
                                     dessert=dessert,
                                     unlock_bootloader=lock_state,
                                     relay_type=relay_type,
                                     relay_port=relay_port,
                                     power_port=power_port,
                                     v_down_port=v_down_port,
                                     v_up_port=v_up_port)()
        tags:
            fastboot, android, bootloader, unlock, relay
    """

    def __init__(self, unlock_bootloader, dessert, oem_unlock_enabled="yes", **kwargs):
        android_step.__init__(self, **kwargs)
        relay_step.__init__(self, **kwargs)
        # "yes" -> unlock the bootloader, "no" -> lock it
        self.unlock_bootloader = unlock_bootloader
        # Android dessert letter ("L", "M", ...)
        self.dessert = dessert
        self.oem_unlock_enabled = oem_unlock_enabled
        # Define the unlock command depending on Android version
        # ("fastboot oem ..." on L, "fastboot flashing ..." on M and later)
        if self.dessert == "L":
            self.unlock_cmd = "oem"
        elif self.dessert >= "M":
            self.unlock_cmd = "flashing"
        # Define the state command
        if self.unlock_bootloader == "yes":
            self.state_cmd = "unlock"
        elif self.unlock_bootloader == "no":
            self.state_cmd = "lock"
        if self.oem_unlock_enabled == "yes":
            # expected tail of a successful fastboot command output
            self.string_to_check = "finished. total time:"
            self.set_errorm("", "Could not change state to \"{0}\"".format(
                self.unlock_bootloader))
            self.set_passm("State Changed to \"{0}\"".format(
                self.unlock_bootloader))
        else:
            # with OEM unlocking disabled fastboot must refuse the request
            self.string_to_check = "Unlock is not allowed"
            self.set_errorm("", "State changed to \"{0}\", even if OEM unlock is disabled".format(
                self.unlock_bootloader))
            self.set_passm("State not changed to \"{0}\". OEM unlock is disabled".format(
                self.unlock_bootloader))

    def do(self):
        # use_control_process=False
        psteps = ParallelSteps(use_control_process=False)

        # Accept lock/ unlock action using volume and power keys
        def accept_lock_unlock():
            time.sleep(1)
            self.relay.press_volume_down()
            time.sleep(1)
            self.relay.press_power()
        current_state = fastboot_utils.get_var(serial=self.serial,
                                               var="unlocked")
        # only act when the device is not already in the requested state
        if self.unlock_bootloader not in str(current_state):
            # Run the lock/ unlock command in a parallel step, because the
            # fastboot command blocks until confirmed on the device
            step_id_lock = psteps.add_step(fastboot_steps.command,
                                           serial=self.serial,
                                           command=self.unlock_cmd + " " + self.state_cmd,
                                           stderr_grep=self.string_to_check,
                                           timeout=120000)
            time.sleep(2)
            # Run accept lock unlock if the operations is allowed
            if self.oem_unlock_enabled == "yes":
                accept_lock_unlock()
            # Interpret the parallel step result
            psteps.interpret_step(step_id_lock)

    def check_condition(self):
        # If the OEM unlocking is disabled, pass if the state is unchanged
        if self.oem_unlock_enabled != "yes":
            return self.unlock_bootloader != fastboot_utils.get_var(serial=self.serial,
                                                                    var="unlocked")
        # If the OEM unlocking is enabled, pass if the state has changed
        return self.unlock_bootloader == fastboot_utils.get_var(serial=self.serial,
                                                                var="unlocked")
class create_panic_and_check_state(relay_step, android_step):
    """ description:
            Creates kernel panics (or stops the watchdog daemon) watchdog_counter_max times.
            It then checks the device boot state (MOS, crash mode or fastboot), depending on wait_for_state value
        usage:
            create_panic_and_check_state(serial=serial,
                                         create_crash_mode=panic/watchdog,
                                         wait_for_state=state)()
            state = [android, crashmode, fastboot]
        tags:
            adb, panic, watchdog, fastboot, crashmode, android
    """

    def __init__(self, create_crash_mode, wait_for_state, use_combo_button, **kwargs):
        # "panic" triggers a kernel panic via sysrq; anything else kills watchdogd
        self.create_crash_mode = create_crash_mode
        # expected post-crash state: "android", "crashmode" or "fastboot"
        self.wait_for_state = wait_for_state
        self.panic_command = "\"echo e > /proc/sysrq-trigger\""
        self.watchdog_process_name = "/sbin/watchdogd"
        self.wait_for_fastboot_timeout = 70
        self.use_combo_button = use_combo_button
        relay_step.__init__(self, **kwargs)
        android_step.__init__(self, **kwargs)

    def do(self):
        # Create panic
        adb_steps.root_connect_device(serial=self.serial)()
        if self.create_crash_mode == "panic":
            # Using local_steps to run the command because adb_steps.command() does not create kernel panic
            local_steps.command(
                "adb -s {0} shell {1}".format(self.serial, self.panic_command))()
            # Wait for the device to disconnect
            local_steps.wait_until_disconnected(serial=self.serial)()
            if self.use_combo_button is False:
                # boards without a combo button need extra settle time
                # (python 2 print statement)
                print "======================wait 120s======================="
                time.sleep(120)
        else:
            adb_steps.kill_process(serial=self.serial,
                                   process_name=self.watchdog_process_name,
                                   with_reboot=True,
                                   reboot_timeout=60)()
        # Wait for a device state
        if self.wait_for_state == "fastboot":
            # Set the pass and error messages
            self.set_errorm(
                "", "The device with serial {0} is not in fastboot".format(self.serial))
            self.set_passm(
                "The device with serial {0} is in fastboot".format(self.serial))
            # If the expected state is fastboot, we need to press the volume down button after the panic is created
            # This will boot the device in fastboot
            # Create a parallel step
            # use_control_process=False
            psteps = ParallelSteps(use_control_process=False)
            step_id_combo = psteps.add_step(local_steps.wait_for_fastboot,
                                            serial=self.serial,
                                            timeout=self.wait_for_fastboot_timeout)
            # hold volume-down for most of the wait window while the parallel
            # step polls for fastboot
            self.relay.relay.on(self.relay.v_down_port)
            time.sleep(self.wait_for_fastboot_timeout - 10)
            self.relay.relay.off(self.relay.v_down_port)
            # Interpret the parallel step result
            psteps.interpret_step(step_id_combo)
        elif self.wait_for_state == "android":
            # Set the pass and error messages
            self.set_errorm(
                "", "The device with serial {0} is not in MOS".format(self.serial))
            self.set_passm(
                "The device with serial {0} is in MOS".format(self.serial))
            # Wait for adb connection
            local_steps.wait_for_adb(serial=self.serial)()
            # Wait for the UI processes
            adb_steps.wait_for_ui_processes(serial=self.serial)()
        elif self.wait_for_state == "crashmode":
            # Set the pass and error messages
            self.set_errorm(
                "", "The device with serial {0} is not in crash mode".format(self.serial))
            self.set_passm(
                "The device with serial {0} is in crash mode".format(self.serial))
            # Wait for crashmode
            local_steps.wait_for_crashmode(serial=self.serial,
                                           timeout=60)()

    def check_condition(self):
        # Check performed in do()
        return True
#!/usr/bin/env python
"""
Copyright (C) 2018 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.

SPDX-License-Identifier: Apache-2.0
"""
from testlib.scripts.relay.relay_step import relay_step
from testlib.scripts.android.adb.adb_step import step as adb_step
from testlib.scripts.android.android_step import step as android_step
from testlib.scripts.relay import relay_utils
from testlib.scripts.connections.local import local_utils
from testlib.scripts.connections.local import local_steps
from testlib.scripts.android.adb import adb_steps
from testlib.scripts.android.adb import adb_utils
from testlib.scripts.android.ui import ui_steps
from testlib.base.ParallelSteps import ParallelSteps
from testlib.scripts.android.fastboot import fastboot_utils
from testlib.scripts.android.fastboot import fastboot_steps
from testlib.utils.statics.android import statics
import datetime
import re
import time
class power_off_device(relay_step):
    """ description:
            Shuts down the device.
        usage:
            relay_steps.power_off_device(serial=serial,
                                         relay_type = relay_type,
                                         relay_port = relay_port,
                                         power_port = power_port)()
        tags:
            shutdown, relay
    """

    def __init__(self, serial, except_charging=False, timeout=120, device_info="", **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        # seconds to wait for the device to disappear from adb
        self.timeout = timeout
        self.except_charging = except_charging
        self.device_info = device_info

    def do(self):
        # NOTE(review): on non-broxtonp boards a power_on pulse is sent before
        # power_off, presumably to wake the device so the power-off press
        # registers -- confirm against the relay hardware behaviour
        if self.device_info != "broxtonp":
            self.relay.power_on()
        self.relay.power_off()

    def check_condition(self):
        # poll every 2s until the device disconnects or the timeout expires
        wait_time = 0
        while local_utils.is_device_connected(self.serial, self.except_charging) and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return not local_utils.is_device_connected(self.serial, self.except_charging)
class long_press_power_shutdown(relay_step):
    """ description:
            Shuts down the device by holding the power button.
        usage:
            relay_steps.long_press_power_shutdown(serial=serial,
                                                  relay_type = relay_type,
                                                  relay_port = relay_port,
                                                  power_port = power_port)()
        tags:
            shutdown, relay
    """

    def __init__(self, serial, except_charging=False, timeout=120,
                 wait_ui=True, long_press_time=15, device_info="", **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.except_charging = except_charging
        self.timeout = timeout
        self.wait_ui = wait_ui
        # how long the power button is held down, in seconds
        self.long_press_time = long_press_time
        self.device_info = device_info

    def do(self):
        self.relay.long_press_power_shutdown(
            long_press_time=self.long_press_time)

    def check_condition(self):
        # NOTE(review): only broxtonp is polled for an adb reconnect here,
        # presumably because that board reboots into Android after the long
        # press -- confirm against the board behaviour
        if self.device_info == "broxtonp":
            wait_time = 0
            while self.serial not in local_utils.get_connected_android_devices()['android'] and\
                    wait_time < self.timeout:
                time.sleep(2)
                wait_time += 2
        if self.wait_ui:
            return adb_steps.wait_for_ui_processes(serial=self.serial)()
        return self.serial in local_utils.get_connected_android_devices()['android']
class gracefully_power_off_device(relay_step):
    """ description:
            Shuts down the device through the UI power menu.
        usage:
            relay_steps.gracefully_power_off_device(serial=serial,
                                                    relay_type = relay_type,
                                                    relay_port = relay_port,
                                                    power_port = power_port)()
        tags:
            shutdown, relay
    """

    def __init__(self, serial, timeout=120, except_charging=False, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.timeout = timeout
        self.except_charging = except_charging

    def do(self):
        # open the power menu with a long press, then tap "Power off"
        self.relay.long_press_power()
        ui_steps.click_button(serial=self.serial, view_to_find={
            "text": "Power off"}, wait_for_event_occurs=False)()

    def check_condition(self):
        # poll every 2s until the device disconnects or the timeout expires
        wait_time = 0
        while local_utils.is_device_connected(self.serial, self.except_charging) and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return not local_utils.is_device_connected(self.serial, self.except_charging)
class power_on_device(relay_step):
    """ description:
            Powers up the device.
        usage:
            relay_steps.power_on_device(serial=serial,
                                        relay_type = relay_type,
                                        relay_port = relay_port,
                                        power_port = power_port)()
        tags:
            startup, relay
    """

    def __init__(self, serial, timeout=120, except_charging=False, not_check_result=False, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        # seconds to wait for the device to enumerate on adb
        self.timeout = timeout
        self.except_charging = except_charging
        # when True, skip the connectivity verification entirely
        self.not_check_result = not_check_result

    def do(self):
        self.relay.power_on()

    def check_condition(self):
        if self.not_check_result:
            return True
        # poll every 2s until the device shows up or the timeout expires
        wait_time = 0
        while not local_utils.is_device_connected(self.serial, self.except_charging) and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        # grace period so the OS can settle before the final check
        time.sleep(10)
        return local_utils.is_device_connected(self.serial, self.except_charging)
class reboot_fastboot(relay_step):
    """ description:
            Reboots to fastboot.
        usage:
            relay_steps.reboot_fastboot(serial = serial,
                                        relay_type = relay_type,
                                        relay_port=relay_port,
                                        power_port=power_port,
                                        v_up_port=v_up_port,
                                        v_down_port=v_down_port,
                                        USB_VC_cut_port=USB_VC_cut_port)()
        tags:
            fastboot, android, reboot, relay
    """

    def __init__(self, serial, timeout=10, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        # seconds to wait for the device to show up in `fastboot devices`
        self.timeout = timeout

    def do(self):
        # hard boot to fastboot
        # cut power first, then drive the key combination that enters fastboot
        self.relay.power_off()
        self.relay.enter_fastboot()

    def check_condition(self):
        # poll every 2s until the serial is listed in fastboot or we time out
        wait_time = 0
        while self.serial not in local_utils.get_fastboot_devices() and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_fastboot_devices()
class reboot_main_os(relay_step):
    """ description:
            Reboots to main OS.
        usage:
            relay_steps.reboot_main_os(serial = serial,
                                       relay_type = relay_type,
                                       relay_port=relay_port,
                                       power_port=power_port,
                                       force_reboot = True)()
            - force_reboot: if equals True then the device will be rebooted
            even if it is already into main OS
        tags:
            reboot, android, relay
    """

    def __init__(self, serial, timeout=30, force_reboot=False,
                 wait_ui=True, delay_power_on=0, device_info="", **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        self.timeout = timeout
        self.force_reboot = force_reboot
        # when True, also wait for the Android UI processes after reconnect
        self.wait_ui = wait_ui
        # optional pause (seconds) between power-off and power-on
        self.delay_power_on = delay_power_on
        self.device_info = device_info

    def do(self):
        # hard boot to main OS
        # only cycle power when not already in Android, unless forced
        if self.serial not in local_utils.get_connected_android_devices()['android'] or self.force_reboot:
            power_off_device(serial=self.serial, except_charging=True,
                             device_info=self.device_info, **self.kwargs)()
            if self.delay_power_on > 0:
                time.sleep(self.delay_power_on)
            power_on_device(serial=self.serial,
                            except_charging=True, **self.kwargs)()

    def check_condition(self):
        # poll until the device is back in Android or the timeout expires
        wait_time = 0
        while self.serial not in local_utils.get_connected_android_devices()['android'] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        if self.wait_ui:
            return adb_steps.wait_for_ui_processes(serial=self.serial)()
        return self.serial in local_utils.get_connected_android_devices()['android']
class recovery_reboot(relay_step):
    """ description:
            Reboots from recovery into the selected mode.
        usage:
            relay_steps.recovery_reboot(serial = serial,
                                        mode = "fastboot",
                                        relay_type = relay_type,
                                        relay_port=relay_port,
                                        power_port=power_port,
                                        v_up_port = v_up_port,
                                        v_down_port = v_down_port)()
            - mode: "fastboot" or "android"
        tags:
            reboot, recovery, android, relay
    """

    def __init__(self, serial, mode="android", menu_position=0, timeout=30, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        self.timeout = timeout
        # device state to wait for after the reboot ("android"/"fastboot")
        self.mode = mode
        # index of the recovery (ROS) menu entry to select
        self.menu_position = menu_position

    def do(self):
        # hard boot from recovery to main OS
        # navigate to the menu entry, then confirm with the power key
        relay_utils.select_ros_menu_item(self.relay, mode=self.menu_position)
        self.relay.press_power()

    def check_condition(self):
        # poll until the device appears in the requested state or we time out
        wait_time = 0
        while self.serial not in local_utils.get_connected_android_devices()[self.mode] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_connected_android_devices()[self.mode]
class recovery_factory_reset(relay_step):
    """ description:
            Reset to factory defaults from recovery OS. At the end the system
            remains in ROS.
        usage:
            relay_steps.recovery_factory_reset(serial = serial,
                                               relay_type = relay_type,
                                               relay_port=relay_port,
                                               power_port=power_port)()
        tags:
            reboot, recovery, android, relay
    """

    def __init__(self, serial, timeout=600, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        # a data wipe can be slow, hence the generous default timeout
        self.timeout = timeout
        self.mode = "factory_reset"

    def do(self):
        # navigate the recovery (ROS) menu to the factory-reset entry
        # bug fix: the original referenced the undefined names `serial` and
        # `option` (NameError at runtime); use the instance attributes
        relay_utils.select_ros_menu_item(
            self.relay,
            mode=statics.Device(serial=self.serial).ros_menu_entry[self.mode])
        self.relay.press_power()
        time.sleep(1)
        # accept the factory reset confirmation prompt
        self.relay.press_volume_down()
        self.relay.press_power()

    def check_condition(self):
        # the device should come back up in recovery once the wipe is done
        wait_time = 0
        while self.serial not in local_utils.get_connected_android_devices()["recovery"] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_connected_android_devices()["recovery"]
class reboot_safe_mode(relay_step):
    """ description:
            Reboots into safe mode.
        usage:
            relay_steps.reboot_safe_mode(serial = serial,
                                         mode = "android",
                                         relay_type = relay_type,
                                         relay_port=relay_port,
                                         power_port=power_port)()
            - mode: "fastboot" or "android"
        tags:
            reboot, recovery, android, relay
    """

    def __init__(self, serial, app_to_find, mode="android", timeout=30, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.mode = mode
        self.timeout = timeout
        # third-party app expected to be hidden while in safe mode
        self.app_to_find = app_to_find
        self.kwargs = kwargs
        self.set_errorm(
            "", "Cannot reboot in safe mode - device {0}".format(self.serial))
        self.set_passm(
            "Rebooted in safe mode - device {0}".format(self.serial))

    def do(self):
        # Long press on power button
        self.relay.long_press_power()
        # Long press on power to enable reboot safe mode prompt and select OK
        ui_steps.long_click(serial=self.serial,
                            view_to_find={
                                "resourceId": "android:id/contentPanel"},
                            view_to_check={"text": "Reboot to safe mode"})()
        ui_steps.click_button(serial=self.serial,
                              view_to_find={"text": "OK"})()
        # Wait for the device to reboot
        local_steps.wait_until_disconnected(serial=self.serial)()
        local_steps.wait_for_adb(serial=self.serial)()
        adb_steps.wait_for_ui(serial=self.serial)()

    def check_condition(self):
        # safe mode hides third-party apps, so the app must NOT be present
        self.step_data = ui_steps.find_app_from_allapps(serial=self.serial,
                                                        view_to_find={
                                                            "text": self.app_to_find},
                                                        presence=False)()
        return self.step_data
class reboot_safe_mode_magic_key(relay_step):
    """ description:
            Enters safe mode when booting by pressing volume down button.
        usage:
            relay_steps.reboot_safe_mode_magic_key(serial = serial,
                                                   relay_type = relay_type,
                                                   relay_port=relay_port,
                                                   power_port=power_port,
                                                   v_up_port=v_up_port
                                                   v_down_port=v_down_port)()
        tags:
            reboot, android, relay, safe_mode, power_off, power_on
    """

    def __init__(self, serial, app_to_find, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        # third-party app expected to be hidden while in safe mode
        self.app_to_find = app_to_find
        self.kwargs = kwargs
        self.set_errorm(
            "", "Cannot reboot in safe mode - device {0}".format(self.serial))
        self.set_passm(
            "Rebooted in safe mode - device {0}".format(self.serial))

    def do(self):
        # Turn off the device
        power_off_device(serial=self.serial,
                         except_charging=True,
                         **self.kwargs)()
        # Power on the device
        power_on_device(serial=self.serial,
                        **self.kwargs)()
        # Press volume down
        # hold volume-down for the whole boot so Android enters safe mode
        self.relay.relay.on(self.relay.v_down_port)
        # Wait for UI processes
        adb_steps.wait_for_ui_processes(serial=self.serial)()
        # Release volume down button
        self.relay.relay.off(self.relay.v_down_port)

    def check_condition(self):
        # Unlock device
        ui_steps.wake_up_device(serial=self.serial)()
        ui_steps.unlock_device(serial=self.serial)()
        # Check if the app is NOT displayed in normal mode
        ui_steps.find_app_from_allapps(serial=self.serial,
                                       view_to_find={"text": self.app_to_find},
                                       presence=False)()
        return True
class connect_disconnect_usb(relay_step):
    """ description:
        usage:
            relay_steps.disconnect_usb(serial=serial,
                                       connect=True,
                                       USB_VC_cut_port = USB_VC_cut_port)()
        tags:
            usb, relay
    """

    def __init__(self, serial, connect, timeout=30, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.timeout = timeout
        # True -> restore USB, False -> cut USB
        self.connect = connect

    def do(self):
        # drive the USB cut relay in the requested direction
        if self.connect:
            self.relay.uncut_usb_vc()
        else:
            self.relay.cut_usb_vc()

    def check_condition(self):
        # poll until the adb connection state matches the requested one
        wait_time = 0
        result = False
        while wait_time < self.timeout:
            if self.connect:
                # re-enumeration takes a while; allow extra settling time
                time.sleep(10)
                if local_utils.is_device_connected(self.serial):
                    result = True
                    break
            else:
                if not local_utils.is_device_connected(self.serial):
                    result = True
                    break
            time.sleep(2)
            wait_time += 2
        return result
class take_screenshot(relay_step, adb_step):
    """ description:
            Takes screenshot using power button and volume button.
        usage:
            relay_steps.take_screenshot(serial = serial,
                                        relay_type = relay_type,
                                        relay_port=relay_port,
                                        power_port=power_port,
                                        v_up_port=v_up_port
                                        v_down_port=v_down_port)()
        tags:
            screenshot, android, relay
    """

    def __init__(self, serial, screenshots_folder, timeout=30, **kwargs):
        adb_step.__init__(self, serial=serial, **kwargs)
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        # on-device folder where screenshots land
        self.screenshots_folder = screenshots_folder
        self.timeout = timeout
        self.kwargs = kwargs
        self.result = True
        # filled by do() with the screenshot file size in bytes
        self.step_data = None
        self.set_errorm(
            "", "Cannot take screenshot - device {0}".format(self.serial))
        self.set_passm(
            "Screenshot taken successfully - device {0}".format(self.serial))

    def do(self):
        # Delete the contents of the "/sdcard/Pictures/Screenshots" folder on the DUT
        if adb_utils.folder_exists(serial=self.serial, folder=self.screenshots_folder):
            adb_steps.delete_folder_content(
                serial=self.serial, folder=self.screenshots_folder)()
        # Take screeenshot
        self.relay.take_screenshot()
        # Check if the screenshot exists
        # screenshot file names embed the current date (YYYYmmdd)
        screenshot_date = datetime.datetime.strftime(
            datetime.datetime.now(), "%Y%m%d")
        # List the contents of the screenshot folder
        output = adb_utils.wait_for_file_with_text(text_contained=screenshot_date,
                                                   dir_to_search=self.screenshots_folder,
                                                   serial=self.serial)
        if output is None:
            self.result = False
            self.set_errorm("", "Failed when waiting for file with text {0} - device {1}".format(screenshot_date,
                                                                                                 self.serial))
            return
        # Check if the file has the correct name (screenshot_date in name)
        if not re.search("\w+{0}".format(screenshot_date), output):
            self.result = False
        # Get the file size
        # NOTE(review): raises IndexError when the listing has no size field
        self.step_data = int(re.findall("\s(\d+)\s", output)[0])

    def check_condition(self):
        # The correct file should exist and the size should not be 0
        return self.result and self.step_data > 0
class choose_fastboot_menu(relay_step):
    """ description:
            Selects the provided menu option from fastboot.
        usage:
            relay_steps.choose_fastboot_menu(serial = serial,
                                             option = "normal_boot",
                                             relay_type = relay_type,
                                             relay_port=relay_port,
                                             power_port=power_port,
                                             v_up_port = v_up_port,
                                             v_down_port = v_down_port)()
            - option:
                - normal_boot
                - power_off
                - bootloader
                - recovery
                - reboot
        tags:
            reboot, fastboot, android, relay
    """

    def __init__(self, serial, option=None, menu_position=None, timeout=60, **kwargs):
        relay_step.__init__(self, **kwargs)
        self.serial = serial
        self.kwargs = kwargs
        self.timeout = timeout
        self.option = option
        if not menu_position:
            # derive the menu index for this option from the device statics
            self.menu_position = statics.Device(
                serial=self.serial).fastboot_menu_entry[self.option]
        else:
            self.menu_position = menu_position

    def do(self):
        # choose the option from bootloader
        # sleep to give time the getvar from statics.Device to finish
        time.sleep(1)
        # the animation
        relay_utils.select_fastboot_menu_item(self.relay, self.menu_position)
        time.sleep(0.5)
        self.relay.press_power()

    def check_condition(self):
        # map the chosen menu option onto the device state to wait for
        wait_time = 0
        if self.option in ["normal_boot", "reboot"]:
            device_state = "android"
        elif self.option == "power_off":
            device_state = "charge_os"
            # a powered-off device cannot be polled, so pass immediately
            # (the assignment above is therefore never used)
            return True
        elif self.option == "bootloader":
            device_state = "fastboot"
            time.sleep(3)
        elif self.option == "recovery":
            device_state = "recovery"
        else:
            self.set_errorm("", "Invalid menu option {0}".format(self.option))
            return False
        while self.serial not in local_utils.get_connected_android_devices()[device_state] and\
                wait_time < self.timeout:
            time.sleep(2)
            wait_time += 2
        return self.serial in local_utils.get_connected_android_devices()[device_state]
class choose_crashmode_menu(choose_fastboot_menu):
    """ description:
            Selects the provided menu option from crash mode.
        usage:
            relay_steps.choose_crashmode_menu(serial = serial,
                                              menu_position=3,
                                              relay_type = relay_type,
                                              relay_port=relay_port,
                                              power_port=power_port,
                                              v_up_port = v_up_port,
                                              v_down_port = v_down_port)()
            - option:
                - normal_boot
                - power_off
                - bootloader
                - recovery
                - reboot
        tags:
            reboot, crashmode, android, relay
    """
    def __init__(self, serial, option=None, menu_position=None, timeout=60, **kwargs):
        # Forward menu_position and timeout to the parent. Previously
        # menu_position was silently dropped (breaking the documented usage
        # with an explicit menu_position) and timeout was hard-coded to 60.
        choose_fastboot_menu.__init__(
            self, serial, option, menu_position=menu_position, timeout=timeout, **kwargs)
    def do(self):
        # Overwrites the parent's do(): crash mode has its own menu
        # navigation helper, and no getvar warm-up sleep is needed.
        relay_utils.select_crashmode_menu_item(self.relay, self.menu_position)
        self.relay.press_power()
    # check_condition() is inherited unchanged from choose_fastboot_menu.
class change_state(relay_step, android_step):
    """ description:
            Changes bootloader state if the oem_unlock_enabled is set to yes.
            Else it will attempt to unlock and check for message in output.
        usage:
            relay_steps.change_state(serial=serial,
                                     dessert=dessert,
                                     unlock_bootloader=lock_state,
                                     relay_type=relay_type,
                                     relay_port=relay_port,
                                     power_port=power_port,
                                     v_down_port=v_down_port,
                                     v_up_port=v_up_port)()
        tags:
            fastboot, android, bootloader, unlock, relay
    """
    def __init__(self, unlock_bootloader, dessert, oem_unlock_enabled="yes", **kwargs):
        android_step.__init__(self, **kwargs)
        relay_step.__init__(self, **kwargs)
        # unlock_bootloader: "yes" -> unlock the bootloader, "no" -> lock it
        self.unlock_bootloader = unlock_bootloader
        # dessert: Android version letter ("L", "M", ...)
        self.dessert = dessert
        self.oem_unlock_enabled = oem_unlock_enabled
        # Define the unlock command depending on Android version:
        # "fastboot oem ..." on L, "fastboot flashing ..." on M and later.
        if self.dessert == "L":
            self.unlock_cmd = "oem"
        elif self.dessert >= "M":
            self.unlock_cmd = "flashing"
        # Define the state command
        if self.unlock_bootloader == "yes":
            self.state_cmd = "unlock"
        elif self.unlock_bootloader == "no":
            self.state_cmd = "lock"
        # When OEM unlock is enabled the fastboot command is expected to
        # complete; otherwise fastboot should refuse with an error message.
        if self.oem_unlock_enabled == "yes":
            self.string_to_check = "finished. total time:"
            self.set_errorm("", "Could not change state to \"{0}\"".format(
                self.unlock_bootloader))
            self.set_passm("State Changed to \"{0}\"".format(
                self.unlock_bootloader))
        else:
            self.string_to_check = "Unlock is not allowed"
            self.set_errorm("", "State changed to \"{0}\", even if OEM unlock is disabled".format(
                self.unlock_bootloader))
            self.set_passm("State not changed to \"{0}\". OEM unlock is disabled".format(
                self.unlock_bootloader))
    def do(self):
        # use_control_process=False
        psteps = ParallelSteps(use_control_process=False)
        # Accept lock/ unlock action using volume and power keys
        def accept_lock_unlock():
            time.sleep(1)
            self.relay.press_volume_down()
            time.sleep(1)
            self.relay.press_power()
        current_state = fastboot_utils.get_var(serial=self.serial,
                                               var="unlocked")
        # Only act when the device is not already in the requested state.
        if self.unlock_bootloader not in str(current_state):
            # Run the lock/ unlock command in a parallel step (it blocks
            # until the on-device confirmation dialog is answered).
            step_id_lock = psteps.add_step(fastboot_steps.command,
                                           serial=self.serial,
                                           command=self.unlock_cmd + " " + self.state_cmd,
                                           stderr_grep=self.string_to_check,
                                           timeout=120000)
            time.sleep(2)
            # Run accept lock unlock if the operation is allowed
            if self.oem_unlock_enabled == "yes":
                accept_lock_unlock()
            # Interpret the parallel step result
            psteps.interpret_step(step_id_lock)
    def check_condition(self):
        # If the OEM unlocking is disabled, pass if the state is unchanged
        if self.oem_unlock_enabled != "yes":
            return self.unlock_bootloader != fastboot_utils.get_var(serial=self.serial,
                                                                    var="unlocked")
        # If the OEM unlocking is enabled, pass if the state matches the request
        return self.unlock_bootloader == fastboot_utils.get_var(serial=self.serial,
                                                                var="unlocked")
class create_panic_and_check_state(relay_step, android_step):
    """ description:
            Creates kernel panics (or stops the watchdog daemon) watchdog_counter_max times.
            It then checks the device boot state (MOS, crash mode or fastboot), depending on wait_for_state value
        usage:
            create_panic_and_check_state(serial=serial,
                                         create_crash_mode=panic/watchdog,
                                         wait_for_state=state)()
            state = [android, crashmode, fastboot]
        tags:
            adb, panic, watchdog, fastboot, crashmode, android
    """
    def __init__(self, create_crash_mode, wait_for_state, use_combo_button, **kwargs):
        # create_crash_mode: "panic" -> sysrq-triggered kernel panic,
        # anything else -> kill the watchdog daemon instead.
        self.create_crash_mode = create_crash_mode
        self.wait_for_state = wait_for_state
        # "echo e" into sysrq triggers a kernel oops/panic on the DUT.
        self.panic_command = "\"echo e > /proc/sysrq-trigger\""
        self.watchdog_process_name = "/sbin/watchdogd"
        self.wait_for_fastboot_timeout = 70
        self.use_combo_button = use_combo_button
        relay_step.__init__(self, **kwargs)
        android_step.__init__(self, **kwargs)
    def do(self):
        # Create panic
        adb_steps.root_connect_device(serial=self.serial)()
        if self.create_crash_mode == "panic":
            # Using local_steps to run the command because adb_steps.command() does not create kernel panic
            local_steps.command(
                "adb -s {0} shell {1}".format(self.serial, self.panic_command))()
            # Wait for the device to disconnect
            local_steps.wait_until_disconnected(serial=self.serial)()
            if self.use_combo_button is False:
                # Without a combo button, allow the device time to settle
                # before checking the resulting state. (Python 2 print.)
                print "======================wait 120s======================="
                time.sleep(120)
        else:
            # Kill the watchdog daemon; its death forces a reboot.
            adb_steps.kill_process(serial=self.serial,
                                   process_name=self.watchdog_process_name,
                                   with_reboot=True,
                                   reboot_timeout=60)()
        # Wait for a device state
        if self.wait_for_state == "fastboot":
            # Set the pass and error messages
            self.set_errorm(
                "", "The device with serial {0} is not in fastboot".format(self.serial))
            self.set_passm(
                "The device with serial {0} is in fastboot".format(self.serial))
            # If the expected state is fastboot, we need to press the volume down button after the panic is created
            # This will boot the device in fastboot
            # Create a parallel step
            # use_control_process=False
            psteps = ParallelSteps(use_control_process=False)
            step_id_combo = psteps.add_step(local_steps.wait_for_fastboot,
                                            serial=self.serial,
                                            timeout=self.wait_for_fastboot_timeout)
            # Hold volume-down via the relay for almost the whole timeout.
            self.relay.relay.on(self.relay.v_down_port)
            time.sleep(self.wait_for_fastboot_timeout - 10)
            self.relay.relay.off(self.relay.v_down_port)
            # Interpret the parallel step result
            psteps.interpret_step(step_id_combo)
        elif self.wait_for_state == "android":
            # Set the pass and error messages
            self.set_errorm(
                "", "The device with serial {0} is not in MOS".format(self.serial))
            self.set_passm(
                "The device with serial {0} is in MOS".format(self.serial))
            # Wait for adb connection
            local_steps.wait_for_adb(serial=self.serial)()
            # Wait for the UI processes
            adb_steps.wait_for_ui_processes(serial=self.serial)()
        elif self.wait_for_state == "crashmode":
            # Set the pass and error messages
            self.set_errorm(
                "", "The device with serial {0} is not in crash mode".format(self.serial))
            self.set_passm(
                "The device with serial {0} is in crash mode".format(self.serial))
            # Wait for crashmode
            local_steps.wait_for_crashmode(serial=self.serial,
                                           timeout=60)()
    def check_condition(self):
        # Check performed in do()
        return True
| en | 0.684016 | #!/usr/bin/env python Copyright (C) 2018 Intel Corporation ? Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at ? http://www.apache.org/licenses/LICENSE-2.0 ? Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ? SPDX-License-Identifier: Apache-2.0 description: Shuts down the device. usage: relay_steps.power_off_device(serial=serial, relay_type = relay_type, relay_port = relay_port, power_port = power_port)() tags: shutdown, relay description: Shuts down the device. usage: relay_steps.power_off_device(serial=serial, relay_type = relay_type, relay_port = relay_port, power_port = power_port)() tags: shutdown, relay description: Shuts down the device. usage: relay_steps.power_off_device(serial=serial, relay_type = relay_type, relay_port = relay_port, power_port = power_port)() tags: shutdown, relay description: Powers up the device. usage: relay_steps.power_on_device(serial=serial, relay_type = relay_type, relay_port = relay_port, power_port = power_port)() tags: startup, relay description: Reboots to fastboot. usage: relay_steps.reboot_fastboot(serial = serial, relay_type = relay_type, relay_port=relay_port, power_port=power_port, v_up_port=v_up_port, v_down_port=v_down_port, USB_VC_cut_port=USB_VC_cut_port)() tags: fastboot, android, reboot, relay # hard boot to fastboot description: Reboots to main OS. 
usage: relay_steps.reboot_main_os(serial = serial, relay_type = relay_type, relay_port=relay_port, power_port=power_port, force_reboot = True)() - force_reboot: if equals True then the device will be rebooted even if it is already into main OS tags: reboot, android, relay # hard boot to main OS description: Reboots to main OS. usage: relay_steps.recovery_reboot(serial = serial, mode = "fastboot", relay_type = relay_type, relay_port=relay_port, power_port=power_port, v_up_port = v_up_port, v_down_port = v_down_port)() - mode: "fastboot" or "android" tags: reboot, recovery, android, relay # hard boot from recovery to main OS description: Reset to factory defaults from recovery OS. At the end the system remains in ROS. usage: relay_steps.recovery_factory_reset(serial = serial, relay_type = relay_type, relay_port=relay_port, power_port=power_port)() tags: reboot, recovery, android, relay # hard boot from recovery to main OS # noqa # accept the factory reset description: Reboots into safe mode. usage: relay_steps.reboot_safe_mode(serial = serial, mode = "android", relay_type = relay_type, relay_port=relay_port, power_port=power_port)() - mode: "fastboot" or "android" tags: reboot, recovery, android, relay # Long press on power button # Long press on power to enable reboot safe mode prompt and select OK # Wait for the device to reboot description: Enters safe mode when booting by pressing volume down button. 
usage: relay_steps.reboot_safe_mode_magic_key(serial = serial, relay_type = relay_type, relay_port=relay_port, power_port=power_port, v_up_port=v_up_port v_down_port=v_down_port)() tags: reboot, android, relay, safe_mode, power_off, power_on # Turn off the device # Power on the device # Press volume down # Wait for UI processes # Release volume down button # Unlock device # Check if the app is NOT displayed in normal mode description: usage: relay_steps.disconnect_usb(serial=serial, connect=True, USB_VC_cut_port = USB_VC_cut_port)() tags: usb, relay description: Takes screenshot using power button and volume button. usage: relay_steps.take_screenshot(serial = serial, relay_type = relay_type, relay_port=relay_port, power_port=power_port, v_up_port=v_up_port v_down_port=v_down_port)() tags: screenshot, android, relay # Delete the contents of the "/sdcard/Pictures/Screenshots" folder on the DUT # Take screeenshot # Check if the screenshot exists # List the contents of the screenshot folder # Check if the file has the correct name (screenshot_date in name) # Get the file size # The correct file should exist and the size should not be 0 description: Selects the provided menu option from fastboot. usage: relay_steps.choose_fastboot_menu(serial = serial, option = "normal_boot", relay_type = relay_type, relay_port=relay_port, power_port=power_port, v_up_port = v_up_port, v_down_port = v_down_port)() - option: - normal_boot - power_off - bootloader - recovery - reboot tags: reboot, fastboot, android, relay # choose the option from bootloader # sleep to give time the getvar from statics.Device to finish # the animation description: Selects the provided menu option from fastboot. 
usage: relay_steps.choose_crashmode_menu(serial = serial, menu_position=3, relay_type = relay_type, relay_port=relay_port, power_port=power_port, v_up_port = v_up_port, v_down_port = v_down_port)() - option: - normal_boot - power_off - bootloader - recovery - reboot tags: reboot, crashmode, android, relay # overwrite the do method # check method is the one from inherited class # def check_condition(self): description: Changes bootloader state if the oem_unlock_enabled is set to yes. Else it will attempt to unlock and check for message in output. usage: relay_steps.change_state(serial=serial, dessert=dessert, unlock_bootloader=lock_state, relay_type=relay_type, relay_port=relay_port, power_port=power_port, v_down_port=v_down_port, v_up_port=v_up_port)() tags: fastboot, android, bootloader, unlock, relay # Define the unlock command depending on Android version # Define the state command # use_control_process=False # Accept lock/ unlock action using volume and power keys # Run the lock/ unlock command in a parallel step # Run accept lock unlock if the operations is allowed # Interpret the parallel step result # If the OEM unlocking is disabled, pass if the state is unchanged # If the OEM unlocking is enabled, pass if tha state description: Creates kernel panics (or stops the watchdog daemon) watchdog_counter_max times. 
It then checks the device boot state (MOS, crash mode or fastboot), depending on wait_for_state value usage: create_panic_and_check_state(serial=serial, create_crash_mode=panic/watchdog, wait_for_state=state)() state = [android, crashmode, fastboot] tags: adb, panic, watchdog, fastboot, crashmode, android # Create panic # Using local_steps to run the command because adb_steps.command() does not create kernel panic # Wait for the device to disconnect # Wait for a device state # Set the pass and error messages # If the expected state is fastboot, we need to press the volume down button after the panic is created # This will boot the device in fastboot # Create a parallel step # use_control_process=False # Interpret the parallel step result # Set the pass and error messages # Wait for adb connection # Wait for the UI processes # Set the pass and error messages # Wait for crashmode # Check performed in do() | 1.629307 | 2 |
src/behavior_tree_learning/core/sbt/py_tree.py | dgerod/behavior_tree_learning | 7 | 6620888 | <reponame>dgerod/behavior_tree_learning
import time
import py_trees as pt
from behavior_tree_learning.core.sbt.world import World
from behavior_tree_learning.core.sbt.behavior_tree import BehaviorTreeStringRepresentation
from behavior_tree_learning.core.sbt.node_factory import BehaviorNodeFactory
class ExecutionParameters:
    """Tuning knobs for one behavior-tree execution run.

    Bundles the tick/time budgets and the consecutive success/failure
    thresholds that StringBehaviorTree.run_bt() uses to decide when to stop.
    """
    def __init__(self, max_ticks=30, max_time=30.0, max_straight_fails=1, successes_required=2):
        # Tick budget and wall-clock budget (seconds).
        self.max_ticks, self.max_time = max_ticks, max_time
        # Consecutive FAILURE results tolerated before the run is aborted.
        self.max_straight_fails = max_straight_fails
        # Consecutive SUCCESS results required to declare the run done.
        self.successes_required = successes_required
class StringBehaviorTree(pt.trees.BehaviourTree):
    """Behavior tree constructed from a token-list string representation.

    Wraps a py_trees BehaviourTree and keeps the equivalent
    BehaviorTreeStringRepresentation (self.bt) in sync, either by parsing
    the given token list or by serializing an already-built root.
    """
    class TraceInfo:
        # Small holder for tracing options shared with node construction.
        def __init__(self, verbose):
            self.verbose = verbose
    # NOTE(review): despite the original "str" annotation, "string" is a
    # *list* of tokens (it is consumed with pop(0)); annotation corrected.
    def __init__(self, string: list, behaviors: BehaviorNodeFactory, world: World = None, root=None, verbose=False):
        # When a ready-made root is supplied, derive the token list from it
        # instead of parsing one.
        if root is not None:
            self.root = root
            string = self.to_string()
        self.bt = BehaviorTreeStringRepresentation(string)
        self.depth = self.bt.depth()
        self.length = self.bt.length()
        self.failed = False    # set by run_bt() after too many straight failures
        self.timeout = False   # set by run_bt() when the tick budget is spent
        self._world = world
        self._behavior_factory = behaviors
        self._trace_info = self.TraceInfo(verbose)
        if root is not None:
            has_children = False
        else:
            # The first token becomes the root node; the rest is parsed below.
            self.root, has_children = self._behavior_factory.make_node(string[0], self._world, self._trace_info.verbose)
            string.pop(0)
        super().__init__(root=self.root)
        if has_children:
            self.create_from_string(string, self.root)
    def to_string(self):
        """
        Returns bt string (actually a list) from py tree root
        by cleaning the ascii tree from py trees
        Not complete or beautiful by any means but works for many trees
        """
        string = pt.display.ascii_tree(self.root)
        # Strip py_trees decorations and map composite names to tokens.
        string = string.replace("[o] ", "")
        string = string.replace("\t", "")
        string = string.replace("-->", "")
        string = string.replace("Fallback", "f(")
        string = string.replace("Sequence", "s(")
        bt = string.split("\n")
        bt = bt[:-1]
        # Walk the lines bottom-up and emit one closing token ')' for every
        # 4-space de-indentation step in the ascii tree.
        prev_leading_spaces = 999999
        for i in range(len(bt) - 1, -1, -1):
            leading_spaces = len(bt[i]) - len(bt[i].lstrip(' '))
            bt[i] = bt[i].lstrip(' ')
            if leading_spaces > prev_leading_spaces:
                for _ in range(round((leading_spaces - prev_leading_spaces) / 4)):
                    bt.insert(i + 1, ')')
            prev_leading_spaces = leading_spaces
        # Let the string representation append any still-missing up nodes.
        bt_obj = BehaviorTreeStringRepresentation(bt)
        bt_obj.close()
        return bt_obj.bt
    def create_from_string(self, string: list, node):
        """
        Recursive function to generate the tree from a string

        Consumes tokens from the front of *string*; a ")" token closes the
        current subtree and returns control to the parent level.
        """
        while len(string) > 0:
            if string[0] == ")":
                string.pop(0)
                return node
            new_node, has_children = self._behavior_factory.make_node(string[0], self._world, self._trace_info.verbose)
            string.pop(0)
            if has_children:
                # Node is a control node or decorator with children - add subtree via string and then add to parent
                new_node = self.create_from_string(string, new_node)
                node.add_child(new_node)
            else:
                # Node is a leaf/action node - add to parent, then keep looking for siblings
                node.add_child(new_node)
        # This return is only reached if there are too few up nodes
        return node
    def run_bt(self, parameters: ExecutionParameters = ExecutionParameters()):
        """
        Function executing the behavior tree
        """
        # NOTE(review): the default ExecutionParameters() is created once at
        # definition time; it is only read here, so sharing it is harmless.
        if not self._world.startup(self._trace_info.verbose):
            return False, 0
        max_ticks = parameters.max_ticks
        max_time = parameters.max_time
        max_straight_fails = parameters.max_straight_fails
        successes_required = parameters.successes_required
        ticks = 0
        straight_fails = 0
        successes = 0
        status_ok = True
        start = time.time()
        # Tick until: enough consecutive failures, enough consecutive
        # successes, the tick budget is exhausted, or the world dies.
        while (self.root.status is not pt.common.Status.FAILURE or straight_fails < max_straight_fails) \
                and (self.root.status is not pt.common.Status.SUCCESS or successes < successes_required) \
                and ticks < max_ticks and status_ok:
            status_ok = self._world.is_alive()
            if status_ok:
                self.root.tick_once()
                ticks += 1
                if self.root.status is pt.common.Status.SUCCESS:
                    successes += 1
                else:
                    successes = 0
                if self.root.status is pt.common.Status.FAILURE:
                    straight_fails += 1
                else:
                    straight_fails = 0
                # Enforce the wall-clock budget on top of the tick budget.
                if time.time() - start > max_time:
                    status_ok = False
                    if self._trace_info.verbose:
                        print("Max time expired")
        if self._trace_info.verbose:
            print("Status: %s Ticks: %d, Time: %s" % (status_ok, ticks, time.time() - start))
        if ticks >= max_ticks:
            self.timeout = True
        if straight_fails >= max_straight_fails:
            self.failed = True
        self._world.shutdown()
        return status_ok, ticks
    def save_figure(self, path: str, name: str = "bt"):
        # Render the tree as a graphviz/dot figure into the given directory.
        pt.display.render_dot_tree(self.root, name=name, target_directory=path)
| import time
import py_trees as pt
from behavior_tree_learning.core.sbt.world import World
from behavior_tree_learning.core.sbt.behavior_tree import BehaviorTreeStringRepresentation
from behavior_tree_learning.core.sbt.node_factory import BehaviorNodeFactory
class ExecutionParameters:
def __init__(self, max_ticks=30, max_time=30.0, max_straight_fails=1, successes_required=2):
self.max_ticks = max_ticks
self.max_time = max_time
self.max_straight_fails = max_straight_fails
self.successes_required = successes_required
class StringBehaviorTree(pt.trees.BehaviourTree):
class TraceInfo:
def __init__(self, verbose):
self.verbose = verbose
def __init__(self, string: str, behaviors: BehaviorNodeFactory, world: World = None, root=None, verbose=False):
if root is not None:
self.root = root
string = self.to_string()
self.bt = BehaviorTreeStringRepresentation(string)
self.depth = self.bt.depth()
self.length = self.bt.length()
self.failed = False
self.timeout = False
self._world = world
self._behavior_factory = behaviors
self._trace_info = self.TraceInfo(verbose)
if root is not None:
has_children = False
else:
self.root, has_children = self._behavior_factory.make_node(string[0], self._world, self._trace_info.verbose)
string.pop(0)
super().__init__(root=self.root)
if has_children:
self.create_from_string(string, self.root)
def to_string(self):
"""
Returns bt string (actually a list) from py tree root
by cleaning the ascii tree from py trees
Not complete or beautiful by any means but works for many trees
"""
string = pt.display.ascii_tree(self.root)
string = string.replace("[o] ", "")
string = string.replace("\t", "")
string = string.replace("-->", "")
string = string.replace("Fallback", "f(")
string = string.replace("Sequence", "s(")
bt = string.split("\n")
bt = bt[:-1]
prev_leading_spaces = 999999
for i in range(len(bt) - 1, -1, -1):
leading_spaces = len(bt[i]) - len(bt[i].lstrip(' '))
bt[i] = bt[i].lstrip(' ')
if leading_spaces > prev_leading_spaces:
for _ in range(round((leading_spaces - prev_leading_spaces) / 4)):
bt.insert(i + 1, ')')
prev_leading_spaces = leading_spaces
bt_obj = BehaviorTreeStringRepresentation(bt)
bt_obj.close()
return bt_obj.bt
def create_from_string(self, string: str, node):
"""
Recursive function to generate the tree from a string
"""
while len(string) > 0:
if string[0] == ")":
string.pop(0)
return node
new_node, has_children = self._behavior_factory.make_node(string[0], self._world, self._trace_info.verbose)
string.pop(0)
if has_children:
# Node is a control node or decorator with children - add subtree via string and then add to parent
new_node = self.create_from_string(string, new_node)
node.add_child(new_node)
else:
# Node is a leaf/action node - add to parent, then keep looking for siblings
node.add_child(new_node)
# This return is only reached if there are too few up nodes
return node
def run_bt(self, parameters: ExecutionParameters = ExecutionParameters()):
"""
Function executing the behavior tree
"""
if not self._world.startup(self._trace_info.verbose):
return False, 0
max_ticks = parameters.max_ticks
max_time = parameters.max_time
max_straight_fails = parameters.max_straight_fails
successes_required = parameters.successes_required
ticks = 0
straight_fails = 0
successes = 0
status_ok = True
start = time.time()
while (self.root.status is not pt.common.Status.FAILURE or straight_fails < max_straight_fails) \
and (self.root.status is not pt.common.Status.SUCCESS or successes < successes_required) \
and ticks < max_ticks and status_ok:
status_ok = self._world.is_alive()
if status_ok:
self.root.tick_once()
ticks += 1
if self.root.status is pt.common.Status.SUCCESS:
successes += 1
else:
successes = 0
if self.root.status is pt.common.Status.FAILURE:
straight_fails += 1
else:
straight_fails = 0
if time.time() - start > max_time:
status_ok = False
if self._trace_info.verbose:
print("Max time expired")
if self._trace_info.verbose:
print("Status: %s Ticks: %d, Time: %s" % (status_ok, ticks, time.time() - start))
if ticks >= max_ticks:
self.timeout = True
if straight_fails >= max_straight_fails:
self.failed = True
self._world.shutdown()
return status_ok, ticks
def save_figure(self, path: str, name: str = "bt"):
pt.display.render_dot_tree(self.root, name=name, target_directory=path) | en | 0.907158 | Returns bt string (actually a list) from py tree root by cleaning the ascii tree from py trees Not complete or beautiful by any means but works for many trees Recursive function to generate the tree from a string # Node is a control node or decorator with children - add subtree via string and then add to parent # Node is a leaf/action node - add to parent, then keep looking for siblings # This return is only reached if there are too few up nodes Function executing the behavior tree | 2.188058 | 2 |
eds/openmtc-gevent/server/openmtc-scl/src/openmtc_scl/plugins_eu_projects/device_emulator/__init__.py | piyush82/elastest-device-emulator-service | 0 | 6620889 | <reponame>piyush82/elastest-device-emulator-service<filename>eds/openmtc-gevent/server/openmtc-scl/src/openmtc_scl/plugins_eu_projects/device_emulator/__init__.py
from aplus import Promise
from futile.logging import LoggerMixin
from openmtc_server.Plugin import Plugin
from openmtc_etsi.exc import OpenMTCError
from gevent.server import DatagramServer, StreamServer
from openmtc_scl.platform.gevent.ServerRack import GEventServerRack
from openmtc_etsi.scl import CreateRequestIndication, RetrieveRequestIndication
from openmtc_etsi.model import Scl, MgmtObj, ContentInstance
from json import loads
from base64 import b64decode, b64encode
import threading
from threading import Thread
from time import sleep
from sys import getsizeof
from .create_app import config_mgmt_app
from json import dumps, load
from copy import deepcopy
from random import choice
from string import lowercase
from timeit import timeit
import shelve
DEFAULT_NB_DEVICES = 1
DEFAULT_INTERVAL = 0
DEFAULT_DATA_SIZE = None
DEFAULT_DESTINATION_PATH = None #"coap://localhost:24000/m2m/applications/ScalableDynamicApp/containers/ImportantData/contentInstances/"
DEFAULT_PAYLOAD_FILE = None
class MsgSender(LoggerMixin):
    """Background traffic generator emulating a group of devices.

    The constructor spawns a worker thread which first verifies that the
    destination container exists and then sends nb_devices content instances
    to destination_path - once, or every ``interval`` seconds. Per-iteration
    delay measurements are persisted to a shelve database named
    ``delay_measurements_<counter>.db``.
    """
    def __init__(self, api, nb_devices, interval, data_size, destination_path, payload_file_path, counter):
        # Duration (seconds) of the most recent batch of sends.
        self.total_time = 0
        # Index of the next iteration record written to the shelve db.
        self.db_iteration_counter = 0
        # Identifies this sender's measurement database file.
        self.counter = counter
        self.api = api
        # All sending happens on a worker thread so the caller is not blocked.
        thread = Thread(target=self.start_sending_data,
                        args=(nb_devices, interval, data_size, destination_path, payload_file_path))
        thread.start()
    def send_data_item(self, nb_device, data_size, path, payload_file_path, payload=None):
        """Send ``nb_device`` create requests to ``path`` and time the batch.

        When no payload file is configured a dummy JSON payload is generated;
        if data_size is given the dummy is padded so the serialized payload
        is approximately data_size bytes. The elapsed time of the batch is
        stored in self.total_time.
        """
        def resp_success(result):
            self.logger.info("Message sent successfully !! %s", result)
        def resp_error(result):
            self.logger.info("Error sending message !! %s", result)
        def send_message():
            self.logger.info("Sending Message")
            request = CreateRequestIndication(path=path, resource=payload, content_type="application/json")
            # The application id is the path segment after "applications".
            split_path = path.split("/")
            get_index = split_path.index("applications")
            app_id = split_path[get_index + 1]
            request.targeted_appid = app_id
            request.originator = originator
            response = self.api.send_request_indication(request)
            response.then(resp_success, resp_error)
        # Initialize both closure variables up front so they are always
        # bound - in particular when a payload file was provided, a case the
        # previous code left undefined (NameError in the log call below).
        originator = None
        dummy = ""
        if payload_file_path is None:
            dummy = "Dummy "
            # Sample default message
            if data_size is not None:
                # Create a dummy message of roughly "data_size" bytes:
                # getsizeof(dumps({"key": ""})) is 48, so pad the value with
                # (data_size - 48) filler characters.
                effective_size = data_size - 48
                msg = "a" * effective_size
                payload = dumps({"key": msg})
            else:
                dummy = ""
        payload_size = getsizeof(payload)
        self.logger.info("%sMessage is %s of size %s bytes", dummy, payload, payload_size)
        if data_size is not None:
            if data_size < payload_size:
                self.logger.warning("Payload size exceeded %s bytes", data_size)
        # Run the whole batch and record how long it took.
        t = timeit(lambda: send_message(), number=nb_device)
        self.total_time = t
    def send_data(self, nb_devices, data_size, destination_path, payload_file_path):
        """Send one batch, loading the payload from file when configured."""
        if not self.sending_data:
            return
        if destination_path is not None:
            path = destination_path
        else:
            # Was "self.log.error": LoggerMixin provides "logger" (used by
            # every other call site), so self.log raised AttributeError.
            self.logger.error("Destination Path is not Available. Default path is None")
            return
        if payload_file_path is not None:
            try:
                with open(payload_file_path) as f:
                    try:
                        # Round-trip through the JSON parser to validate it.
                        payload = load(f)
                        payload = dumps(payload)
                    except Exception:
                        self.logger.error("Some errors while reading the contents. \
                        Possibly JSON format error. Setting Payload to Default")
                        payload = None
            except IOError:
                self.logger.error("Couldn't open the file. Verify the path.")
                return
        else:
            payload = None
        self.send_data_item(nb_devices, data_size, path, payload_file_path, payload=payload)
    def start_sending_data(self, nb_devices, interval, data_size, destination_path, payload_file_path):
        """Worker-thread entry point: verify the container, then start sending.

        Retrieves the parent container of destination_path first; only on a
        successful retrieve does the sending start (periodically when
        ``interval`` != 0, otherwise a single batch).
        """
        def success_handle(result):
            self.send_interval_data(nb_devices, interval,
                                    data_size, destination_path, payload_file_path)
        def success_handle_1(result):
            self.send_data(nb_devices, data_size, destination_path, payload_file_path)
        def error_handle(result):
            self.logger.error("Error occurred. %s", result)
            self.sending_data = False
            return
        if destination_path is None:
            # Guard: without a destination we cannot even build the retrieve
            # request below (None.split would raise AttributeError).
            self.sending_data = False
            self.logger.error("Destination Path is not Available. Default path is None")
            return
        self.sending_data = True
        # Retrieve the parent container (strip the trailing path segment).
        path = destination_path
        split_path = path.split("/")
        path = "/".join(split_path[:len(split_path) - 1])
        request = RetrieveRequestIndication(path)
        response = self.api.send_request_indication(request)
        if interval != 0:
            response.then(success_handle, error_handle)
        else:
            response.then(success_handle_1, error_handle)
    def send_interval_data(self, nb_devices, interval, data_size, destination_path, payload_file_path):
        """Send batches until stopped, once per ``interval`` seconds.

        The time spent sending is subtracted from the sleep so the period
        stays close to the configured interval; every iteration's delays are
        appended to this sender's shelve database.
        """
        while self.sending_data:
            self.send_data(nb_devices, data_size, destination_path, payload_file_path)
            total_delay = self.total_time
            self.logger.info("Total delay is %s", total_delay)
            avg_delay = total_delay / nb_devices
            self.logger.info("Average delay is %s", avg_delay)
            # Sleep only the remainder of the configured interval (minimum
            # 1 s). The previous code shrank "interval" itself each round,
            # compounding the correction until the period collapsed to 1 s.
            sleep_time = interval - total_delay
            if sleep_time <= 0:
                sleep_time = 1
            filename = "delay_measurements_" + str(self.counter) + ".db"
            s = shelve.open(filename)
            try:
                config_json = {"nb_devices": nb_devices, "interval": interval, "data_size": data_size}
                s["iteration_" + str(self.db_iteration_counter)] = {"total_delay": total_delay, "avg_delay": avg_delay, "config_json": config_json}
                self.db_iteration_counter += 1
            finally:
                s.close()
            sleep(sleep_time)
    def stop_sending_data(self):
        """Stop the periodic sending loop after the current iteration."""
        self.sending_data = False
class EmulatedDevicesPlugin(Plugin):
    """SCL plugin that spawns emulated-device traffic generators.

    A configuration management app is registered at init time; whenever a
    ContentInstance carrying a traffic configuration is created, all running
    MsgSenders are stopped and a new one is started. A configuration with
    interval == 0 starts no traffic but logs a summary of every delay
    measurement database recorded so far.
    """
    def _init(self):
        # React to every created ContentInstance; the handler decodes the
        # embedded configuration document.
        self.events.resource_created.register_handler(self._handle_content_inst_created, ContentInstance)
        self.config_mgmt_app = config_mgmt_app(self.api)
        self._initialized()
    def _start(self):
        self.config_mgmt_app._start()
        self.obj_dict = {}   # active MsgSender instances, keyed "obj_<n>"
        self.counter = 0     # number of senders created so far
        self._started()
    def _stop(self):
        self._stopped()
    def _handle_content_inst_created(self, instance, request_indication):
        """Apply a new traffic configuration delivered as a ContentInstance."""
        # Stop and drop all running senders. Iterate over a snapshot of the
        # keys: the previous deepcopy of the whole dict also copied live
        # MsgSender objects (threads + API handle), which is unnecessary
        # and unsafe just for iterating keys.
        for key in list(self.obj_dict.keys()):
            self.obj_dict[key].stop_sending_data()
            del self.obj_dict[key]
        # The configuration is base64-encoded JSON; the content wrapper key
        # differs between resource representations.
        try:
            dev_dict = loads(b64decode(request_indication.resource["content"]["$t"]))
        except KeyError:
            dev_dict = loads(b64decode(request_indication.resource["content"]["binaryContent"]))
        traffic_group = dev_dict["traffic_group"]
        nb_devices = traffic_group.get("nb_devices", DEFAULT_NB_DEVICES)
        interval = traffic_group.get("interval", DEFAULT_INTERVAL)
        data_size = traffic_group.get("data_size", DEFAULT_DATA_SIZE)
        destination_path = dev_dict.get("destination_path", DEFAULT_DESTINATION_PATH)
        payload_file_path = dev_dict.get("payload_file_path", DEFAULT_PAYLOAD_FILE)
        if interval == 0:
            # interval == 0 is the "report" command: dump every measurement
            # database written by previous senders and start no traffic.
            self.logger.info(" --- DELAY MEASUREMENT SUMMARY ----")
            for val in range(self.counter):
                filename = "delay_measurements_" + str(val) + ".db"
                self.logger.info("Filename :: %s", filename)
                s = shelve.open(filename)
                counter = 0
                display = True
                condn = True
                try:
                    # Iteration records are numbered consecutively; stop at
                    # the first missing key.
                    while condn:
                        if "iteration_" + str(counter) in s:
                            content = s['iteration_' + str(counter)]
                            if display:
                                # Log the configuration only once per file.
                                self.logger.info("Configuration: devices=%s, interval=%s, data_size=%s", content["config_json"]["nb_devices"],
                                                 content["config_json"]["interval"], content["config_json"]["data_size"])
                                display = False
                            self.logger.info("Iteration %s, Total_Delay=%s, Average_Delay=%s", counter, content["total_delay"], content["avg_delay"])
                            counter += 1
                        else:
                            condn = False
                finally:
                    s.close()
            return
        # Start a fresh sender with the new configuration.
        self.obj_dict["obj_" + str(self.counter)] = MsgSender(self.api, nb_devices, interval, data_size, destination_path, payload_file_path, self.counter)
        self.counter += 1
| from aplus import Promise
from futile.logging import LoggerMixin
from openmtc_server.Plugin import Plugin
from openmtc_etsi.exc import OpenMTCError
from gevent.server import DatagramServer, StreamServer
from openmtc_scl.platform.gevent.ServerRack import GEventServerRack
from openmtc_etsi.scl import CreateRequestIndication, RetrieveRequestIndication
from openmtc_etsi.model import Scl, MgmtObj, ContentInstance
from json import loads
from base64 import b64decode, b64encode
import threading
from threading import Thread
from time import sleep
from sys import getsizeof
from .create_app import config_mgmt_app
from json import dumps, load
from copy import deepcopy
from random import choice
from string import lowercase
from timeit import timeit
import shelve
# Fallback values used when the configuration contentInstance omits a field.
DEFAULT_NB_DEVICES = 1  # number of emulated devices per traffic group
DEFAULT_INTERVAL = 0  # seconds between rounds; 0 triggers the summary report
DEFAULT_DATA_SIZE = None  # dummy payload size in bytes; None disables sizing
DEFAULT_DESTINATION_PATH = None #"coap://localhost:24000/m2m/applications/ScalableDynamicApp/containers/ImportantData/contentInstances/"
DEFAULT_PAYLOAD_FILE = None  # path to a JSON file to use as the payload
class MsgSender(LoggerMixin):
    """Traffic generator emulating ``nb_devices`` devices.

    Sends contentInstance create requests to ``destination_path``,
    either once (interval == 0) or periodically, and records delay
    measurements in a per-instance shelve database.
    """

    def __init__(self, api, nb_devices, interval, data_size, destination_path, payload_file_path, counter):
        self.total_time = 0
        self.db_iteration_counter = 0  # next iteration index in the shelve db
        self.counter = counter  # identifies this sender's measurement db
        self.api = api
        # Generate traffic in the background so the plugin event handler
        # that constructs us is not blocked.
        thread = Thread(target=self.start_sending_data,
                        args=(nb_devices, interval, data_size,
                              destination_path, payload_file_path))
        thread.start()

    def send_data_item(self, nb_device, data_size, path, payload_file_path, payload=None):
        """Send ``nb_device`` create requests to ``path``.

        The total elapsed time is stored in ``self.total_time``.
        """
        def resp_success(result):
            self.logger.info("Message sent successfully !! %s", result)

        def resp_error(result):
            self.logger.info("Error sending message !! %s", result)

        def send_message():
            self.logger.info("Sending Message")
            request = CreateRequestIndication(path=path, resource=payload, content_type="application/json")
            # The application id is the path segment after "applications".
            split_path = path.split("/")
            get_index = split_path.index("applications")
            app_id = split_path[get_index + 1]
            request.targeted_appid = app_id
            request.originator = originator
            response = self.api.send_request_indication(request)
            response.then(resp_success, resp_error)

        if payload_file_path is None:
            dummy = "Dummy "
            # Sample default message
            if data_size is not None:
                # Create a dummy message of size "data_size";
                # getsizeof(dumps({"key": ""})) is 48, hence the offset.
                effective_size = data_size - 48
                msg = "a" * effective_size
                payload = dumps({"key": msg})
            originator = None
        else:
            originator = None
            dummy = ""
        payload_size = getsizeof(payload)
        self.logger.info("%sMessage is %s of size %s bytes", dummy, payload, payload_size)
        if data_size is not None:
            if data_size < payload_size:
                self.logger.warning("Payload size exceeded %s bytes", data_size)
        # Send the message nb_device times, timing the whole batch.
        t = timeit(lambda: send_message(), number=nb_device)
        self.total_time = t

    def send_data(self, nb_devices, data_size, destination_path, payload_file_path):
        """Resolve the payload and delegate one round of sending."""
        if not self.sending_data:
            return
        if destination_path is not None:
            path = destination_path
        else:
            # Fixed: was ``self.log.error`` which is inconsistent with the
            # ``self.logger`` attribute used everywhere else (LoggerMixin).
            self.logger.error("Destination Path is not Available. Default path is None")
            return
        if payload_file_path is not None:
            try:
                with open(payload_file_path) as f:
                    try:
                        payload = load(f)
                        payload = dumps(payload)
                    except ValueError:
                        # json.load raises ValueError (JSONDecodeError) on
                        # malformed input; fall back to the default payload.
                        self.logger.error("Some errors while reading the contents. "
                                          "Possibly JSON format error. Setting Payload to Default")
                        payload = None
            except IOError:
                self.logger.error("Couldn't open the file. Verify the path.")
                return
        else:
            payload = None
        self.send_data_item(nb_devices, data_size, path, payload_file_path, payload=payload)

    def start_sending_data(self, nb_devices, interval, data_size, destination_path, payload_file_path):
        """Verify the parent container exists, then start sending.

        interval != 0 starts the periodic loop; interval == 0 sends once.
        """
        def success_handle(result):
            self.send_interval_data(nb_devices, interval, data_size,
                                    destination_path, payload_file_path)

        def success_handle_1(result):
            self.send_data(nb_devices, data_size, destination_path, payload_file_path)

        def error_handle(result):
            self.logger.error("Error occurred. %s", result)
            self.sending_data = False
            return

        self.sending_data = True
        # Retrieve the parent resource (path minus the last segment) to
        # verify the destination exists before generating traffic.
        path = destination_path
        split_path = path.split("/")
        path = "/".join(split_path[:len(split_path) - 1])
        request = RetrieveRequestIndication(path)
        response = self.api.send_request_indication(request)
        if interval != 0:
            response.then(success_handle, error_handle)
        else:
            response.then(success_handle_1, error_handle)

    def send_interval_data(self, nb_devices, interval, data_size, destination_path, payload_file_path):
        """Send rounds of data until stopped, recording delays to shelve."""
        while self.sending_data:
            self.send_data(nb_devices, data_size, destination_path, payload_file_path)
            total_delay = self.total_time
            self.logger.info("Total delay is %s", total_delay)
            avg_delay = total_delay / nb_devices
            self.logger.info("Average delay is %s", avg_delay)
            # Subtract the time already spent sending from the sleep time,
            # flooring at 1 second.
            if interval - total_delay > 0:
                interval = interval - total_delay
            else:
                interval = 1
            filename = "delay_measurements_" + str(self.counter) + ".db"
            s = shelve.open(filename)
            try:
                config_json = {"nb_devices": nb_devices, "interval": interval, "data_size": data_size}
                s["iteration_" + str(self.db_iteration_counter)] = {
                    "total_delay": total_delay,
                    "avg_delay": avg_delay,
                    "config_json": config_json,
                }
                self.db_iteration_counter += 1
            finally:
                s.close()
            sleep(interval)

    def stop_sending_data(self):
        """Signal the sending loop to stop after the current round."""
        self.sending_data = False
class EmulatedDevicesPlugin(Plugin):
    """openMTC plugin that spawns :class:`MsgSender` traffic generators
    whenever a configuration contentInstance is created."""

    def _init(self):
        self.events.resource_created.register_handler(self._handle_content_inst_created, ContentInstance)
        self.config_mgmt_app = config_mgmt_app(self.api)
        self._initialized()

    def _start(self):
        self.config_mgmt_app._start()
        self.obj_dict = {}  # active MsgSender instances keyed by "obj_<n>"
        self.counter = 0  # total senders created (also names the shelve dbs)
        self._started()

    def _stop(self):
        self._stopped()

    def _handle_content_inst_created(self, instance, request_indication):
        """Stop old senders, decode the new config, and act on it."""
        # Snapshot the keys instead of deep-copying the whole dict: deepcopy
        # would needlessly (and riskily) copy MsgSender objects that hold API
        # handles; we only need a stable key list while mutating the dict.
        for key in list(self.obj_dict):
            self.obj_dict[key].stop_sending_data()
            del self.obj_dict[key]
        # The content encoding key differs between representations.
        try:
            dev_dict = loads(b64decode(request_indication.resource["content"]["$t"]))
        except KeyError:
            dev_dict = loads(b64decode(request_indication.resource["content"]["binaryContent"]))
        traffic_group = dev_dict["traffic_group"]
        nb_devices = traffic_group.get("nb_devices", DEFAULT_NB_DEVICES)
        interval = traffic_group.get("interval", DEFAULT_INTERVAL)
        data_size = traffic_group.get("data_size", DEFAULT_DATA_SIZE)
        destination_path = dev_dict.get("destination_path", DEFAULT_DESTINATION_PATH)
        payload_file_path = dev_dict.get("payload_file_path", DEFAULT_PAYLOAD_FILE)
        if interval == 0:
            # interval == 0 requests a summary report of all measurement
            # databases recorded so far instead of starting new traffic.
            self.logger.info(" --- DELAY MEASUREMENT SUMMARY ----")
            for val in range(self.counter):
                filename = "delay_measurements_" + str(val) + ".db"
                self.logger.info("Filename :: %s", filename)
                s = shelve.open(filename)
                counter = 0
                display = True
                condn = True
                try:
                    while condn:
                        if "iteration_" + str(counter) in s:
                            content = s['iteration_' + str(counter)]
                            if display:
                                # Log the configuration once per database.
                                self.logger.info(
                                    "Configuration: devices=%s, interval=%s, data_size=%s",
                                    content["config_json"]["nb_devices"],
                                    content["config_json"]["interval"],
                                    content["config_json"]["data_size"])
                                display = False
                            self.logger.info(
                                "Iteration %s, Total_Delay=%s, Average_Delay=%s",
                                counter, content["total_delay"], content["avg_delay"])
                            counter += 1
                        else:
                            condn = False
                finally:
                    s.close()
            return
        self.obj_dict["obj_" + str(self.counter)] = MsgSender(
            self.api, nb_devices, interval, data_size, destination_path,
            payload_file_path, self.counter)
        self.counter += 1
fabricio/docker/image.py | theoden-dd/fabricio | 291 | 6620890 | <filename>fabricio/docker/image.py
import json
import warnings
import docker.auth
import docker.utils
import six
from functools import partial
import fabricio
from fabricio import utils
class ImageError(fabricio.Error):
    """Base exception for Docker image related failures."""
    pass
class ImageNotFoundError(ImageError):
    """Raised when ``docker inspect`` cannot find the requested image."""
    pass
class Registry(six.text_type):
    """Docker registry address string with parsed ``host``/``port`` attributes."""

    def __new__(cls, value=None, *args, **kwargs):
        # Registry(None) deliberately yields None (not an empty string) so
        # callers can pass an optional registry straight through.
        if value is None:
            return None
        return super(Registry, cls).__new__(cls, value, *args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(Registry, self).__init__(**kwargs)
        self.host, _, port = self.partition(':')
        # port is '' (falsy) when the string contains no ':'.
        self.port = port and int(port)
class Image(object):
    """Docker image reference (``[registry/]name[:tag|@digest]``).

    Doubles as a data descriptor: when used as a class attribute of a
    service, attribute access yields a per-service clone bound to that
    service. Provides helpers to pull, build, run and delete the image.
    """

    name = None
    tag = None
    registry = None
    use_digest = False

    @property
    def temp_tag(self):
        # Temporary local tag used to keep the previous image available
        # while a new one with the same name is pulled/built.
        return 'fabricio-temp-image:' + self.name.rsplit('/')[-1]

    def __new__(cls, name=None, tag=None, registry=None):
        # Image(Image(...)) clones via the slicing API (__getitem__).
        if isinstance(name, Image):
            return name[registry:tag]
        return super(Image, cls).__new__(cls)

    def __init__(self, name=None, tag=None, registry=None):
        if name is not None and not isinstance(name, Image):
            _registry, _name, _tag = self.parse_image_name(name)
            self.name = _name
            self.tag = tag or _tag or 'latest' # TODO 'latest' is unnecessary
            self.registry = Registry(registry or _registry)
            # A digest reference ("name@sha256:...") only when no explicit
            # tag overrides it.
            self.use_digest = not tag and '@' in name
        self.field_names = {} # descriptor's cache
        self.service = None

    def __str__(self):
        # Prefer the bound service's concrete image id when available.
        if self.service is not None:
            image_id = getattr(self.service, 'image_id', None)
            if image_id:
                return image_id
        return super(Image, self).__str__()

    def __repr__(self):
        """Return the full reference string; raise ImageError when unnamed."""
        if not self.name:
            raise ImageError('image name is not set or empty')
        tag_separator = '@' if self.use_digest else ':'
        registry = self.registry and '{0}/'.format(self.registry) or ''
        tag = self.tag and '{0}{1}'.format(tag_separator, self.tag) or ''
        return '{registry}{name}{tag}'.format(
            registry=registry,
            name=self.name,
            tag=tag,
        )

    def __bool__(self):
        # Truthy iff a valid reference string can be produced.
        try:
            return bool(repr(self))
        except ImageError:
            return False

    def __nonzero__(self):
        # Python 2 alias of __bool__.
        return self.__bool__()

    def __get__(self, service, owner_cls):
        # Descriptor protocol: hand out (and cache) a per-service clone.
        if service is None:
            return self
        field_name = self.get_field_name(owner_cls)
        if field_name not in service.__dict__:
            image = service.__dict__[field_name] = self[:]
            image.service = service
        return service.__dict__[field_name]

    def __set__(self, service, image):
        # Descriptor protocol: coerce the assigned value to an Image bound
        # to the service.
        field_name = self.get_field_name(type(service))
        image = self.__class__(image)
        image.service = service
        service.__dict__[field_name] = image

    def __getitem__(self, item):
        """Clone with overrides: image[tag] or image[registry:tag:account]."""
        if isinstance(item, slice):
            registry, tag, account = item.start, item.stop, item.step
        else:
            registry, tag, account = None, item, None
        use_digest = self.use_digest
        # tag can override image registry, name and/or digest
        _registry, _name, _tag = self.parse_image_name(tag)
        if not _tag:
            if _registry:
                _tag = 'latest'
            else:
                # A bare word parsed as a name is actually just a tag.
                _tag, _name = _name, None
        if _tag:
            use_digest = _name and tag and '@' in tag
        registry = _registry or registry or self.registry
        name = _name or account and self.name and '{account}/{name}'.format(
            account=account,
            name=self.name.split('/')[-1],
        ) or self.name
        tag = _tag or tag or self.tag
        if use_digest:
            name = '{name}@{digest}'.format(name=name, digest=tag)
            tag = None
        return self.__class__(name=name, tag=tag, registry=registry)

    def get_field_name(self, owner_cls):
        """Find (and cache) the attribute name this descriptor occupies."""
        field_name = self.field_names.get(owner_cls)
        if field_name is None:
            for attr in dir(owner_cls):
                if getattr(owner_cls, attr) is self:
                    if field_name is not None:
                        raise ValueError(
                            'Same instance of Image used for more than one '
                            'attribute of class {cls}'.format(
                                cls=owner_cls.__name__,
                            )
                        )
                    self.field_names[owner_cls] = field_name = attr
        return field_name

    @staticmethod
    def parse_image_name(image):
        """Split an image reference into (registry, name, tag)."""
        if not image:
            return None, None, None
        repository, tag = docker.utils.parse_repository_tag(image)
        registry, name = docker.auth.resolve_repository_name(repository)
        # Docker Hub is represented as "no registry".
        if registry == docker.auth.INDEX_NAME:
            registry = None
        return registry, name, tag

    @property
    def digest(self):
        """Return the digest reference, querying the daemon when needed."""
        if not self.use_digest:
            for repo_digest in self.info.get('RepoDigests', ()):
                return repo_digest
            raise ImageError('image has no digest')
        return repr(self)

    @utils.default_property
    def info(self):
        """``docker inspect`` data for this image (cached by default_property)."""
        command = 'docker inspect --type image {image}'
        info = fabricio.run(
            command.format(image=self),
            abort_exception=ImageNotFoundError,
        )
        return json.loads(info)[0]

    def get_delete_callback(self, force=False):
        """Return a callable that removes the image (``docker rmi``)."""
        command = 'docker rmi {force}{image}'
        force = force and '--force ' or ''
        return partial(
            fabricio.run,
            command.format(image=self, force=force),
            ignore_errors=True,
        )

    def delete(self, force=False, ignore_errors=True):
        """Remove the image on the host."""
        delete_callback = self.get_delete_callback(force=force)
        return delete_callback(ignore_errors=ignore_errors)

    @classmethod
    def make_container_options(
        cls,
        temporary=None,
        name=None,
        options=(),
    ):
        """Build ``docker run``/``create`` options; temporary containers get
        --rm/-t/-i, non-temporary ones get --detach."""
        return utils.Options(
            options,
            name=name,
            rm=temporary,
            tty=temporary,
            interactive=temporary,
            detach=temporary is not None and not temporary,
        )

    def run(
        self,
        command=None,
        name=None,
        temporary=True,
        options=(),
        quiet=True,
    ):
        """Run a container from this image via ``docker run``."""
        run_command = 'docker run {options} {image} {command}'
        return fabricio.run(
            run_command.format(
                image=self,
                command=command or '',
                options=self.make_container_options(
                    temporary=temporary,
                    name=name,
                    options=options,
                ),
            ),
            quiet=quiet,
        )

    def create(self, command=None, name=None, options=()): # pragma: no cover
        """Deprecated: create (but do not start) a container from this image."""
        warnings.warn('Image.create() is deprecated', DeprecationWarning)
        warnings.warn(
            'Image.create() is deprecated',
            RuntimeWarning, stacklevel=2,
        )
        run_command = 'docker create {options} {image} {command}'.rstrip()
        return fabricio.run(
            run_command.format(
                image=self,
                command=command or '',
                options=self.make_container_options(name=name, options=options),
            ),
        )

    def pull(self, local=False, use_cache=False, ignore_errors=False):
        """Pull the image, preserving the old one under a temp tag until the
        pull succeeds."""
        run = fabricio.local if local else fabricio.run
        run = partial(run, use_cache=use_cache, ignore_errors=ignore_errors)
        run_ignore_errors = partial(run, ignore_errors=True)
        image = six.text_type(self)
        # Keep the currently tagged image reachable under temp_tag so it is
        # not orphaned/lost if the pull replaces it.
        run_ignore_errors(
            'docker tag {image} {tag} '
            '&& docker rmi {image}'.format(image=image, tag=self.temp_tag)
        )
        pull_result = run('docker pull ' + image, quiet=False)
        if pull_result.succeeded:
            run_ignore_errors('docker rmi {tag}'.format(tag=self.temp_tag))

    def build(self, local=False, build_path='.', options=None, use_cache=False):
        """Build the image from ``build_path`` and clean up the old layers."""
        if local:
            run = fabricio.local
            run_capture_output = partial(run, capture=True)
        else:
            run = run_capture_output = fabricio.run
        run = partial(run, use_cache=use_cache)
        run_capture_output = partial(run_capture_output, use_cache=use_cache)
        run_ignore_errors = partial(run, ignore_errors=True)
        image = six.text_type(self)
        options = options or utils.Options()
        options['tag'] = image
        # default options
        options.setdefault('pull', 1)
        options.setdefault('force-rm', 1)
        # Patch fabricio.run so the inspect inside `info` captures output.
        with utils.patch(fabricio, 'run', run_capture_output):
            try:
                old_parent_id = self.info['Parent']
            except ImageNotFoundError:
                old_parent_id = ''
        # Keep the old image reachable under temp_tag during the build.
        run_ignore_errors(
            'docker tag {image} {tag} '
            '&& docker rmi {image}'.format(image=image, tag=self.temp_tag)
        )
        run(
            'docker build {options} {build_path}'.format(
                options=options,
                build_path=build_path,
            ),
            quiet=False,
        )
        # Drop the temp tag and the previous image's parent layer.
        run_ignore_errors('docker rmi {tag} {old_parent}'.format(
            tag=self.temp_tag,
            old_parent=old_parent_id,
        ))
| <filename>fabricio/docker/image.py
import json
import warnings
import docker.auth
import docker.utils
import six
from functools import partial
import fabricio
from fabricio import utils
class ImageError(fabricio.Error):
    """Base exception for Docker image related failures."""
    pass
class ImageNotFoundError(ImageError):
    """Raised when ``docker inspect`` cannot find the requested image."""
    pass
class Registry(six.text_type):
    """Docker registry address string with parsed ``host``/``port`` attributes."""

    def __new__(cls, value=None, *args, **kwargs):
        # Registry(None) deliberately yields None (not an empty string) so
        # callers can pass an optional registry straight through.
        if value is None:
            return None
        return super(Registry, cls).__new__(cls, value, *args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(Registry, self).__init__(**kwargs)
        self.host, _, port = self.partition(':')
        # port is '' (falsy) when the string contains no ':'.
        self.port = port and int(port)
class Image(object):
    """Docker image reference (``[registry/]name[:tag|@digest]``).

    Doubles as a data descriptor: when used as a class attribute of a
    service, attribute access yields a per-service clone bound to that
    service. Provides helpers to pull, build, run and delete the image.
    """

    name = None
    tag = None
    registry = None
    use_digest = False

    @property
    def temp_tag(self):
        # Temporary local tag used to keep the previous image available
        # while a new one with the same name is pulled/built.
        return 'fabricio-temp-image:' + self.name.rsplit('/')[-1]

    def __new__(cls, name=None, tag=None, registry=None):
        # Image(Image(...)) clones via the slicing API (__getitem__).
        if isinstance(name, Image):
            return name[registry:tag]
        return super(Image, cls).__new__(cls)

    def __init__(self, name=None, tag=None, registry=None):
        if name is not None and not isinstance(name, Image):
            _registry, _name, _tag = self.parse_image_name(name)
            self.name = _name
            self.tag = tag or _tag or 'latest' # TODO 'latest' is unnecessary
            self.registry = Registry(registry or _registry)
            # A digest reference ("name@sha256:...") only when no explicit
            # tag overrides it.
            self.use_digest = not tag and '@' in name
        self.field_names = {} # descriptor's cache
        self.service = None

    def __str__(self):
        # Prefer the bound service's concrete image id when available.
        if self.service is not None:
            image_id = getattr(self.service, 'image_id', None)
            if image_id:
                return image_id
        return super(Image, self).__str__()

    def __repr__(self):
        """Return the full reference string; raise ImageError when unnamed."""
        if not self.name:
            raise ImageError('image name is not set or empty')
        tag_separator = '@' if self.use_digest else ':'
        registry = self.registry and '{0}/'.format(self.registry) or ''
        tag = self.tag and '{0}{1}'.format(tag_separator, self.tag) or ''
        return '{registry}{name}{tag}'.format(
            registry=registry,
            name=self.name,
            tag=tag,
        )

    def __bool__(self):
        # Truthy iff a valid reference string can be produced.
        try:
            return bool(repr(self))
        except ImageError:
            return False

    def __nonzero__(self):
        # Python 2 alias of __bool__.
        return self.__bool__()

    def __get__(self, service, owner_cls):
        # Descriptor protocol: hand out (and cache) a per-service clone.
        if service is None:
            return self
        field_name = self.get_field_name(owner_cls)
        if field_name not in service.__dict__:
            image = service.__dict__[field_name] = self[:]
            image.service = service
        return service.__dict__[field_name]

    def __set__(self, service, image):
        # Descriptor protocol: coerce the assigned value to an Image bound
        # to the service.
        field_name = self.get_field_name(type(service))
        image = self.__class__(image)
        image.service = service
        service.__dict__[field_name] = image

    def __getitem__(self, item):
        """Clone with overrides: image[tag] or image[registry:tag:account]."""
        if isinstance(item, slice):
            registry, tag, account = item.start, item.stop, item.step
        else:
            registry, tag, account = None, item, None
        use_digest = self.use_digest
        # tag can override image registry, name and/or digest
        _registry, _name, _tag = self.parse_image_name(tag)
        if not _tag:
            if _registry:
                _tag = 'latest'
            else:
                # A bare word parsed as a name is actually just a tag.
                _tag, _name = _name, None
        if _tag:
            use_digest = _name and tag and '@' in tag
        registry = _registry or registry or self.registry
        name = _name or account and self.name and '{account}/{name}'.format(
            account=account,
            name=self.name.split('/')[-1],
        ) or self.name
        tag = _tag or tag or self.tag
        if use_digest:
            name = '{name}@{digest}'.format(name=name, digest=tag)
            tag = None
        return self.__class__(name=name, tag=tag, registry=registry)

    def get_field_name(self, owner_cls):
        """Find (and cache) the attribute name this descriptor occupies."""
        field_name = self.field_names.get(owner_cls)
        if field_name is None:
            for attr in dir(owner_cls):
                if getattr(owner_cls, attr) is self:
                    if field_name is not None:
                        raise ValueError(
                            'Same instance of Image used for more than one '
                            'attribute of class {cls}'.format(
                                cls=owner_cls.__name__,
                            )
                        )
                    self.field_names[owner_cls] = field_name = attr
        return field_name

    @staticmethod
    def parse_image_name(image):
        """Split an image reference into (registry, name, tag)."""
        if not image:
            return None, None, None
        repository, tag = docker.utils.parse_repository_tag(image)
        registry, name = docker.auth.resolve_repository_name(repository)
        # Docker Hub is represented as "no registry".
        if registry == docker.auth.INDEX_NAME:
            registry = None
        return registry, name, tag

    @property
    def digest(self):
        """Return the digest reference, querying the daemon when needed."""
        if not self.use_digest:
            for repo_digest in self.info.get('RepoDigests', ()):
                return repo_digest
            raise ImageError('image has no digest')
        return repr(self)

    @utils.default_property
    def info(self):
        """``docker inspect`` data for this image (cached by default_property)."""
        command = 'docker inspect --type image {image}'
        info = fabricio.run(
            command.format(image=self),
            abort_exception=ImageNotFoundError,
        )
        return json.loads(info)[0]

    def get_delete_callback(self, force=False):
        """Return a callable that removes the image (``docker rmi``)."""
        command = 'docker rmi {force}{image}'
        force = force and '--force ' or ''
        return partial(
            fabricio.run,
            command.format(image=self, force=force),
            ignore_errors=True,
        )

    def delete(self, force=False, ignore_errors=True):
        """Remove the image on the host."""
        delete_callback = self.get_delete_callback(force=force)
        return delete_callback(ignore_errors=ignore_errors)

    @classmethod
    def make_container_options(
        cls,
        temporary=None,
        name=None,
        options=(),
    ):
        """Build ``docker run``/``create`` options; temporary containers get
        --rm/-t/-i, non-temporary ones get --detach."""
        return utils.Options(
            options,
            name=name,
            rm=temporary,
            tty=temporary,
            interactive=temporary,
            detach=temporary is not None and not temporary,
        )

    def run(
        self,
        command=None,
        name=None,
        temporary=True,
        options=(),
        quiet=True,
    ):
        """Run a container from this image via ``docker run``."""
        run_command = 'docker run {options} {image} {command}'
        return fabricio.run(
            run_command.format(
                image=self,
                command=command or '',
                options=self.make_container_options(
                    temporary=temporary,
                    name=name,
                    options=options,
                ),
            ),
            quiet=quiet,
        )

    def create(self, command=None, name=None, options=()): # pragma: no cover
        """Deprecated: create (but do not start) a container from this image."""
        warnings.warn('Image.create() is deprecated', DeprecationWarning)
        warnings.warn(
            'Image.create() is deprecated',
            RuntimeWarning, stacklevel=2,
        )
        run_command = 'docker create {options} {image} {command}'.rstrip()
        return fabricio.run(
            run_command.format(
                image=self,
                command=command or '',
                options=self.make_container_options(name=name, options=options),
            ),
        )

    def pull(self, local=False, use_cache=False, ignore_errors=False):
        """Pull the image, preserving the old one under a temp tag until the
        pull succeeds."""
        run = fabricio.local if local else fabricio.run
        run = partial(run, use_cache=use_cache, ignore_errors=ignore_errors)
        run_ignore_errors = partial(run, ignore_errors=True)
        image = six.text_type(self)
        # Keep the currently tagged image reachable under temp_tag so it is
        # not orphaned/lost if the pull replaces it.
        run_ignore_errors(
            'docker tag {image} {tag} '
            '&& docker rmi {image}'.format(image=image, tag=self.temp_tag)
        )
        pull_result = run('docker pull ' + image, quiet=False)
        if pull_result.succeeded:
            run_ignore_errors('docker rmi {tag}'.format(tag=self.temp_tag))

    def build(self, local=False, build_path='.', options=None, use_cache=False):
        """Build the image from ``build_path`` and clean up the old layers."""
        if local:
            run = fabricio.local
            run_capture_output = partial(run, capture=True)
        else:
            run = run_capture_output = fabricio.run
        run = partial(run, use_cache=use_cache)
        run_capture_output = partial(run_capture_output, use_cache=use_cache)
        run_ignore_errors = partial(run, ignore_errors=True)
        image = six.text_type(self)
        options = options or utils.Options()
        options['tag'] = image
        # default options
        options.setdefault('pull', 1)
        options.setdefault('force-rm', 1)
        # Patch fabricio.run so the inspect inside `info` captures output.
        with utils.patch(fabricio, 'run', run_capture_output):
            try:
                old_parent_id = self.info['Parent']
            except ImageNotFoundError:
                old_parent_id = ''
        # Keep the old image reachable under temp_tag during the build.
        run_ignore_errors(
            'docker tag {image} {tag} '
            '&& docker rmi {image}'.format(image=image, tag=self.temp_tag)
        )
        run(
            'docker build {options} {build_path}'.format(
                options=options,
                build_path=build_path,
            ),
            quiet=False,
        )
        # Drop the temp tag and the previous image's parent layer.
        run_ignore_errors('docker rmi {tag} {old_parent}'.format(
            tag=self.temp_tag,
            old_parent=old_parent_id,
        ))
| en | 0.398863 | # TODO 'latest' is unnecessary # descriptor's cache # tag can override image registry, name and/or digest # pragma: no cover # default options | 2.291646 | 2 |
cbl/maybe_type.py | Commodoreprime/Command-Block-Assembly | 223 | 6620891 | from .containers import Parameter, Temporary, DelegatedWrite
from .native_type import NativeType, as_var
from .cbl_type import CBLType, CBLTypeInstance
from .struct_type import StructuredType
from .function_type import IntrinsicCallable
import cmd_ir.instructions as i
class MaybeType(NativeType):
    """Generic ``Maybe<T>`` factory; memoizes one concrete type per T."""

    def __init__(self):
        self.type_map = {}

    def instantiate(self, compiler, args):
        """Return the (cached) concrete Maybe type for the one type argument."""
        assert len(args) == 1
        wrapped = args[0]
        cached = self.type_map.get(wrapped)
        if cached is None:
            cached = MaybeWrappedType(wrapped)
            cached.typename = '%s<%s>' % (self.typename, wrapped.typename)
            cached.complete_type(compiler)
            self.type_map[wrapped] = cached
        return cached
class MaybeTypeInstance(CBLTypeInstance):
    """Runtime instance of a Maybe value: an outer variable plus an
    optional dedicated slot for the wrapped value."""

    def __init__(self, var, valvar, func_members, func_properties):
        super().__init__(func_members, func_properties)
        self._var = var
        self.__valvar = valvar
        self.has_wrap = valvar is not None

    def valvar(self):
        """Variable holding the wrapped value (the outer var when unwrapped)."""
        if self.has_wrap:
            return self.__valvar
        return self._var
class MaybeWrappedType(CBLType):
    """Concrete ``Maybe<T>``: an NBT compound whose ``.maybe`` sub-path
    holds the wrapped value when present and is absent when empty."""

    def __init__(self, real_type):
        self.real_type = real_type
        super().__init__()

    @property
    def ir_type(self):
        # The Maybe container itself is always stored as NBT.
        return i.VarType.nbt

    def allocate(self, compiler, namehint):
        """Allocate the outer NBT var plus the ``.maybe`` value slot."""
        var = compiler.create_var(namehint, self.ir_type)
        path = i.VirtualString('.maybe')
        subvar = i.NBTSubPath(var, path, self._wrap_type())
        valvar = compiler.define(namehint + '_valvar', subvar)
        # There is support for valvar being elided e.g. when we are
        # an NBT property of a struct, use that property as valvar.
        # But we don't test for this yet
        return MaybeTypeInstance(var, valvar, self.get_func_members(),
                                 self.get_func_properties())

    def _wrap_type(self):
        """IR type used for the value slot."""
        types = self.real_type.ir_types()
        if len(types) == 1:
            # take on the real underlying type if it's a single type
            return types[0]
        else:
            # Otherwise we wrap it in NBT
            return i.VarType.nbt

    def as_variable(self, instance):
        return instance._var

    def complete_type(self, compiler):
        """Register constructors, ``=``, ``get`` and ``isEmpty`` members."""
        bool = compiler.type('bool')
        tparam = Parameter(self.real_type, 'value', False)
        # Default constructor
        ftype, func = self.add_constructor(compiler, (), True)
        func.set_as_intrinsic(IntrinsicCallable(self._default_ctor))
        # Constructor with value
        ftype, func = self.add_constructor(compiler, (tparam,), True)
        func.set_as_intrinsic(IntrinsicCallable(self.__copy_value_ctor))
        # Set value
        ftype, func = self.add_operator_member(compiler, '=', self, (tparam,),
                                               True)
        func.set_as_intrinsic(IntrinsicCallable(self.__set_value))
        # Get value
        ftype, func = self.add_function_member(compiler, 'get', self.real_type,
                                               (), True, False)
        func.set_as_intrinsic(IntrinsicCallable(self.__get_value))
        # Test if empty
        ftype, func = self.add_function_member(compiler, 'isEmpty', bool,
                                               (), True, False)
        func.set_as_intrinsic(IntrinsicCallable(self.__is_empty))
        super().complete_type(compiler)

    def __init(self, compiler, maybe):
        # Initialize the outer NBT compound when a value slot exists.
        if maybe.has_wrap:
            holder = compiler.define('maybeholder', i.CreateNBTCompound())
            compiler.add_insn(i.NBTAssign(maybe._var, holder))

    def _default_ctor(self, compiler, container, args):
        self.__init(compiler, args[0].value)
        return Temporary(compiler.type('void'), None)

    def __copy_value_ctor(self, compiler, container, args):
        self.__init(compiler, args[0].value)
        self.__set_value(compiler, container, args)
        return Temporary(compiler.type('void'), None)

    def __is_empty(self, compiler, container, args):
        """Emit code setting a bool to 1 iff the value slot is absent."""
        valvar = args[0].value.valvar()
        bool = compiler.type('bool')
        result = bool.allocate(compiler, 'empty')
        compiler.add_insn(i.SetScore(result, 0))
        exec = compiler.define('empty_test', i.CreateExec())
        set_empty = compiler.create_block('set_empty')
        set_empty.is_function = True
        set_empty.add(i.SetScore(result, 1))
        with compiler.compiletime():
            compiler.add_insn(i.ExecUnlessNBTVar(exec, valvar))
            compiler.add_insn(i.ExecRun(exec, set_empty))
        return Temporary(bool, result)

    def __create_wrap_holders(self, compiler, realvar, types):
        # One NBT sub-path (.v0, .v1, ...) per component of the wrapped type.
        for n, t in enumerate(types):
            path = i.VirtualString('.v%d' % n)
            subpath = i.NBTSubPath(realvar, path, t)
            holder = compiler.define('maybehold_%d' % n, subpath)
            yield holder

    def __set_value(self, compiler, container, args):
        """Emit code assigning the given value into the value slot."""
        valvar = args[0].value.valvar()
        vcontainer = args[1]
        vars = vcontainer.type.as_variables(vcontainer.value)
        if len(vars) == 1:
            # Use actual var if it's a single variable
            realvar = vars[0]
        else:
            # Wrap vars into NBT var
            nbtwrap = compiler.define('new_compound', i.CreateNBTCompound())
            realvar = compiler.create_var('maybewrap', i.VarType.nbt)
            compiler.add_insn(i.NBTAssign(realvar, nbtwrap))
            types = [v.type for v in vars]
            holders = self.__create_wrap_holders(compiler, realvar, types)
            for holder, var in zip(holders, vars):
                compiler.add_insn(i.SetScore(holder, var))
        # TODO verify this works correctly
        if isinstance(args[0], DelegatedWrite):
            return args[0].write(compiler, realvar)
        compiler.add_insn(i.SetScore(valvar, realvar))
        return args[0] # this

    def __get_value(self, compiler, container, args):
        """Emit code reconstructing the wrapped value from the slot."""
        valvar = args[0].value.valvar()
        types = self.real_type.ir_types()
        if len(types) == 1:
            vars = (valvar,)
        else:
            vars = self.__create_wrap_holders(compiler, valvar, types)
        value = self.real_type.from_variables(compiler, vars)
        return Temporary(self.real_type, value)

    def _copy_impl(self, compiler, this, other):
        """Whole-Maybe copy (copies the outer NBT var)."""
        if isinstance(this, DelegatedWrite):
            return this.write(compiler, other)
        compiler.add_insn(i.SetScore(this.value._var, other.value._var))
        return other
| from .containers import Parameter, Temporary, DelegatedWrite
from .native_type import NativeType, as_var
from .cbl_type import CBLType, CBLTypeInstance
from .struct_type import StructuredType
from .function_type import IntrinsicCallable
import cmd_ir.instructions as i
class MaybeType(NativeType):
    """Generic ``Maybe<T>`` factory; memoizes one concrete type per T."""

    def __init__(self):
        self.type_map = {}

    def instantiate(self, compiler, args):
        """Return the (cached) concrete Maybe type for the one type argument."""
        assert len(args) == 1
        wrapped = args[0]
        cached = self.type_map.get(wrapped)
        if cached is None:
            cached = MaybeWrappedType(wrapped)
            cached.typename = '%s<%s>' % (self.typename, wrapped.typename)
            cached.complete_type(compiler)
            self.type_map[wrapped] = cached
        return cached
class MaybeTypeInstance(CBLTypeInstance):
    """Runtime instance of a Maybe value: an outer variable plus an
    optional dedicated slot for the wrapped value."""

    def __init__(self, var, valvar, func_members, func_properties):
        super().__init__(func_members, func_properties)
        self._var = var
        self.__valvar = valvar
        self.has_wrap = valvar is not None

    def valvar(self):
        """Variable holding the wrapped value (the outer var when unwrapped)."""
        if self.has_wrap:
            return self.__valvar
        return self._var
class MaybeWrappedType(CBLType):
def __init__(self, real_type):
self.real_type = real_type
super().__init__()
@property
def ir_type(self):
return i.VarType.nbt
def allocate(self, compiler, namehint):
var = compiler.create_var(namehint, self.ir_type)
path = i.VirtualString('.maybe')
subvar = i.NBTSubPath(var, path, self._wrap_type())
valvar = compiler.define(namehint + '_valvar', subvar)
# There is support for valvar being elided e.g. when we are
# an NBT property of a struct, use that property as valvar.
# But we don't test for this yet
return MaybeTypeInstance(var, valvar, self.get_func_members(),
self.get_func_properties())
def _wrap_type(self):
types = self.real_type.ir_types()
if len(types) == 1:
# take on the real underlying type if it's a single type
return types[0]
else:
# Otherwise we wrap it in NBT
return i.VarType.nbt
def as_variable(self, instance):
return instance._var
def complete_type(self, compiler):
bool = compiler.type('bool')
tparam = Parameter(self.real_type, 'value', False)
# Default constructor
ftype, func = self.add_constructor(compiler, (), True)
func.set_as_intrinsic(IntrinsicCallable(self._default_ctor))
# Constructor with value
ftype, func = self.add_constructor(compiler, (tparam,), True)
func.set_as_intrinsic(IntrinsicCallable(self.__copy_value_ctor))
# Set value
ftype, func = self.add_operator_member(compiler, '=', self, (tparam,),
True)
func.set_as_intrinsic(IntrinsicCallable(self.__set_value))
# Get value
ftype, func = self.add_function_member(compiler, 'get', self.real_type,
(), True, False)
func.set_as_intrinsic(IntrinsicCallable(self.__get_value))
# Test if empty
ftype, func = self.add_function_member(compiler, 'isEmpty', bool,
(), True, False)
func.set_as_intrinsic(IntrinsicCallable(self.__is_empty))
super().complete_type(compiler)
def __init(self, compiler, maybe):
if maybe.has_wrap:
holder = compiler.define('maybeholder', i.CreateNBTCompound())
compiler.add_insn(i.NBTAssign(maybe._var, holder))
def _default_ctor(self, compiler, container, args):
self.__init(compiler, args[0].value)
return Temporary(compiler.type('void'), None)
def __copy_value_ctor(self, compiler, container, args):
self.__init(compiler, args[0].value)
self.__set_value(compiler, container, args)
return Temporary(compiler.type('void'), None)
def __is_empty(self, compiler, container, args):
valvar = args[0].value.valvar()
bool = compiler.type('bool')
result = bool.allocate(compiler, 'empty')
compiler.add_insn(i.SetScore(result, 0))
exec = compiler.define('empty_test', i.CreateExec())
set_empty = compiler.create_block('set_empty')
set_empty.is_function = True
set_empty.add(i.SetScore(result, 1))
with compiler.compiletime():
compiler.add_insn(i.ExecUnlessNBTVar(exec, valvar))
compiler.add_insn(i.ExecRun(exec, set_empty))
return Temporary(bool, result)
def __create_wrap_holders(self, compiler, realvar, types):
for n, t in enumerate(types):
path = i.VirtualString('.v%d' % n)
subpath = i.NBTSubPath(realvar, path, t)
holder = compiler.define('maybehold_%d' % n, subpath)
yield holder
def __set_value(self, compiler, container, args):
valvar = args[0].value.valvar()
vcontainer = args[1]
vars = vcontainer.type.as_variables(vcontainer.value)
if len(vars) == 1:
# Use actual var if it's a single variable
realvar = vars[0]
else:
# Wrap vars into NBT var
nbtwrap = compiler.define('new_compound', i.CreateNBTCompound())
realvar = compiler.create_var('maybewrap', i.VarType.nbt)
compiler.add_insn(i.NBTAssign(realvar, nbtwrap))
types = [v.type for v in vars]
holders = self.__create_wrap_holders(compiler, realvar, types)
for holder, var in zip(holders, vars):
compiler.add_insn(i.SetScore(holder, var))
# TODO verify this works correctly
if isinstance(args[0], DelegatedWrite):
return args[0].write(compiler, realvar)
compiler.add_insn(i.SetScore(valvar, realvar))
return args[0] # this
def __get_value(self, compiler, container, args):
valvar = args[0].value.valvar()
types = self.real_type.ir_types()
if len(types) == 1:
vars = (valvar,)
else:
vars = self.__create_wrap_holders(compiler, valvar, types)
value = self.real_type.from_variables(compiler, vars)
return Temporary(self.real_type, value)
    def _copy_impl(self, compiler, this, other):
        # Copy-assignment: write ``other`` into ``this``.  Destinations that
        # implement DelegatedWrite handle the assignment themselves.
        if isinstance(this, DelegatedWrite):
            return this.write(compiler, other)
        compiler.add_insn(i.SetScore(this.value._var, other.value._var))
        return other
| en | 0.780529 | # return _var if we are not wrapped # There is support for valvar being elided e.g. when we are # an NBT property of a struct, use that property as valvar. # But we don't test for this yet # take on the real underlying type if it's a single type # Otherwise we wrap it in NBT # Default constructor # Constructor with value # Set value # Get value # Test if empty # Use actual var if it's a single variable # Wrap vars into NBT var # TODO verify this works correctly # this | 1.994265 | 2 |
bin/dbm/droplet_rise.py | ChrisBarker-NOAA/tamoc | 18 | 6620892 | <filename>bin/dbm/droplet_rise.py
"""
Insoluble fluid particles
=========================
Use the ``TAMOC`` ``DBM`` to specify an oil droplet that cannot dissolve
(e.g., a dead, heavy oil with negligible dissolution) and calculate all of its
properties in deepwater conditions.
In particular, this script demonstrates the methods:
* `dbm.InsolubleParticle.density`
* `dbm.InsolubleParticle.mass_by_diameter`
* `dbm.InsolubleParticle.diameter`
* `dbm.InsolubleParticle.particle_shape`
* `dbm.InsolubleParticle.slip_velocity`
* `dbm.InsolubleParticle.surface_area`
* `dbm.InsolubleParticle.heat_transfer`
"""
# <NAME>, July 2013, Texas A&M University <<EMAIL>>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import dbm
from tamoc import seawater
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':

    # Define the type of inert fluid particle
    isfluid = True
    iscompressible = False
    gamma = 29.     # deg API
    beta = 0.0007   # Pa^(-1)
    co = 3.7e-9     # K^(-1)

    # Create a DBM InsolubleParticle object for this simple oil
    oil = dbm.InsolubleParticle(isfluid, iscompressible, gamma=gamma,
                                beta=beta, co=co)

    # Specify some generic deepwater ocean conditions
    T = 273.15 + 4.     # particle temperature (K)
    P = 150.0 * 1.0e5   # ambient pressure (Pa)
    Ta = 273.15 + 4.0   # ambient temperature (K)
    Sa = 34.5           # ambient salinity (psu)
    L = 1500            # rise distance (m)

    # Compute the time to rise a distance L for several droplet sizes
    de = np.logspace(np.log10(0.00010), np.log10(0.05))
    t = np.zeros(len(de))
    for k in range(len(t)):
        m = oil.mass_by_diameter(de[k], T, P, Sa, Ta)
        us = oil.slip_velocity(m, T, P, Sa, Ta)
        t[k] = L / us

    # Plot rise time versus droplet diameter on log-log axes
    plt.figure(1)
    plt.clf()
    ax1 = plt.subplot(111)
    ax1.loglog(de, t / 60 / 60, '.-')
    ax1.set_xlabel('Diameter (m)')
    ax1.set_ylabel('Rise Time (hrs)')
    plt.draw()
    # show() must come after the axes are populated: the original called it
    # before plotting, which blocks on an empty figure with non-interactive
    # backends and the plot is never displayed.
    plt.show()
| <filename>bin/dbm/droplet_rise.py
"""
Insoluble fluid particles
=========================
Use the ``TAMOC`` ``DBM`` to specify an oil droplet that cannot dissolve
(e.g., a dead, heavy oil with negligible dissolution) and calculate all of its
properties in deepwater conditions.
In particular, this script demonstrates the methods:
* `dbm.InsolubleParticle.density`
* `dbm.InsolubleParticle.mass_by_diameter`
* `dbm.InsolubleParticle.diameter`
* `dbm.InsolubleParticle.particle_shape`
* `dbm.InsolubleParticle.slip_velocity`
* `dbm.InsolubleParticle.surface_area`
* `dbm.InsolubleParticle.heat_transfer`
"""
# <NAME>, July 2013, Texas A&M University <<EMAIL>>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import dbm
from tamoc import seawater
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Define the type of inert fluid particle
isfluid = True
iscompressible = False
gamma=29. # deg API
beta=0.0007 # Pa^(-1)
co=3.7e-9 # K^(-1)
# Create a DBM InsolubleParticle object for this simple oil
oil = dbm.InsolubleParticle(isfluid, iscompressible, gamma=gamma,
beta=beta, co=co)
# Specify some generic deepwater ocean conditions
T = 273.15 + 4.
P = 150.0 * 1.0e5
Ta = 273.15 + 4.0
Sa = 34.5
L = 1500;
# Compute the rise velocity for several droplet sizes
de = np.logspace(np.log10(0.00010), np.log10(0.05))
t = np.zeros(len(de))
for i in range(len(t)):
m = oil.mass_by_diameter(de[i], T, P, Sa, Ta)
us = oil.slip_velocity(m, T, P, Sa, Ta)
t[i] = L / us
plt.figure(1)
plt.clf()
plt.show()
ax1 = plt.subplot(111)
ax1.loglog(de, t/60/60, '.-')
ax1.set_xlabel('Diameter (m)')
ax1.set_ylabel('Rise Time (hrs)')
plt.draw()
| en | 0.530127 | Insoluble fluid particles ========================= Use the ``TAMOC`` ``DBM`` to specify an oil droplet that cannot dissolve (e.g., a dead, heavy oil with negligible dissolution) and calculate all of its properties in deepwater conditions. In particular, this script demonstrates the methods: * `dbm.InsolubleParticle.density` * `dbm.InsolubleParticle.mass_by_diameter` * `dbm.InsolubleParticle.diameter` * `dbm.InsolubleParticle.particle_shape` * `dbm.InsolubleParticle.slip_velocity` * `dbm.InsolubleParticle.surface_area` * `dbm.InsolubleParticle.heat_transfer` # <NAME>, July 2013, Texas A&M University <<EMAIL>>. # Define the type of inert fluid particle # deg API # Pa^(-1) # K^(-1) # Create a DBM InsolubleParticle object for this simple oil # Specify some generic deepwater ocean conditions # Compute the rise velocity for several droplet sizes | 2.381535 | 2 |
tests/test_RNA_folding.py | wlqqlz/rna-folding | 1 | 6620893 | <reponame>wlqqlz/rna-folding<gh_stars>1-10
# Copyright 2021 D-Wave Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import unittest
import RNA_folding
from dwave.system import LeapHybridCQMSampler
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class TestSmoke(unittest.TestCase):
    """End-to-end sanity check: the demo script must run to completion."""

    @unittest.skipIf(os.getenv('SKIP_INT_TESTS'), "Skipping integration test.")
    def test_smoke(self):
        """Run RNA_folding.py and check that nothing crashes"""
        script = os.path.join(project_dir, 'RNA_folding.py')
        # check_output raises CalledProcessError on non-zero exit status,
        # which is all this smoke test needs to detect a crash.
        subprocess.check_output([sys.executable, script])
class TestRNA_folding(unittest.TestCase):
    # Unit tests for stem-dictionary construction and the CQM model builder.

    def test_read_file_to_stem_dict(self):
        """Test ability to read file and create appropriate stem dict"""
        file = os.path.join(project_dir, 'RNA_text_files/simple.txt')
        bond_matrix = RNA_folding.text_to_matrix(file, 2)
        stem_dict = RNA_folding.make_stem_dict(bond_matrix, 3, 2)
        # Keys are maximal stems; values list every contained sub-stem.
        self.assertEqual(stem_dict, {(2, 5, 12, 15): [(2, 4, 13, 15), (2, 5, 12, 15), (3, 5, 12, 14)]})

    def test_build_cqm(self):
        """Test build_CQM creating correct constraints and variables"""
        # Hand-built stem dict: one maximal stem with six sub-stems plus
        # three singleton stems.
        stem_dict = {
            (1, 3, 13, 15): [(1, 3, 13, 15)],
            (6, 10, 20, 24): [(6, 8, 22, 24), (6, 9, 21, 24), (6, 10, 20, 24), (7, 9, 21, 23), (7, 10, 20, 23),
                              (8, 10, 20, 22)],
            (7, 9, 14, 16): [(7, 9, 14, 16)],
            (13, 15, 23, 25): [(13, 15, 23, 25)]
        }
        cqm = RNA_folding.build_cqm(stem_dict, 3, 0.3)
        # One binary variable per sub-stem, plus a 'Null' slack variable for
        # the multi-option maximal stem.
        self.assertEqual(cqm.variables,
                         [(1, 3, 13, 15), (6, 8, 22, 24), (6, 9, 21, 24), (6, 10, 20, 24), (7, 9, 21, 23),
                          (7, 10, 20, 23),
                          (8, 10, 20, 22), (7, 9, 14, 16), (13, 15, 23, 25), 'Null:(6, 10, 20, 24)']
                         )
        # Linear terms reward stem length quadratically (negative = maximize).
        self.assertEqual(cqm.objective.linear,
                         {(1, 3, 13, 15): -9.0, (6, 8, 22, 24): -9.0, (6, 9, 21, 24): -16.0, (6, 10, 20, 24): -25.0,
                          (7, 9, 21, 23): -9.0, (7, 10, 20, 23): -16.0, (8, 10, 20, 22): -9.0, (7, 9, 14, 16): -9.0,
                          (13, 15, 23, 25): -9.0, 'Null:(6, 10, 20, 24)': 0.0}
                         )
        # Quadratic terms penalize pseudoknot pairs (weight 0.3 per overlap).
        self.assertEqual(cqm.objective.quadratic,
                         {((6, 8, 22, 24), (1, 3, 13, 15)): 2.6999999999999997,
                          ((6, 9, 21, 24), (1, 3, 13, 15)): 3.5999999999999996, ((6, 10, 20, 24), (1, 3, 13, 15)): 4.5,
                          ((7, 9, 21, 23), (1, 3, 13, 15)): 2.6999999999999997,
                          ((7, 10, 20, 23), (1, 3, 13, 15)): 3.5999999999999996,
                          ((8, 10, 20, 22), (1, 3, 13, 15)): 2.6999999999999997}
                         )
        self.assertEqual(len(cqm.constraints), 15)

    def test_small_case(self):
        """Test solution quality of small case."""
        # Integration test: requires Leap hybrid sampler access (network).
        file = os.path.join(project_dir, 'RNA_text_files/simple_pseudo.txt')
        bond_matrix = RNA_folding.text_to_matrix(file, 2)
        stem_dict = RNA_folding.make_stem_dict(bond_matrix, 3, 2)
        cqm = RNA_folding.build_cqm(stem_dict, 3, 0.3)
        sampler = LeapHybridCQMSampler()
        sample_set = sampler.sample_cqm(cqm)
        stems = RNA_folding.process_cqm_solution(sample_set)
        self.assertEqual(set(stems), {(1, 3, 13, 15), (6, 10, 20, 24)})
| # Copyright 2021 D-Wave Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import unittest
import RNA_folding
from dwave.system import LeapHybridCQMSampler
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class TestSmoke(unittest.TestCase):
@unittest.skipIf(os.getenv('SKIP_INT_TESTS'), "Skipping integration test.")
def test_smoke(self):
"""Run RNA_folding.py and check that nothing crashes"""
demo_file = os.path.join(project_dir, 'RNA_folding.py')
subprocess.check_output([sys.executable, demo_file])
class TestRNA_folding(unittest.TestCase):
def test_read_file_to_stem_dict(self):
"""Test ability to read file and create appropriate stem dict"""
file = os.path.join(project_dir, 'RNA_text_files/simple.txt')
bond_matrix = RNA_folding.text_to_matrix(file, 2)
stem_dict = RNA_folding.make_stem_dict(bond_matrix, 3, 2)
self.assertEqual(stem_dict, {(2, 5, 12, 15): [(2, 4, 13, 15), (2, 5, 12, 15), (3, 5, 12, 14)]})
def test_build_cqm(self):
"""Test build_CQM creating correct constraints and variables"""
stem_dict = {
(1, 3, 13, 15): [(1, 3, 13, 15)],
(6, 10, 20, 24): [(6, 8, 22, 24), (6, 9, 21, 24), (6, 10, 20, 24), (7, 9, 21, 23), (7, 10, 20, 23),
(8, 10, 20, 22)],
(7, 9, 14, 16): [(7, 9, 14, 16)],
(13, 15, 23, 25): [(13, 15, 23, 25)]
}
cqm = RNA_folding.build_cqm(stem_dict, 3, 0.3)
self.assertEqual(cqm.variables,
[(1, 3, 13, 15), (6, 8, 22, 24), (6, 9, 21, 24), (6, 10, 20, 24), (7, 9, 21, 23),
(7, 10, 20, 23),
(8, 10, 20, 22), (7, 9, 14, 16), (13, 15, 23, 25), 'Null:(6, 10, 20, 24)']
)
self.assertEqual(cqm.objective.linear,
{(1, 3, 13, 15): -9.0, (6, 8, 22, 24): -9.0, (6, 9, 21, 24): -16.0, (6, 10, 20, 24): -25.0,
(7, 9, 21, 23): -9.0, (7, 10, 20, 23): -16.0, (8, 10, 20, 22): -9.0, (7, 9, 14, 16): -9.0,
(13, 15, 23, 25): -9.0, 'Null:(6, 10, 20, 24)': 0.0}
)
self.assertEqual(cqm.objective.quadratic,
{((6, 8, 22, 24), (1, 3, 13, 15)): 2.6999999999999997,
((6, 9, 21, 24), (1, 3, 13, 15)): 3.5999999999999996, ((6, 10, 20, 24), (1, 3, 13, 15)): 4.5,
((7, 9, 21, 23), (1, 3, 13, 15)): 2.6999999999999997,
((7, 10, 20, 23), (1, 3, 13, 15)): 3.5999999999999996,
((8, 10, 20, 22), (1, 3, 13, 15)): 2.6999999999999997}
)
self.assertEqual(len(cqm.constraints), 15)
def test_small_case(self):
"""Test solution quality of small case."""
file = os.path.join(project_dir, 'RNA_text_files/simple_pseudo.txt')
bond_matrix = RNA_folding.text_to_matrix(file, 2)
stem_dict = RNA_folding.make_stem_dict(bond_matrix, 3, 2)
cqm = RNA_folding.build_cqm(stem_dict, 3, 0.3)
sampler = LeapHybridCQMSampler()
sample_set = sampler.sample_cqm(cqm)
stems = RNA_folding.process_cqm_solution(sample_set)
self.assertEqual(set(stems), {(1, 3, 13, 15), (6, 10, 20, 24)}) | en | 0.852665 | # Copyright 2021 D-Wave Systems # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Run RNA_folding.py and check that nothing crashes Test ability to read file and create appropriate stem dict Test build_CQM creating correct constraints and variables Test solution quality of small case. | 2.149547 | 2 |
webapp/videobank/views.py | jtallieu/djambalaya | 0 | 6620894 | <filename>webapp/videobank/views.py
# Create your views here.
from django.shortcuts import render
import logging
log = logging.getLogger('videobank.webapp')
def main(request):
    """Render the video bank home page.

    Logs the incoming request at INFO level for traceability, then returns
    the rendered ``vms/home.html`` template.
    """
    # Lazy %-style arguments defer string construction until the record is
    # actually emitted — the logging-module convention (the original built
    # the message eagerly with str.format); the emitted text is identical.
    log.info('Handling index view: %s', request)
    return render(request, "vms/home.html")
| <filename>webapp/videobank/views.py
# Create your views here.
from django.shortcuts import render
import logging
log = logging.getLogger('videobank.webapp')
def main(request):
log.info('Handling index view: {}'.format(request))
return render(request, "vms/home.html")
| en | 0.968116 | # Create your views here. | 1.746537 | 2 |
template.py | randcodeclips/cli_template | 0 | 6620895 | import sys
def helloWorld(args, params):
    """Print a greeting to params['variables']['to'] (default 'World').

    Appends an exclamation point when the 'excited' toggle is set.
    ``args`` is accepted for handler-signature compatibility and unused.
    """
    recipient = 'World'
    if params['variables']['to'] != '':
        recipient = params['variables']['to']
    greeting = f'Hello, {recipient}'
    if params['toggle']['excited']:
        greeting = greeting + '!'
    print(greeting)
# Command registry used by the dispatcher below.  Each entry maps a
# sub-command name to:
#   'function'  -- handler, called as function(sys.argv, params)
#   'comment'   -- one-line description for --help output
#   'variables' -- value parameters ('arg_offset' orders positionals)
#   'toggle'    -- boolean flags, matched against their 'call' aliases
commands = {
    'hello': {
        'function': helloWorld,
        'comment': 'Hello world command',
        'variables': {
            'to': {
                'arg_offset': 0,
                'name': 'To',
                'comment': 'Specify whom you\'re saying your helloes to'
            },
        },
        'toggle': {
            'excited': {
                'call': ['-e', '--excited'],
                'comment': 'When selected adds exclamation point to the end'
            }
        }
    },
}
def wrongUsage(args):
    """Report invalid usage on stdout and terminate the process.

    ``args`` is ``sys.argv``; args[0] names the program in the hint.
    (The misspelling "missused" is preserved verbatim from the original.)
    """
    prog = args[0]
    message = (
        f'Error: Command missused, try {prog} [command] -h '
        f'or {prog} [command] --help'
    )
    print(message)
    exit()
def fetchSyntax(placeholder_name, prop_dict):
    """Derive help-text rendering pieces for one command property.

    Returns [display_name, [open_mark, close_mark], comment]: the shown
    name (explicit 'name', joined 'call' aliases, or the placeholder),
    optionality brackets (empty strings when the property is required),
    and the property's comment ('' when absent).
    """
    display = placeholder_name
    if 'name' in prop_dict:
        display = prop_dict['name']
    elif 'call' in prop_dict:
        display = " | ".join(prop_dict["call"])
    brackets = ['', ''] if prop_dict.get('required') else ['[ ', ' ]']
    comment = prop_dict.get('comment', '')
    return [display, brackets, comment]
def helpParse(com, args):
    """Print help text for the command table ``com``.

    ``args`` is ``sys.argv``.  When args[1] names a registered command,
    prints that command's detailed reference (usage line built from its
    ordered values and toggles); otherwise prints a summary of every
    command plus a pointer to per-command help.
    """
    helper = com
    # BUG FIX: the original wrote ['-h' '--help'] -- adjacent string
    # literals concatenate, collapsing the list to ['-h--help'], so the
    # membership test could never match either help flag.
    if args[1] not in ['-h', '--help'] and args[1] in com.keys():
        helper = {args[1]: com[args[1]]}
    else:
        print(f'''For more details about command use: \
{args[0]} <command> --help''')
    for x, y in helper.items():
        tmp_comment, prp_m = ['', {'variables': 'Values', 'toggle': 'Options'}]
        if 'comment' in y:
            tmp_comment = y['comment']
        # Map the property groups present on this command to display headings.
        props = {a: y[z] for z, a in prp_m.items() if z in y}
        if args[1] in com.keys():
            # Detailed mode: assemble the usage line for this command.
            for gen in ['Values', 'Options']:
                if gen not in props:
                    props[gen] = {}
            # Positional values are ordered by their declared arg_offset;
            # offsets must form a gapless 0..N-1 sequence.
            order = {str(a['arg_offset']): z
                     for z, a in props['Values'].items() if 'arg_offset' in a}
            order_keys, ordered, com_args = [list(order.keys()), [], args[1]]
            for order_suposed_index in range(len(order_keys)):
                if order_suposed_index != int(order_keys[order_suposed_index]):
                    raise IndexError(
                        'Missing index, check your command dictionary')
                ordered.append(
                    props['Values'][order[order_keys[order_suposed_index]]])
            ordered.extend([y['toggle'][com_tg]
                            for com_tg in props['Options'].keys()])
            for com_data in ordered:
                call, optional, _ = fetchSyntax('', com_data)
                com_args += ' {}{}{}'.format(optional[0], call, optional[1])
            print(f'Command reference: {args[0]} {com_args}')
        # Summary line, then each property group with its entries.
        print('\n{:<4}{:<32}{}'.format('', x, tmp_comment))
        for z, a in props.items():
            print('{:<8}{:<40}'.format('', z + ':'))
            for b, c in a.items():
                tmp_name, tmp_req, tmp_cmn = fetchSyntax(b, c)
                print('{:<14}{:<26}{}'
                      .format('', tmp_req[0] + tmp_name + tmp_req[1], tmp_cmn))
# Top-level dispatcher: route sys.argv to help output or a registered command.
if any(r in sys.argv for r in ['--help', '-h']):
    helpParse(commands, sys.argv)
elif len(sys.argv) > 1 and sys.argv[1] in commands.keys():
    params = {}
    # Collect boolean toggles: True when any of the flag's aliases appears.
    if 'toggle' in commands[sys.argv[1]].keys() \
            and len(commands[sys.argv[1]]['toggle'].keys()) > 0:
        params['toggle'] = {}
        calls = {x: commands[sys.argv[1]]['toggle'][x]['call']
                 for x in commands[sys.argv[1]]['toggle']}
        for x in calls.keys():
            params['toggle'][x] = any(y in sys.argv for y in calls[x])
    # Collect value parameters: positional (via arg_offset) or key=value
    # (via 'call' aliases).
    if 'variables' in commands[sys.argv[1]].keys() \
            and len(commands[sys.argv[1]]['variables'].keys()) > 0:
        params['variables'], mixed_values = [{}, []]  # mixed_values is unused
        variables = [x for x in commands[sys.argv[1]]['variables'].keys()]
        for variable in variables:
            var, val = [commands[sys.argv[1]]['variables'][variable], '']
            if 'arg_offset' in var.keys() \
                    and len(sys.argv) > (2 + var['arg_offset']):
                # NOTE(review): assumes the command always defines a
                # 'toggle' section; a command without one raises KeyError
                # here -- confirm intended.
                arg_set, tgl_d = [2 + var['arg_offset'],
                                  commands[sys.argv[1]]['toggle'].items()]
                toggle_calls = [toggler for _, com_data in tgl_d
                                for toggler in com_data['call']]
                # The positional slot only counts when it is not a toggle flag.
                if sys.argv[arg_set] not in toggle_calls:
                    val = sys.argv[2 + var['arg_offset']]
            if 'call' in var.keys() and type(var['call']) == list:
                # key=value style: the first argv entry containing an alias
                # wins; the value is everything after the first '='.
                pre_val = [arg for call in var['call']
                           for arg in sys.argv if call in arg]
                if len(pre_val) > 0:
                    val = (pre_val[0].split('='))[1]
            if 'required' in var and var['required'] and val == '':
                wrongUsage(sys.argv)
            else:
                params['variables'][variable] = val
    # Invoke the handler with the raw argv and the parsed parameters.
    commands[sys.argv[1]]['function'](sys.argv, params)
else:
    wrongUsage(sys.argv)
exit()
| import sys
def helloWorld(args, params):
to = 'World'
if params['variables']['to'] != '':
to = params['variables']['to']
text = 'Hello, {}'.format(to)
if params['toggle']['excited']:
text += '!'
print(text)
commands = {
'hello': {
'function': helloWorld,
'comment': 'Hello world command',
'variables': {
'to': {
'arg_offset': 0,
'name': 'To',
'comment': 'Specify whom you\'re saying your helloes to'
},
},
'toggle': {
'excited': {
'call': ['-e', '--excited'],
'comment': 'When selected adds exclamation point to the end'
}
}
},
}
def wrongUsage(args):
print(f'Error: Command missused, try {args[0]} [command] -h',
f'or {args[0]} [command] --help')
exit()
def fetchSyntax(placeholder_name, prop_dict):
tmp_name, tmp_req, tmp_comment = [placeholder_name, ['[ ', ' ]'], '']
if 'name' in prop_dict:
tmp_name = prop_dict['name']
elif 'call' in prop_dict:
tmp_name = " | ".join(prop_dict["call"])
if 'required' in prop_dict and prop_dict['required']:
tmp_req = ['', '']
if 'comment' in prop_dict:
tmp_comment = prop_dict['comment']
return [tmp_name, tmp_req, tmp_comment]
def helpParse(com, args):
helper = com
if args[1] not in ['-h' '--help'] and args[1] in com.keys():
helper = {args[1]: com[args[1]]}
else:
print(f'''For more details about command use: \
{args[0]} <command> --help''')
for x, y in helper.items():
tmp_comment, prp_m = ['', {'variables': 'Values', 'toggle': 'Options'}]
if 'comment' in y:
tmp_comment = y['comment']
props = {a: y[z] for z, a in prp_m.items() if z in y}
if args[1] in com.keys():
for gen in ['Values', 'Options']:
if gen not in props:
props[gen] = {}
order = {str(a['arg_offset']): z
for z, a in props['Values'].items() if 'arg_offset' in a}
order_keys, ordered, com_args = [list(order.keys()), [], args[1]]
for order_suposed_index in range(len(order_keys)):
if order_suposed_index != int(order_keys[order_suposed_index]):
raise IndexError(
'Missing index, check your command dictionary')
ordered.append(
props['Values'][order[order_keys[order_suposed_index]]])
ordered.extend([y['toggle'][com_tg]
for com_tg in props['Options'].keys()])
for com_data in ordered:
call, optional, _ = fetchSyntax('', com_data)
com_args += ' {}{}{}'.format(optional[0], call, optional[1])
print(f'Command reference: {args[0]} {com_args}')
print('\n{:<4}{:<32}{}'.format('', x, tmp_comment))
for z, a in props.items():
print('{:<8}{:<40}'.format('', z + ':'))
for b, c in a.items():
tmp_name, tmp_req, tmp_cmn = fetchSyntax(b, c)
print('{:<14}{:<26}{}'
.format('', tmp_req[0] + tmp_name + tmp_req[1], tmp_cmn))
if any(r in sys.argv for r in ['--help', '-h']):
helpParse(commands, sys.argv)
elif len(sys.argv) > 1 and sys.argv[1] in commands.keys():
params = {}
if 'toggle' in commands[sys.argv[1]].keys() \
and len(commands[sys.argv[1]]['toggle'].keys()) > 0:
params['toggle'] = {}
calls = {x: commands[sys.argv[1]]['toggle'][x]['call']
for x in commands[sys.argv[1]]['toggle']}
for x in calls.keys():
params['toggle'][x] = any(y in sys.argv for y in calls[x])
if 'variables' in commands[sys.argv[1]].keys() \
and len(commands[sys.argv[1]]['variables'].keys()) > 0:
params['variables'], mixed_values = [{}, []]
variables = [x for x in commands[sys.argv[1]]['variables'].keys()]
for variable in variables:
var, val = [commands[sys.argv[1]]['variables'][variable], '']
if 'arg_offset' in var.keys() \
and len(sys.argv) > (2 + var['arg_offset']):
arg_set, tgl_d = [2 + var['arg_offset'],
commands[sys.argv[1]]['toggle'].items()]
toggle_calls = [toggler for _, com_data in tgl_d
for toggler in com_data['call']]
if sys.argv[arg_set] not in toggle_calls:
val = sys.argv[2 + var['arg_offset']]
if 'call' in var.keys() and type(var['call']) == list:
pre_val = [arg for call in var['call']
for arg in sys.argv if call in arg]
if len(pre_val) > 0:
val = (pre_val[0].split('='))[1]
if 'required' in var and var['required'] and val == '':
wrongUsage(sys.argv)
else:
params['variables'][variable] = val
commands[sys.argv[1]]['function'](sys.argv, params)
else:
wrongUsage(sys.argv)
exit()
| en | 0.471528 | For more details about command use: \ {args[0]} <command> --help | 3.014738 | 3 |
programming/leetcode/may_challenge/JewelsAndStonesAlternate.py | vamsitallapudi/Coderefer-Python-Projects | 1 | 6620896 | <filename>programming/leetcode/may_challenge/JewelsAndStonesAlternate.py
print("aA".count)
| <filename>programming/leetcode/may_challenge/JewelsAndStonesAlternate.py
print("aA".count)
| none | 1 | 1.812582 | 2 | |
tests/utils.py | consbio/python-databasin | 2 | 6620897 | import datetime
import dateutil.parser
from dateutil.tz import tzlocal
from django.core.signing import base64_hmac
from django.utils.crypto import constant_time_compare
def make_api_key_callback(response, key):
    """Build a requests-mock callback emulating API-key authentication.

    The callback returns ``response`` with status 200 when the request
    carries valid x-api-* headers signed with ``key``; otherwise it sets
    status 401 and raises AuthenticationError.
    """
    class AuthenticationError(Exception):
        pass

    def callback(request, context):
        # All three auth headers must be present.
        if not all(h in request.headers for h in ('x-api-user', 'x-api-time', 'x-api-signature')):
            context.status_code = 401
            raise AuthenticationError('API headers are missing')
        request_time = dateutil.parser.parse(request.headers['x-api-time'])
        # NOTE(review): this rejects timestamps more than 5 minutes in the
        # *future* but never rejects stale ones, while the message says
        # "too old" -- the comparison looks inverted; confirm the intended
        # freshness window.
        if request_time > datetime.datetime.now(tzlocal()) + datetime.timedelta(minutes=5):
            raise AuthenticationError('API request date is too old')
        try:
            # Signature header is "<salt>:<base64 hmac>"; assumes the header
            # value is bytes here -- TODO confirm against the mock transport.
            salt, signature = request.headers['x-api-signature'].split(b':', 1)
            signature = signature.strip(b'=')
        except ValueError:
            raise AuthenticationError('API signature is malformed')
        # Recompute the HMAC over the timestamp and compare in constant time.
        test_signature = base64_hmac(salt, request.headers['x-api-time'], key)
        if constant_time_compare(test_signature, signature):
            context.status_code = 200
            return response
        context.status_code = 401
        raise AuthenticationError('Key signature is bad ({} != {})'.format(signature, test_signature))

    return callback
| import datetime
import dateutil.parser
from dateutil.tz import tzlocal
from django.core.signing import base64_hmac
from django.utils.crypto import constant_time_compare
def make_api_key_callback(response, key):
class AuthenticationError(Exception):
pass
def callback(request, context):
if not all(h in request.headers for h in ('x-api-user', 'x-api-time', 'x-api-signature')):
context.status_code = 401
raise AuthenticationError('API headers are missing')
request_time = dateutil.parser.parse(request.headers['x-api-time'])
if request_time > datetime.datetime.now(tzlocal()) + datetime.timedelta(minutes=5):
raise AuthenticationError('API request date is too old')
try:
salt, signature = request.headers['x-api-signature'].split(b':', 1)
signature = signature.strip(b'=')
except ValueError:
raise AuthenticationError('API signature is malformed')
test_signature = base64_hmac(salt, request.headers['x-api-time'], key)
if constant_time_compare(test_signature, signature):
context.status_code = 200
return response
context.status_code = 401
raise AuthenticationError('Key signature is bad ({} != {})'.format(signature, test_signature))
return callback
| none | 1 | 2.442657 | 2 | |
tubee/routes/api_subscription.py | tomy0000000/Tubee | 8 | 6620898 | <filename>tubee/routes/api_subscription.py
from flask import Blueprint, abort, jsonify
from flask_login import current_user, login_required
from tubee.forms import SubscriptionForm, SubscriptionTagForm
api_subscription_blueprint = Blueprint("api_subscription", __name__)
@api_subscription_blueprint.route("/add", methods=["POST"])
@login_required
def add():
"""Add a new subscription"""
form = SubscriptionForm()
if not form.validate_on_submit():
abort(403)
results = current_user.subscribe_to(form.channel_id.data)
response = {"success": results}
return jsonify(response)
@api_subscription_blueprint.route("/remove", methods=["POST"])
@login_required
def remove():
"""Remove a new subscription"""
form = SubscriptionForm(channel_id_hidden=True)
if not form.validate_on_submit():
abort(403)
results = current_user.unbsubscribe(form.channel_id.data)
response = {"success": results}
return jsonify(response)
@api_subscription_blueprint.route("/tag", methods=["POST"])
@login_required
def tag():
"""Add a tag to subscription"""
form = SubscriptionTagForm()
if not form.validate_on_submit():
abort(403)
subscription = current_user.subscriptions.filter_by(
channel_id=form.channel_id.data
).first_or_404()
response = subscription.tag(form.tag_name.data)
return jsonify(str(response))
@api_subscription_blueprint.route("/untag", methods=["POST"])
@login_required
def untag():
"""Remove a tag from subscription"""
form = SubscriptionTagForm(tag_name_hidden=True)
if not form.validate_on_submit():
abort(403)
subscription = current_user.subscriptions.filter_by(
channel_id=form.channel_id.data
).first_or_404()
tag = current_user.tags.filter_by(name=form.tag_name.data).first_or_404()
results = subscription.untag(tag.id)
response = {"success": results}
return jsonify(response)
| <filename>tubee/routes/api_subscription.py
from flask import Blueprint, abort, jsonify
from flask_login import current_user, login_required
from tubee.forms import SubscriptionForm, SubscriptionTagForm
api_subscription_blueprint = Blueprint("api_subscription", __name__)
@api_subscription_blueprint.route("/add", methods=["POST"])
@login_required
def add():
"""Add a new subscription"""
form = SubscriptionForm()
if not form.validate_on_submit():
abort(403)
results = current_user.subscribe_to(form.channel_id.data)
response = {"success": results}
return jsonify(response)
@api_subscription_blueprint.route("/remove", methods=["POST"])
@login_required
def remove():
"""Remove a new subscription"""
form = SubscriptionForm(channel_id_hidden=True)
if not form.validate_on_submit():
abort(403)
results = current_user.unbsubscribe(form.channel_id.data)
response = {"success": results}
return jsonify(response)
@api_subscription_blueprint.route("/tag", methods=["POST"])
@login_required
def tag():
"""Add a tag to subscription"""
form = SubscriptionTagForm()
if not form.validate_on_submit():
abort(403)
subscription = current_user.subscriptions.filter_by(
channel_id=form.channel_id.data
).first_or_404()
response = subscription.tag(form.tag_name.data)
return jsonify(str(response))
@api_subscription_blueprint.route("/untag", methods=["POST"])
@login_required
def untag():
"""Remove a tag from subscription"""
form = SubscriptionTagForm(tag_name_hidden=True)
if not form.validate_on_submit():
abort(403)
subscription = current_user.subscriptions.filter_by(
channel_id=form.channel_id.data
).first_or_404()
tag = current_user.tags.filter_by(name=form.tag_name.data).first_or_404()
results = subscription.untag(tag.id)
response = {"success": results}
return jsonify(response)
| en | 0.643304 | Add a new subscription Remove a new subscription Add a tag to subscription Remove a tag from subscription | 2.462349 | 2 |
echolab2/instruments/util/tag_data.py | nlauffenburger/pyEcholab | 20 | 6620899 | <reponame>nlauffenburger/pyEcholab
# coding=utf-8
# National Oceanic and Atmospheric Administration (NOAA)
# Alaskan Fisheries Science Center (AFSC)
# Resource Assessment and Conservation Engineering (RACE)
# Midwater Assessment and Conservation Engineering (MACE)
# THIS SOFTWARE AND ITS DOCUMENTATION ARE CONSIDERED TO BE IN THE PUBLIC DOMAIN
# AND THUS ARE AVAILABLE FOR UNRESTRICTED PUBLIC USE. THEY ARE FURNISHED "AS IS."
# THE AUTHORS, THE UNITED STATES GOVERNMENT, ITS INSTRUMENTALITIES, OFFICERS,
# EMPLOYEES, AND AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO THE USEFULNESS
# OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME NO RESPONSIBILITY
# (1) FOR THE USE OF THE SOFTWARE AND DOCUMENTATION; OR (2) TO PROVIDE TECHNICAL
# SUPPORT TO USERS.
class TAGData(object):
    """Container for TAG0 (annotation) datagrams read from Simrad .raw files."""

    def __init__(self, file):
        # ``file`` is accepted for interface parity with the other datagram
        # container classes and is not used here.
        # Annotations are stored as dicts: {'time': <datetime>, 'text': <str>}.
        self.annotations = []

    def add_datagram(self, time, text):
        """Append one TAG0 annotation.

        time -- datetime the annotation was recorded
        text -- the annotation text
        """
        entry = {'time': time, 'text': text}
        self.annotations.append(entry)
| # coding=utf-8
# National Oceanic and Atmospheric Administration (NOAA)
# Alaskan Fisheries Science Center (AFSC)
# Resource Assessment and Conservation Engineering (RACE)
# Midwater Assessment and Conservation Engineering (MACE)
# THIS SOFTWARE AND ITS DOCUMENTATION ARE CONSIDERED TO BE IN THE PUBLIC DOMAIN
# AND THUS ARE AVAILABLE FOR UNRESTRICTED PUBLIC USE. THEY ARE FURNISHED "AS IS."
# THE AUTHORS, THE UNITED STATES GOVERNMENT, ITS INSTRUMENTALITIES, OFFICERS,
# EMPLOYEES, AND AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO THE USEFULNESS
# OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME NO RESPONSIBILITY
# (1) FOR THE USE OF THE SOFTWARE AND DOCUMENTATION; OR (2) TO PROVIDE TECHNICAL
# SUPPORT TO USERS.
class TAGData(object):
'''
The TAGData class provides storage for the TAG0, aka annotations datagrams
in Simrad .raw files.
'''
def __init__(self, file):
# store the annotation text as a list of dicts in the form {'time':0, 'text':''}
self.annotations = []
def add_datagram(self, time, text):
'''
add_datagram adds a TAG0 datagram to the
time is a datetime object
text is a string containing the annotation text
'''
# add the raw NMEA datagram
self.annotations.append({'time':time, 'text':text}) | en | 0.552099 | # coding=utf-8 # National Oceanic and Atmospheric Administration (NOAA) # Alaskan Fisheries Science Center (AFSC) # Resource Assessment and Conservation Engineering (RACE) # Midwater Assessment and Conservation Engineering (MACE) # THIS SOFTWARE AND ITS DOCUMENTATION ARE CONSIDERED TO BE IN THE PUBLIC DOMAIN # AND THUS ARE AVAILABLE FOR UNRESTRICTED PUBLIC USE. THEY ARE FURNISHED "AS IS." # THE AUTHORS, THE UNITED STATES GOVERNMENT, ITS INSTRUMENTALITIES, OFFICERS, # EMPLOYEES, AND AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO THE USEFULNESS # OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME NO RESPONSIBILITY # (1) FOR THE USE OF THE SOFTWARE AND DOCUMENTATION; OR (2) TO PROVIDE TECHNICAL # SUPPORT TO USERS. The TAGData class provides storage for the TAG0, aka annotations datagrams in Simrad .raw files. # store the annotation text as a list of dicts in the form {'time':0, 'text':''} add_datagram adds a TAG0 datagram to the time is a datetime object text is a string containing the annotation text # add the raw NMEA datagram | 2.720488 | 3 |
topobank/manager/migrations/0025_alter_topography_instrument_parameters.py | ContactEngineering/TopoBank | 3 | 6620900 | <gh_stars>1-10
# Generated by Django 3.2.7 on 2021-09-17 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: changes
    # Topography.instrument_parameters to a JSONField that allows blank
    # values and defaults to an empty dict.

    dependencies = [
        ('manager', '0024_auto_20210804_1426'),
    ]

    operations = [
        migrations.AlterField(
            model_name='topography',
            name='instrument_parameters',
            field=models.JSONField(blank=True, default=dict),
        ),
    ]
| # Generated by Django 3.2.7 on 2021-09-17 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manager', '0024_auto_20210804_1426'),
]
operations = [
migrations.AlterField(
model_name='topography',
name='instrument_parameters',
field=models.JSONField(blank=True, default=dict),
),
] | en | 0.826268 | # Generated by Django 3.2.7 on 2021-09-17 14:30 | 1.401939 | 1 |
svbrdf/module_svbrdf.py | ywjleft/SVBRDF_from_Video | 1 | 6620901 | import tensorflow as tf
import numpy as np
#Convolution implementation
def conv_down(batch_input, out_channels, stride=2, filterSize=3, initScale = 0.02, useXavier=False, paddingSize = 1, useBias=False, normKernel=True):
with tf.variable_scope("conv"):
in_height, in_width, in_channels = [batch_input.get_shape()[1], batch_input.get_shape()[2], int(batch_input.get_shape()[-1])]
filter = tf.get_variable("filter", [filterSize, filterSize, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, np.sqrt(2.0/(int(in_channels) + int(out_channels))) * initScale) if useXavier else tf.random_normal_initializer(0, initScale))
if normKernel:
d = tf.rsqrt(tf.reduce_sum(tf.square(filter), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter *= d # [BkkIO] Scale output feature maps.
padded_input = tf.pad(batch_input, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC") #SYMMETRIC
conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
if useBias:
offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
conv = conv + offset
return conv
def conv_same(batch_input, out_channels, stride=1, filterSize=3, initScale = 0.02, useXavier=False, paddingSize = 1, useBias=False, normKernel=True):
with tf.variable_scope("conv"):
in_height, in_width, in_channels = [batch_input.get_shape()[1], batch_input.get_shape()[2], int(batch_input.get_shape()[-1])]
filter = tf.get_variable("filter", [filterSize, filterSize, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, np.sqrt(2.0/(int(in_channels) + int(out_channels))) * initScale) if useXavier else tf.random_normal_initializer(0, initScale))
if normKernel:
d = tf.rsqrt(tf.reduce_sum(tf.square(filter), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter *= d # [BkkIO] Scale output feature maps.
padded_input = tf.pad(batch_input, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC") #SYMMETRIC
conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
if useBias:
offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
conv = conv + offset
return conv
def lrelu(x, a):
with tf.name_scope("lrelu"):
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
#Deconvolution used in the method
def deconv(batch_input, out_channels, useBias=False, normKernel=True):
with tf.variable_scope("deconv"):
in_height, in_width, in_channels = [int(batch_input.get_shape()[1]), int(batch_input.get_shape()[2]), int(batch_input.get_shape()[3])]
filter = tf.get_variable("filter", [3, 3, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
filter1 = tf.get_variable("filter1", [3, 3, out_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
if normKernel:
d = tf.rsqrt(tf.reduce_sum(tf.square(filter), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter *= d # [BkkIO] Scale output feature maps.
d1 = tf.rsqrt(tf.reduce_sum(tf.square(filter1), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter1 *= d1 # [BkkIO] Scale output feature maps.
# [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
# => [batch, out_height, out_width, out_channels]
resized_images = tf.image.resize_images(batch_input, [in_height * 2, in_width * 2], method = tf.image.ResizeMethod.NEAREST_NEIGHBOR)#BILINEAR
paddingSize = 1
padded = tf.pad(resized_images, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC")#CONSTANT
conv = tf.nn.conv2d(padded, filter, [1, 1, 1, 1], padding="VALID")
padded = tf.pad(conv, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC")#CONSTANT
conv = tf.nn.conv2d(padded, filter1, [1, 1, 1, 1], padding="VALID")
if useBias:
offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
conv = conv + offset
return conv
#input is of shape [batch, X]. Returns the outputs of the layer.
def fullyConnected(input, outputDim, useBias, layerName = "fully_connected", initMultiplyer = 1.0):
with tf.variable_scope(layerName):
batchSize = tf.shape(input)[0]
inputChannels = int(input.get_shape()[-1])
weights = tf.get_variable("weight", [inputChannels, outputDim ], dtype=tf.float32, initializer=tf.random_normal_initializer(0, initMultiplyer * tf.sqrt(1.0/float(inputChannels)))) #TODO Is this init a good idea ?
weightsTiled = tf.tile(tf.expand_dims(weights, axis = 0), [batchSize, 1,1])
squeezedInput = input
if (len(input.get_shape()) > 3) :
squeezedInput = tf.squeeze(squeezedInput, [1])
squeezedInput = tf.squeeze(squeezedInput, [1])
outputs = tf.matmul(tf.expand_dims(squeezedInput, axis = 1), weightsTiled)
outputs = tf.squeeze(outputs, [1])
if(useBias):
bias = tf.get_variable("bias", [outputDim], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.002))
outputs = outputs + tf.expand_dims(bias, axis = 0)
return outputs
#Takes a globalGenerator output as input and transforms it so it can be added to the main U-NET track
def GlobalToGenerator(inputs, channels):
with tf.variable_scope("GlobalToGenerator1"):
fc1 = fullyConnected(inputs, channels, False, "fullyConnected_global_to_unet" ,0.01) #Why so low ?
return tf.expand_dims(tf.expand_dims(fc1, axis = 1), axis=1)
| import tensorflow as tf
import numpy as np
#Convolution implementation
def conv_down(batch_input, out_channels, stride=2, filterSize=3, initScale = 0.02, useXavier=False, paddingSize = 1, useBias=False, normKernel=True):
with tf.variable_scope("conv"):
in_height, in_width, in_channels = [batch_input.get_shape()[1], batch_input.get_shape()[2], int(batch_input.get_shape()[-1])]
filter = tf.get_variable("filter", [filterSize, filterSize, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, np.sqrt(2.0/(int(in_channels) + int(out_channels))) * initScale) if useXavier else tf.random_normal_initializer(0, initScale))
if normKernel:
d = tf.rsqrt(tf.reduce_sum(tf.square(filter), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter *= d # [BkkIO] Scale output feature maps.
padded_input = tf.pad(batch_input, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC") #SYMMETRIC
conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
if useBias:
offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
conv = conv + offset
return conv
def conv_same(batch_input, out_channels, stride=1, filterSize=3, initScale = 0.02, useXavier=False, paddingSize = 1, useBias=False, normKernel=True):
with tf.variable_scope("conv"):
in_height, in_width, in_channels = [batch_input.get_shape()[1], batch_input.get_shape()[2], int(batch_input.get_shape()[-1])]
filter = tf.get_variable("filter", [filterSize, filterSize, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, np.sqrt(2.0/(int(in_channels) + int(out_channels))) * initScale) if useXavier else tf.random_normal_initializer(0, initScale))
if normKernel:
d = tf.rsqrt(tf.reduce_sum(tf.square(filter), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter *= d # [BkkIO] Scale output feature maps.
padded_input = tf.pad(batch_input, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC") #SYMMETRIC
conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
if useBias:
offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
conv = conv + offset
return conv
def lrelu(x, a):
with tf.name_scope("lrelu"):
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
#Deconvolution used in the method
def deconv(batch_input, out_channels, useBias=False, normKernel=True):
with tf.variable_scope("deconv"):
in_height, in_width, in_channels = [int(batch_input.get_shape()[1]), int(batch_input.get_shape()[2]), int(batch_input.get_shape()[3])]
filter = tf.get_variable("filter", [3, 3, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
filter1 = tf.get_variable("filter1", [3, 3, out_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
if normKernel:
d = tf.rsqrt(tf.reduce_sum(tf.square(filter), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter *= d # [BkkIO] Scale output feature maps.
d1 = tf.rsqrt(tf.reduce_sum(tf.square(filter1), axis=[0,1,2]) + 1e-8) # [BO] Scaling factor.
filter1 *= d1 # [BkkIO] Scale output feature maps.
# [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
# => [batch, out_height, out_width, out_channels]
resized_images = tf.image.resize_images(batch_input, [in_height * 2, in_width * 2], method = tf.image.ResizeMethod.NEAREST_NEIGHBOR)#BILINEAR
paddingSize = 1
padded = tf.pad(resized_images, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC")#CONSTANT
conv = tf.nn.conv2d(padded, filter, [1, 1, 1, 1], padding="VALID")
padded = tf.pad(conv, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC")#CONSTANT
conv = tf.nn.conv2d(padded, filter1, [1, 1, 1, 1], padding="VALID")
if useBias:
offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
conv = conv + offset
return conv
#input is of shape [batch, X]. Returns the outputs of the layer.
def fullyConnected(input, outputDim, useBias, layerName = "fully_connected", initMultiplyer = 1.0):
with tf.variable_scope(layerName):
batchSize = tf.shape(input)[0]
inputChannels = int(input.get_shape()[-1])
weights = tf.get_variable("weight", [inputChannels, outputDim ], dtype=tf.float32, initializer=tf.random_normal_initializer(0, initMultiplyer * tf.sqrt(1.0/float(inputChannels)))) #TODO Is this init a good idea ?
weightsTiled = tf.tile(tf.expand_dims(weights, axis = 0), [batchSize, 1,1])
squeezedInput = input
if (len(input.get_shape()) > 3) :
squeezedInput = tf.squeeze(squeezedInput, [1])
squeezedInput = tf.squeeze(squeezedInput, [1])
outputs = tf.matmul(tf.expand_dims(squeezedInput, axis = 1), weightsTiled)
outputs = tf.squeeze(outputs, [1])
if(useBias):
bias = tf.get_variable("bias", [outputDim], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.002))
outputs = outputs + tf.expand_dims(bias, axis = 0)
return outputs
#Takes a globalGenerator output as input and transforms it so it can be added to the main U-NET track
def GlobalToGenerator(inputs, channels):
with tf.variable_scope("GlobalToGenerator1"):
fc1 = fullyConnected(inputs, channels, False, "fullyConnected_global_to_unet" ,0.01) #Why so low ?
return tf.expand_dims(tf.expand_dims(fc1, axis = 1), axis=1)
| en | 0.691573 | #Convolution implementation # [BO] Scaling factor. # [BkkIO] Scale output feature maps. #SYMMETRIC # [BO] Scaling factor. # [BkkIO] Scale output feature maps. #SYMMETRIC # adding these together creates the leak part and linear part # then cancels them out by subtracting/adding an absolute value term # leak: a*x/2 - a*abs(x)/2 # linear: x/2 + abs(x)/2 # this block looks like it has 2 inputs on the graph unless we do this #Deconvolution used in the method # [BO] Scaling factor. # [BkkIO] Scale output feature maps. # [BO] Scaling factor. # [BkkIO] Scale output feature maps. # [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels] # => [batch, out_height, out_width, out_channels] #BILINEAR #CONSTANT #CONSTANT #input is of shape [batch, X]. Returns the outputs of the layer. #TODO Is this init a good idea ? #Takes a globalGenerator output as input and transforms it so it can be added to the main U-NET track #Why so low ? | 2.367182 | 2 |
data/preprocess.py | Real-Silverywing/pytorch_classification | 0 | 6620902 | import os
import glob
import sys
sys.path.append("..")
import cfg
import random
if __name__ == '__main__':
traindata_path = cfg.BASE + r'\train'
labels = os.listdir(traindata_path)
valdata_path = cfg.BASE + r'\test'
##写train.txt文件
txtpath = cfg.BASE+r'/'
print(labels)
if os.path.exists(txtpath + 'train.txt'): os.remove(txtpath + 'train.txt')
if os.path.exists(txtpath + 'val.txt'): os.remove(txtpath + 'val.txt')
if os.path.exists(txtpath + 'test.txt'): os.remove(txtpath + 'test.txt')
for index, label in enumerate(labels):
imglist = glob.glob(os.path.join(traindata_path,label, '*.jpg'))
#print(imglist)
random.shuffle(imglist)
print('类别:{}--->【{}】 读取图像数量:{}'.format(label,str(index),len(imglist)))
trainlist = imglist[:int(0.8*len(imglist))]
vallist = imglist[(int(0.8*len(imglist))):]
print(' Train: {} Validation: {} '.format(len(trainlist), len(vallist)))
#可能会出现浪费样本,但是保证val和test不冲突
with open(txtpath + 'train.txt', 'a')as f:
for img in trainlist:
# print(img + ' ' + str(index))
f.write(img + ' ' + str(index))
f.write('\n')
with open(txtpath + 'val.txt', 'a')as f:
for img in vallist:
# print(img + ' ' + str(index))
f.write(img + ' ' + str(index))
f.write('\n')
imglist = glob.glob(os.path.join(valdata_path, '*.jpg'))
with open(txtpath + 'test.txt', 'a')as f:
for img in imglist:
f.write(img)
f.write('\n') | import os
import glob
import sys
sys.path.append("..")
import cfg
import random
if __name__ == '__main__':
traindata_path = cfg.BASE + r'\train'
labels = os.listdir(traindata_path)
valdata_path = cfg.BASE + r'\test'
##写train.txt文件
txtpath = cfg.BASE+r'/'
print(labels)
if os.path.exists(txtpath + 'train.txt'): os.remove(txtpath + 'train.txt')
if os.path.exists(txtpath + 'val.txt'): os.remove(txtpath + 'val.txt')
if os.path.exists(txtpath + 'test.txt'): os.remove(txtpath + 'test.txt')
for index, label in enumerate(labels):
imglist = glob.glob(os.path.join(traindata_path,label, '*.jpg'))
#print(imglist)
random.shuffle(imglist)
print('类别:{}--->【{}】 读取图像数量:{}'.format(label,str(index),len(imglist)))
trainlist = imglist[:int(0.8*len(imglist))]
vallist = imglist[(int(0.8*len(imglist))):]
print(' Train: {} Validation: {} '.format(len(trainlist), len(vallist)))
#可能会出现浪费样本,但是保证val和test不冲突
with open(txtpath + 'train.txt', 'a')as f:
for img in trainlist:
# print(img + ' ' + str(index))
f.write(img + ' ' + str(index))
f.write('\n')
with open(txtpath + 'val.txt', 'a')as f:
for img in vallist:
# print(img + ' ' + str(index))
f.write(img + ' ' + str(index))
f.write('\n')
imglist = glob.glob(os.path.join(valdata_path, '*.jpg'))
with open(txtpath + 'test.txt', 'a')as f:
for img in imglist:
f.write(img)
f.write('\n') | zh | 0.494068 | ##写train.txt文件 #print(imglist) #可能会出现浪费样本,但是保证val和test不冲突 # print(img + ' ' + str(index)) # print(img + ' ' + str(index)) | 2.507757 | 3 |
robot-server/robot_server/robot/calibration/check/session.py | fakela/opentrons | 0 | 6620903 | <filename>robot-server/robot_server/robot/calibration/check/session.py
import typing
import logging
from uuid import uuid4
from enum import Enum
from dataclasses import dataclass
from robot_server.robot.calibration.session import CalibrationSession, \
CalibrationException, HEIGHT_SAFETY_BUFFER
from opentrons.types import Mount, Point, Location
from robot_server.robot.calibration.check.util import StateMachine, WILDCARD
from robot_server.robot.calibration.check.models import ComparisonStatus
from robot_server.robot.calibration.helper_classes import (
CheckMove, DeckCalibrationError, PipetteRank, PipetteInfo, PipetteStatus
)
from robot_server.service.session.models import OffsetVector,\
CalibrationCommand, CalibrationCheckCommand
from opentrons.hardware_control import ThreadManager
from opentrons.protocol_api import labware
from .constants import (PIPETTE_TOLERANCES,
P1000_OK_TIP_PICK_UP_VECTOR,
DEFAULT_OK_TIP_PICK_UP_VECTOR,
MOVE_TO_TIP_RACK_SAFETY_BUFFER)
MODULE_LOG = logging.getLogger(__name__)
"""
A set of endpoints that can be used to create a session for any robot
calibration tasks such as checking your calibration data, performing mount
offset or a robot deck transform.
"""
class CalibrationCheckState(str, Enum):
sessionStarted = "sessionStarted"
labwareLoaded = "labwareLoaded"
preparingFirstPipette = "preparingFirstPipette"
inspectingFirstTip = "inspectingFirstTip"
joggingFirstPipetteToHeight = "joggingFirstPipetteToHeight"
comparingFirstPipetteHeight = "comparingFirstPipetteHeight"
joggingFirstPipetteToPointOne = "joggingFirstPipetteToPointOne"
comparingFirstPipettePointOne = "comparingFirstPipettePointOne"
joggingFirstPipetteToPointTwo = "joggingFirstPipetteToPointTwo"
comparingFirstPipettePointTwo = "comparingFirstPipettePointTwo"
joggingFirstPipetteToPointThree = "joggingFirstPipetteToPointThree"
comparingFirstPipettePointThree = "comparingFirstPipettePointThree"
preparingSecondPipette = "preparingSecondPipette"
inspectingSecondTip = "inspectingSecondTip"
joggingSecondPipetteToHeight = "joggingSecondPipetteToHeight"
comparingSecondPipetteHeight = "comparingSecondPipetteHeight"
joggingSecondPipetteToPointOne = "joggingSecondPipetteToPointOne"
comparingSecondPipettePointOne = "comparingSecondPipettePointOne"
returningTip = "returningTip"
sessionExited = "sessionExited"
badCalibrationData = "badCalibrationData"
checkComplete = "checkComplete"
class CalibrationCheckTrigger(str, Enum):
load_labware = CalibrationCommand.load_labware.value
prepare_pipette = CalibrationCheckCommand.prepare_pipette.value
jog = CalibrationCommand.jog.value
pick_up_tip = CalibrationCommand.pick_up_tip.value
confirm_tip_attached = CalibrationCommand.confirm_tip_attached.value
invalidate_tip = CalibrationCommand.invalidate_tip.value
compare_point = CalibrationCheckCommand.compare_point.value
go_to_next_check = CalibrationCheckCommand.go_to_next_check.value
exit = CalibrationCommand.exit.value
reject_calibration = CalibrationCheckCommand.reject_calibration.value
CHECK_TRANSITIONS: typing.List[typing.Dict[str, typing.Any]] = [
{
"trigger": CalibrationCheckTrigger.load_labware,
"from_state": CalibrationCheckState.sessionStarted,
"to_state": CalibrationCheckState.labwareLoaded,
"before": "_load_tip_rack_objects"
},
{
"trigger": CalibrationCheckTrigger.prepare_pipette,
"from_state": CalibrationCheckState.labwareLoaded,
"to_state": CalibrationCheckState.preparingFirstPipette,
"after": "_move_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.preparingFirstPipette,
"to_state": CalibrationCheckState.preparingFirstPipette,
"before": "_jog_first_pipette",
},
{
"trigger": CalibrationCheckTrigger.pick_up_tip,
"from_state": CalibrationCheckState.preparingFirstPipette,
"to_state": CalibrationCheckState.inspectingFirstTip,
"after": [
"_register_point_first_pipette",
"_pick_up_tip_first_pipette"]
},
{
"trigger": CalibrationCheckTrigger.invalidate_tip,
"from_state": CalibrationCheckState.inspectingFirstTip,
"to_state": CalibrationCheckState.preparingFirstPipette,
"before": "_return_first_tip",
"after": "_move_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.confirm_tip_attached,
"from_state": CalibrationCheckState.inspectingFirstTip,
"to_state": CalibrationCheckState.badCalibrationData,
"condition": "_is_tip_pick_up_dangerous",
},
{
"trigger": CalibrationCheckTrigger.confirm_tip_attached,
"from_state": CalibrationCheckState.inspectingFirstTip,
"to_state": CalibrationCheckState.joggingFirstPipetteToHeight,
"after": "_move_first_pipette",
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.joggingFirstPipetteToHeight,
"to_state": CalibrationCheckState.joggingFirstPipetteToHeight,
"before": "_jog_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.compare_point,
"from_state": CalibrationCheckState.joggingFirstPipetteToHeight,
"to_state": CalibrationCheckState.comparingFirstPipetteHeight,
"after": "_register_point_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.go_to_next_check,
"from_state": CalibrationCheckState.comparingFirstPipetteHeight,
"to_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
"after": "_move_first_pipette",
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
"to_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
"before": "_jog_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.compare_point,
"from_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
"to_state": CalibrationCheckState.comparingFirstPipettePointOne,
"after": "_register_point_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.go_to_next_check,
"from_state": CalibrationCheckState.comparingFirstPipettePointOne,
"to_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
"after": "_move_first_pipette",
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
"to_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
"before": "_jog_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.compare_point,
"from_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
"to_state": CalibrationCheckState.comparingFirstPipettePointTwo,
"after": "_register_point_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.go_to_next_check,
"from_state": CalibrationCheckState.comparingFirstPipettePointTwo,
"to_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
"after": "_move_first_pipette",
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
"to_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
"before": "_jog_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.compare_point,
"from_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
"to_state": CalibrationCheckState.comparingFirstPipettePointThree,
"after": "_register_point_first_pipette"
},
{
"trigger": CalibrationCheckTrigger.go_to_next_check,
"from_state": CalibrationCheckState.comparingFirstPipettePointThree,
"to_state": CalibrationCheckState.preparingSecondPipette,
"condition": "_is_checking_both_mounts",
"before": "_trash_first_pipette_tip",
"after": "_move_second_pipette",
},
{
"trigger": CalibrationCheckTrigger.go_to_next_check,
"from_state": CalibrationCheckState.comparingFirstPipettePointThree,
"to_state": CalibrationCheckState.checkComplete,
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.preparingSecondPipette,
"to_state": CalibrationCheckState.preparingSecondPipette,
"before": "_jog_second_pipette",
},
{
"trigger": CalibrationCheckTrigger.pick_up_tip,
"from_state": CalibrationCheckState.preparingSecondPipette,
"to_state": CalibrationCheckState.inspectingSecondTip,
"after": [
"_register_point_second_pipette",
"_pick_up_tip_second_pipette"]
},
{
"trigger": CalibrationCheckTrigger.invalidate_tip,
"from_state": CalibrationCheckState.inspectingSecondTip,
"to_state": CalibrationCheckState.preparingSecondPipette,
"before": "_return_second_tip",
"after": "_move_second_pipette"
},
{
"trigger": CalibrationCheckTrigger.confirm_tip_attached,
"from_state": CalibrationCheckState.inspectingSecondTip,
"to_state": CalibrationCheckState.badCalibrationData,
"condition": "_is_tip_pick_up_dangerous",
},
{
"trigger": CalibrationCheckTrigger.confirm_tip_attached,
"from_state": CalibrationCheckState.inspectingSecondTip,
"to_state": CalibrationCheckState.joggingSecondPipetteToHeight,
"after": "_move_second_pipette",
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.joggingSecondPipetteToHeight,
"to_state": CalibrationCheckState.joggingSecondPipetteToHeight,
"before": "_jog_second_pipette"
},
{
"trigger": CalibrationCheckTrigger.compare_point,
"from_state": CalibrationCheckState.joggingSecondPipetteToHeight,
"to_state": CalibrationCheckState.comparingSecondPipetteHeight,
"after": "_register_point_second_pipette"
},
{
"trigger": CalibrationCheckTrigger.go_to_next_check,
"from_state": CalibrationCheckState.comparingSecondPipetteHeight,
"to_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
"after": "_move_second_pipette",
},
{
"trigger": CalibrationCheckTrigger.jog,
"from_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
"to_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
"before": "_jog_second_pipette"
},
{
"trigger": CalibrationCheckTrigger.compare_point,
"from_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
"to_state": CalibrationCheckState.comparingSecondPipettePointOne,
"after": "_register_point_second_pipette"
},
{
"trigger": CalibrationCheckTrigger.go_to_next_check,
"from_state": CalibrationCheckState.comparingSecondPipettePointOne,
"to_state": CalibrationCheckState.checkComplete,
},
{
"trigger": CalibrationCheckTrigger.exit,
"from_state": WILDCARD,
"to_state": CalibrationCheckState.sessionExited
},
{
"trigger": CalibrationCheckTrigger.reject_calibration,
"from_state": WILDCARD,
"to_state": CalibrationCheckState.badCalibrationData
}
]
@dataclass
class ComparisonParams:
reference_state: CalibrationCheckState
COMPARISON_STATE_MAP: typing.Dict[CalibrationCheckState, ComparisonParams] = {
CalibrationCheckState.comparingFirstPipetteHeight: ComparisonParams(
reference_state=CalibrationCheckState.joggingFirstPipetteToHeight,
),
CalibrationCheckState.comparingFirstPipettePointOne: ComparisonParams(
reference_state=CalibrationCheckState.joggingFirstPipetteToPointOne,
),
CalibrationCheckState.comparingFirstPipettePointTwo: ComparisonParams(
reference_state=CalibrationCheckState.joggingFirstPipetteToPointTwo,
),
CalibrationCheckState.comparingFirstPipettePointThree: ComparisonParams(
reference_state=CalibrationCheckState.joggingFirstPipetteToPointThree,
),
CalibrationCheckState.comparingSecondPipetteHeight: ComparisonParams(
reference_state=CalibrationCheckState.joggingSecondPipetteToHeight,
),
CalibrationCheckState.comparingSecondPipettePointOne: ComparisonParams(
reference_state=CalibrationCheckState.joggingSecondPipetteToPointOne,
),
}
class CheckCalibrationSession(CalibrationSession, StateMachine):
def __init__(self, hardware: 'ThreadManager',
lights_on_before: bool = False):
CalibrationSession.__init__(self, hardware, lights_on_before)
StateMachine.__init__(self, states=[s for s in CalibrationCheckState],
transitions=CHECK_TRANSITIONS,
initial_state="sessionStarted")
self.session_type = 'check'
self._saved_points: typing.Dict[CalibrationCheckState, Point] = {}
async def handle_command(self,
name: str,
data: typing.Dict[typing.Any, typing.Any]):
"""
Handle a client command
:param name: Name of the command
:param data: Data supplied in command
:return: None
"""
await self.trigger_transition(trigger=name, **data)
def _get_pipette_by_rank(self, rank: PipetteRank) -> \
typing.Optional[PipetteInfo]:
try:
return next(p for p in self._pip_info_by_mount.values()
if p.rank == rank)
except StopIteration:
return None
def can_distinguish_instr_offset(self):
"""
whether or not we can separate out
calibration diffs that are due to instrument
offset or deck transform or both
"""
first_pip = self._get_pipette_by_rank(PipetteRank.first)
return first_pip and first_pip.mount != Mount.LEFT
@property
def _initial_z_offset(self):
return Point(0, 0, 0.3)
async def _is_checking_both_mounts(self):
return len(self._pip_info_by_mount) == 2
async def _load_tip_rack_objects(self):
"""
A function that takes tip rack information
and loads them onto the deck.
"""
second_pip = self._get_pipette_by_rank(PipetteRank.second)
for name, lw_data in self._labware_info.items():
parent = self._deck.position_for(lw_data.slot)
lw = labware.Labware(lw_data.definition, parent)
self._deck[lw_data.slot] = lw
for mount in lw_data.forMounts:
is_second_mount = second_pip and second_pip.mount == mount
pips_share_rack = len(lw_data.forMounts) == 2
well_name = 'A1'
if is_second_mount and pips_share_rack:
well_name = 'B1'
well = lw.wells_by_name()[well_name]
position = well.top().point + MOVE_TO_TIP_RACK_SAFETY_BUFFER
move = CheckMove(position=position, locationId=uuid4())
if is_second_mount:
self._moves.preparingSecondPipette = move
else:
self._moves.preparingFirstPipette = move
def pipette_status(self) -> typing.Dict[Mount, PipetteStatus]:
"""
Public property to help format the current labware status of a given
session for the client.
"""
to_dict = {}
for mount, pip_info in self._pip_info_by_mount.items():
hw_pip = self.pipettes[mount]
p = PipetteStatus(
model=str(hw_pip['model']),
name=str(hw_pip['name']),
mount=str(mount),
tip_length=float(hw_pip['tip_length']),
has_tip=bool(hw_pip['has_tip']),
tiprack_id=pip_info.tiprack_id,
rank=str(pip_info.rank),
serial=str(hw_pip['pipette_id']),
)
to_dict[mount] = p
return to_dict
async def delete_session(self):
for mount in self._pip_info_by_mount.keys():
if self.pipettes[mount]['has_tip']:
try:
await self._trash_tip(mount)
except (CalibrationException, AssertionError):
pass
await self.hardware.home()
if not self._lights_on_before:
await self.hardware.set_lights(rails=False)
def _get_preparing_state_mount(self) -> typing.Optional[Mount]:
pip = None
if self.current_state_name == \
CalibrationCheckState.inspectingFirstTip:
pip = self._get_pipette_by_rank(PipetteRank.first)
elif self.current_state_name == \
CalibrationCheckState.inspectingSecondTip:
pip = self._get_pipette_by_rank(PipetteRank.second)
assert pip, f'cannot check prepare pipette from state:' \
f' {self.current_state_name}'
return pip.mount
def _look_up_state(self) -> CalibrationCheckState:
"""
We want to check whether a tip pick up was dangerous during the
tip inspection state, but the reference points are actually saved
during the preparing pipette state, so we should reference those
states when looking up the reference point.
:return: The calibration check state that the reference point
was saved under for tip pick up.
"""
if self.current_state_name == CalibrationCheckState.inspectingFirstTip:
return CalibrationCheckState.preparingFirstPipette
elif self.current_state_name == \
CalibrationCheckState.inspectingSecondTip:
return CalibrationCheckState.preparingSecondPipette
else:
raise CalibrationException(
f"No transition available for state {self.current_state_name}")
async def _is_tip_pick_up_dangerous(self):
"""
Function to determine whether jogged to pick up tip position is
outside of the safe threshold for conducting the rest of the check.
"""
mount = self._get_preparing_state_mount()
assert mount, 'cannot attempt tip pick up, no mount specified'
ref_state = self._look_up_state()
jogged_pt = self._saved_points[getattr(CalibrationCheckState,
self.current_state_name)]
ref_pt = self._saved_points[getattr(CalibrationCheckState,
ref_state)]
ref_pt_no_safety = ref_pt - MOVE_TO_TIP_RACK_SAFETY_BUFFER
threshold_vector = DEFAULT_OK_TIP_PICK_UP_VECTOR
pip_model = self.pipettes[mount]['model']
if str(pip_model).startswith('p1000'):
threshold_vector = P1000_OK_TIP_PICK_UP_VECTOR
xyThresholdMag = Point(0, 0, 0).magnitude_to(
threshold_vector._replace(z=0))
zThresholdMag = Point(0, 0, 0).magnitude_to(
threshold_vector._replace(x=0, y=0))
xyDiffMag = ref_pt_no_safety._replace(z=0).magnitude_to(
jogged_pt._replace(z=0))
zDiffMag = ref_pt_no_safety._replace(x=0, y=0).magnitude_to(
jogged_pt._replace(x=0, y=0))
return xyDiffMag > xyThresholdMag or zDiffMag > zThresholdMag
    async def _pick_up_tip_first_pipette(self):
        """
        Function to pick up tip. It will attempt to pick up a tip in
        the current location, and save any offset it might have from the
        original position.
        """
        pip = self._get_pipette_by_rank(PipetteRank.first)
        assert pip, 'No pipette attached on first mount'
        mount = pip.mount
        assert mount, 'cannot attempt tip pick up, no mount specified'
        # Guard against picking up a second tip onto an occupied nozzle.
        assert self.pipettes[mount]['has_tip'] is False, \
            f"Tip is already attached to {mount} pipette, " \
            "cannot pick up another"
        await self._pick_up_tip(mount)
    async def _pick_up_tip_second_pipette(self):
        """
        Function to pick up tip. It will attempt to pick up a tip in
        the current location, and save any offset it might have from the
        original position.
        """
        pip = self._get_pipette_by_rank(PipetteRank.second)
        assert pip, 'No pipette attached on second mount'
        mount = pip.mount
        assert mount, 'cannot attempt tip pick up, no mount specified'
        # Guard against picking up a second tip onto an occupied nozzle.
        assert self.pipettes[mount]['has_tip'] is False, \
            f"Tip is already attached to {mount} pipette, " \
            "cannot pick up another"
        await self._pick_up_tip(mount)
    async def _trash_first_pipette_tip(self):
        """Drop the first-rank pipette's tip into the trash."""
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, \
            'cannot trash tip from first mount, pipette not present'
        await self._trash_tip(first_pip.mount)
async def _trash_second_pipette_tip(self):
second_pip = self._get_pipette_by_rank(PipetteRank.second)
assert second_pip, \
'cannot trash tip from first mount, pipette not present'
await self._trash_tip(second_pip.mount)
@staticmethod
def _create_tiprack_param(position: typing.Dict):
new_dict = {}
for loc, data in position.items():
for loc_id, values in data.items():
offset = list(values['offset'])
pos_dict = {'offset': offset, 'locationId': str(loc)}
new_dict[str(loc_id)] = {'pipetteId': str(loc_id),
'location': pos_dict}
return new_dict
def format_params(self, next_state: str) -> typing.Dict:
template_dict = {}
if next_state == 'jog':
template_dict['vector'] = [0, 0, 0]
return template_dict
    def _determine_threshold(self, state: CalibrationCheckState) -> Point:
        """
        Helper function used to determine the threshold for comparison
        based on the state currently being compared and the pipette.
        """
        # Comparison states that belong to the first-rank pipette; any
        # other state is attributed to the second-rank pipette.
        first_pipette = [
            CalibrationCheckState.comparingFirstPipetteHeight,
            CalibrationCheckState.comparingFirstPipettePointOne,
            CalibrationCheckState.comparingFirstPipettePointTwo,
            CalibrationCheckState.comparingFirstPipettePointThree,
        ]
        if state in first_pipette:
            pip = self._get_pipette_by_rank(PipetteRank.first)
        else:
            pip = self._get_pipette_by_rank(PipetteRank.second)
        pipette_type = ''
        if pip and pip.mount:
            pipette_type = str(self.pipettes[pip.mount]['name'])
        # Tolerances vary by pipette family (P1000 / P20-P10 / other)
        # and by whether a height or a cross point is being checked.
        is_p1000 = pipette_type in ['p1000_single_gen2', 'p1000_single']
        is_p20 = pipette_type in \
            ['p20_single_gen2', 'p10_single', 'p20_multi_gen2', 'p10_multi']
        height_states = [
            CalibrationCheckState.comparingFirstPipetteHeight,
            CalibrationCheckState.comparingSecondPipetteHeight]
        cross_states = [
            CalibrationCheckState.comparingFirstPipettePointOne,
            CalibrationCheckState.comparingFirstPipettePointTwo,
            CalibrationCheckState.comparingFirstPipettePointThree,
            CalibrationCheckState.comparingSecondPipettePointOne
        ]
        if is_p1000 and state in cross_states:
            return PIPETTE_TOLERANCES['p1000_crosses']
        elif is_p1000 and state in height_states:
            return PIPETTE_TOLERANCES['p1000_height']
        elif is_p20 and state in cross_states:
            return PIPETTE_TOLERANCES['p20_crosses']
        elif state in cross_states:
            return PIPETTE_TOLERANCES['p300_crosses']
        else:
            return PIPETTE_TOLERANCES['other_height']
    def _get_error_source(
            self,
            comparisons: typing.Dict[CalibrationCheckState, ComparisonStatus],
            comparison_state: CalibrationCheckState) -> DeckCalibrationError:
        """
        Attribute an out-of-threshold comparison to a probable source:
        instrument offset when the second pipette fails after the first
        passed every step; deck transform when the first pipette fails
        and its mount lets us distinguish the two; otherwise unknown.
        """
        is_second_pip = comparison_state in [
            CalibrationCheckState.comparingSecondPipetteHeight,
            CalibrationCheckState.comparingSecondPipettePointOne,
        ]
        first_pip_keys = [
            CalibrationCheckState.comparingFirstPipetteHeight,
            CalibrationCheckState.comparingFirstPipettePointOne,
            CalibrationCheckState.comparingFirstPipettePointTwo,
            CalibrationCheckState.comparingFirstPipettePointThree,
        ]
        # "Passed" requires every first-pipette step to be present in
        # ``comparisons`` and none of them to exceed its threshold.
        compared_first = all((k in comparisons) for k in first_pip_keys)
        first_pip_steps_passed = compared_first
        for key in first_pip_keys:
            c = comparisons.get(key, None)
            if c and c.exceedsThreshold:
                first_pip_steps_passed = False
                break
        if is_second_pip and first_pip_steps_passed:
            return DeckCalibrationError.BAD_INSTRUMENT_OFFSET
        elif self.can_distinguish_instr_offset() and not is_second_pip:
            return DeckCalibrationError.BAD_DECK_TRANSFORM
        else:
            return DeckCalibrationError.UNKNOWN
    def get_comparisons_by_step(
            self) -> typing.Dict[CalibrationCheckState, ComparisonStatus]:
        """
        Build a ComparisonStatus for every comparison state whose
        reference and jogged points have both been saved, measuring the
        difference against the per-pipette threshold.
        """
        comparisons: typing.Dict[CalibrationCheckState, ComparisonStatus] = {}
        for comparison_state, comp in COMPARISON_STATE_MAP.items():
            ref_pt = self._saved_points.get(getattr(CalibrationCheckState,
                                                    comp.reference_state),
                                            None)
            jogged_pt = self._saved_points.get(getattr(CalibrationCheckState,
                                                       comparison_state), None)
            threshold_vector = self._determine_threshold(comparison_state)
            if (ref_pt is not None and jogged_pt is not None):
                diff_magnitude = None
                # A zero-Z threshold means an XY comparison; a zero-XY
                # threshold means a Z-only comparison.
                if threshold_vector.z == 0.0:
                    diff_magnitude = ref_pt._replace(z=0.0).magnitude_to(
                            jogged_pt._replace(z=0.0))
                elif threshold_vector.x == 0.0 and \
                        threshold_vector.y == 0.0:
                    diff_magnitude = ref_pt._replace(
                            x=0.0, y=0.0).magnitude_to(jogged_pt._replace(
                                x=0.0, y=0.0))
                assert diff_magnitude is not None, \
                    'step comparisons must check z or (x and y) magnitude'
                threshold_mag = Point(0, 0, 0).magnitude_to(
                        threshold_vector)
                exceeds = diff_magnitude > threshold_mag
                tform_type = DeckCalibrationError.UNKNOWN
                if exceeds:
                    tform_type = self._get_error_source(comparisons,
                                                        comparison_state)
                comparisons[getattr(CalibrationCheckState,
                                    comparison_state)] = \
                    ComparisonStatus(differenceVector=(jogged_pt - ref_pt),
                                     thresholdVector=threshold_vector,
                                     exceedsThreshold=exceeds,
                                     transformType=str(tform_type))
        return comparisons
    async def _register_point_first_pipette(self):
        """Save the current gantry position under the current state."""
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, 'cannot register point for missing first pipette'
        buffer = Point(0, 0, 0)
        # Height comparisons add the safety buffer back in — presumably
        # to keep saved heights comparable across states; TODO confirm.
        if self.current_state_name ==\
                CalibrationCheckState.comparingFirstPipetteHeight:
            buffer = HEIGHT_SAFETY_BUFFER
        current_point = self.hardware.gantry_position(
            first_pip.mount, critical_point=first_pip.critical_point)
        # ``await`` binds tighter than ``+``: the coroutine is awaited
        # first, then the buffer is added to the resulting Point.
        self._saved_points[getattr(CalibrationCheckState,
                                   self.current_state_name)] = \
            await current_point + buffer
    async def _register_point_second_pipette(self):
        """Save the current gantry position under the current state."""
        second_pip = self._get_pipette_by_rank(PipetteRank.second)
        assert second_pip, 'cannot register point for missing second pipette'
        buffer = Point(0, 0, 0)
        # Height comparisons add the safety buffer back in — presumably
        # to keep saved heights comparable across states; TODO confirm.
        if self.current_state_name ==\
                CalibrationCheckState.comparingSecondPipetteHeight:
            buffer = HEIGHT_SAFETY_BUFFER
        current_point = self.hardware.gantry_position(
            second_pip.mount, critical_point=second_pip.critical_point
        )
        # ``await`` binds tighter than ``+``: the coroutine is awaited
        # first, then the buffer is added to the resulting Point.
        self._saved_points[getattr(CalibrationCheckState,
                                   self.current_state_name)] = \
            await current_point + buffer
    async def _move_first_pipette(self):
        """Move the first-rank pipette to the position configured for the
        current state, then register the arrival point."""
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, \
            'cannot move pipette on first mount, pipette not present'
        loc_to_move = Location(getattr(self._moves,
                                       self.current_state_name).position,
                               None)
        # For the cross points, reuse the Z height found during the
        # height comparison instead of the configured Z.
        saved_z_allowlist = \
            [CalibrationCheckState.joggingFirstPipetteToPointOne,
             CalibrationCheckState.joggingFirstPipetteToPointTwo,
             CalibrationCheckState.joggingFirstPipetteToPointThree]
        if self.current_state_name in saved_z_allowlist:
            saved_height =\
                self._saved_points[getattr(CalibrationCheckState,
                                           'comparingFirstPipetteHeight')]
            z_point = \
                saved_height + self._initial_z_offset - HEIGHT_SAFETY_BUFFER
            updated_point = loc_to_move.point + z_point._replace(x=0.0, y=0.0)
            loc_to_move = Location(updated_point, None)
        await self._move(first_pip.mount, loc_to_move)
        await self._register_point_first_pipette()
    async def _move_second_pipette(self):
        """Move the second-rank pipette to the position configured for the
        current state, then register the arrival point."""
        second_pip = self._get_pipette_by_rank(PipetteRank.second)
        assert second_pip, \
            'cannot move pipette on second mount, pipette not present'
        loc_to_move = Location(getattr(self._moves,
                                       self.current_state_name).position,
                               None)
        # For the cross point, reuse the Z height found during the
        # height comparison instead of the configured Z.
        if self.current_state_name ==\
                CalibrationCheckState.joggingSecondPipetteToPointOne:
            saved_height =\
                self._saved_points[getattr(CalibrationCheckState,
                                           'comparingSecondPipetteHeight')]
            z_point = \
                saved_height + self._initial_z_offset - HEIGHT_SAFETY_BUFFER
            updated_point = loc_to_move.point + z_point._replace(x=0.0, y=0.0)
            loc_to_move = Location(updated_point, None)
        await self._move(second_pip.mount, loc_to_move)
        await self._register_point_second_pipette()
async def _jog_first_pipette(self, vector: OffsetVector):
first_pip = self._get_pipette_by_rank(PipetteRank.first)
assert first_pip, \
'cannot jog pipette on first mount, pipette not present'
await super(self.__class__, self)._jog(first_pip.mount, Point(*vector))
async def _jog_second_pipette(self, vector: OffsetVector):
second_pip = self._get_pipette_by_rank(PipetteRank.second)
assert second_pip, \
'cannot jog pipette on second mount, pipette not present'
await super(self.__class__, self)._jog(second_pip.mount,
Point(*vector))
    async def _return_first_tip(self):
        """Move back over the tip rack and drop the first pipette's tip."""
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, \
            'cannot drop tip on first mount, pipette not present'
        mount = first_pip.mount
        # Lower by half the tip length before dropping — presumably so
        # the tip seats back into the rack; TODO confirm.
        z_value = float(self.pipettes[mount]['tip_length']) * 0.5
        state_name = CalibrationCheckState.inspectingFirstTip
        return_pt = self._saved_points[getattr(CalibrationCheckState,
                                               state_name)]
        account_for_tip = return_pt - Point(0, 0, z_value)
        loc = Location(account_for_tip, None)
        await self._move(first_pip.mount, loc)
        await self._drop_tip(first_pip.mount)
    async def _return_second_tip(self):
        """Move back over the tip rack and drop the second pipette's tip."""
        second_pip = self._get_pipette_by_rank(PipetteRank.second)
        assert second_pip, \
            'cannot drop tip on second mount, pipette not present'
        mount = second_pip.mount
        # Lower by half the tip length before dropping — presumably so
        # the tip seats back into the rack; TODO confirm.
        z_value = float(self.pipettes[mount]['tip_length']) * 0.5
        state_name = CalibrationCheckState.inspectingSecondTip
        return_pt = self._saved_points[getattr(CalibrationCheckState,
                                               state_name)]
        account_for_tip = return_pt - Point(0, 0, z_value)
        loc = Location(account_for_tip, None)
        await self._move(second_pip.mount, loc)
        await self._drop_tip(second_pip.mount)
# robot-server/robot_server/robot/calibration/check/session.py
import typing
import logging
from uuid import uuid4
from enum import Enum
from dataclasses import dataclass
from robot_server.robot.calibration.session import CalibrationSession, \
CalibrationException, HEIGHT_SAFETY_BUFFER
from opentrons.types import Mount, Point, Location
from robot_server.robot.calibration.check.util import StateMachine, WILDCARD
from robot_server.robot.calibration.check.models import ComparisonStatus
from robot_server.robot.calibration.helper_classes import (
CheckMove, DeckCalibrationError, PipetteRank, PipetteInfo, PipetteStatus
)
from robot_server.service.session.models import OffsetVector,\
CalibrationCommand, CalibrationCheckCommand
from opentrons.hardware_control import ThreadManager
from opentrons.protocol_api import labware
from .constants import (PIPETTE_TOLERANCES,
P1000_OK_TIP_PICK_UP_VECTOR,
DEFAULT_OK_TIP_PICK_UP_VECTOR,
MOVE_TO_TIP_RACK_SAFETY_BUFFER)
# Module-level logger for this session implementation.
MODULE_LOG = logging.getLogger(__name__)
# NOTE(review): the string below is not a module docstring (it follows
# the imports), so it has no runtime effect; kept as-is for reference.
"""
A set of endpoints that can be used to create a session for any robot
calibration tasks such as checking your calibration data, performing mount
offset or a robot deck transform.
"""
class CalibrationCheckState(str, Enum):
    """Names of every state in the calibration check state machine.

    The str mixin lets members compare and serialize as their values.
    """
    sessionStarted = "sessionStarted"
    labwareLoaded = "labwareLoaded"
    preparingFirstPipette = "preparingFirstPipette"
    inspectingFirstTip = "inspectingFirstTip"
    joggingFirstPipetteToHeight = "joggingFirstPipetteToHeight"
    comparingFirstPipetteHeight = "comparingFirstPipetteHeight"
    joggingFirstPipetteToPointOne = "joggingFirstPipetteToPointOne"
    comparingFirstPipettePointOne = "comparingFirstPipettePointOne"
    joggingFirstPipetteToPointTwo = "joggingFirstPipetteToPointTwo"
    comparingFirstPipettePointTwo = "comparingFirstPipettePointTwo"
    joggingFirstPipetteToPointThree = "joggingFirstPipetteToPointThree"
    comparingFirstPipettePointThree = "comparingFirstPipettePointThree"
    preparingSecondPipette = "preparingSecondPipette"
    inspectingSecondTip = "inspectingSecondTip"
    joggingSecondPipetteToHeight = "joggingSecondPipetteToHeight"
    comparingSecondPipetteHeight = "comparingSecondPipetteHeight"
    joggingSecondPipetteToPointOne = "joggingSecondPipetteToPointOne"
    comparingSecondPipettePointOne = "comparingSecondPipettePointOne"
    returningTip = "returningTip"
    sessionExited = "sessionExited"
    badCalibrationData = "badCalibrationData"
    checkComplete = "checkComplete"
class CalibrationCheckTrigger(str, Enum):
    """Client commands that drive transitions in the state machine.

    Values are reused from the shared calibration command models so the
    HTTP API and the state machine stay in sync.
    """
    load_labware = CalibrationCommand.load_labware.value
    prepare_pipette = CalibrationCheckCommand.prepare_pipette.value
    jog = CalibrationCommand.jog.value
    pick_up_tip = CalibrationCommand.pick_up_tip.value
    confirm_tip_attached = CalibrationCommand.confirm_tip_attached.value
    invalidate_tip = CalibrationCommand.invalidate_tip.value
    compare_point = CalibrationCheckCommand.compare_point.value
    go_to_next_check = CalibrationCheckCommand.go_to_next_check.value
    exit = CalibrationCommand.exit.value
    reject_calibration = CalibrationCheckCommand.reject_calibration.value
# Transition table for the calibration check state machine.
# Each entry: when "trigger" arrives in "from_state", move to "to_state".
# Optional "before"/"after" name session coroutine(s) run around the
# transition; an optional "condition" coroutine must return True for the
# entry to be taken (entries are evaluated in order, so conditional
# entries precede their unconditional fallbacks).
CHECK_TRANSITIONS: typing.List[typing.Dict[str, typing.Any]] = [
    {
        "trigger": CalibrationCheckTrigger.load_labware,
        "from_state": CalibrationCheckState.sessionStarted,
        "to_state": CalibrationCheckState.labwareLoaded,
        "before": "_load_tip_rack_objects"
    },
    {
        "trigger": CalibrationCheckTrigger.prepare_pipette,
        "from_state": CalibrationCheckState.labwareLoaded,
        "to_state": CalibrationCheckState.preparingFirstPipette,
        "after": "_move_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.preparingFirstPipette,
        "to_state": CalibrationCheckState.preparingFirstPipette,
        "before": "_jog_first_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.pick_up_tip,
        "from_state": CalibrationCheckState.preparingFirstPipette,
        "to_state": CalibrationCheckState.inspectingFirstTip,
        "after": [
            "_register_point_first_pipette",
            "_pick_up_tip_first_pipette"]
    },
    {
        "trigger": CalibrationCheckTrigger.invalidate_tip,
        "from_state": CalibrationCheckState.inspectingFirstTip,
        "to_state": CalibrationCheckState.preparingFirstPipette,
        "before": "_return_first_tip",
        "after": "_move_first_pipette"
    },
    # If the jogged pick-up position was dangerously far from the saved
    # reference, confirming the tip aborts into badCalibrationData;
    # otherwise the unconditional entry below proceeds to height jogging.
    {
        "trigger": CalibrationCheckTrigger.confirm_tip_attached,
        "from_state": CalibrationCheckState.inspectingFirstTip,
        "to_state": CalibrationCheckState.badCalibrationData,
        "condition": "_is_tip_pick_up_dangerous",
    },
    {
        "trigger": CalibrationCheckTrigger.confirm_tip_attached,
        "from_state": CalibrationCheckState.inspectingFirstTip,
        "to_state": CalibrationCheckState.joggingFirstPipetteToHeight,
        "after": "_move_first_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.joggingFirstPipetteToHeight,
        "to_state": CalibrationCheckState.joggingFirstPipetteToHeight,
        "before": "_jog_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.compare_point,
        "from_state": CalibrationCheckState.joggingFirstPipetteToHeight,
        "to_state": CalibrationCheckState.comparingFirstPipetteHeight,
        "after": "_register_point_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.go_to_next_check,
        "from_state": CalibrationCheckState.comparingFirstPipetteHeight,
        "to_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
        "after": "_move_first_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
        "to_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
        "before": "_jog_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.compare_point,
        "from_state": CalibrationCheckState.joggingFirstPipetteToPointOne,
        "to_state": CalibrationCheckState.comparingFirstPipettePointOne,
        "after": "_register_point_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.go_to_next_check,
        "from_state": CalibrationCheckState.comparingFirstPipettePointOne,
        "to_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
        "after": "_move_first_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
        "to_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
        "before": "_jog_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.compare_point,
        "from_state": CalibrationCheckState.joggingFirstPipetteToPointTwo,
        "to_state": CalibrationCheckState.comparingFirstPipettePointTwo,
        "after": "_register_point_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.go_to_next_check,
        "from_state": CalibrationCheckState.comparingFirstPipettePointTwo,
        "to_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
        "after": "_move_first_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
        "to_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
        "before": "_jog_first_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.compare_point,
        "from_state": CalibrationCheckState.joggingFirstPipetteToPointThree,
        "to_state": CalibrationCheckState.comparingFirstPipettePointThree,
        "after": "_register_point_first_pipette"
    },
    # After the first pipette's last point: continue to the second
    # pipette when both mounts are being checked, else finish.
    {
        "trigger": CalibrationCheckTrigger.go_to_next_check,
        "from_state": CalibrationCheckState.comparingFirstPipettePointThree,
        "to_state": CalibrationCheckState.preparingSecondPipette,
        "condition": "_is_checking_both_mounts",
        "before": "_trash_first_pipette_tip",
        "after": "_move_second_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.go_to_next_check,
        "from_state": CalibrationCheckState.comparingFirstPipettePointThree,
        "to_state": CalibrationCheckState.checkComplete,
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.preparingSecondPipette,
        "to_state": CalibrationCheckState.preparingSecondPipette,
        "before": "_jog_second_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.pick_up_tip,
        "from_state": CalibrationCheckState.preparingSecondPipette,
        "to_state": CalibrationCheckState.inspectingSecondTip,
        "after": [
            "_register_point_second_pipette",
            "_pick_up_tip_second_pipette"]
    },
    {
        "trigger": CalibrationCheckTrigger.invalidate_tip,
        "from_state": CalibrationCheckState.inspectingSecondTip,
        "to_state": CalibrationCheckState.preparingSecondPipette,
        "before": "_return_second_tip",
        "after": "_move_second_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.confirm_tip_attached,
        "from_state": CalibrationCheckState.inspectingSecondTip,
        "to_state": CalibrationCheckState.badCalibrationData,
        "condition": "_is_tip_pick_up_dangerous",
    },
    {
        "trigger": CalibrationCheckTrigger.confirm_tip_attached,
        "from_state": CalibrationCheckState.inspectingSecondTip,
        "to_state": CalibrationCheckState.joggingSecondPipetteToHeight,
        "after": "_move_second_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.joggingSecondPipetteToHeight,
        "to_state": CalibrationCheckState.joggingSecondPipetteToHeight,
        "before": "_jog_second_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.compare_point,
        "from_state": CalibrationCheckState.joggingSecondPipetteToHeight,
        "to_state": CalibrationCheckState.comparingSecondPipetteHeight,
        "after": "_register_point_second_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.go_to_next_check,
        "from_state": CalibrationCheckState.comparingSecondPipetteHeight,
        "to_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
        "after": "_move_second_pipette",
    },
    {
        "trigger": CalibrationCheckTrigger.jog,
        "from_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
        "to_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
        "before": "_jog_second_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.compare_point,
        "from_state": CalibrationCheckState.joggingSecondPipetteToPointOne,
        "to_state": CalibrationCheckState.comparingSecondPipettePointOne,
        "after": "_register_point_second_pipette"
    },
    {
        "trigger": CalibrationCheckTrigger.go_to_next_check,
        "from_state": CalibrationCheckState.comparingSecondPipettePointOne,
        "to_state": CalibrationCheckState.checkComplete,
    },
    # Exit and rejection are accepted from any state (wildcard).
    {
        "trigger": CalibrationCheckTrigger.exit,
        "from_state": WILDCARD,
        "to_state": CalibrationCheckState.sessionExited
    },
    {
        "trigger": CalibrationCheckTrigger.reject_calibration,
        "from_state": WILDCARD,
        "to_state": CalibrationCheckState.badCalibrationData
    }
]
@dataclass
class ComparisonParams:
    # State whose saved point serves as the reference when computing the
    # difference vector for a comparison state.
    reference_state: CalibrationCheckState
# For each comparing* state, the jogging* state whose saved point is the
# reference against which that comparison's difference is measured.
COMPARISON_STATE_MAP: typing.Dict[CalibrationCheckState, ComparisonParams] = {
    CalibrationCheckState.comparingFirstPipetteHeight: ComparisonParams(
        reference_state=CalibrationCheckState.joggingFirstPipetteToHeight,
    ),
    CalibrationCheckState.comparingFirstPipettePointOne: ComparisonParams(
        reference_state=CalibrationCheckState.joggingFirstPipetteToPointOne,
    ),
    CalibrationCheckState.comparingFirstPipettePointTwo: ComparisonParams(
        reference_state=CalibrationCheckState.joggingFirstPipetteToPointTwo,
    ),
    CalibrationCheckState.comparingFirstPipettePointThree: ComparisonParams(
        reference_state=CalibrationCheckState.joggingFirstPipetteToPointThree,
    ),
    CalibrationCheckState.comparingSecondPipetteHeight: ComparisonParams(
        reference_state=CalibrationCheckState.joggingSecondPipetteToHeight,
    ),
    CalibrationCheckState.comparingSecondPipettePointOne: ComparisonParams(
        reference_state=CalibrationCheckState.joggingSecondPipetteToPointOne,
    ),
}
class CheckCalibrationSession(CalibrationSession, StateMachine):
    def __init__(self, hardware: 'ThreadManager',
                 lights_on_before: bool = False):
        """Initialize both base classes and the per-state saved points."""
        CalibrationSession.__init__(self, hardware, lights_on_before)
        StateMachine.__init__(self, states=[s for s in CalibrationCheckState],
                              transitions=CHECK_TRANSITIONS,
                              initial_state="sessionStarted")
        self.session_type = 'check'
        # Gantry positions registered during the session, keyed by the
        # state in which they were saved.
        self._saved_points: typing.Dict[CalibrationCheckState, Point] = {}
    async def handle_command(self,
                             name: str,
                             data: typing.Dict[typing.Any, typing.Any]):
        """
        Handle a client command
        :param name: Name of the command
        :param data: Data supplied in command
        :return: None
        """
        # Command data is forwarded verbatim as keyword arguments to the
        # state-machine trigger.
        await self.trigger_transition(trigger=name, **data)
def _get_pipette_by_rank(self, rank: PipetteRank) -> \
typing.Optional[PipetteInfo]:
try:
return next(p for p in self._pip_info_by_mount.values()
if p.rank == rank)
except StopIteration:
return None
    def can_distinguish_instr_offset(self):
        """
        whether or not we can separate out
        calibration diffs that are due to instrument
        offset or deck transform or both
        """
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        # Falsy (None) when no first pipette is attached. Presumably the
        # LEFT mount is the one deck calibration is performed with, so a
        # first pipette on another mount makes the causes separable —
        # TODO confirm.
        return first_pip and first_pip.mount != Mount.LEFT
    @property
    def _initial_z_offset(self):
        # Fixed 0.3 mm Z offset applied when moving to the cross points
        # (see _move_first_pipette / _move_second_pipette).
        return Point(0, 0, 0.3)
    async def _is_checking_both_mounts(self):
        """Transition condition: True when pipettes are on both mounts."""
        return len(self._pip_info_by_mount) == 2
    async def _load_tip_rack_objects(self):
        """
        A function that takes tip rack information
        and loads them onto the deck.
        """
        second_pip = self._get_pipette_by_rank(PipetteRank.second)
        for name, lw_data in self._labware_info.items():
            parent = self._deck.position_for(lw_data.slot)
            lw = labware.Labware(lw_data.definition, parent)
            self._deck[lw_data.slot] = lw
            for mount in lw_data.forMounts:
                is_second_mount = second_pip and second_pip.mount == mount
                pips_share_rack = len(lw_data.forMounts) == 2
                # When both pipettes share one rack, the second pipette
                # uses well B1 so the two don't target the same well.
                well_name = 'A1'
                if is_second_mount and pips_share_rack:
                    well_name = 'B1'
                well = lw.wells_by_name()[well_name]
                # Hover above the well top by the safety buffer.
                position = well.top().point + MOVE_TO_TIP_RACK_SAFETY_BUFFER
                move = CheckMove(position=position, locationId=uuid4())
                if is_second_mount:
                    self._moves.preparingSecondPipette = move
                else:
                    self._moves.preparingFirstPipette = move
    def pipette_status(self) -> typing.Dict[Mount, PipetteStatus]:
        """
        Public property to help format the current labware status of a given
        session for the client.
        """
        to_dict = {}
        for mount, pip_info in self._pip_info_by_mount.items():
            # Merge hardware-reported pipette data with the session's
            # rank and tip rack info into the client-facing model.
            hw_pip = self.pipettes[mount]
            p = PipetteStatus(
                model=str(hw_pip['model']),
                name=str(hw_pip['name']),
                mount=str(mount),
                tip_length=float(hw_pip['tip_length']),
                has_tip=bool(hw_pip['has_tip']),
                tiprack_id=pip_info.tiprack_id,
                rank=str(pip_info.rank),
                serial=str(hw_pip['pipette_id']),
            )
            to_dict[mount] = p
        return to_dict
    async def delete_session(self):
        """Tear down the session: trash any held tips, home the gantry,
        and restore the rail lights to their pre-session state."""
        for mount in self._pip_info_by_mount.keys():
            if self.pipettes[mount]['has_tip']:
                try:
                    await self._trash_tip(mount)
                except (CalibrationException, AssertionError):
                    # Best-effort cleanup: never let a failed tip drop
                    # block session teardown.
                    pass
        await self.hardware.home()
        if not self._lights_on_before:
            await self.hardware.set_lights(rails=False)
def _get_preparing_state_mount(self) -> typing.Optional[Mount]:
pip = None
if self.current_state_name == \
CalibrationCheckState.inspectingFirstTip:
pip = self._get_pipette_by_rank(PipetteRank.first)
elif self.current_state_name == \
CalibrationCheckState.inspectingSecondTip:
pip = self._get_pipette_by_rank(PipetteRank.second)
assert pip, f'cannot check prepare pipette from state:' \
f' {self.current_state_name}'
return pip.mount
def _look_up_state(self) -> CalibrationCheckState:
"""
We want to check whether a tip pick up was dangerous during the
tip inspection state, but the reference points are actually saved
during the preparing pipette state, so we should reference those
states when looking up the reference point.
:return: The calibration check state that the reference point
was saved under for tip pick up.
"""
if self.current_state_name == CalibrationCheckState.inspectingFirstTip:
return CalibrationCheckState.preparingFirstPipette
elif self.current_state_name == \
CalibrationCheckState.inspectingSecondTip:
return CalibrationCheckState.preparingSecondPipette
else:
raise CalibrationException(
f"No transition available for state {self.current_state_name}")
    async def _is_tip_pick_up_dangerous(self):
        """
        Function to determine whether jogged to pick up tip position is
        outside of the safe threshold for conducting the rest of the check.
        """
        mount = self._get_preparing_state_mount()
        assert mount, 'cannot attempt tip pick up, no mount specified'
        ref_state = self._look_up_state()
        # Point the user jogged to (saved under the inspecting state).
        jogged_pt = self._saved_points[getattr(CalibrationCheckState,
                                               self.current_state_name)]
        # Reference point saved while preparing the pipette.
        ref_pt = self._saved_points[getattr(CalibrationCheckState,
                                            ref_state)]
        # The reference was taken hovering above the rack; strip the
        # safety buffer before comparing against the jogged position.
        ref_pt_no_safety = ref_pt - MOVE_TO_TIP_RACK_SAFETY_BUFFER
        threshold_vector = DEFAULT_OK_TIP_PICK_UP_VECTOR
        pip_model = self.pipettes[mount]['model']
        if str(pip_model).startswith('p1000'):
            threshold_vector = P1000_OK_TIP_PICK_UP_VECTOR
        # XY and Z are checked separately against their own thresholds.
        xyThresholdMag = Point(0, 0, 0).magnitude_to(
                threshold_vector._replace(z=0))
        zThresholdMag = Point(0, 0, 0).magnitude_to(
                threshold_vector._replace(x=0, y=0))
        xyDiffMag = ref_pt_no_safety._replace(z=0).magnitude_to(
                jogged_pt._replace(z=0))
        zDiffMag = ref_pt_no_safety._replace(x=0, y=0).magnitude_to(
                jogged_pt._replace(x=0, y=0))
        return xyDiffMag > xyThresholdMag or zDiffMag > zThresholdMag
    async def _pick_up_tip_first_pipette(self):
        """
        Function to pick up tip. It will attempt to pick up a tip in
        the current location, and save any offset it might have from the
        original position.
        """
        pip = self._get_pipette_by_rank(PipetteRank.first)
        assert pip, 'No pipette attached on first mount'
        mount = pip.mount
        assert mount, 'cannot attempt tip pick up, no mount specified'
        # Guard against picking up a second tip onto an occupied nozzle.
        assert self.pipettes[mount]['has_tip'] is False, \
            f"Tip is already attached to {mount} pipette, " \
            "cannot pick up another"
        await self._pick_up_tip(mount)
    async def _pick_up_tip_second_pipette(self):
        """
        Function to pick up tip. It will attempt to pick up a tip in
        the current location, and save any offset it might have from the
        original position.
        """
        pip = self._get_pipette_by_rank(PipetteRank.second)
        assert pip, 'No pipette attached on second mount'
        mount = pip.mount
        assert mount, 'cannot attempt tip pick up, no mount specified'
        # Guard against picking up a second tip onto an occupied nozzle.
        assert self.pipettes[mount]['has_tip'] is False, \
            f"Tip is already attached to {mount} pipette, " \
            "cannot pick up another"
        await self._pick_up_tip(mount)
    async def _trash_first_pipette_tip(self):
        """Drop the first-rank pipette's tip into the trash."""
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, \
            'cannot trash tip from first mount, pipette not present'
        await self._trash_tip(first_pip.mount)
async def _trash_second_pipette_tip(self):
second_pip = self._get_pipette_by_rank(PipetteRank.second)
assert second_pip, \
'cannot trash tip from first mount, pipette not present'
await self._trash_tip(second_pip.mount)
@staticmethod
def _create_tiprack_param(position: typing.Dict):
new_dict = {}
for loc, data in position.items():
for loc_id, values in data.items():
offset = list(values['offset'])
pos_dict = {'offset': offset, 'locationId': str(loc)}
new_dict[str(loc_id)] = {'pipetteId': str(loc_id),
'location': pos_dict}
return new_dict
def format_params(self, next_state: str) -> typing.Dict:
template_dict = {}
if next_state == 'jog':
template_dict['vector'] = [0, 0, 0]
return template_dict
    def _determine_threshold(self, state: CalibrationCheckState) -> Point:
        """
        Helper function used to determine the threshold for comparison
        based on the state currently being compared and the pipette.
        """
        # Comparison states that belong to the first-rank pipette; any
        # other state is attributed to the second-rank pipette.
        first_pipette = [
            CalibrationCheckState.comparingFirstPipetteHeight,
            CalibrationCheckState.comparingFirstPipettePointOne,
            CalibrationCheckState.comparingFirstPipettePointTwo,
            CalibrationCheckState.comparingFirstPipettePointThree,
        ]
        if state in first_pipette:
            pip = self._get_pipette_by_rank(PipetteRank.first)
        else:
            pip = self._get_pipette_by_rank(PipetteRank.second)
        pipette_type = ''
        if pip and pip.mount:
            pipette_type = str(self.pipettes[pip.mount]['name'])
        # Tolerances vary by pipette family (P1000 / P20-P10 / other)
        # and by whether a height or a cross point is being checked.
        is_p1000 = pipette_type in ['p1000_single_gen2', 'p1000_single']
        is_p20 = pipette_type in \
            ['p20_single_gen2', 'p10_single', 'p20_multi_gen2', 'p10_multi']
        height_states = [
            CalibrationCheckState.comparingFirstPipetteHeight,
            CalibrationCheckState.comparingSecondPipetteHeight]
        cross_states = [
            CalibrationCheckState.comparingFirstPipettePointOne,
            CalibrationCheckState.comparingFirstPipettePointTwo,
            CalibrationCheckState.comparingFirstPipettePointThree,
            CalibrationCheckState.comparingSecondPipettePointOne
        ]
        if is_p1000 and state in cross_states:
            return PIPETTE_TOLERANCES['p1000_crosses']
        elif is_p1000 and state in height_states:
            return PIPETTE_TOLERANCES['p1000_height']
        elif is_p20 and state in cross_states:
            return PIPETTE_TOLERANCES['p20_crosses']
        elif state in cross_states:
            return PIPETTE_TOLERANCES['p300_crosses']
        else:
            return PIPETTE_TOLERANCES['other_height']
    def _get_error_source(
            self,
            comparisons: typing.Dict[CalibrationCheckState, ComparisonStatus],
            comparison_state: CalibrationCheckState) -> DeckCalibrationError:
        """
        Attribute an out-of-threshold comparison to a probable source:
        instrument offset when the second pipette fails after the first
        passed every step; deck transform when the first pipette fails
        and its mount lets us distinguish the two; otherwise unknown.
        """
        is_second_pip = comparison_state in [
            CalibrationCheckState.comparingSecondPipetteHeight,
            CalibrationCheckState.comparingSecondPipettePointOne,
        ]
        first_pip_keys = [
            CalibrationCheckState.comparingFirstPipetteHeight,
            CalibrationCheckState.comparingFirstPipettePointOne,
            CalibrationCheckState.comparingFirstPipettePointTwo,
            CalibrationCheckState.comparingFirstPipettePointThree,
        ]
        # "Passed" requires every first-pipette step to be present in
        # ``comparisons`` and none of them to exceed its threshold.
        compared_first = all((k in comparisons) for k in first_pip_keys)
        first_pip_steps_passed = compared_first
        for key in first_pip_keys:
            c = comparisons.get(key, None)
            if c and c.exceedsThreshold:
                first_pip_steps_passed = False
                break
        if is_second_pip and first_pip_steps_passed:
            return DeckCalibrationError.BAD_INSTRUMENT_OFFSET
        elif self.can_distinguish_instr_offset() and not is_second_pip:
            return DeckCalibrationError.BAD_DECK_TRANSFORM
        else:
            return DeckCalibrationError.UNKNOWN
    def get_comparisons_by_step(
            self) -> typing.Dict[CalibrationCheckState, ComparisonStatus]:
        """
        Build a ComparisonStatus for every comparison state whose
        reference and jogged points have both been saved, measuring the
        difference against the per-pipette threshold.
        """
        comparisons: typing.Dict[CalibrationCheckState, ComparisonStatus] = {}
        for comparison_state, comp in COMPARISON_STATE_MAP.items():
            ref_pt = self._saved_points.get(getattr(CalibrationCheckState,
                                                    comp.reference_state),
                                            None)
            jogged_pt = self._saved_points.get(getattr(CalibrationCheckState,
                                                       comparison_state), None)
            threshold_vector = self._determine_threshold(comparison_state)
            if (ref_pt is not None and jogged_pt is not None):
                diff_magnitude = None
                # A zero-Z threshold means an XY comparison; a zero-XY
                # threshold means a Z-only comparison.
                if threshold_vector.z == 0.0:
                    diff_magnitude = ref_pt._replace(z=0.0).magnitude_to(
                            jogged_pt._replace(z=0.0))
                elif threshold_vector.x == 0.0 and \
                        threshold_vector.y == 0.0:
                    diff_magnitude = ref_pt._replace(
                            x=0.0, y=0.0).magnitude_to(jogged_pt._replace(
                                x=0.0, y=0.0))
                assert diff_magnitude is not None, \
                    'step comparisons must check z or (x and y) magnitude'
                threshold_mag = Point(0, 0, 0).magnitude_to(
                        threshold_vector)
                exceeds = diff_magnitude > threshold_mag
                tform_type = DeckCalibrationError.UNKNOWN
                if exceeds:
                    tform_type = self._get_error_source(comparisons,
                                                        comparison_state)
                comparisons[getattr(CalibrationCheckState,
                                    comparison_state)] = \
                    ComparisonStatus(differenceVector=(jogged_pt - ref_pt),
                                     thresholdVector=threshold_vector,
                                     exceedsThreshold=exceeds,
                                     transformType=str(tform_type))
        return comparisons
    async def _register_point_first_pipette(self):
        """Save the first pipette's current gantry position under the
        current state name (plus a safety buffer for the height check)."""
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, 'cannot register point for missing first pipette'
        buffer = Point(0, 0, 0)
        if self.current_state_name ==\
                CalibrationCheckState.comparingFirstPipetteHeight:
            buffer = HEIGHT_SAFETY_BUFFER
        # gantry_position returns an awaitable; ``await`` binds tighter
        # than ``+`` below, so the buffer is added to the awaited Point.
        current_point = self.hardware.gantry_position(
            first_pip.mount, critical_point=first_pip.critical_point)
        self._saved_points[getattr(CalibrationCheckState,
                                   self.current_state_name)] = \
            await current_point + buffer

    async def _register_point_second_pipette(self):
        """Save the second pipette's current gantry position under the
        current state name (plus a safety buffer for the height check)."""
        second_pip = self._get_pipette_by_rank(PipetteRank.second)
        assert second_pip, 'cannot register point for missing second pipette'
        buffer = Point(0, 0, 0)
        if self.current_state_name ==\
                CalibrationCheckState.comparingSecondPipetteHeight:
            buffer = HEIGHT_SAFETY_BUFFER
        current_point = self.hardware.gantry_position(
            second_pip.mount, critical_point=second_pip.critical_point
        )
        self._saved_points[getattr(CalibrationCheckState,
                                   self.current_state_name)] = \
            await current_point + buffer
    async def _move_first_pipette(self):
        """Move the first pipette to the current state's target location.

        For the cross-point jog states, the target z is replaced by the
        height recorded during the height comparison (adjusted by the
        initial z offset and the safety buffer), while x/y come from the
        state's configured move.
        """
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, \
            'cannot move pipette on first mount, pipette not present'
        loc_to_move = Location(getattr(self._moves,
                                       self.current_state_name).position,
                               None)
        saved_z_allowlist = \
            [CalibrationCheckState.joggingFirstPipetteToPointOne,
             CalibrationCheckState.joggingFirstPipetteToPointTwo,
             CalibrationCheckState.joggingFirstPipetteToPointThree]
        if self.current_state_name in saved_z_allowlist:
            saved_height =\
                self._saved_points[getattr(CalibrationCheckState,
                                           'comparingFirstPipetteHeight')]
            z_point = \
                saved_height + self._initial_z_offset - HEIGHT_SAFETY_BUFFER
            # Keep the move's x/y; substitute only the saved z component.
            updated_point = loc_to_move.point + z_point._replace(x=0.0, y=0.0)
            loc_to_move = Location(updated_point, None)
        await self._move(first_pip.mount, loc_to_move)
        await self._register_point_first_pipette()

    async def _move_second_pipette(self):
        """Move the second pipette to the current state's target location,
        substituting the saved height for the point-one jog state."""
        second_pip = self._get_pipette_by_rank(PipetteRank.second)
        assert second_pip, \
            'cannot move pipette on second mount, pipette not present'
        loc_to_move = Location(getattr(self._moves,
                                       self.current_state_name).position,
                               None)
        if self.current_state_name ==\
                CalibrationCheckState.joggingSecondPipetteToPointOne:
            saved_height =\
                self._saved_points[getattr(CalibrationCheckState,
                                           'comparingSecondPipetteHeight')]
            z_point = \
                saved_height + self._initial_z_offset - HEIGHT_SAFETY_BUFFER
            updated_point = loc_to_move.point + z_point._replace(x=0.0, y=0.0)
            loc_to_move = Location(updated_point, None)
        await self._move(second_pip.mount, loc_to_move)
        await self._register_point_second_pipette()
async def _jog_first_pipette(self, vector: OffsetVector):
first_pip = self._get_pipette_by_rank(PipetteRank.first)
assert first_pip, \
'cannot jog pipette on first mount, pipette not present'
await super(self.__class__, self)._jog(first_pip.mount, Point(*vector))
async def _jog_second_pipette(self, vector: OffsetVector):
second_pip = self._get_pipette_by_rank(PipetteRank.second)
assert second_pip, \
'cannot jog pipette on second mount, pipette not present'
await super(self.__class__, self)._jog(second_pip.mount,
Point(*vector))
    async def _return_first_tip(self):
        """Move back over the first pipette's tip rack and drop the tip.

        The return point was saved while inspecting the tip; the target is
        lowered by half the tip length to account for the attached tip.
        """
        first_pip = self._get_pipette_by_rank(PipetteRank.first)
        assert first_pip, \
            'cannot drop tip on first mount, pipette not present'
        mount = first_pip.mount
        z_value = float(self.pipettes[mount]['tip_length']) * 0.5
        # NOTE(review): the state member is passed through getattr below,
        # so it is presumably a str-valued enum member -- confirm.
        state_name = CalibrationCheckState.inspectingFirstTip
        return_pt = self._saved_points[getattr(CalibrationCheckState,
                                               state_name)]
        account_for_tip = return_pt - Point(0, 0, z_value)
        loc = Location(account_for_tip, None)
        await self._move(first_pip.mount, loc)
        await self._drop_tip(first_pip.mount)

    async def _return_second_tip(self):
        """Move back over the second pipette's tip rack and drop the tip."""
        second_pip = self._get_pipette_by_rank(PipetteRank.second)
        assert second_pip, \
            'cannot drop tip on second mount, pipette not present'
        mount = second_pip.mount
        z_value = float(self.pipettes[mount]['tip_length']) * 0.5
        state_name = CalibrationCheckState.inspectingSecondTip
        return_pt = self._saved_points[getattr(CalibrationCheckState,
                                               state_name)]
        account_for_tip = return_pt - Point(0, 0, z_value)
        loc = Location(account_for_tip, None)
        await self._move(second_pip.mount, loc)
        await self._drop_tip(second_pip.mount)
| en | 0.914189 | A set of endpoints that can be used to create a session for any robot calibration tasks such as checking your calibration data, performing mount offset or a robot deck transform. Handle a client command :param name: Name of the command :param data: Data supplied in command :return: None whether or not we can separate out calibration diffs that are due to instrument offset or deck transform or both A function that takes tip rack information and loads them onto the deck. Public property to help format the current labware status of a given session for the client. We want to check whether a tip pick up was dangerous during the tip inspection state, but the reference points are actually saved during the preparing pipette state, so we should reference those states when looking up the reference point. :return: The calibration check state that the reference point was saved under for tip pick up. Function to determine whether jogged to pick up tip position is outside of the safe threshold for conducting the rest of the check. Function to pick up tip. It will attempt to pick up a tip in the current location, and save any offset it might have from the original position. Function to pick up tip. It will attempt to pick up a tip in the current location, and save any offset it might have from the original position. Helper function used to determine the threshold for comparison based on the state currently being compared and the pipette. | 2.412326 | 2 |
chatroom/migrations/0007_auto_20160106_0538.py | sonicyang/chiphub | 0 | 6620904 | <filename>chatroom/migrations/0007_auto_20160106_0538.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (2016-01-06): replace the plain integer
    ``Entry.rank`` with a per-user ``ERanking`` through model, exposed as
    the ``Entry.ranker`` many-to-many relation."""

    # Requires the login app's user model and the previous chatroom
    # migration to be applied first.
    dependencies = [
        ('login', '0008_auto_20151216_0932'),
        ('chatroom', '0006_auto_20160106_0503'),
    ]

    operations = [
        # Through model holding one user's rank for one entry.
        migrations.CreateModel(
            name='ERanking',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('rank', models.IntegerField(default=0)),
            ],
        ),
        # Drop the old single-value rank column.
        migrations.RemoveField(
            model_name='entry',
            name='rank',
        ),
        migrations.AddField(
            model_name='eranking',
            name='entry',
            field=models.ForeignKey(to='chatroom.Entry'),
        ),
        migrations.AddField(
            model_name='eranking',
            name='users',
            field=models.ForeignKey(to='login.Users'),
        ),
        # Expose the ranking relation on Entry via the through model.
        migrations.AddField(
            model_name='entry',
            name='ranker',
            field=models.ManyToManyField(related_name='entries_ranked', through='chatroom.ERanking', to='login.Users'),
        ),
    ]
| <filename>chatroom/migrations/0007_auto_20160106_0538.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0008_auto_20151216_0932'),
('chatroom', '0006_auto_20160106_0503'),
]
operations = [
migrations.CreateModel(
name='ERanking',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rank', models.IntegerField(default=0)),
],
),
migrations.RemoveField(
model_name='entry',
name='rank',
),
migrations.AddField(
model_name='eranking',
name='entry',
field=models.ForeignKey(to='chatroom.Entry'),
),
migrations.AddField(
model_name='eranking',
name='users',
field=models.ForeignKey(to='login.Users'),
),
migrations.AddField(
model_name='entry',
name='ranker',
field=models.ManyToManyField(related_name='entries_ranked', through='chatroom.ERanking', to='login.Users'),
),
]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.695996 | 2 |
ocimatic/ui.py | OCIoficial/ocimatic | 0 | 6620905 | <gh_stars>0
import re
import sys
from contextlib import contextmanager
from typing import (Any, Callable, Iterable, Iterator, List, NamedTuple, Optional, TextIO, TypeVar,
Union, cast)
from colorama import Fore, Style
import ocimatic
RESET = Style.RESET_ALL
BOLD = Style.BRIGHT
RED = Fore.RED
GREEN = Fore.GREEN
YELLOW = Fore.YELLOW
BLUE = Fore.BLUE
MAGENTA = Fore.MAGENTA
INFO = BOLD
OK = BOLD + GREEN
WARNING = BOLD + YELLOW
ERROR = BOLD + RED
def colorize(text: str, color: str) -> str:
    """Wrap *text* in *color* escape codes, terminated by a style reset."""
    wrapped = ''.join((color, text, RESET))
    return cast(str, wrapped)


def bold(text: str) -> str:
    """Shortcut for colorize(text, BOLD)."""
    return colorize(text, BOLD)


def decolorize(text: str) -> str:
    """Strip every ANSI SGR escape sequence from *text*."""
    ansi_sgr = re.compile(r'\033\[[0-9]+m')
    return ansi_sgr.sub('', text)
# The module writes through the stream on top of this stack; ``None``
# silences output entirely.  ``capture_io`` pushes and pops entries.
IO = Union[TextIO]
IO_STREAMS: List[Optional[IO]] = [sys.stdout]


class WorkResult(NamedTuple):
    """Outcome of one unit of work.

    Attributes:
        success: whether the work succeeded (selects the render color).
        short_msg: one-line status printed next to the work entry.
        long_msg: optional multi-line detail shown at verbosity > 0.
    """
    success: bool
    short_msg: Optional[str] = None
    long_msg: Optional[str] = None


@contextmanager
def capture_io(stream: Optional[IO]) -> Iterator[None]:
    """Redirect module output to *stream* for the with-block's extent."""
    IO_STREAMS.append(stream)
    try:
        yield
    finally:
        # BUG FIX: pop in ``finally`` so an exception raised inside the
        # with-block cannot leave the pushed stream active forever.
        IO_STREAMS.pop()
def write(text: str) -> None:
    """Write *text* to the currently active output stream, if any."""
    target = IO_STREAMS[-1]
    if target:
        target.write(text)


def flush() -> None:
    """Flush the currently active output stream, if any."""
    target = IO_STREAMS[-1]
    if target:
        target.flush()


def writeln(text: str = '') -> None:
    """Write *text* followed by a newline."""
    write('%s\n' % text)
def task_header(name: str, msg: str) -> None:
    """Print header for task"""
    write('\n\n')
    write(colorize('[%s] %s' % (name, msg), BOLD + YELLOW))
    writeln()
    flush()


def workgroup_header(msg: str, length: int = 35) -> None:
    """Header for a generic group of works"""
    writeln()
    # Truncate overlong names from the left, keeping the last chars and
    # prefixing '....' (the 4 accounts for the prefix width).
    msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
    write(colorize('[%s]' % (msg), INFO))
    if ocimatic.config['verbosity'] < 0:
        # Quiet mode: subsequent works render as dots on this same line.
        write(' ')
    else:
        writeln()
    flush()


def contest_group_header(msg: str, length: int = 35) -> None:
    """Header for a group of works involving a contest"""
    write('\n\n')
    msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
    write(colorize('[%s]' % (msg), INFO + YELLOW))
    writeln()
    flush()
# Decorated functions yield one WorkResult per unit of work performed.
F1 = TypeVar("F1", bound=Callable[..., Iterable[WorkResult]])


def solution_group(formatter: str = "{}") -> Callable[[F1], Callable[..., None]]:
    """Decorator: print a solution-group header built from the call's
    arguments, render every WorkResult the function yields, then print
    the footer."""
    def decorator(func: F1) -> Callable[..., None]:
        def wrapper(*args: Any, **kwargs: Any) -> None:
            solution_group_header(formatter.format(*args, **kwargs))
            for result in func(*args, **kwargs):
                end_work(result)
            solution_group_footer()
        return wrapper
    return decorator


def solution_group_header(msg: str, length: int = 35) -> None:
    """Header for a group of works involving a solution"""
    writeln()
    msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
    write(colorize('[%s]' % (msg), INFO + BLUE) + ' ')
    flush()


def solution_group_footer() -> None:
    # Terminate the single-line solution-group output.
    writeln()
    flush()
# Decorated functions return a single WorkResult.
F2 = TypeVar('F2', bound=Callable[..., WorkResult])


def work(action: str, formatter: str = "{}") -> Callable[[F2], F2]:
    """Decorator: render one unit of work.

    Prints the start line (unless results are being captured via
    capture_works), runs the function, records the WorkResult into the
    innermost capture list when one is active, then renders the result.
    """
    def decorator(func: F2) -> F2:
        def wrapper(*args: Any, **kwargs: Any) -> WorkResult:
            if not CAPTURE_WORKS:
                start_work(action, formatter.format(*args, **kwargs))
            result = func(*args, **kwargs)
            if CAPTURE_WORKS:
                CAPTURE_WORKS[-1].append(result)
            end_work(result)
            return result
        return cast(F2, wrapper)
    return decorator
def start_work(action: str, msg: str, length: int = 45) -> None:
    """Print the ' * [action] msg ' prefix for one unit of work."""
    if ocimatic.config['verbosity'] < 0:
        # Quiet mode prints nothing here; end_work emits a dot instead.
        return
    msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
    msg = ' * [' + action + '] ' + msg + ' '
    write(colorize(msg, MAGENTA))
    flush()


def end_work(result: WorkResult) -> None:
    """Render a WorkResult: a colored dot in quiet mode, otherwise the
    short message, plus the '>>>'-indented long message at verbosity > 0."""
    color = OK if result.success else ERROR
    if ocimatic.config['verbosity'] < 0:
        write(colorize('.', color))
    else:
        write(colorize(str(result.short_msg), color))
        writeln()
    if result.long_msg and ocimatic.config['verbosity'] > 0:
        long_msg = result.long_msg.strip()
        long_msg = "\n".join(f">>> {line}" for line in long_msg.split("\n"))
        write(long_msg)
        writeln()
        writeln()
    flush()
def fatal_error(message: str) -> None:
    """Print a fatal 'ocimatic: ...' banner and exit with status 1."""
    writeln(colorize(f'ocimatic: {message}', INFO + RED))
    writeln()
    sys.exit(1)


def show_message(label: str, msg: str, color: str = INFO) -> None:
    """Print a one-line labelled message colorized with *color*."""
    write(f" {colorize(f'{label}: {msg}', color)} \n")
# Stack of active capture lists; the ``work`` decorator appends each
# WorkResult to the innermost one instead of printing a start line.
CAPTURE_WORKS: List[List[WorkResult]] = []


@contextmanager
def capture_works() -> Iterator[List[WorkResult]]:
    """Collect WorkResults produced inside the with-block into a list."""
    CAPTURE_WORKS.append([])
    try:
        yield CAPTURE_WORKS[-1]
    finally:
        # BUG FIX: pop in ``finally`` so an exception raised inside the
        # with-block cannot leave a stale capture list active.
        CAPTURE_WORKS.pop()
F = TypeVar("F", bound=Callable[..., Any])


def contest_group(formatter: str = "{}") -> Callable[[F], F]:
    """Decorator: print a contest-group header before running ``func``."""
    def decorator(func: F) -> F:
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            contest_group_header(formatter.format(*args, **kwargs))
            return func(*args, **kwargs)
        return cast(F, wrapper)
    return decorator


def workgroup(formatter: str = "{}") -> Callable[[F], F]:
    """Decorator: print a workgroup header before running ``func``."""
    def decorator(func: F) -> F:
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            workgroup_header(formatter.format(*args, **kwargs))
            return func(*args, **kwargs)
        return cast(F, wrapper)
    return decorator


def task(action: str) -> Callable[[F], F]:
    """Decorator for methods: print '[str(self)] action' as a task header."""
    def decorator(func: F) -> F:
        def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
            task_header(str(self), action)
            return func(self, *args, **kwargs)
        return cast(F, wrapper)
    return decorator
| import re
import sys
from contextlib import contextmanager
from typing import (Any, Callable, Iterable, Iterator, List, NamedTuple, Optional, TextIO, TypeVar,
Union, cast)
from colorama import Fore, Style
import ocimatic
RESET = Style.RESET_ALL
BOLD = Style.BRIGHT
RED = Fore.RED
GREEN = Fore.GREEN
YELLOW = Fore.YELLOW
BLUE = Fore.BLUE
MAGENTA = Fore.MAGENTA
INFO = BOLD
OK = BOLD + GREEN
WARNING = BOLD + YELLOW
ERROR = BOLD + RED
def colorize(text: str, color: str) -> str:
return cast(str, color + text + RESET)
def bold(text: str) -> str:
return colorize(text, BOLD)
def decolorize(text: str) -> str:
return re.sub(r'\033\[[0-9]+m', '', text)
IO = Union[TextIO]
IO_STREAMS: List[Optional[IO]] = [sys.stdout]
class WorkResult(NamedTuple):
success: bool
short_msg: Optional[str] = None
long_msg: Optional[str] = None
@contextmanager
def capture_io(stream: Optional[IO]) -> Iterator[None]:
    """Redirect module output to *stream* for the with-block's extent."""
    IO_STREAMS.append(stream)
    try:
        yield
    finally:
        # BUG FIX: pop in ``finally`` so an exception raised inside the
        # with-block cannot leave the pushed stream active forever.
        IO_STREAMS.pop()
def write(text: str) -> None:
stream = IO_STREAMS[-1]
if stream:
stream.write(text)
def flush() -> None:
stream = IO_STREAMS[-1]
if stream:
stream.flush()
def writeln(text: str = '') -> None:
write(text + '\n')
def task_header(name: str, msg: str) -> None:
"""Print header for task"""
write('\n\n')
write(colorize('[%s] %s' % (name, msg), BOLD + YELLOW))
writeln()
flush()
def workgroup_header(msg: str, length: int = 35) -> None:
"""Header for a generic group of works"""
writeln()
msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
write(colorize('[%s]' % (msg), INFO))
if ocimatic.config['verbosity'] < 0:
write(' ')
else:
writeln()
flush()
def contest_group_header(msg: str, length: int = 35) -> None:
"""Header for a group of works involving a contest"""
write('\n\n')
msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
write(colorize('[%s]' % (msg), INFO + YELLOW))
writeln()
flush()
F1 = TypeVar("F1", bound=Callable[..., Iterable[WorkResult]])
def solution_group(formatter: str = "{}") -> Callable[[F1], Callable[..., None]]:
def decorator(func: F1) -> Callable[..., None]:
def wrapper(*args: Any, **kwargs: Any) -> None:
solution_group_header(formatter.format(*args, **kwargs))
for result in func(*args, **kwargs):
end_work(result)
solution_group_footer()
return wrapper
return decorator
def solution_group_header(msg: str, length: int = 35) -> None:
"""Header for a group of works involving a solution"""
writeln()
msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
write(colorize('[%s]' % (msg), INFO + BLUE) + ' ')
flush()
def solution_group_footer() -> None:
writeln()
flush()
F2 = TypeVar('F2', bound=Callable[..., WorkResult])
def work(action: str, formatter: str = "{}") -> Callable[[F2], F2]:
def decorator(func: F2) -> F2:
def wrapper(*args: Any, **kwargs: Any) -> WorkResult:
if not CAPTURE_WORKS:
start_work(action, formatter.format(*args, **kwargs))
result = func(*args, **kwargs)
if CAPTURE_WORKS:
CAPTURE_WORKS[-1].append(result)
end_work(result)
return result
return cast(F2, wrapper)
return decorator
def start_work(action: str, msg: str, length: int = 45) -> None:
if ocimatic.config['verbosity'] < 0:
return
msg = '....' + msg[-length - 4:] if len(msg) - 4 > length else msg
msg = ' * [' + action + '] ' + msg + ' '
write(colorize(msg, MAGENTA))
flush()
def end_work(result: WorkResult) -> None:
color = OK if result.success else ERROR
if ocimatic.config['verbosity'] < 0:
write(colorize('.', color))
else:
write(colorize(str(result.short_msg), color))
writeln()
if result.long_msg and ocimatic.config['verbosity'] > 0:
long_msg = result.long_msg.strip()
long_msg = "\n".join(f">>> {line}" for line in long_msg.split("\n"))
write(long_msg)
writeln()
writeln()
flush()
def fatal_error(message: str) -> None:
writeln(colorize('ocimatic: ' + message, INFO + RED))
writeln()
sys.exit(1)
def show_message(label: str, msg: str, color: str = INFO) -> None:
write(' %s \n' % colorize(label + ': ' + str(msg), color))
# Stack of active capture lists; the ``work`` decorator appends each
# WorkResult to the innermost one instead of printing a start line.
CAPTURE_WORKS: List[List[WorkResult]] = []


@contextmanager
def capture_works() -> Iterator[List[WorkResult]]:
    """Collect WorkResults produced inside the with-block into a list."""
    CAPTURE_WORKS.append([])
    try:
        yield CAPTURE_WORKS[-1]
    finally:
        # BUG FIX: pop in ``finally`` so an exception raised inside the
        # with-block cannot leave a stale capture list active.
        CAPTURE_WORKS.pop()
F = TypeVar("F", bound=Callable[..., Any])
def contest_group(formatter: str = "{}") -> Callable[[F], F]:
def decorator(func: F) -> F:
def wrapper(*args: Any, **kwargs: Any) -> Any:
contest_group_header(formatter.format(*args, **kwargs))
return func(*args, **kwargs)
return cast(F, wrapper)
return decorator
def workgroup(formatter: str = "{}") -> Callable[[F], F]:
def decorator(func: F) -> F:
def wrapper(*args: Any, **kwargs: Any) -> Any:
workgroup_header(formatter.format(*args, **kwargs))
return func(*args, **kwargs)
return cast(F, wrapper)
return decorator
def task(action: str) -> Callable[[F], F]:
def decorator(func: F) -> F:
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
task_header(str(self), action)
return func(self, *args, **kwargs)
return cast(F, wrapper)
return decorator | en | 0.864012 | Print header for task Header for a generic group of works Header for a group of works involving a contest Header for a group of works involving a solution | 2.324144 | 2 |
rename_era5_grib.py | jiangleads/EC- | 35 | 6620906 | #coding=utf-8
from __future__ import print_function
import traceback
import sys
from eccodes import *
# Return True when every substring in SubStrList occurs in Str.
def IsSubString(SubStrList, Str):
    """Return True iff *Str* contains every substring in *SubStrList*."""
    return all(substr in Str for substr in SubStrList)


# Collect the files directly under FindPath, optionally filtered by name.
def GetFileList(FindPath, FlagStr=[]):
    """List files under *FindPath* (non-recursive), sorted by full path.

    Args:
        FindPath: directory to scan.
        FlagStr: substrings the file name must all contain to be kept;
            an empty list keeps every entry.  (Read-only, so the mutable
            default is safe here.)

    Returns:
        Sorted list of full paths.
    """
    import os
    file_list = []
    for fn in os.listdir(FindPath):
        # Keep the entry when no filter is given, or the name matches
        # every required substring.
        if not FlagStr or IsSubString(FlagStr, fn):
            file_list.append(os.path.join(FindPath, fn))
    file_list.sort()
    return file_list
def example(filename):
    """Scan *filename* as GRIB, track its dataDate key, then rename the
    file to era5.CHV.levels.<dataDate>.grib in the same directory.

    Prints a line whenever the dataDate changes between messages.
    """
    dataDate = 0
    keys = ['dataDate']
    f = open(filename, 'rb')
    try:
        while 1:
            gid = codes_grib_new_from_file(f)
            if gid is None:
                break
            try:
                for key in keys:
                    try:
                        value = codes_get(gid, key)
                        if dataDate != value:
                            print(key, dataDate, value)
                            dataDate = value
                    except KeyValueNotFoundError as err:
                        # Full list of exceptions here:
                        # https://confluence.ecmwf.int/display/ECC/Python+exception+classes
                        print(' Key="%s" was not found: %s' % (key, err.msg))
                    except CodesInternalError as err:
                        print('Error with key="%s" : %s' % (key, err.msg))
            finally:
                # BUG FIX: release the GRIB handle even if reading a key
                # raises something we do not catch, so messages don't leak.
                codes_release(gid)
    finally:
        # BUG FIX: close the input even when the GRIB library raises.
        f.close()
    # Rename after the (last) dataDate seen.  NOTE(review): if the file
    # holds no messages, dataDate stays 0 and the file is renamed to
    # ...0.grib -- presumably never the case for valid input; confirm.
    newname = os.path.dirname(filename) + "/era5.CHV.levels." + str(dataDate) + ".grib"
    os.rename(filename, newname)
    print(filename, '======>', newname)
import os
from eccodes import *
Path='/mnt/d/Downloads/' #
SubStrList=['.grib']
FileList=GetFileList(Path,SubStrList) #得到指定类型(grib)文件名列表
##
##
for eachfile in FileList: #对每个文件操作
print(eachfile)
example(eachfile)
| #coding=utf-8
from __future__ import print_function
import traceback
import sys
from eccodes import *
# Return True when every substring in SubStrList occurs in Str.
def IsSubString(SubStrList, Str):
    """Return True iff *Str* contains every substring in *SubStrList*."""
    return all(substr in Str for substr in SubStrList)


# Collect the files directly under FindPath, optionally filtered by name.
def GetFileList(FindPath, FlagStr=[]):
    """List files under *FindPath* (non-recursive), sorted by full path.

    Args:
        FindPath: directory to scan.
        FlagStr: substrings the file name must all contain to be kept;
            an empty list keeps every entry.  (Read-only, so the mutable
            default is safe here.)

    Returns:
        Sorted list of full paths.
    """
    import os
    file_list = []
    for fn in os.listdir(FindPath):
        # Keep the entry when no filter is given, or the name matches
        # every required substring.
        if not FlagStr or IsSubString(FlagStr, fn):
            file_list.append(os.path.join(FindPath, fn))
    file_list.sort()
    return file_list
def example(filename):
f = open(filename, 'rb')
keys = [
## 'Ni',
## 'Nj',
## 'latitudeOfFirstGridPointInDegrees',
## 'longitudeOfFirstGridPointInDegrees',
## 'latitudeOfLastGridPointInDegrees',
## 'longitudeOfLastGridPointInDegrees',
'dataDate',
## 'dataTime'
]
dataDate=0
while 1:
gid = codes_grib_new_from_file(f)
if gid is None:
break
for key in keys:
try:
## print(' %s: %s' % (key, codes_get(gid, key)))
dataDate2=codes_get(gid, key)
if dataDate != dataDate2:
print(key,dataDate,dataDate2)
dataDate=dataDate2
## input()
continue
except KeyValueNotFoundError as err:
# Full list of exceptions here:
# https://confluence.ecmwf.int/display/ECC/Python+exception+classes
print(' Key="%s" was not found: %s' % (key, err.msg))
except CodesInternalError as err:
print('Error with key="%s" : %s' % (key, err.msg))
## print('There are %d values, average is %f, min is %f, max is %f' % (
## codes_get_size(gid, 'values'),
## codes_get(gid, 'average'),
## codes_get(gid, 'min'),
## codes_get(gid, 'max')
## ))
codes_release(gid)
f.close()
#改名
newname=os.path.dirname(filename)+"/era5.CHV.levels."+str(dataDate)+".grib"
os.rename(filename,newname)
print(filename,'======>',newname)
import os
from eccodes import *
Path='/mnt/d/Downloads/' #
SubStrList=['.grib']
FileList=GetFileList(Path,SubStrList) #得到指定类型(grib)文件名列表
##
##
for eachfile in FileList: #对每个文件操作
print(eachfile)
example(eachfile)
| zh | 0.160193 | #coding=utf-8 #判断字符串Str是否包含序列SubStrList中的每一个子字符串 #获取当前目录所有指定类型的文件 #print FileList #print FileNames #返回指定类型的文件名 #默认直接返回所有文件名 #对文件名排序 #print FileList ## 'Ni', ## 'Nj', ## 'latitudeOfFirstGridPointInDegrees', ## 'longitudeOfFirstGridPointInDegrees', ## 'latitudeOfLastGridPointInDegrees', ## 'longitudeOfLastGridPointInDegrees', ## 'dataTime' ## print(' %s: %s' % (key, codes_get(gid, key))) ## input() # Full list of exceptions here: # https://confluence.ecmwf.int/display/ECC/Python+exception+classes ## print('There are %d values, average is %f, min is %f, max is %f' % ( ## codes_get_size(gid, 'values'), ## codes_get(gid, 'average'), ## codes_get(gid, 'min'), ## codes_get(gid, 'max') ## )) #改名 # #得到指定类型(grib)文件名列表 ## ## #对每个文件操作 | 2.68376 | 3 |
binilla/widgets/field_widgets/simple_image_frame.py | delan/binilla | 1 | 6620907 | import weakref
import threadsafe_tkinter as tk
from binilla.widgets.field_widgets import container_frame
class SimpleImageFrame(container_frame.ContainerFrame):
    """ContainerFrame that places a display widget (image preview) above
    or beside its normal fields."""

    # Tag being edited, taken from the tag window when available.
    tag = None
    # weakref.ref to the display widget, or None before populate().
    image_frame = None
    # Subclasses override this with the widget class used for the preview.
    display_frame_cls = type(None)

    def __init__(self, *args, **kwargs):
        container_frame.ContainerFrame.__init__(self, *args, **kwargs)
        try:
            self.tag = self.tag_window.tag
        except AttributeError:
            # No tag window (or it lacks a tag); leave self.tag as None.
            pass

        self.populate()

    def populate(self):
        container_frame.ContainerFrame.populate(self)
        # (Re)create the display widget if it was never made or its
        # weak reference has gone dead.
        if self.image_frame is None or self.image_frame() is None:
            self.image_frame = weakref.ref(self.display_frame_cls(self))
        self.reload()

    def pose_fields(self):
        orient = self.desc.get('ORIENT', 'v')[:1].lower()
        side = 'left' if orient == 'h' else 'top'
        # BUG FIX: a weakref.ref object is always truthy even after its
        # referent is collected; dereference it and check the result so a
        # dead reference cannot raise AttributeError on .pack().
        frame = self.image_frame() if self.image_frame is not None else None
        if frame is not None:
            frame.pack(side=side, fill='x')
        container_frame.ContainerFrame.pose_fields(self)
| import weakref
import threadsafe_tkinter as tk
from binilla.widgets.field_widgets import container_frame
class SimpleImageFrame(container_frame.ContainerFrame):
    """ContainerFrame that places a display widget (image preview) above
    or beside its normal fields."""

    # Tag being edited, taken from the tag window when available.
    tag = None
    # weakref.ref to the display widget, or None before populate().
    image_frame = None
    # Subclasses override this with the widget class used for the preview.
    display_frame_cls = type(None)

    def __init__(self, *args, **kwargs):
        container_frame.ContainerFrame.__init__(self, *args, **kwargs)
        try:
            self.tag = self.tag_window.tag
        except AttributeError:
            # No tag window (or it lacks a tag); leave self.tag as None.
            pass

        self.populate()

    def populate(self):
        container_frame.ContainerFrame.populate(self)
        # (Re)create the display widget if it was never made or its
        # weak reference has gone dead.
        if self.image_frame is None or self.image_frame() is None:
            self.image_frame = weakref.ref(self.display_frame_cls(self))
        self.reload()

    def pose_fields(self):
        orient = self.desc.get('ORIENT', 'v')[:1].lower()
        side = 'left' if orient == 'h' else 'top'
        # BUG FIX: a weakref.ref object is always truthy even after its
        # referent is collected; dereference it and check the result so a
        # dead reference cannot raise AttributeError on .pack().
        frame = self.image_frame() if self.image_frame is not None else None
        if frame is not None:
            frame.pack(side=side, fill='x')
        container_frame.ContainerFrame.pose_fields(self)
| none | 1 | 2.255701 | 2 | |
projecteuler/023_non_abundant_sums.py | vikasmunshi/euler | 0 | 6620908 | <gh_stars>0
#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
https://projecteuler.net/problem=23
A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example,
the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.
A number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this
sum exceeds n.
As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two
abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as
the sum of two abundant numbers. However, this upper limit cannot be reduced any further by analysis even though it is
known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.
Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.
Answer: 4179871
"""
def sum_non_abundant_numbers(limit: int = 28123) -> int:
    """Sum the positive integers <= *limit* that are not expressible as
    the sum of two abundant numbers (Project Euler problem 23).

    Args:
        limit: upper bound to consider; defaults to 28123, above which
            every integer is known to be such a sum.
    """
    def sum_proper_divisors(n: int) -> int:
        root = int(n ** 0.5)
        total = 1 + sum(i + n // i for i in range(2, root + 1) if n % i == 0)
        if root * root == n:
            # A perfect square counted its root twice above.
            total -= root
        return total

    # Abundant numbers above limit - 12 cannot pair with the smallest
    # abundant number (12) to stay within the limit.
    abundant = [i for i in range(12, limit - 12) if sum_proper_divisors(i) > i]
    # Mark every reachable pair sum, breaking early once the sum exceeds
    # the limit (the inner list is ascending).
    is_sum = [False] * (limit + 1)
    for idx, a in enumerate(abundant):
        for b in abundant[idx:]:
            s = a + b
            if s > limit:
                break
            is_sum[s] = True
    return sum(n for n in range(1, limit + 1) if not is_sum[n])
if __name__ == '__main__':
from .evaluate import Watchdog
with Watchdog() as wd:
result = wd.evaluate(sum_non_abundant_numbers)(answer=4179871)
| #!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
https://projecteuler.net/problem=23
A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example,
the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.
A number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this
sum exceeds n.
As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two
abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as
the sum of two abundant numbers. However, this upper limit cannot be reduced any further by analysis even though it is
known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.
Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.
Answer: 4179871
"""
def sum_non_abundant_numbers(limit: int = 28123) -> int:
    """Sum the positive integers <= *limit* that are not expressible as
    the sum of two abundant numbers (Project Euler problem 23).

    Args:
        limit: upper bound to consider; defaults to 28123, above which
            every integer is known to be such a sum.
    """
    def sum_proper_divisors(n: int) -> int:
        root = int(n ** 0.5)
        total = 1 + sum(i + n // i for i in range(2, root + 1) if n % i == 0)
        if root * root == n:
            # A perfect square counted its root twice above.
            total -= root
        return total

    # Abundant numbers above limit - 12 cannot pair with the smallest
    # abundant number (12) to stay within the limit.
    abundant = [i for i in range(12, limit - 12) if sum_proper_divisors(i) > i]
    # Mark every reachable pair sum, breaking early once the sum exceeds
    # the limit (the inner list is ascending).
    is_sum = [False] * (limit + 1)
    for idx, a in enumerate(abundant):
        for b in abundant[idx:]:
            s = a + b
            if s > limit:
                break
            is_sum[s] = True
    return sum(n for n in range(1, limit + 1) if not is_sum[n])
if __name__ == '__main__':
from .evaluate import Watchdog
with Watchdog() as wd:
result = wd.evaluate(sum_non_abundant_numbers)(answer=4179871) | en | 0.945586 | #!/usr/bin/env python3.8 # -*- coding: utf-8 -*- https://projecteuler.net/problem=23 A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number. A number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this sum exceeds n. As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit. Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers. Answer: 4179871 | 4.145688 | 4 |
app/py/webconfig.py | tbor8080/pyhongo | 0 | 6620909 | # /usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys,json
from py.webapp import *
class WebAppConfig(Database,WebAppInFlask):
"""
FlaskApp Class
- setDatabase
- setFramework
(sample code)
#=====================================================================================
def main():
framework=WebAppFrameWork()
###############################################################################
# + config method
# database config:
# - database :(None | sqlite | pgsql)
# - dbname: (None | <Database Name>)
# - document_root: <Document Root Directory> (* required)
###############################################################################
# + Stored Text:
# - framework.config(document_root='doc_root')
###############################################################################
# + SQLite:
# - framework.config(database='sqlite',dbname='sample.db',document_root='./doc_root')
###############################################################################
# + PgSQL:
# - framework.config(database='pgsql',dbname='<dbname>',document_root='./doc_root')
###############################################################################
(db,app)=framework.db(),framework.app()
...... and more
#####################################################################################
"""
__package__='__database_env__'
__database__={
'sqlite':False,
'mysql':False,
'pgsql':False,
'psql':False,
}
__framework__={
'flask':False,
'django':False,
'cart':False
}
__config__={}
    def __init__(self):
        # NOTE(review): setType/setDatabaseName and the dbtype/dbname
        # attributes are presumably provided by the Database base class;
        # they are not defined in this chunk -- confirm.
        self.setType(None)
        self.setDatabaseName(None)
        super().__init__(self.dbtype,self.dbname)
        # Reset the backend selection until config() is called.
        self.setDatabaseType(None)
def __doc(self):
"""
#####################################################################################
def createTable(db=None):
if is not None:
# Create Table
db.setTableName('TEST1')
# append column
# - name:None
# - types:None
# - val:None
# - primarykey: Tuple(<column name>,<True | False)
# - unique: <True | False>
db.appendColumn('commodity_id','INTEGER','NOT NULL',('commodity_id',True))
db.appendColumn('company_id','TEXT','NOT NULL')
db.appendColumn('doc_name','TEXT','NOT NULL')
db.appendColumn('reg_date','DATE','NOT NULL')
# Preview SQL Code
db.previewCode(db.getCreateTableCode())
# Execute SQL Code
sql=db.getCreateTableCode()
db.connect()
db.cursor().execute(sql)
db.close()
# Example SQL Sample:
# sql=f'SELECT * from {db.getDatanaseName()};'
# db.connect()
# data=db.cursor().execute(sql).fetchall()
# db.close()
# print(data.fetchall())
"""
return self.__doc__
    def config(self,database=None,framework=None,dbname=None,document_root=None,**kwargs):
        """Record the database/framework selection for this app.

        Args:
            database: backend name ('sqlite', 'pgsql', 'psql', 'mysql').
            framework: 'flask' (default), 'django' or 'cart'.
            dbname: database name / file name.
            document_root: install directory; created if missing.
            **kwargs: for pgsql only -- ``user``, ``host``, ``port``.
        """
        # if self.is_config() is False:
        if database is not None:
            self.setDatabaseType(database)
        if dbname is not None:
            self.setDatabaseName(dbname)
        if document_root is not None:
            self.setDocumentRoot(document_root)
            self.makeDir(document_root)
        self.setFramework(framework)
        if self.getDatabaseType()=='pgsql':
            # Forward pgsql connection parameters; unknown keys are ignored.
            for kw in kwargs:
                if kw=='user':
                    self.setUser(kwargs[kw])
                elif kw=='host':
                    self.setHost(kwargs[kw])
                elif kw=='port':
                    self.setPort(kwargs[kw])
        #else:
        #    config=self.get_config()
        #    print(config)
        #    self.setDatabase(config['database']['type'])
        #    self.setDocumentRoot(config['doc_root'])
        #    self.setUser()
def Database(self):
database_application=self.getDatabaseType()
if self.__database__['sqlite']:
database_dir=self.getDocumentRoot()+'/'+self.__install__['directory'][0]
self.makeDir(database_dir)
return SelectSQLite3(database_application,dbname=database_dir+self.getDatabaseName())
elif self.__database__['pgsql'] or self.__database__['psql']:
try:
user=self.getUser()
except AttributeError:
self.setUser((None,None))
return SelectPgSQL(database_application,dbname=self.getDatabaseName(),user=user)
# No Database Application
return FileinFlask(database_application)
    def Application(self):
        """Build the web-framework front-end and wire it to the database.

        Defaults to Flask; switches to Django or the shopping-cart app
        according to the flags set by setFramework().
        """
        framework=WebAppInFlask()
        document_root=self.getDocumentRoot()
        if document_root=='':
            # NOTE(review): exit() inside library code kills the whole
            # process -- consider raising an exception instead.
            print('+ document_root is not found.')
            print(' - look at method config(document_root="xxx")')
            exit()
        if self.__framework__['django']:
            framework=WebAppInDjango()
        elif self.__framework__['cart']:
            framework=ShoppingCart()
        # Attach the freshly-built database backend and mirror the current
        # configuration onto the framework object.
        framework.setDatabase(self.db())
        framework.setInstallDir(document_root)
        framework.setType(self.getDatabaseType())
        framework.setDatabaseName(self.getDatabaseName())
        framework.setFramework(self.getFramework())
        # exit()
        return framework
    def getGlobalVar(self):
        """Return the shared install table.

        NOTE(review): ``__install__`` is not defined in this class;
        presumably inherited from a py.webapp base class -- confirm.
        """
        return self.__install__
    def getDatabaseType(self):
        """Return the active database type (e.g. 'sqlite', 'pgsql').

        Raises AttributeError if setDatabaseType() was never called with a
        non-None value (see the guard in setDatabaseType).
        """
        return self.__database_application
def setDatabaseType(self,database=None):
if database is not None:
self.__database__[database]=True
self.__database_application=database
    def getFramework(self):
        """Return the framework name chosen via setFramework()."""
        return self.__framework
def setFramework(self,framework=None):
if framework is None:
framework='flask'
self.__framework=framework
if framework is not None:
self.__framework__[framework]=True
    def app(self):
        """Shorthand for Application(): build the framework front-end."""
        return self.Application()
    def db(self):
        """Shorthand for Database(): build the database backend."""
        return self.Database()
    def set_config_json(self,file):
        """Remember *file* as the path of the JSON configuration file."""
        self.__json_file=file
    def get_config_json(self):
        """Return the JSON config path set by set_config_json().

        Raises AttributeError when no path has been set yet.
        """
        return self.__json_file
def is_config(self):
if os.path.exists(self.get_config_json()):
return True
return False
    def get_config(self):
        """Return the parsed config dict (empty until set_config() loads it)."""
        return self.__config__
def set_config(self,file=None):
self.set_config_json(file)
if self.is_config():
self.__config__=self.read_json()
else:
return self.is_config()
def read_json(self):
with open(self.get_config_json(),'rt') as fp:
data=fp.read()
return json.loads(data) | # /usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys,json
from py.webapp import *
class WebAppConfig(Database,WebAppInFlask):
"""
FlaskApp Class
- setDatabase
- setFramework
(sample code)
#=====================================================================================
def main():
framework=WebAppFrameWork()
###############################################################################
# + config method
# database config:
# - database :(None | sqlite | pgsql)
# - dbname: (None | <Database Name>)
# - document_root: <Document Root Directory> (* required)
###############################################################################
# + Stored Text:
# - framework.config(document_root='doc_root')
###############################################################################
# + SQLite:
# - framework.config(database='sqlite',dbname='sample.db',document_root='./doc_root')
###############################################################################
# + PgSQL:
# - framework.config(database='pgsql',dbname='<dbname>',document_root='./doc_root')
###############################################################################
(db,app)=framework.db(),framework.app()
...... and more
#####################################################################################
"""
__package__='__database_env__'
__database__={
'sqlite':False,
'mysql':False,
'pgsql':False,
'psql':False,
}
__framework__={
'flask':False,
'django':False,
'cart':False
}
__config__={}
def __init__(self):
self.setType(None)
self.setDatabaseName(None)
super().__init__(self.dbtype,self.dbname)
self.setDatabaseType(None)
def __doc(self):
"""
#####################################################################################
def createTable(db=None):
if is not None:
# Create Table
db.setTableName('TEST1')
# append column
# - name:None
# - types:None
# - val:None
# - primarykey: Tuple(<column name>,<True | False)
# - unique: <True | False>
db.appendColumn('commodity_id','INTEGER','NOT NULL',('commodity_id',True))
db.appendColumn('company_id','TEXT','NOT NULL')
db.appendColumn('doc_name','TEXT','NOT NULL')
db.appendColumn('reg_date','DATE','NOT NULL')
# Preview SQL Code
db.previewCode(db.getCreateTableCode())
# Execute SQL Code
sql=db.getCreateTableCode()
db.connect()
db.cursor().execute(sql)
db.close()
# Example SQL Sample:
# sql=f'SELECT * from {db.getDatanaseName()};'
# db.connect()
# data=db.cursor().execute(sql).fetchall()
# db.close()
# print(data.fetchall())
"""
return self.__doc__
def config(self,database=None,framework=None,dbname=None,document_root=None,**kwargs):
# if self.is_config() is False:
if database is not None:
self.setDatabaseType(database)
if dbname is not None:
self.setDatabaseName(dbname)
if document_root is not None:
self.setDocumentRoot(document_root)
self.makeDir(document_root)
self.setFramework(framework)
if self.getDatabaseType()=='pgsql':
for kw in kwargs:
if kw=='user':
self.setUser(kwargs[kw])
elif kw=='host':
self.setHost(kwargs[kw])
elif kw=='port':
self.setPort(kwargs[kw])
#else:
# config=self.get_config()
# print(config)
# self.setDatabase(config['database']['type'])
# self.setDocumentRoot(config['doc_root'])
# self.setUser()
def Database(self):
database_application=self.getDatabaseType()
if self.__database__['sqlite']:
database_dir=self.getDocumentRoot()+'/'+self.__install__['directory'][0]
self.makeDir(database_dir)
return SelectSQLite3(database_application,dbname=database_dir+self.getDatabaseName())
elif self.__database__['pgsql'] or self.__database__['psql']:
try:
user=self.getUser()
except AttributeError:
self.setUser((None,None))
return SelectPgSQL(database_application,dbname=self.getDatabaseName(),user=user)
# No Database Application
return FileinFlask(database_application)
def Application(self):
framework=WebAppInFlask()
document_root=self.getDocumentRoot()
if document_root=='':
print('+ document_root is not found.')
print(' - look at method config(document_root="xxx")')
exit()
if self.__framework__['django']:
framework=WebAppInDjango()
elif self.__framework__['cart']:
framework=ShoppingCart()
framework.setDatabase(self.db())
framework.setInstallDir(document_root)
framework.setType(self.getDatabaseType())
framework.setDatabaseName(self.getDatabaseName())
framework.setFramework(self.getFramework())
# exit()
return framework
def getGlobalVar(self):
return self.__install__
def getDatabaseType(self):
return self.__database_application
def setDatabaseType(self,database=None):
if database is not None:
self.__database__[database]=True
self.__database_application=database
def getFramework(self):
return self.__framework
def setFramework(self,framework=None):
if framework is None:
framework='flask'
self.__framework=framework
if framework is not None:
self.__framework__[framework]=True
def app(self):
return self.Application()
def db(self):
return self.Database()
def set_config_json(self,file):
self.__json_file=file
def get_config_json(self):
return self.__json_file
def is_config(self):
if os.path.exists(self.get_config_json()):
return True
return False
def get_config(self):
return self.__config__
def set_config(self,file=None):
self.set_config_json(file)
if self.is_config():
self.__config__=self.read_json()
else:
return self.is_config()
def read_json(self):
with open(self.get_config_json(),'rt') as fp:
data=fp.read()
return json.loads(data) | de | 0.212331 | # /usr/bin/env python # -*- coding: utf-8 -*- FlaskApp Class - setDatabase - setFramework (sample code) #===================================================================================== def main(): framework=WebAppFrameWork() ############################################################################### # + config method # database config: # - database :(None | sqlite | pgsql) # - dbname: (None | <Database Name>) # - document_root: <Document Root Directory> (* required) ############################################################################### # + Stored Text: # - framework.config(document_root='doc_root') ############################################################################### # + SQLite: # - framework.config(database='sqlite',dbname='sample.db',document_root='./doc_root') ############################################################################### # + PgSQL: # - framework.config(database='pgsql',dbname='<dbname>',document_root='./doc_root') ############################################################################### (db,app)=framework.db(),framework.app() ...... 
and more ##################################################################################### ##################################################################################### def createTable(db=None): if is not None: # Create Table db.setTableName('TEST1') # append column # - name:None # - types:None # - val:None # - primarykey: Tuple(<column name>,<True | False) # - unique: <True | False> db.appendColumn('commodity_id','INTEGER','NOT NULL',('commodity_id',True)) db.appendColumn('company_id','TEXT','NOT NULL') db.appendColumn('doc_name','TEXT','NOT NULL') db.appendColumn('reg_date','DATE','NOT NULL') # Preview SQL Code db.previewCode(db.getCreateTableCode()) # Execute SQL Code sql=db.getCreateTableCode() db.connect() db.cursor().execute(sql) db.close() # Example SQL Sample: # sql=f'SELECT * from {db.getDatanaseName()};' # db.connect() # data=db.cursor().execute(sql).fetchall() # db.close() # print(data.fetchall()) # if self.is_config() is False: #else: # config=self.get_config() # print(config) # self.setDatabase(config['database']['type']) # self.setDocumentRoot(config['doc_root']) # self.setUser() # No Database Application # exit() | 2.624188 | 3 |
docs/conf.py | trickeydan/zoloto | 0 | 6620910 | <filename>docs/conf.py
from typing import List
import zoloto
project = "Zoloto"
copyright = "2020, <NAME>" # noqa: A001
author = "<NAME>"
release = zoloto.__version__
extensions = [
"sphinx.ext.autodoc",
"sphinx_autodoc_typehints",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
"m2r",
]
templates_path = [] # type: List[str]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"cv2": (
"https://docs.opencv.org/3.0-last-rst/",
None,
), # This is the most recent version with intersphinx support.
"numpy": ("https://docs.scipy.org/doc/numpy", None),
}
html_theme = "sphinx_rtd_theme"
html_static_path = [] # type: List[str]
autodoc_default_options = {
"member-order": "alphabetical",
"special-members": "__init__, __iter__",
"undoc-members": True,
"inherited-members": True,
}
autodoc_mock_imports = ["picamera"]
| <filename>docs/conf.py
# Sphinx configuration for the Zoloto documentation build.
from typing import List
import zoloto
# Project identity shown in the rendered pages.
project = "Zoloto"
copyright = "2020, <NAME>"  # noqa: A001
author = "<NAME>"
# Keep the documented version in lock-step with the installed package.
release = zoloto.__version__
# Enabled Sphinx extensions: autodoc (+ type hints), doctest,
# cross-project links, "view source" links, the RTD theme and
# Markdown support via m2r.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx_autodoc_typehints",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx_rtd_theme",
    "m2r",
]
templates_path = []  # type: List[str]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# Targets for cross-references into other projects' documentation.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "cv2": (
        "https://docs.opencv.org/3.0-last-rst/",
        None,
    ),  # This is the most recent version with intersphinx support.
    "numpy": ("https://docs.scipy.org/doc/numpy", None),
}
html_theme = "sphinx_rtd_theme"
html_static_path = []  # type: List[str]
# Defaults applied to every autodoc directive.
autodoc_default_options = {
    "member-order": "alphabetical",
    "special-members": "__init__, __iter__",
    "undoc-members": True,
    "inherited-members": True,
}
# picamera is unavailable off-Pi; mock it so autodoc can import the package.
autodoc_mock_imports = ["picamera"]
devstack/components/keystone.py | hagleitn/Openstack-Devstack2 | 1 | 6620911 | <gh_stars>1-10
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from urlparse import urlunparse
from devstack import cfg
from devstack import component as comp
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.components import db
LOG = logging.getLogger("devstack.components.keystone")
# This db will be dropped then created
DB_NAME = "keystone"
# Subdirs of the git checkout
BIN_DIR = "bin"
CONFIG_DIR = "etc"
# Simple confs
ROOT_CONF = "keystone.conf"
CATALOG_CONF = 'default_catalog.templates'
LOGGING_CONF = "logging.conf"
LOGGING_SOURCE_FN = 'logging.conf.sample'
CONFIGS = [ROOT_CONF, CATALOG_CONF, LOGGING_CONF]
# This is a special conf/init script
MANAGE_DATA_CONF = 'keystone_init.sh'
MANAGE_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
MANAGE_ADMIN_USER = 'admin'
MANAGE_DEMO_USER = 'demo'
MANAGE_INVIS_USER = 'invisible_to_admin'
# Sync db command
MANAGE_APP_NAME = 'keystone-manage'
SYNC_DB_CMD = [sh.joinpths('%BINDIR%', MANAGE_APP_NAME), 'db_sync']
# What to start
APP_NAME = 'keystone-all'
APP_OPTIONS = {
APP_NAME: ['--config-file', sh.joinpths('%ROOT%', CONFIG_DIR, ROOT_CONF),
"--debug", '-d',
'--log-config=' + sh.joinpths('%ROOT%', CONFIG_DIR, 'logging.cnf')]
}
# Swift template additions
# TODO: get rid of these
SWIFT_TEMPL_ADDS = ['catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
'catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
'catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/',
'catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
"catalog.RegionOne.object_store.name = Swift Service"]
# Quantum template additions
# TODO: get rid of these
QUANTUM_TEMPL_ADDS = ['catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/',
'catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/',
'catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/',
"catalog.RegionOne.network.name = Quantum Service"]
class KeystoneUninstaller(comp.PythonUninstallComponent):
def __init__(self, *args, **kargs):
comp.PythonUninstallComponent.__init__(self, *args, **kargs)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
class KeystoneInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
comp.PythonInstallComponent.__init__(self, *args, **kargs)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
def _get_download_locations(self):
places = list()
places.append({
'uri': ("git", "keystone_repo"),
'branch': ("git", "keystone_branch"),
})
return places
def post_install(self):
comp.PythonInstallComponent.post_install(self)
self._setup_db()
self._sync_db()
self._setup_initer()
def known_options(self):
return set(['swift', 'quantum'])
def _sync_db(self):
LOG.info("Syncing keystone to database named %s.", DB_NAME)
params = dict()
params['BINDIR'] = self.bin_dir
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=params)
def _get_config_files(self):
return list(CONFIGS)
def _setup_db(self):
LOG.info("Fixing up database named %s.", DB_NAME)
db.drop_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
db.create_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
def _setup_initer(self):
LOG.info("Configuring keystone initializer template %s.", MANAGE_DATA_CONF)
(_, contents) = utils.load_template(self.component_name, MANAGE_DATA_CONF)
params = self._get_param_map(MANAGE_DATA_CONF)
contents = utils.param_replace(contents, params, True)
tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
sh.write_file(tgt_fn, contents)
sh.chmod(tgt_fn, 0755)
self.tracewriter.file_touched(tgt_fn)
def _config_adjust(self, contents, name):
if name == ROOT_CONF:
# Use config parser and
# then extract known configs that
# ill need locations/directories/files made (or touched)...
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
log_filename = config.get('default', 'log_file')
if log_filename:
LOG.info("Ensuring log file %s exists and is empty." % (log_filename))
log_dir = sh.dirname(log_filename)
if log_dir:
LOG.info("Ensuring log directory %s exists." % (log_dir))
self.tracewriter.dirs_made(*sh.mkdirslist(log_dir))
# Destroy then recreate it (the log file)
sh.unlink(log_filename)
self.tracewriter.file_touched(sh.touch_file(log_filename))
elif name == CATALOG_CONF:
nlines = list()
if 'swift' in self.options:
mp = dict()
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
nlines.append("# Swift additions")
nlines.extend(utils.param_replace_list(SWIFT_TEMPL_ADDS, mp))
nlines.append("")
if 'quantum' in self.options:
mp = dict()
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
nlines.append("# Quantum additions")
nlines.extend(utils.param_replace_list(QUANTUM_TEMPL_ADDS, mp))
nlines.append("")
if nlines:
nlines.insert(0, contents)
contents = cfg.add_header(name, utils.joinlinesep(*nlines))
return contents
def _get_source_config(self, config_fn):
if config_fn == LOGGING_CONF:
fn = sh.joinpths(self.cfg_dir, LOGGING_SOURCE_FN)
contents = sh.load_file(fn)
return (fn, contents)
return comp.PythonInstallComponent._get_source_config(self, config_fn)
def warm_configs(self):
get_shared_params(self.cfg, self.pw_gen)
def _get_param_map(self, config_fn):
# These be used to fill in the configuration/cmds +
# params with actual values
mp = dict()
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
mp['DEST'] = self.app_dir
mp['BIN_DIR'] = self.bin_dir
mp['CONFIG_FILE'] = sh.joinpths(self.cfg_dir, ROOT_CONF)
if config_fn == ROOT_CONF:
mp['SQL_CONN'] = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
mp['KEYSTONE_DIR'] = self.app_dir
mp.update(get_shared_params(self.cfg, self.pw_gen))
elif config_fn == MANAGE_DATA_CONF:
mp.update(get_shared_params(self.cfg, self.pw_gen))
return mp
class KeystoneRuntime(comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)
def post_start(self):
tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
if sh.isfile(tgt_fn):
# If its still there, run it
# these environment additions are important
# in that they eventually affect how this script runs
LOG.info("Waiting %s seconds so that keystone can start up before running first time init." % (self.wait_time))
sh.sleep(self.wait_time)
env = dict()
env['ENABLED_SERVICES'] = ",".join(self.instances.keys())
env['BIN_DIR'] = self.bin_dir
setup_cmd = MANAGE_CMD_ROOT + [tgt_fn]
LOG.info("Running (%s) command to initialize keystone." % (" ".join(setup_cmd)))
sh.execute(*setup_cmd, env_overrides=env, run_as_root=False)
LOG.debug("Removing (%s) file since we successfully initialized keystone." % (tgt_fn))
sh.unlink(tgt_fn)
def _get_apps_to_start(self):
apps = list()
for app_name in APP_OPTIONS.keys():
apps.append({
'name': app_name,
'path': sh.joinpths(self.bin_dir, app_name),
})
return apps
def _get_app_options(self, app):
return APP_OPTIONS.get(app)
def get_shared_params(config, pw_gen, service_user_name=None):
mp = dict()
host_ip = config.get('host', 'ip')
# These match what is in keystone_init.sh
mp['SERVICE_TENANT_NAME'] = 'service'
if service_user_name:
mp['SERVICE_USERNAME'] = str(service_user_name)
mp['ADMIN_USER_NAME'] = 'admin'
mp['DEMO_USER_NAME'] = 'demo'
mp['ADMIN_TENANT_NAME'] = mp['ADMIN_USER_NAME']
mp['DEMO_TENANT_NAME'] = mp['DEMO_USER_NAME']
# Tokens and passwords
mp['SERVICE_TOKEN'] = pw_gen.get_password(
"service_token",
'the service admin token',
)
mp['ADMIN_PASSWORD'] = pw_gen.get_password(
'horizon_keystone_admin',
'the horizon and keystone admin',
length=20,
)
mp['SERVICE_PASSWORD'] = pw_gen.get_password(
'service_password',
'service authentication',
)
# Components of the auth endpoint
keystone_auth_host = config.getdefaulted('keystone', 'keystone_auth_host', host_ip)
mp['KEYSTONE_AUTH_HOST'] = keystone_auth_host
keystone_auth_port = config.getdefaulted('keystone', 'keystone_auth_port', '35357')
mp['KEYSTONE_AUTH_PORT'] = keystone_auth_port
keystone_auth_proto = config.getdefaulted('keystone', 'keystone_auth_protocol', 'http')
mp['KEYSTONE_AUTH_PROTOCOL'] = keystone_auth_proto
# Components of the service endpoint
keystone_service_host = config.getdefaulted('keystone', 'keystone_service_host', host_ip)
mp['KEYSTONE_SERVICE_HOST'] = keystone_service_host
keystone_service_port = config.getdefaulted('keystone', 'keystone_service_port', '5000')
mp['KEYSTONE_SERVICE_PORT'] = keystone_service_port
keystone_service_proto = config.getdefaulted('keystone', 'keystone_service_protocol', 'http')
mp['KEYSTONE_SERVICE_PROTOCOL'] = keystone_service_proto
# Uri's of the http/https endpoints
mp['AUTH_ENDPOINT'] = urlunparse((keystone_auth_proto,
"%s:%s" % (keystone_auth_host, keystone_auth_port),
"v2.0", "", "", ""))
mp['SERVICE_ENDPOINT'] = urlunparse((keystone_service_proto,
"%s:%s" % (keystone_service_host, keystone_service_port),
"v2.0", "", "", ""))
return mp
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from urlparse import urlunparse
from devstack import cfg
from devstack import component as comp
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.components import db
# Module-scoped logger for this component.
LOG = logging.getLogger("devstack.components.keystone")
# This db will be dropped then created
DB_NAME = "keystone"
# Subdirs of the git checkout
BIN_DIR = "bin"
CONFIG_DIR = "etc"
# Simple confs
ROOT_CONF = "keystone.conf"
CATALOG_CONF = 'default_catalog.templates'
LOGGING_CONF = "logging.conf"
LOGGING_SOURCE_FN = 'logging.conf.sample'
CONFIGS = [ROOT_CONF, CATALOG_CONF, LOGGING_CONF]
# This is a special conf/init script
MANAGE_DATA_CONF = 'keystone_init.sh'
MANAGE_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
MANAGE_ADMIN_USER = 'admin'
MANAGE_DEMO_USER = 'demo'
MANAGE_INVIS_USER = 'invisible_to_admin'
# Sync db command
MANAGE_APP_NAME = 'keystone-manage'
SYNC_DB_CMD = [sh.joinpths('%BINDIR%', MANAGE_APP_NAME), 'db_sync']
# What to start
APP_NAME = 'keystone-all'
# NOTE(review): '--log-config' points at 'logging.cnf' while LOGGING_CONF
# above installs 'logging.conf' -- confirm which filename is intended.
APP_OPTIONS = {
    APP_NAME: ['--config-file', sh.joinpths('%ROOT%', CONFIG_DIR, ROOT_CONF),
               "--debug", '-d',
               '--log-config=' + sh.joinpths('%ROOT%', CONFIG_DIR, 'logging.cnf')]
}
# Swift template additions
# TODO: get rid of these
# NOTE(review): the first two entries are identical publicURL lines --
# one of them is probably redundant.
SWIFT_TEMPL_ADDS = ['catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
                    'catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
                    'catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/',
                    'catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
                    "catalog.RegionOne.object_store.name = Swift Service"]
# Quantum template additions
# TODO: get rid of these
QUANTUM_TEMPL_ADDS = ['catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/',
                      'catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/',
                      'catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/',
                      "catalog.RegionOne.network.name = Quantum Service"]
class KeystoneUninstaller(comp.PythonUninstallComponent):
    """Uninstall handler for keystone; records config/bin dirs for cleanup."""
    def __init__(self, *args, **kargs):
        comp.PythonUninstallComponent.__init__(self, *args, **kargs)
        # Locations inside the checked-out keystone app directory.
        self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
        self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
class KeystoneInstaller(comp.PythonInstallComponent):
    """Installs keystone: fetches the source, prepares the database,
    adjusts the config files and drops the one-shot init script."""
    def __init__(self, *args, **kargs):
        comp.PythonInstallComponent.__init__(self, *args, **kargs)
        # Config/bin locations inside the checked-out keystone app dir.
        self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
        self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
    def _get_download_locations(self):
        """Return the git repo/branch config keys to fetch keystone from."""
        places = list()
        places.append({
            'uri': ("git", "keystone_repo"),
            'branch': ("git", "keystone_branch"),
        })
        return places
    def post_install(self):
        """After the base install: create the db, sync it, write the initer."""
        comp.PythonInstallComponent.post_install(self)
        self._setup_db()
        self._sync_db()
        self._setup_initer()
    def known_options(self):
        """Options that add entries to the generated service catalog."""
        return set(['swift', 'quantum'])
    def _sync_db(self):
        """Run 'keystone-manage db_sync' against the fresh database."""
        LOG.info("Syncing keystone to database named %s.", DB_NAME)
        params = dict()
        params['BINDIR'] = self.bin_dir
        cmds = [{'cmd': SYNC_DB_CMD}]
        utils.execute_template(*cmds, cwd=self.bin_dir, params=params)
    def _get_config_files(self):
        """Config files this component owns (copied so callers may mutate)."""
        return list(CONFIGS)
    def _setup_db(self):
        """Drop and recreate the keystone database."""
        LOG.info("Fixing up database named %s.", DB_NAME)
        db.drop_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
        db.create_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
    def _setup_initer(self):
        """Render keystone_init.sh into bin/ and mark it executable."""
        LOG.info("Configuring keystone initializer template %s.", MANAGE_DATA_CONF)
        (_, contents) = utils.load_template(self.component_name, MANAGE_DATA_CONF)
        params = self._get_param_map(MANAGE_DATA_CONF)
        contents = utils.param_replace(contents, params, True)
        tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
        sh.write_file(tgt_fn, contents)
        # PORTABILITY FIX: 0o755 instead of the Python-2-only literal 0755
        # (same value; valid on py2.6+ and py3).
        sh.chmod(tgt_fn, 0o755)
        self.tracewriter.file_touched(tgt_fn)
    def _config_adjust(self, contents, name):
        """Post-process a rendered config file before it is written out."""
        if name == ROOT_CONF:
            # Use config parser and
            # then extract known configs that
            # will need locations/directories/files made (or touched)...
            with io.BytesIO(contents) as stream:
                config = cfg.IgnoreMissingConfigParser()
                config.readfp(stream)
                log_filename = config.get('default', 'log_file')
                if log_filename:
                    LOG.info("Ensuring log file %s exists and is empty." % (log_filename))
                    log_dir = sh.dirname(log_filename)
                    if log_dir:
                        LOG.info("Ensuring log directory %s exists." % (log_dir))
                        self.tracewriter.dirs_made(*sh.mkdirslist(log_dir))
                    # Destroy then recreate it (the log file)
                    sh.unlink(log_filename)
                    self.tracewriter.file_touched(sh.touch_file(log_filename))
        elif name == CATALOG_CONF:
            # Append extra catalog entries for the optional services.
            nlines = list()
            if 'swift' in self.options:
                mp = dict()
                mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
                nlines.append("# Swift additions")
                nlines.extend(utils.param_replace_list(SWIFT_TEMPL_ADDS, mp))
                nlines.append("")
            if 'quantum' in self.options:
                mp = dict()
                mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
                nlines.append("# Quantum additions")
                nlines.extend(utils.param_replace_list(QUANTUM_TEMPL_ADDS, mp))
                nlines.append("")
            if nlines:
                # Keep the original template first, then the additions.
                nlines.insert(0, contents)
                contents = cfg.add_header(name, utils.joinlinesep(*nlines))
        return contents
    def _get_source_config(self, config_fn):
        """Locate the source for *config_fn* (logging uses a .sample file)."""
        if config_fn == LOGGING_CONF:
            fn = sh.joinpths(self.cfg_dir, LOGGING_SOURCE_FN)
            contents = sh.load_file(fn)
            return (fn, contents)
        return comp.PythonInstallComponent._get_source_config(self, config_fn)
    def warm_configs(self):
        """Pre-generate shared passwords/tokens so any prompts happen early."""
        get_shared_params(self.cfg, self.pw_gen)
    def _get_param_map(self, config_fn):
        """Build the template substitution map for *config_fn*."""
        # These be used to fill in the configuration/cmds +
        # params with actual values
        mp = dict()
        mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
        mp['DEST'] = self.app_dir
        mp['BIN_DIR'] = self.bin_dir
        mp['CONFIG_FILE'] = sh.joinpths(self.cfg_dir, ROOT_CONF)
        if config_fn == ROOT_CONF:
            mp['SQL_CONN'] = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
            mp['KEYSTONE_DIR'] = self.app_dir
            mp.update(get_shared_params(self.cfg, self.pw_gen))
        elif config_fn == MANAGE_DATA_CONF:
            mp.update(get_shared_params(self.cfg, self.pw_gen))
        return mp
class KeystoneRuntime(comp.PythonRuntime):
    """Runtime handler: starts keystone-all and runs the first-time init."""
    def __init__(self, *args, **kargs):
        comp.PythonRuntime.__init__(self, *args, **kargs)
        self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
        self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
        # Seconds to wait for keystone to come up before running init
        # (clamped to at least 1).
        self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)
    def post_start(self):
        """Run keystone_init.sh exactly once after start-up, then delete it."""
        tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
        if sh.isfile(tgt_fn):
            # If its still there, run it
            # these environment additions are important
            # in that they eventually affect how this script runs
            LOG.info("Waiting %s seconds so that keystone can start up before running first time init." % (self.wait_time))
            sh.sleep(self.wait_time)
            env = dict()
            env['ENABLED_SERVICES'] = ",".join(self.instances.keys())
            env['BIN_DIR'] = self.bin_dir
            setup_cmd = MANAGE_CMD_ROOT + [tgt_fn]
            LOG.info("Running (%s) command to initialize keystone." % (" ".join(setup_cmd)))
            sh.execute(*setup_cmd, env_overrides=env, run_as_root=False)
            # Deleting the script makes this initialization one-shot.
            LOG.debug("Removing (%s) file since we successfully initialized keystone." % (tgt_fn))
            sh.unlink(tgt_fn)
    def _get_apps_to_start(self):
        """Describe the keystone-all app (name + path) for the runner."""
        apps = list()
        for app_name in APP_OPTIONS.keys():
            apps.append({
                'name': app_name,
                'path': sh.joinpths(self.bin_dir, app_name),
            })
        return apps
    def _get_app_options(self, app):
        """Return the CLI options for *app* (None when unknown)."""
        return APP_OPTIONS.get(app)
def get_shared_params(config, pw_gen, service_user_name=None):
    """Assemble the keystone parameters shared by other components.

    Mirrors the users/tenants created by keystone_init.sh and exposes the
    generated secrets plus the auth/service endpoint pieces.

    :param config: configuration object (``get``/``getdefaulted``).
    :param pw_gen: password generator (``get_password``).
    :param service_user_name: optional service user name to include.
    :returns: dict mapping parameter name -> value.
    """
    params = dict()
    host_ip = config.get('host', 'ip')
    # User/tenant names -- keep in sync with keystone_init.sh.
    params['SERVICE_TENANT_NAME'] = 'service'
    if service_user_name:
        params['SERVICE_USERNAME'] = str(service_user_name)
    params['ADMIN_USER_NAME'] = 'admin'
    params['DEMO_USER_NAME'] = 'demo'
    params['ADMIN_TENANT_NAME'] = params['ADMIN_USER_NAME']
    params['DEMO_TENANT_NAME'] = params['DEMO_USER_NAME']
    # Generated tokens and passwords.
    params['SERVICE_TOKEN'] = pw_gen.get_password(
        "service_token",
        'the service admin token',
    )
    params['ADMIN_PASSWORD'] = pw_gen.get_password(
        'horizon_keystone_admin',
        'the horizon and keystone admin',
        length=20,
    )
    params['SERVICE_PASSWORD'] = pw_gen.get_password(
        'service_password',
        'service authentication',
    )
    # The auth (admin) and service (public) endpoints share the same shape;
    # only the config key prefix and the default port differ.
    endpoint_uris = []
    for prefix, default_port in (('auth', '35357'), ('service', '5000')):
        host = config.getdefaulted('keystone', 'keystone_%s_host' % prefix, host_ip)
        port = config.getdefaulted('keystone', 'keystone_%s_port' % prefix, default_port)
        proto = config.getdefaulted('keystone', 'keystone_%s_protocol' % prefix, 'http')
        upper = prefix.upper()
        params['KEYSTONE_%s_HOST' % upper] = host
        params['KEYSTONE_%s_PORT' % upper] = port
        params['KEYSTONE_%s_PROTOCOL' % upper] = proto
        endpoint_uris.append(('%s_ENDPOINT' % upper,
                              urlunparse((proto, "%s:%s" % (host, port), "v2.0", "", "", ""))))
    # Full URIs last, matching the original insertion order.
    for key, uri in endpoint_uris:
        params[key] = uri
    return params
trainer.py | suifengwangshi/MotifC | 0 | 6620912 | <reponame>suifengwangshi/MotifC<gh_stars>0
import os
import math
import datetime
import numpy as np
import os.path as osp
from utils import plotandsave, label_accuracy_score
from sklearn.metrics import roc_auc_score, average_precision_score
import torch
class Trainer(object):
"""build a trainer"""
def __init__(self, model, optimizer, criterion, device, checkpoint, start_epoch, max_epoch,
train_loader, test_loader, lr_policy):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.train_loader = train_loader
self.test_loader = test_loader
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.checkpoint = checkpoint
if not osp.exists(self.checkpoint):
os.mkdir(self.checkpoint)
self.LR_policy = lr_policy
self.epoch = 0
self.auc_best = 0.5
self.prauc_best = 0
self.state_best = None
def train(self):
"""training the model"""
self.model.to(self.device)
self.criterion.to(self.device)
for epoch in range(self.start_epoch, self.max_epoch):
# set training mode during the training process
self.model.train()
self.epoch = epoch
# self.LR_policy.step() # for cosine learning strategy
for i_batch, sample_batch in enumerate(self.train_loader):
X_data1 = sample_batch["data1"].float().to(self.device) # 是否加入进化信息需要进行修改
X_data2 = sample_batch["data2"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
self.optimizer.zero_grad()
label_p = self.model(X_data1, X_data2)
loss = self.criterion(label_p, label)
if np.isnan(loss.item()):
raise ValueError('loss is nan while training')
loss.backward()
self.optimizer.step()
print("epoch/i_batch: {}/{}---loss: {:.4f}---lr: {:.5f}".format(self.epoch, i_batch,
loss.item(),
self.optimizer.param_groups[0]['lr']))
# validation and save the model with higher accuracy
self.test()
return self.auc_best, self.prauc_best, self.state_best
def test(self):
"""validate the performance of the trained model."""
self.model.eval()
label_p_all = []
label_t_all = []
for i_batch, sample_batch in enumerate(self.test_loader):
X_data1 = sample_batch["data1"].float().to(self.device)
X_data2 = sample_batch["data2"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
with torch.no_grad():
label_p = self.model(X_data1, X_data2)
label_p_all.append(label_p.view(-1).data.cpu().numpy()[0])
label_t_all.append(label.view(-1).data.cpu().numpy()[0])
auc = roc_auc_score(label_t_all, label_p_all)
prauc = average_precision_score(label_t_all, label_p_all)
if (self.prauc_best + self.auc_best) < (prauc + auc):
self.auc_best = auc
self.prauc_best = prauc
self.state_best = self.model.state_dict()
print("auc: {:.3f}\tprauc: {:.3f}\n".format(auc, prauc))
class Trainer1(object):
"""build a trainer"""
def __init__(self, model, optimizer, criterion, device, checkpoint, start_epoch, max_epoch,
train_loader, test_loader, lr_policy):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.train_loader = train_loader
self.test_loader = test_loader
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.checkpoint = checkpoint
if not osp.exists(self.checkpoint):
os.mkdir(self.checkpoint)
self.LR_policy = lr_policy
self.epoch = 0
self.auc_best = 0.5
self.prauc_best = 0
self.state_best = None
def train(self):
"""training the model"""
self.model.to(self.device)
self.criterion.to(self.device)
for epoch in range(self.start_epoch, self.max_epoch):
# set training mode during the training process
self.model.train()
self.epoch = epoch
# self.LR_policy.step() # for cosine learning strategy
for i_batch, sample_batch in enumerate(self.train_loader):
X_data1 = sample_batch["data1"].float().to(self.device) # 是否加入进化信息需要进行修改
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
X_data4 = sample_batch["data4"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
self.optimizer.zero_grad()
label_p = self.model(X_data1, X_data2, X_data3, X_data4)
loss = self.criterion(label_p, label)
if np.isnan(loss.item()):
raise ValueError('loss is nan while training')
loss.backward()
self.optimizer.step()
print("epoch/i_batch: {}/{}---loss: {:.4f}---lr: {:.5f}".format(self.epoch, i_batch,
loss.item(),
self.optimizer.param_groups[0]['lr']))
# validation and save the model with higher accuracy
self.test()
return self.auc_best, self.prauc_best, self.state_best
def test(self):
"""validate the performance of the trained model."""
self.model.eval()
label_p_all = []
label_t_all = []
for i_batch, sample_batch in enumerate(self.test_loader):
X_data1 = sample_batch["data1"].float().to(self.device)
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
X_data4 = sample_batch["data4"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
with torch.no_grad():
label_p = self.model(X_data1, X_data2, X_data3, X_data4)
label_p_all.append(label_p.view(-1).data.cpu().numpy()[0])
label_t_all.append(label.view(-1).data.cpu().numpy()[0])
auc = roc_auc_score(label_t_all, label_p_all)
prauc = average_precision_score(label_t_all, label_p_all)
if (self.prauc_best + self.auc_best) < (prauc + auc):
self.auc_best = auc
self.prauc_best = prauc
self.state_best = self.model.state_dict()
print("auc: {:.3f}\tprauc: {:.3f}\n".format(auc, prauc))
class Trainer2(object):
"""build a trainer"""
def __init__(self, model, optimizer, criterion, device, checkpoint, start_epoch, max_epoch,
train_loader, test_loader, lr_policy):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.train_loader = train_loader
self.test_loader = test_loader
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.checkpoint = checkpoint
if not osp.exists(self.checkpoint):
os.mkdir(self.checkpoint)
self.LR_policy = lr_policy
self.epoch = 0
self.auc_best = 0.5
self.prauc_best = 0
self.state_best = None
def train(self):
"""training the model"""
self.model.to(self.device)
self.criterion.to(self.device)
for epoch in range(self.start_epoch, self.max_epoch):
# set training mode during the training process
self.model.train()
self.epoch = epoch
# self.LR_policy.step() # for cosine learning strategy
for i_batch, sample_batch in enumerate(self.train_loader):
X_data1 = sample_batch["data1"].float().to(self.device) # 是否加入进化信息需要进行修改
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
self.optimizer.zero_grad()
label_p = self.model(X_data1, X_data2, X_data3)
loss = self.criterion(label_p, label)
if np.isnan(loss.item()):
raise ValueError('loss is nan while training')
loss.backward()
self.optimizer.step()
print("epoch/i_batch: {}/{}---loss: {:.4f}---lr: {:.5f}".format(self.epoch, i_batch,
loss.item(),
self.optimizer.param_groups[0]['lr']))
# validation and save the model with higher accuracy
self.test()
return self.auc_best, self.prauc_best, self.state_best
def test(self):
"""validate the performance of the trained model."""
self.model.eval()
label_p_all = []
label_t_all = []
for i_batch, sample_batch in enumerate(self.test_loader):
X_data1 = sample_batch["data1"].float().to(self.device)
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
with torch.no_grad():
label_p = self.model(X_data1, X_data2, X_data3)
label_p_all.append(label_p.view(-1).data.cpu().numpy()[0])
label_t_all.append(label.view(-1).data.cpu().numpy()[0])
auc = roc_auc_score(label_t_all, label_p_all)
prauc = average_precision_score(label_t_all, label_p_all)
if (self.prauc_best + self.auc_best) < (prauc + auc):
self.auc_best = auc
self.prauc_best = prauc
self.state_best = self.model.state_dict()
print("auc: {:.3f}\tprauc: {:.3f}\n".format(auc, prauc))
| import os
import math
import datetime
import numpy as np
import os.path as osp
from utils import plotandsave, label_accuracy_score
from sklearn.metrics import roc_auc_score, average_precision_score
import torch
class Trainer(object):
"""build a trainer"""
def __init__(self, model, optimizer, criterion, device, checkpoint, start_epoch, max_epoch,
train_loader, test_loader, lr_policy):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.train_loader = train_loader
self.test_loader = test_loader
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.checkpoint = checkpoint
if not osp.exists(self.checkpoint):
os.mkdir(self.checkpoint)
self.LR_policy = lr_policy
self.epoch = 0
self.auc_best = 0.5
self.prauc_best = 0
self.state_best = None
def train(self):
"""training the model"""
self.model.to(self.device)
self.criterion.to(self.device)
for epoch in range(self.start_epoch, self.max_epoch):
# set training mode during the training process
self.model.train()
self.epoch = epoch
# self.LR_policy.step() # for cosine learning strategy
for i_batch, sample_batch in enumerate(self.train_loader):
X_data1 = sample_batch["data1"].float().to(self.device) # 是否加入进化信息需要进行修改
X_data2 = sample_batch["data2"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
self.optimizer.zero_grad()
label_p = self.model(X_data1, X_data2)
loss = self.criterion(label_p, label)
if np.isnan(loss.item()):
raise ValueError('loss is nan while training')
loss.backward()
self.optimizer.step()
print("epoch/i_batch: {}/{}---loss: {:.4f}---lr: {:.5f}".format(self.epoch, i_batch,
loss.item(),
self.optimizer.param_groups[0]['lr']))
# validation and save the model with higher accuracy
self.test()
return self.auc_best, self.prauc_best, self.state_best
def test(self):
"""validate the performance of the trained model."""
self.model.eval()
label_p_all = []
label_t_all = []
for i_batch, sample_batch in enumerate(self.test_loader):
X_data1 = sample_batch["data1"].float().to(self.device)
X_data2 = sample_batch["data2"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
with torch.no_grad():
label_p = self.model(X_data1, X_data2)
label_p_all.append(label_p.view(-1).data.cpu().numpy()[0])
label_t_all.append(label.view(-1).data.cpu().numpy()[0])
auc = roc_auc_score(label_t_all, label_p_all)
prauc = average_precision_score(label_t_all, label_p_all)
if (self.prauc_best + self.auc_best) < (prauc + auc):
self.auc_best = auc
self.prauc_best = prauc
self.state_best = self.model.state_dict()
print("auc: {:.3f}\tprauc: {:.3f}\n".format(auc, prauc))
class Trainer1(object):
"""build a trainer"""
def __init__(self, model, optimizer, criterion, device, checkpoint, start_epoch, max_epoch,
train_loader, test_loader, lr_policy):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.train_loader = train_loader
self.test_loader = test_loader
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.checkpoint = checkpoint
if not osp.exists(self.checkpoint):
os.mkdir(self.checkpoint)
self.LR_policy = lr_policy
self.epoch = 0
self.auc_best = 0.5
self.prauc_best = 0
self.state_best = None
def train(self):
"""training the model"""
self.model.to(self.device)
self.criterion.to(self.device)
for epoch in range(self.start_epoch, self.max_epoch):
# set training mode during the training process
self.model.train()
self.epoch = epoch
# self.LR_policy.step() # for cosine learning strategy
for i_batch, sample_batch in enumerate(self.train_loader):
X_data1 = sample_batch["data1"].float().to(self.device) # 是否加入进化信息需要进行修改
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
X_data4 = sample_batch["data4"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
self.optimizer.zero_grad()
label_p = self.model(X_data1, X_data2, X_data3, X_data4)
loss = self.criterion(label_p, label)
if np.isnan(loss.item()):
raise ValueError('loss is nan while training')
loss.backward()
self.optimizer.step()
print("epoch/i_batch: {}/{}---loss: {:.4f}---lr: {:.5f}".format(self.epoch, i_batch,
loss.item(),
self.optimizer.param_groups[0]['lr']))
# validation and save the model with higher accuracy
self.test()
return self.auc_best, self.prauc_best, self.state_best
def test(self):
"""validate the performance of the trained model."""
self.model.eval()
label_p_all = []
label_t_all = []
for i_batch, sample_batch in enumerate(self.test_loader):
X_data1 = sample_batch["data1"].float().to(self.device)
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
X_data4 = sample_batch["data4"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
with torch.no_grad():
label_p = self.model(X_data1, X_data2, X_data3, X_data4)
label_p_all.append(label_p.view(-1).data.cpu().numpy()[0])
label_t_all.append(label.view(-1).data.cpu().numpy()[0])
auc = roc_auc_score(label_t_all, label_p_all)
prauc = average_precision_score(label_t_all, label_p_all)
if (self.prauc_best + self.auc_best) < (prauc + auc):
self.auc_best = auc
self.prauc_best = prauc
self.state_best = self.model.state_dict()
print("auc: {:.3f}\tprauc: {:.3f}\n".format(auc, prauc))
class Trainer2(object):
"""build a trainer"""
def __init__(self, model, optimizer, criterion, device, checkpoint, start_epoch, max_epoch,
train_loader, test_loader, lr_policy):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.train_loader = train_loader
self.test_loader = test_loader
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.checkpoint = checkpoint
if not osp.exists(self.checkpoint):
os.mkdir(self.checkpoint)
self.LR_policy = lr_policy
self.epoch = 0
self.auc_best = 0.5
self.prauc_best = 0
self.state_best = None
def train(self):
"""training the model"""
self.model.to(self.device)
self.criterion.to(self.device)
for epoch in range(self.start_epoch, self.max_epoch):
# set training mode during the training process
self.model.train()
self.epoch = epoch
# self.LR_policy.step() # for cosine learning strategy
for i_batch, sample_batch in enumerate(self.train_loader):
X_data1 = sample_batch["data1"].float().to(self.device) # 是否加入进化信息需要进行修改
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
self.optimizer.zero_grad()
label_p = self.model(X_data1, X_data2, X_data3)
loss = self.criterion(label_p, label)
if np.isnan(loss.item()):
raise ValueError('loss is nan while training')
loss.backward()
self.optimizer.step()
print("epoch/i_batch: {}/{}---loss: {:.4f}---lr: {:.5f}".format(self.epoch, i_batch,
loss.item(),
self.optimizer.param_groups[0]['lr']))
# validation and save the model with higher accuracy
self.test()
return self.auc_best, self.prauc_best, self.state_best
def test(self):
"""validate the performance of the trained model."""
self.model.eval()
label_p_all = []
label_t_all = []
for i_batch, sample_batch in enumerate(self.test_loader):
X_data1 = sample_batch["data1"].float().to(self.device)
X_data2 = sample_batch["data2"].float().to(self.device)
X_data3 = sample_batch["data3"].float().to(self.device)
label = sample_batch["label"].float().to(self.device)
with torch.no_grad():
label_p = self.model(X_data1, X_data2, X_data3)
label_p_all.append(label_p.view(-1).data.cpu().numpy()[0])
label_t_all.append(label.view(-1).data.cpu().numpy()[0])
auc = roc_auc_score(label_t_all, label_p_all)
prauc = average_precision_score(label_t_all, label_p_all)
if (self.prauc_best + self.auc_best) < (prauc + auc):
self.auc_best = auc
self.prauc_best = prauc
self.state_best = self.model.state_dict()
print("auc: {:.3f}\tprauc: {:.3f}\n".format(auc, prauc)) | en | 0.809954 | build a trainer training the model # set training mode during the training process # self.LR_policy.step() # for cosine learning strategy # 是否加入进化信息需要进行修改 # validation and save the model with higher accuracy validate the performance of the trained model. build a trainer training the model # set training mode during the training process # self.LR_policy.step() # for cosine learning strategy # 是否加入进化信息需要进行修改 # validation and save the model with higher accuracy validate the performance of the trained model. build a trainer training the model # set training mode during the training process # self.LR_policy.step() # for cosine learning strategy # 是否加入进化信息需要进行修改 # validation and save the model with higher accuracy validate the performance of the trained model. | 2.386212 | 2 |
pysrc/torchfcts.py | juliusbierk/simultant | 0 | 6620913 | <reponame>juliusbierk/simultant
import inspect
import time
import traceback
import torch
from silly import sillyode
from torch import sin, cos, exp, tensor, sqrt, asin, acos, ones, zeros, linspace, logspace, arange, \
eye, zeros_like, ones_like, heaviside, cat, hstack, vstack, gather, nonzero, reshape, squeeze, take, \
transpose, unsqueeze, abs, cosh, sinh, tan, tanh, asinh, acosh, atanh, ceil, clamp, erf, erfc, \
floor, log, lgamma, log10, logical_and, logical_not, logical_or, logical_xor, pow, round, sigmoid, \
argmin, argmax, amin, amax, min, max, mean, mode, median, sum, prod, std, unique, var, isinf, isnan, \
isfinite, fft, rfft, ifft, cross, cumsum, cumprod, diag, flatten, roll, dot, det, solve, trapz, empty, \
empty_like
import logging
import typing
import inspect
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
torchfcts = {"sin": sin, "cos": cos, "exp": exp, "tensor": tensor, "sqrt": sqrt, "asin": asin,
"acos": acos, "ones": ones, "zeros": zeros, "linspace": linspace, "logspace": logspace, "arange": arange,
"eye": eye, "zeros_like": zeros_like, "ones_like": ones_like, "heaviside": heaviside,
"cat": cat, "hstack": hstack, "vstack": vstack, "gather": gather, "nonzero": nonzero, "reshape": reshape,
"squeeze": squeeze, "take": take,
"transpose": transpose, "unsqueeze": unsqueeze, "abs": abs, "cosh": cosh, "sinh": sinh,
"tan": tan, "tanh": tanh, "asinh": asinh, "acosh": acosh, "atanh": atanh, "ceil": ceil, "clamp": clamp,
"erf": erf, "erfc": erfc,
"floor": floor, "log": log, "lgamma": lgamma, "log10": log10, "logical_and": logical_and,
"logical_not": logical_not, "logical_or": logical_or, "logical_xor": logical_xor, "pow": pow,
"round": round, "sigmoid": sigmoid,
"argmin": argmin, "argmax": argmax, "amin": amin, "amax": amax, "min": min, "max": max,
"mean": mean, "mode": mode, "median": median, "sum": sum, "prod": prod, "std": std, "unique": unique,
"var": var, "isinf": isinf, "isnan": isnan,
"isfinite": isfinite, "fft": fft, "ifft": ifft, "rfft": rfft, "cross": cross, "cumsum": cumsum,
"cumprod": cumprod, "diag": diag, "flatten": flatten, "roll": roll, "dot": dot, "det": det, "solve": solve,
"trapz": trapz, "empty": empty, "empty_like": empty_like}
class RangeType:
start = None
stop = None
def __init__(self, const=False):
self.const = const
def __call__(self, i):
return typing.NewType(f'{i}', typing.Any)
def __getitem__(self, sli):
start = sli.start
stop = sli.stop
t = typing.NewType(f'{start}-{stop}', typing.Any)
t.start = start
t.stop = stop
t.const = self.const
return t
R = RangeType()
C = RangeType(const=True)
def check_function_run(f, kwargs, expr=True, ode_dim=None, ode_dim_select=None):
tensor_kwargs = {}
for k in kwargs:
tensor_kwargs[k] = torch.tensor(kwargs[k], dtype=torch.double)
error = None
if expr:
try:
r = f(x=torch.tensor([1.0]), **tensor_kwargs)
if hasattr(r, '__len__') and len(r) != 1:
error = 'Output is not one-dimensional.'
except Exception as e:
logger.debug('Could not get arguments', exc_info=e)
error = str(e).replace('<string>, ', '')
else:
try:
r = f(x=torch.tensor([1.0], dtype=torch.double),
y=torch.tensor(kwargs['y0'], dtype=torch.double),
**tensor_kwargs)
if ode_dim > 1 and len(r) != ode_dim:
error = f'Output of function does not have required dimension ({ode_dim})'
if not (0 <= ode_dim_select <= len(r) - 1):
error = 'Invalid selected output dimension'
elif ode_dim == 1 and ode_dim_select != 0:
error = 'Selected dimension must be zero for ODE of dimension one. '
except Exception as e:
logger.debug('Could not run function', exc_info=e)
error = str(e).replace('<string>, ', '')
return error
def check_code_get_args(code, f_name, expr_mode, ode_dim, ode_dim_select):
try:
f = function_from_code(code, f_name)
except Exception as e:
logger.debug('Could not form function', exc_info=e)
error = str(e).replace('<string>, ', '')
error += '\n\n' + "\n".join(traceback.format_exc(limit=0).split('\n')[1:-2])
return {'error': error}
try:
kwargs = get_default_args(f, expr_mode, ode_dim)
except Exception as e:
logger.debug('Could not get arguments', exc_info=e)
error = 'Could not extract arguments:\n' + str(e)
return {'error': error}
if not expr_mode and len(kwargs['y0']) != ode_dim:
return {'error': 'y0 must be a list of length equal to the ODE dimension.'}
error_on_run = check_function_run(f, kwargs, expr=expr_mode, ode_dim=ode_dim,
ode_dim_select=ode_dim_select)
bounds = get_bounds(f)
args = [{'name': k, 'value': v, 'lower': bounds[k][0], 'upper': bounds[k][1]} for k, v in kwargs.items()]
return {'error': error_on_run, 'args': args}
def get_default_args(func, expr, dim=1):
signature = inspect.signature(func)
kwargs = {
k: v.default if v.default is not inspect.Parameter.empty else (1 if expr else (1 if k != 'y0' else [1] * dim))
for k, v in signature.parameters.items()
}
del kwargs['x']
if not expr:
del kwargs['y']
return kwargs
def get_bounds(func):
signature = inspect.signature(func)
bounds = {
k: [0, None] if v.annotation is inspect.Parameter.empty else [v.annotation.start, v.annotation.stop]
for k, v in signature.parameters.items()
}
return bounds
def get_const_bools(func):
signature = inspect.signature(func)
consts = {
k: False if v.annotation is inspect.Parameter.empty else v.annotation.const
for k, v in signature.parameters.items()
}
return consts
def function_from_code(code, f_name):
d = {'R': R, 'C': C}
exec(code, torchfcts, d)
f: typing.Callable = d[f_name]
f._transform = d.get('_transform')
f._event = d.get('_event')
f._bounds = get_bounds(f)
return f
class TimeoutError(Exception):
pass
def ode_from_code(code, f_name, ode_dim_select, timeout=30):
atol = 5e-7
rtol = 1e-5
f = function_from_code(code, f_name)
def ode_f(x, **kwargs):
x_orig = x
mask = torch.logical_and(x >= 0, torch.isfinite(x))
x = x[mask]
added_zero = False
if x[0] != 0:
added_zero = True
x = torch.hstack((torch.tensor(0, dtype=x.dtype), x))
start_time = time.time()
def curied(x, y):
if time.time() - start_time > timeout:
logger.warning('ODE Solving timed out')
raise TimeoutError
r = f(x, y, **kwargs)
if isinstance(r, torch.Tensor):
r = (r, )
return torch.hstack(r)
if f._event is None:
sol = sillyode(curied, kwargs['y0'], x, atol=atol, rtol=rtol)
if f._transform is None:
sol = sol[:, ode_dim_select]
else:
sol = f._transform(x, sol.t(), **kwargs)
else:
def curied_event(x, y):
return f._event(x, y, **kwargs)
curied_event.direction = f._event.direction if hasattr(f._event, 'direction') else 0
curied_event.X_factor = f._event.X_factor if hasattr(f._event, 'X_factor') else 1
sol, event_x, event_y = sillyode(curied, kwargs['y0'], x, atol=atol, rtol=rtol, event=curied_event)
sol = f._transform(x, sol.t(), event_x, event_y, **kwargs)
if added_zero:
sol = sol[1:]
res = torch.tensor(float('nan')) * torch.zeros_like(x_orig)
res[mask] = sol
return res
ode_f._bounds = f._bounds
return ode_f
def get_f_expr_or_ode(code, expr_mode, f_name, ode_dim_select):
f = function_from_code(code, f_name)
if not expr_mode:
f = ode_from_code(code, f_name, ode_dim_select)
return f | import inspect
import time
import traceback
import torch
from silly import sillyode
from torch import sin, cos, exp, tensor, sqrt, asin, acos, ones, zeros, linspace, logspace, arange, \
eye, zeros_like, ones_like, heaviside, cat, hstack, vstack, gather, nonzero, reshape, squeeze, take, \
transpose, unsqueeze, abs, cosh, sinh, tan, tanh, asinh, acosh, atanh, ceil, clamp, erf, erfc, \
floor, log, lgamma, log10, logical_and, logical_not, logical_or, logical_xor, pow, round, sigmoid, \
argmin, argmax, amin, amax, min, max, mean, mode, median, sum, prod, std, unique, var, isinf, isnan, \
isfinite, fft, rfft, ifft, cross, cumsum, cumprod, diag, flatten, roll, dot, det, solve, trapz, empty, \
empty_like
import logging
import typing
import inspect
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
torchfcts = {"sin": sin, "cos": cos, "exp": exp, "tensor": tensor, "sqrt": sqrt, "asin": asin,
"acos": acos, "ones": ones, "zeros": zeros, "linspace": linspace, "logspace": logspace, "arange": arange,
"eye": eye, "zeros_like": zeros_like, "ones_like": ones_like, "heaviside": heaviside,
"cat": cat, "hstack": hstack, "vstack": vstack, "gather": gather, "nonzero": nonzero, "reshape": reshape,
"squeeze": squeeze, "take": take,
"transpose": transpose, "unsqueeze": unsqueeze, "abs": abs, "cosh": cosh, "sinh": sinh,
"tan": tan, "tanh": tanh, "asinh": asinh, "acosh": acosh, "atanh": atanh, "ceil": ceil, "clamp": clamp,
"erf": erf, "erfc": erfc,
"floor": floor, "log": log, "lgamma": lgamma, "log10": log10, "logical_and": logical_and,
"logical_not": logical_not, "logical_or": logical_or, "logical_xor": logical_xor, "pow": pow,
"round": round, "sigmoid": sigmoid,
"argmin": argmin, "argmax": argmax, "amin": amin, "amax": amax, "min": min, "max": max,
"mean": mean, "mode": mode, "median": median, "sum": sum, "prod": prod, "std": std, "unique": unique,
"var": var, "isinf": isinf, "isnan": isnan,
"isfinite": isfinite, "fft": fft, "ifft": ifft, "rfft": rfft, "cross": cross, "cumsum": cumsum,
"cumprod": cumprod, "diag": diag, "flatten": flatten, "roll": roll, "dot": dot, "det": det, "solve": solve,
"trapz": trapz, "empty": empty, "empty_like": empty_like}
class RangeType:
start = None
stop = None
def __init__(self, const=False):
self.const = const
def __call__(self, i):
return typing.NewType(f'{i}', typing.Any)
def __getitem__(self, sli):
start = sli.start
stop = sli.stop
t = typing.NewType(f'{start}-{stop}', typing.Any)
t.start = start
t.stop = stop
t.const = self.const
return t
R = RangeType()
C = RangeType(const=True)
def check_function_run(f, kwargs, expr=True, ode_dim=None, ode_dim_select=None):
tensor_kwargs = {}
for k in kwargs:
tensor_kwargs[k] = torch.tensor(kwargs[k], dtype=torch.double)
error = None
if expr:
try:
r = f(x=torch.tensor([1.0]), **tensor_kwargs)
if hasattr(r, '__len__') and len(r) != 1:
error = 'Output is not one-dimensional.'
except Exception as e:
logger.debug('Could not get arguments', exc_info=e)
error = str(e).replace('<string>, ', '')
else:
try:
r = f(x=torch.tensor([1.0], dtype=torch.double),
y=torch.tensor(kwargs['y0'], dtype=torch.double),
**tensor_kwargs)
if ode_dim > 1 and len(r) != ode_dim:
error = f'Output of function does not have required dimension ({ode_dim})'
if not (0 <= ode_dim_select <= len(r) - 1):
error = 'Invalid selected output dimension'
elif ode_dim == 1 and ode_dim_select != 0:
error = 'Selected dimension must be zero for ODE of dimension one. '
except Exception as e:
logger.debug('Could not run function', exc_info=e)
error = str(e).replace('<string>, ', '')
return error
def check_code_get_args(code, f_name, expr_mode, ode_dim, ode_dim_select):
try:
f = function_from_code(code, f_name)
except Exception as e:
logger.debug('Could not form function', exc_info=e)
error = str(e).replace('<string>, ', '')
error += '\n\n' + "\n".join(traceback.format_exc(limit=0).split('\n')[1:-2])
return {'error': error}
try:
kwargs = get_default_args(f, expr_mode, ode_dim)
except Exception as e:
logger.debug('Could not get arguments', exc_info=e)
error = 'Could not extract arguments:\n' + str(e)
return {'error': error}
if not expr_mode and len(kwargs['y0']) != ode_dim:
return {'error': 'y0 must be a list of length equal to the ODE dimension.'}
error_on_run = check_function_run(f, kwargs, expr=expr_mode, ode_dim=ode_dim,
ode_dim_select=ode_dim_select)
bounds = get_bounds(f)
args = [{'name': k, 'value': v, 'lower': bounds[k][0], 'upper': bounds[k][1]} for k, v in kwargs.items()]
return {'error': error_on_run, 'args': args}
def get_default_args(func, expr, dim=1):
signature = inspect.signature(func)
kwargs = {
k: v.default if v.default is not inspect.Parameter.empty else (1 if expr else (1 if k != 'y0' else [1] * dim))
for k, v in signature.parameters.items()
}
del kwargs['x']
if not expr:
del kwargs['y']
return kwargs
def get_bounds(func):
signature = inspect.signature(func)
bounds = {
k: [0, None] if v.annotation is inspect.Parameter.empty else [v.annotation.start, v.annotation.stop]
for k, v in signature.parameters.items()
}
return bounds
def get_const_bools(func):
signature = inspect.signature(func)
consts = {
k: False if v.annotation is inspect.Parameter.empty else v.annotation.const
for k, v in signature.parameters.items()
}
return consts
def function_from_code(code, f_name):
d = {'R': R, 'C': C}
exec(code, torchfcts, d)
f: typing.Callable = d[f_name]
f._transform = d.get('_transform')
f._event = d.get('_event')
f._bounds = get_bounds(f)
return f
class TimeoutError(Exception):
pass
def ode_from_code(code, f_name, ode_dim_select, timeout=30):
atol = 5e-7
rtol = 1e-5
f = function_from_code(code, f_name)
def ode_f(x, **kwargs):
x_orig = x
mask = torch.logical_and(x >= 0, torch.isfinite(x))
x = x[mask]
added_zero = False
if x[0] != 0:
added_zero = True
x = torch.hstack((torch.tensor(0, dtype=x.dtype), x))
start_time = time.time()
def curied(x, y):
if time.time() - start_time > timeout:
logger.warning('ODE Solving timed out')
raise TimeoutError
r = f(x, y, **kwargs)
if isinstance(r, torch.Tensor):
r = (r, )
return torch.hstack(r)
if f._event is None:
sol = sillyode(curied, kwargs['y0'], x, atol=atol, rtol=rtol)
if f._transform is None:
sol = sol[:, ode_dim_select]
else:
sol = f._transform(x, sol.t(), **kwargs)
else:
def curied_event(x, y):
return f._event(x, y, **kwargs)
curied_event.direction = f._event.direction if hasattr(f._event, 'direction') else 0
curied_event.X_factor = f._event.X_factor if hasattr(f._event, 'X_factor') else 1
sol, event_x, event_y = sillyode(curied, kwargs['y0'], x, atol=atol, rtol=rtol, event=curied_event)
sol = f._transform(x, sol.t(), event_x, event_y, **kwargs)
if added_zero:
sol = sol[1:]
res = torch.tensor(float('nan')) * torch.zeros_like(x_orig)
res[mask] = sol
return res
ode_f._bounds = f._bounds
return ode_f
def get_f_expr_or_ode(code, expr_mode, f_name, ode_dim_select):
f = function_from_code(code, f_name)
if not expr_mode:
f = ode_from_code(code, f_name, ode_dim_select)
return f | none | 1 | 2.151812 | 2 | |
tensorflow/python/distribute/experimental/rpc/rpc_ops.py | neochristou/tensorflow | 4 | 6620914 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to expose RPC APIs in tensorflow."""
from typing import Any, Callable, Optional, Sequence, Union
import tensorflow.distribute.experimental.rpc.kernels.gen_rpc_ops as gen_rpc_ops
from tensorflow.distribute.experimental.rpc.proto import tf_rpc_service_pb2 as rpc_pb2
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as tf_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.types import core as core_tf_types
from tensorflow.python.util import nest
def get_output_specs_from_function(func: tf_function.ConcreteFunction):
  """Serializes the TypeSpec structure of `func`'s outputs to a proto string.

  Args:
    func: A ConcreteFunction whose structured outputs are inspected.

  Returns:
    Serialized StructuredValue proto (bytes) describing the output specs.
  """
  specs = nest.map_structure(type_spec.type_spec_from_value,
                             func.structured_outputs)
  coder = nested_structure_coder.StructureCoder()
  return coder.encode_structure(specs).SerializeToString()
def get_input_specs_from_function(func: tf_function.ConcreteFunction):
  """Serializes the TypeSpec structure of `func`'s input arguments.

  Only the positional argument specs from the structured input signature are
  encoded; keyword-argument specs are discarded.

  Args:
    func: A ConcreteFunction whose input signature is inspected.

  Returns:
    Serialized StructuredValue proto (bytes) describing the input specs.
  """
  specs, _ = func.structured_input_signature
  coder = nested_structure_coder.StructureCoder()
  return coder.encode_structure(specs).SerializeToString()
class Server(object):
  """Server object encapsulating a resource with a GRPC server.

  Functions can be registered locally and are exposed via RPCs.
  Example:

  ```
  server = rpc_ops.Server("host:port")
  @tf.function
  def add(a, b):
    return a + b

  server.register("add", add)
  server.start()
  ```
  """

  def __init__(self, address: str):
    """Creates the server resource.

    Args:
      address: "host:port" to bind the GRPC server to.

    Raises:
      NotImplementedError: If called from within a tf.function (graph mode).
    """
    self._server_handle = gen_rpc_ops.rpc_server(address)
    if context.executing_eagerly():
      # Tie the server resource's lifetime to this Python object.
      self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._server_handle, handle_device=self._server_handle.device)
    else:
      raise NotImplementedError("Please create the server outside tf.function.")

  def register(self, method_name: str,
               func: Union[def_function.Function, tf_function.ConcreteFunction,
                           Callable[..., Any]]):
    """Registers `func` under `method_name` so clients can invoke it.

    Args:
      method_name: Name clients will use to call the function.
      func: A tf.function or ConcreteFunction. Plain Python callables are not
        supported yet.

    Raises:
      ValueError: If a tf.function taking arguments has no input signature,
        or if `func` is not a TF function.
    """
    if isinstance(func, def_function.Function):
      if func._function_spec.arg_names:  # pylint: disable=protected-access
        if func.input_signature is None:
          raise ValueError("Input signature not specified for the function.")
      concrete_fn = func.get_concrete_function()
      gen_rpc_ops.rpc_server_register(
          self._server_handle,
          method_name=method_name,
          captured_inputs=concrete_fn.captured_inputs,
          input_specs=get_input_specs_from_function(concrete_fn),
          output_specs=get_output_specs_from_function(concrete_fn),
          f=concrete_fn)
    elif isinstance(func, tf_function.ConcreteFunction):
      gen_rpc_ops.rpc_server_register(
          self._server_handle,
          method_name=method_name,
          captured_inputs=func.captured_inputs,
          # Bug fix: this branch previously referenced `concrete_fn`, which is
          # undefined in this branch and raised NameError; use `func` itself.
          input_specs=get_input_specs_from_function(func),
          output_specs=get_output_specs_from_function(func),
          f=func)
    else:
      # Python functions
      # TODO(b/186762191): Add an implementation to support python functions.
      raise ValueError("Only TF functions are supported with Register method")

  def start(self):
    """Starts the GRPC server."""
    gen_rpc_ops.rpc_server_start(self._server_handle)
class Client():
  """Client wrapper to connect to a remote RPC server.

  If Client is created with (list_registered_methods=True):
  1. Input and output specs for the methods registered so far are fetched
     from the Server.
  2. Convenience methods are added to invoke registered methods directly
     from the client.

  For example, to call a server method `add`:
  client.add(a, b) or client.add_async(a, b) can be used instead of
  client.call(args=[a,b], output_specs=[..])

  Prerequisites for using list_registered_methods=True:
  1. Server should be already started with the registered methods.
  2. Client must be created in Eager mode.
  """

  def __init__(self,
               address: str,
               name: str = "",
               list_registered_methods=False,
               timeout_in_ms=0):
    """Creates the client resource and, optionally, per-method wrappers.

    Args:
      address: "host:port" of the remote RPC server.
      name: Optional shared name for the client resource.
      list_registered_methods: If True, fetch the methods registered on the
        server and attach one convenience wrapper per method.
      timeout_in_ms: Default RPC timeout; 0 presumably means no timeout —
        confirm against the rpc_client kernel.

    Raises:
      NotImplementedError: If called outside eager mode.
    """
    self._client_handle, methods = gen_rpc_ops.rpc_client(
        shared_name=name,
        server_address=address,
        list_registered_methods=list_registered_methods,
        timeout_in_ms=timeout_in_ms)
    if context.executing_eagerly():
      # Tie the client resource's lifetime to this Python object.
      self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._client_handle, handle_device=self._client_handle.device)
    else:
      raise NotImplementedError(
          "Client creation is supported only in eager mode.")
    self._server_address = address
    decoder = nested_structure_coder.StructureCoder()
    self._method_registry = {}
    # `methods` holds serialized RegisteredMethod protos; it is only
    # populated when list_registered_methods=True.
    for method in methods.numpy():
      m = rpc_pb2.RegisteredMethod()
      m.ParseFromString(method)
      output_specs = decoder.decode_proto(m.output_specs)
      input_specs = decoder.decode_proto(m.input_specs)
      self._method_registry[m.method] = output_specs
      # TODO(ishark): Perhaps doc string can also be taken as input during
      # function registration.
      doc_string = "RPC Call for " + m.method + " method to server " + address
      self._add_method(m.method, output_specs, input_specs, self._client_handle,
                       doc_string)

  def _add_method(self, method_name, output_specs, input_specs, client_handle,
                  doc_string):
    """Attaches a convenience wrapper for `method_name` onto this client."""

    def validate_and_get_flat_inputs(*args):
      # Check the caller's argument structure against the registered input
      # specs (when available), then flatten for the rpc_call op.
      if args is None:
        args = []
      if input_specs:
        nest.assert_same_structure(args, input_specs)
      flat_inputs = nest.flatten(args)
      return flat_inputs

    def call_wrapper(*args):
      # Issues the RPC without blocking; the returned StatusOrResult blocks
      # only when one of its accessors is called.
      status_or, deleter = gen_rpc_ops.rpc_call(
          client_handle,
          args=validate_and_get_flat_inputs(*args),
          method_name=method_name)
      return StatusOrResult(status_or, deleter, output_specs)

    setattr(self, method_name, call_wrapper)
    setattr(getattr(self, method_name), "__doc__", doc_string)

  def call(self,
           method_name: str,
           args: Optional[Sequence[core_tf_types.Tensor]] = None,
           output_specs=None):
    """Invokes a remote registered function on the connected server.

    The server should be started before making an RPC call.

    Args:
      method_name: Registered method to invoke on the Server.
      args: Input arguments for the method.
      output_specs: Output specs for the output from the method. For example,
        if the tf function is:

        @tf.function(input_signature=[
          tensor_spec.TensorSpec([], tf.int32),
          tensor_spec.TensorSpec([], tf.int32)
        ])
        def multiply_fn(a, b):
          return tf.math.multiply(a, b)

        output_spec is: tf.TensorSpec((), tf.int32)

        If you have access to the TF function, the output specs can be
        generated by calling:
        output_specs = tf.nest.map_structure(tf.type_spec_from_value,
          tf_function.get_concrete_function().structured_outputs)

    Returns:
      StatusOrResult object. This function issues the RPC call to the server;
      it does not block for the duration of the RPC. Call is_ok, get_error or
      get_value on the returned object to block until the RPC finishes.
    """
    if args is None:
      args = []
    status_or, deleter = gen_rpc_ops.rpc_call(
        self._client_handle, args=nest.flatten(args), method_name=method_name)
    return StatusOrResult(status_or, deleter, output_specs)
class StatusOrResult(object):
  """Class representing the result and status of a pending RPC call."""

  def __init__(self, status_or, deleter, output_specs=None):
    """Wraps a pending RPC future.

    Args:
      status_or: Resource handle for the pending RPC result.
      deleter: Deleter tensor used to free the future resource.
      output_specs: Structure of TypeSpecs used to repack the flat outputs;
        None (or NoneTensorSpec) means the call produces no outputs.
    """
    self._status_or = status_or
    self._output_specs = output_specs
    self._deleter = deleter
    # Status is fetched lazily on the first accessor call and then cached.
    self._error_code, self._error_message = None, None

  def _check_status(self):
    # Blocks until the RPC finishes, then caches its (code, message) status.
    if self._error_code is None:
      self._error_code, self._error_message = gen_rpc_ops.rpc_check_status(
          self._status_or)

  def __del__(self):
    # Make sure the resource is deleted in the same mode as it was created in.
    if context.executing_eagerly():
      with context.eager_mode():
        gen_rpc_ops.delete_rpc_future_resource(
            handle=self._status_or, deleter=self._deleter)
    else:
      with context.graph_mode():
        gen_rpc_ops.delete_rpc_future_resource(
            handle=self._status_or, deleter=self._deleter)

  def is_ok(self):
    """Returns a boolean tensor that is True iff the RPC succeeded.

    Blocks until the RPC finishes.
    """
    self._check_status()
    return math_ops.equal(self._error_code,
                          constant_op.constant(0, dtype=dtypes.int64))

  def get_error(self):
    """Blocks until the RPC finishes; returns (error_code, error_message)."""
    self._check_status()
    return self._error_code, self._error_message

  def get_value(self):
    """Blocks until the RPC finishes and returns its output.

    The flat outputs of the RPC are repacked according to the output_specs
    this object was constructed with.

    Returns:
      Output of the RPC call, or None if no output specs were provided.
    """
    self._check_status()
    if self._output_specs is None or isinstance(self._output_specs,
                                                structure.NoneTensorSpec):
      flat_output_dtypes = []
      return_none = True
    else:
      return_none = False
      flat_output_dtypes = [s.dtype for s in nest.flatten(self._output_specs)]
    # NOTE(review): rpc_get_value runs even when no outputs are expected —
    # presumably so errors still surface; confirm against the kernel.
    result = gen_rpc_ops.rpc_get_value(self._status_or, Tout=flat_output_dtypes)
    if return_none:
      return None
    else:
      return nest.pack_sequence_as(self._output_specs, result)
| # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to expose RPC APIs in tensorflow."""
from typing import Any, Callable, Optional, Sequence, Union
import tensorflow.distribute.experimental.rpc.kernels.gen_rpc_ops as gen_rpc_ops
from tensorflow.distribute.experimental.rpc.proto import tf_rpc_service_pb2 as rpc_pb2
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as tf_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.types import core as core_tf_types
from tensorflow.python.util import nest
def get_output_specs_from_function(func: tf_function.ConcreteFunction):
  """Returns the serialized TypeSpec structure of `func`'s outputs (bytes)."""
  output_specs = nest.map_structure(type_spec.type_spec_from_value,
                                    func.structured_outputs)
  encoder = nested_structure_coder.StructureCoder()
  output_specs_proto = encoder.encode_structure(output_specs)
  return output_specs_proto.SerializeToString()
def get_input_specs_from_function(func: tf_function.ConcreteFunction):
  """Returns the serialized TypeSpec structure of `func`'s inputs (bytes).

  Only positional arg specs are encoded; kwarg specs are discarded.
  """
  arg_specs, _ = func.structured_input_signature
  encoder = nested_structure_coder.StructureCoder()
  arg_specs_proto = encoder.encode_structure(arg_specs)
  return arg_specs_proto.SerializeToString()
class Server(object):
  """Server object encapsulating a resource with a GRPC server.

  Functions can be registered locally and are exposed via RPCs.
  Example:

  ```
  server = rpc_ops.Server("host:port")
  @tf.function
  def add(a, b):
    return a + b

  server.register("add", add)
  server.start()
  ```
  """

  def __init__(self, address: str):
    """Creates the server resource.

    Args:
      address: "host:port" to bind the GRPC server to.

    Raises:
      NotImplementedError: If called from within a tf.function (graph mode).
    """
    self._server_handle = gen_rpc_ops.rpc_server(address)
    if context.executing_eagerly():
      # Tie the server resource's lifetime to this Python object.
      self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._server_handle, handle_device=self._server_handle.device)
    else:
      raise NotImplementedError("Please create the server outside tf.function.")

  def register(self, method_name: str,
               func: Union[def_function.Function, tf_function.ConcreteFunction,
                           Callable[..., Any]]):
    """Registers `func` under `method_name` so clients can invoke it.

    Args:
      method_name: Name clients will use to call the function.
      func: A tf.function or ConcreteFunction. Plain Python callables are not
        supported yet.

    Raises:
      ValueError: If a tf.function taking arguments has no input signature,
        or if `func` is not a TF function.
    """
    if isinstance(func, def_function.Function):
      if func._function_spec.arg_names:  # pylint: disable=protected-access
        if func.input_signature is None:
          raise ValueError("Input signature not specified for the function.")
      concrete_fn = func.get_concrete_function()
      gen_rpc_ops.rpc_server_register(
          self._server_handle,
          method_name=method_name,
          captured_inputs=concrete_fn.captured_inputs,
          input_specs=get_input_specs_from_function(concrete_fn),
          output_specs=get_output_specs_from_function(concrete_fn),
          f=concrete_fn)
    elif isinstance(func, tf_function.ConcreteFunction):
      gen_rpc_ops.rpc_server_register(
          self._server_handle,
          method_name=method_name,
          captured_inputs=func.captured_inputs,
          # Bug fix: this branch previously referenced `concrete_fn`, which is
          # undefined in this branch and raised NameError; use `func` itself.
          input_specs=get_input_specs_from_function(func),
          output_specs=get_output_specs_from_function(func),
          f=func)
    else:
      # Python functions
      # TODO(b/186762191): Add an implementation to support python functions.
      raise ValueError("Only TF functions are supported with Register method")

  def start(self):
    """Starts the GRPC server."""
    gen_rpc_ops.rpc_server_start(self._server_handle)
class Client():
  """Client wrapper to connect to a remote RPC server.

  If Client is created with (list_registered_methods=True):
  1. Input and output specs for the methods registered so far are fetched
     from the Server.
  2. Convenience methods are added to invoke registered methods directly
     from the client.

  For example, to call a server method `add`:
  client.add(a, b) or client.add_async(a, b) can be used instead of
  client.call(args=[a,b], output_specs=[..])

  Prerequisites for using list_registered_methods=True:
  1. Server should be already started with the registered methods.
  2. Client must be created in Eager mode.
  """

  def __init__(self,
               address: str,
               name: str = "",
               list_registered_methods=False,
               timeout_in_ms=0):
    """Creates the client resource and, optionally, per-method wrappers.

    Args:
      address: "host:port" of the remote RPC server.
      name: Optional shared name for the client resource.
      list_registered_methods: If True, fetch the methods registered on the
        server and attach one convenience wrapper per method.
      timeout_in_ms: Default RPC timeout; 0 presumably means no timeout —
        confirm against the rpc_client kernel.

    Raises:
      NotImplementedError: If called outside eager mode.
    """
    self._client_handle, methods = gen_rpc_ops.rpc_client(
        shared_name=name,
        server_address=address,
        list_registered_methods=list_registered_methods,
        timeout_in_ms=timeout_in_ms)
    if context.executing_eagerly():
      # Tie the client resource's lifetime to this Python object.
      self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._client_handle, handle_device=self._client_handle.device)
    else:
      raise NotImplementedError(
          "Client creation is supported only in eager mode.")
    self._server_address = address
    decoder = nested_structure_coder.StructureCoder()
    self._method_registry = {}
    # `methods` holds serialized RegisteredMethod protos; it is only
    # populated when list_registered_methods=True.
    for method in methods.numpy():
      m = rpc_pb2.RegisteredMethod()
      m.ParseFromString(method)
      output_specs = decoder.decode_proto(m.output_specs)
      input_specs = decoder.decode_proto(m.input_specs)
      self._method_registry[m.method] = output_specs
      # TODO(ishark): Perhaps doc string can also be taken as input during
      # function registration.
      doc_string = "RPC Call for " + m.method + " method to server " + address
      self._add_method(m.method, output_specs, input_specs, self._client_handle,
                       doc_string)

  def _add_method(self, method_name, output_specs, input_specs, client_handle,
                  doc_string):
    """Attaches a convenience wrapper for `method_name` onto this client."""

    def validate_and_get_flat_inputs(*args):
      # Check the caller's argument structure against the registered input
      # specs (when available), then flatten for the rpc_call op.
      if args is None:
        args = []
      if input_specs:
        nest.assert_same_structure(args, input_specs)
      flat_inputs = nest.flatten(args)
      return flat_inputs

    def call_wrapper(*args):
      # Issues the RPC without blocking; the returned StatusOrResult blocks
      # only when one of its accessors is called.
      status_or, deleter = gen_rpc_ops.rpc_call(
          client_handle,
          args=validate_and_get_flat_inputs(*args),
          method_name=method_name)
      return StatusOrResult(status_or, deleter, output_specs)

    setattr(self, method_name, call_wrapper)
    setattr(getattr(self, method_name), "__doc__", doc_string)

  def call(self,
           method_name: str,
           args: Optional[Sequence[core_tf_types.Tensor]] = None,
           output_specs=None):
    """Invokes a remote registered function on the connected server.

    The server should be started before making an RPC call.

    Args:
      method_name: Registered method to invoke on the Server.
      args: Input arguments for the method.
      output_specs: Output specs for the output from the method. If you have
        access to the TF function, these can be generated by calling:
        output_specs = tf.nest.map_structure(tf.type_spec_from_value,
          tf_function.get_concrete_function().structured_outputs)

    Returns:
      StatusOrResult object. This function issues the RPC call to the server;
      it does not block for the duration of the RPC. Call is_ok, get_error or
      get_value on the returned object to block until the RPC finishes.
    """
    if args is None:
      args = []
    status_or, deleter = gen_rpc_ops.rpc_call(
        self._client_handle, args=nest.flatten(args), method_name=method_name)
    return StatusOrResult(status_or, deleter, output_specs)
class StatusOrResult(object):
  """Class representing the result and status of a pending RPC call."""

  def __init__(self, status_or, deleter, output_specs=None):
    """Wraps a pending RPC future.

    Args:
      status_or: Resource handle for the pending RPC result.
      deleter: Deleter tensor used to free the future resource.
      output_specs: Structure of TypeSpecs used to repack the flat outputs;
        None (or NoneTensorSpec) means the call produces no outputs.
    """
    self._status_or = status_or
    self._output_specs = output_specs
    self._deleter = deleter
    # Status is fetched lazily on the first accessor call and then cached.
    self._error_code, self._error_message = None, None

  def _check_status(self):
    # Blocks until the RPC finishes, then caches its (code, message) status.
    if self._error_code is None:
      self._error_code, self._error_message = gen_rpc_ops.rpc_check_status(
          self._status_or)

  def __del__(self):
    # Make sure the resource is deleted in the same mode as it was created in.
    if context.executing_eagerly():
      with context.eager_mode():
        gen_rpc_ops.delete_rpc_future_resource(
            handle=self._status_or, deleter=self._deleter)
    else:
      with context.graph_mode():
        gen_rpc_ops.delete_rpc_future_resource(
            handle=self._status_or, deleter=self._deleter)

  def is_ok(self):
    """Returns a boolean tensor that is True iff the RPC succeeded.

    Blocks until the RPC finishes.
    """
    self._check_status()
    return math_ops.equal(self._error_code,
                          constant_op.constant(0, dtype=dtypes.int64))

  def get_error(self):
    """Blocks until the RPC finishes; returns (error_code, error_message)."""
    self._check_status()
    return self._error_code, self._error_message

  def get_value(self):
    """Blocks until the RPC finishes and returns its output.

    The flat outputs of the RPC are repacked according to the output_specs
    this object was constructed with.

    Returns:
      Output of the RPC call, or None if no output specs were provided.
    """
    self._check_status()
    if self._output_specs is None or isinstance(self._output_specs,
                                                structure.NoneTensorSpec):
      flat_output_dtypes = []
      return_none = True
    else:
      return_none = False
      flat_output_dtypes = [s.dtype for s in nest.flatten(self._output_specs)]
    # NOTE(review): rpc_get_value runs even when no outputs are expected —
    # presumably so errors still surface; confirm against the kernel.
    result = gen_rpc_ops.rpc_get_value(self._status_or, Tout=flat_output_dtypes)
    if return_none:
      return None
    else:
      return nest.pack_sequence_as(self._output_specs, result)
| en | 0.683424 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Module to expose RPC APIs in tensorflow. Server object encapsulates a resource with GRPC server. Functions can be registered locally and are exposed via RPCs. Example: ``` server = rpc_ops.Server("host:port") @tf.function def add(a, b): return a + b server.register("add", add) server.start() ``` Method for registering functions. # pylint: disable=protected-access # Python functions # TODO(b/186762191): Add an implementation to support python functions. Starts GRPC server. Client wrapper to connect to remote RPC server. If Client is created with (list_registered_methods=True): 1. Input and output specs for the methods till this point will be fetched from Server. 2. convenience methods are added to invoke registered methods directly from client. For example: For call a server method `add` client.add(a, b) or client.add_async(a, b) can be used instead of client.call(args=[a,b], output_specs=[..]) Prerequiste for using list_registered_methods=True: 1. Server should be already started with the registered methods. 2. Client must be created in Eager mode. # TODO(ishark): Perhaps doc string can also be taken as input during # function registration. Method to add RPC methods to the client object. Method to invoke remote registered functions on the connected server. 
Server should be started before making an RPC Call. Args: method_name: Registered method to invoke on Server. args: Input arguments for the method. output_specs: Output specs for the output from method. For example, if tf function is: @tf.function(input_signature=[ tensor_spec.TensorSpec([], tf.int32), tensor_spec.TensorSpec([], tf.int32) ]) def multiply_fn(a, b): return tf.math.multiply(a, b) output_spec is: tf.TensorSpec((), tf.int32) If you have access to TF Function, the output specs can be generated from tf.function by calling: output_specs = tf.nest.map_structure(tf.type_spec_from_value, tf_function.get_concrete_function().structured_outputs) Returns: StatusOrResult object. This function issues the RPC call to server, it does not block for the duration of RPC. Please call is_ok, get_error or get_value methods on the returned object to blocked till RPC finishes. Class representing result and status from RPC Call. # Make sure the resource is deleted in the same mode as it was created in. output_specs: Output specs for the output from method. For example, if tf function is: @tf.function(input_signature=[ tensor_spec.TensorSpec([], tf.int32), tensor_spec.TensorSpec([], tf.int32) ]) def multiply_fn(a, b): return tf.math.multiply(a, b) output_spec is: tf.TensorSpec((), tf.int32) If you have access to TF Function, the output specs can be generated from tf.function by calling: output_specs = tf.nest.map_structure(tf.type_spec_from_value, tf_function.get_concrete_function().structured_outputs) Returns: Output of the RPC call. | 1.703 | 2 |
tutorials/walkthrough/mnist_DPriv_mt_scratch.py | Mtroglia/privacy | 0 | 6620915 | <reponame>Mtroglia/privacy<filename>tutorials/walkthrough/mnist_DPriv_mt_scratch.py
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scratchpad for training a CNN on MNIST with DPSGD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(0, '/Users/trog/Documents/Spyder/privacy')
import numpy as np
import tensorflow as tf
from privacy.optimizers import dp_optimizer
# Default DP-SGD hyperparameters (overridable via the command-line flags
# defined below).
learning_rate = 0.25
noise_multiplier = 1.3
l2_norm_clip = 1.5
batch_size = 256
epochs = 2
num_microbatches = 256

# Re-running this scratch script in the same interpreter (e.g. Spyder/IPython)
# re-defines the flags, which raises a DuplicateFlagError — swallow just that
# situation and keep the first definitions.
try:
  tf.flags.DEFINE_float('learning_rate', learning_rate, 'Learning rate for training')
  tf.flags.DEFINE_integer('batch_size', batch_size, 'Batch size')
  tf.flags.DEFINE_integer('epochs', epochs, 'Number of epochs')
  tf.flags.DEFINE_float("noise_multiplier", noise_multiplier, "Noise Mult")
  tf.flags.DEFINE_integer("num_microbatches", num_microbatches, "num microbatches")
  tf.flags.DEFINE_float("l2_norm_clip", l2_norm_clip, "L2 clipping ")
except Exception:  # Bug fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
  print("Duplicate Flags Skip...")
FLAGS = tf.flags.FLAGS
class EpsilonPrintingTrainingHook(tf.estimator.SessionRunHook):
  """Training hook to print current value of epsilon after an epoch."""

  def __init__(self, ledger):
    """Initializes the EpsilonPrintingTrainingHook.

    Args:
      ledger: The privacy ledger; its unformatted sample/query tensors are
        captured here and evaluated when the session ends.
    """
    self._samples, self._queries = ledger.get_unformatted_ledger()

  def end(self, session):
    # Bug fix: privacy_ledger, compute_rdp_from_ledger and get_privacy_spent
    # were referenced without being imported anywhere in this script, raising
    # NameError when the hook fired. Import them locally from the `privacy`
    # package this script already depends on.
    from privacy.analysis import privacy_ledger
    from privacy.analysis.rdp_accountant import compute_rdp_from_ledger
    from privacy.analysis.rdp_accountant import get_privacy_spent

    # RDP orders over which the accountant minimizes the (epsilon, delta)
    # bound; standard grid used by the tensorflow/privacy tutorials.
    orders = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
    samples = session.run(self._samples)
    queries = session.run(self._queries)
    formatted_ledger = privacy_ledger.format_ledger(samples, queries)
    rdp = compute_rdp_from_ledger(formatted_ledger, orders)
    eps = get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
    print('For delta=1e-5, the current epsilon is: %.2f' % eps)
def cnn_model_fn(features, labels, mode):
  """Model function for a CNN.

  Builds a small two-conv-layer MNIST classifier and returns the
  EstimatorSpec appropriate for TRAIN, EVAL or PREDICT mode.
  """
  # Define CNN architecture using tf.keras.layers.
  # Assumes features['x'] holds flattened-or-square 28x28 grayscale images —
  # TODO confirm against the input_fn feeding this model.
  input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
  y = tf.keras.layers.Conv2D(16, 8,
                             strides=2,
                             padding='same',
                             activation='relu').apply(input_layer)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Conv2D(32, 4,
                             strides=2,
                             padding='valid',
                             activation='relu').apply(y)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Flatten().apply(y)
  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
  logits = tf.keras.layers.Dense(10).apply(y)

  # Calculate loss as a vector and as its average across minibatch.
  # The per-example vector form is what the DP optimizer needs for
  # per-microbatch clipping.
  vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                               logits=logits)
  scalar_loss = tf.reduce_mean(vector_loss)

  # Configure the training op (for TRAIN mode).
  if mode == tf.estimator.ModeKeys.TRAIN:
    print("Train data mode")
    #optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
    # NOTE(review): `ledger` is never defined in this script, so entering
    # TRAIN mode raises NameError here. Also, `training_hooks` is an
    # EstimatorSpec argument, not an optimizer constructor argument, and
    # `population_size` does not appear in DPGradientDescentGaussianOptimizer's
    # documented signature — verify against dp_optimizer before relying on
    # this scratch code.
    optimizer=dp_optimizer.DPGradientDescentGaussianOptimizer(l2_norm_clip=FLAGS.l2_norm_clip,
                                   noise_multiplier=FLAGS.noise_multiplier,
                                   num_microbatches=FLAGS.num_microbatches,
                                   learning_rate=FLAGS.learning_rate,
                                   population_size=60000,
                                   training_hooks = [EpsilonPrintingTrainingHook(ledger)])
    opt_loss = scalar_loss
    # NOTE(review): DP optimizers typically minimize the per-example
    # vector_loss; passing the scalar mean here may bypass per-example
    # clipping — confirm against the dp_optimizer tutorial.
    global_step = tf.train.get_global_step()
    train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
    return( tf.estimator.EstimatorSpec(mode=mode,
                                      loss=scalar_loss,
                                      train_op=train_op))
  # Add evaluation metrics (for EVAL mode).
  elif mode == tf.estimator.ModeKeys.EVAL:
    print("Evaluate data mode")
    eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels,
                predictions=tf.argmax(input=logits, axis=1))
    }
    return (tf.estimator.EstimatorSpec(mode=mode,
                                    loss=scalar_loss,
                                    eval_metric_ops=eval_metric_ops))

  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    print("predicting data mode")
    predictions={
        'class_ids': predicted_classes[:, tf.newaxis],
        'probabilities': tf.nn.softmax(logits),
        'logits': logits,
    }
    # NOTE(review): the return value of .values() is discarded — this
    # statement has no effect.
    predictions.values()
    return(tf.estimator.EstimatorSpec(mode=mode,
                                       predictions=predictions))
def load_mnist():
  """Loads MNIST and preprocesses to combine training and validation data."""
  (train_images, train_targets), (test_images, test_targets) = (
      tf.keras.datasets.mnist.load_data())

  # Scale pixel intensities from [0, 255] to [0, 1] as float32; labels
  # become int32 class ids.
  train_images = np.array(train_images, dtype=np.float32) / 255
  test_images = np.array(test_images, dtype=np.float32) / 255
  train_targets = np.array(train_targets, dtype=np.int32)
  test_targets = np.array(test_targets, dtype=np.int32)

  # Sanity-check the normalization and label shapes before returning.
  assert train_images.min() == 0.
  assert train_images.max() == 1.
  assert test_images.min() == 0.
  assert test_images.max() == 1.
  assert train_targets.ndim == 1
  assert test_targets.ndim == 1
  return train_images, train_targets, test_images, test_targets
#
#
#def main(unused_argv):
# tf.logging.set_verbosity(tf.logging.INFO)
#
# # Load training and test data.
# train_data, train_labels, test_data, test_labels = load_mnist()
#
# # Instantiate the tf.Estimator.
# mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)
#
# # Create tf.Estimator input functions for the training and test data.
# train_input_fn = tf.estimator.inputs.numpy_input_fn(
# x={'x': train_data},
# y=train_labels,
# batch_size=FLAGS.batch_size,
# num_epochs=FLAGS.epochs,
# shuffle=True)
# eval_input_fn = tf.estimator.inputs.numpy_input_fn(
# x={'x': test_data},
# y=test_labels,
# num_epochs=1,
# shuffle=False)
#
# # Training loop.
# steps_per_epoch = 60000 // FLAGS.batch_size
# for epoch in range(1, FLAGS.epochs + 1):
# # Train the model for one epoch.
# mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)
#
# # Evaluate the model and print results
# eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
# test_accuracy = eval_results['accuracy']
# print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))
# Top-level script body (the commented-out main() above was inlined here so
# the scratch file runs directly, e.g. inside an IDE).
tf.logging.set_verbosity(tf.logging.INFO)

# Load training and test data.
train_data, train_labels, test_data, test_labels = load_mnist()

# Instantiate the tf.Estimator.
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)

# Create tf.Estimator input functions for the training and test data.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': train_data},
    y=train_labels,
    batch_size=FLAGS.batch_size,
    num_epochs=FLAGS.epochs,
    shuffle=True)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': test_data},
    y=test_labels,
    num_epochs=1,
    shuffle=False)
# Prediction input over a single test image (test_data[1]).
# NOTE(review): pred_input_fn is built but never consumed — the predict call
# below is commented out.
pred_input_fn = tf.estimator.inputs.numpy_input_fn( {'x':test_data[1]},shuffle=False,num_epochs=1)

# Training loop.
# 60000 = size of the MNIST training set; one pass per epoch.
steps_per_epoch = 60000 // FLAGS.batch_size
for epoch in range(1, FLAGS.epochs + 1):
  # Train the model for one epoch.
  mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)

  # Evaluate the model and print results
  eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
  test_accuracy = eval_results['accuracy']
  print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))

# Commented-out prediction experiments left by the author.
#  predict_results = mnist_classifier.predict( input_fn=pred_input_fn)
#
#  for result in predict_results:
#      print ('result: {}'.format(result))

#if __name__ == '__main__':
  #tf.app.run()

#labels = set(train_labels)
#predVal = mnist_classifier.predict(input_fn=pred_input_fn, predict_keys = labels)

#predict_results = mnist_classifier.predict( input_fn=lambda:eval_input_fn(pred_input_fn, None, 1))
#tf.logging.info("Predictions:") for pred_dict, expec in zip(predict_results, expected): template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
#class_id = pred_dict['class_ids'][0]
#probability = pred_dict['probabilities'][class_id]
#print(template.format(CLASSES[class_id],100 * probability, expec)) | # Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scratchpad for training a CNN on MNIST with DPSGD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(0, '/Users/trog/Documents/Spyder/privacy')
import numpy as np
import tensorflow as tf
from privacy.optimizers import dp_optimizer
# Default DP-SGD hyperparameters; also used as defaults for the flags below.
learning_rate=0.25
noise_multiplier=1.3
l2_norm_clip=1.5
batch_size=256
epochs=2
num_microbatches=256
# Re-running this scratch script in the same interpreter re-defines the
# flags, which raises a DuplicateFlagError that the except swallows.
# NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit —
# consider narrowing to Exception (or absl's DuplicateFlagError).
try:
  tf.flags.DEFINE_float('learning_rate', learning_rate, 'Learning rate for training')
  tf.flags.DEFINE_integer('batch_size', batch_size, 'Batch size')
  tf.flags.DEFINE_integer('epochs', epochs, 'Number of epochs')
  tf.flags.DEFINE_float("noise_multiplier",noise_multiplier, "Noise Mult")
  tf.flags.DEFINE_integer("num_microbatches",num_microbatches, "num microbatches")
  tf.flags.DEFINE_float("l2_norm_clip" , l2_norm_clip, "L2 clipping ")
except :
  print("Duplicate Flags Skip...")
FLAGS = tf.flags.FLAGS
class EpsilonPrintingTrainingHook(tf.estimator.SessionRunHook):
  """Training hook to print current value of epsilon after an epoch."""

  def __init__(self, ledger):
    """Initializes the EpsilonPrintingTrainingHook.

    Args:
      ledger: The privacy ledger; its unformatted sample/query tensors are
        captured here and evaluated when the session ends.
    """
    self._samples, self._queries = ledger.get_unformatted_ledger()

  def end(self, session):
    # Bug fix: privacy_ledger, compute_rdp_from_ledger and get_privacy_spent
    # were referenced without being imported anywhere in this script, raising
    # NameError when the hook fired. Import them locally from the `privacy`
    # package this script already depends on.
    from privacy.analysis import privacy_ledger
    from privacy.analysis.rdp_accountant import compute_rdp_from_ledger
    from privacy.analysis.rdp_accountant import get_privacy_spent

    # RDP orders over which the accountant minimizes the (epsilon, delta)
    # bound; standard grid used by the tensorflow/privacy tutorials.
    orders = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
    samples = session.run(self._samples)
    queries = session.run(self._queries)
    formatted_ledger = privacy_ledger.format_ledger(samples, queries)
    rdp = compute_rdp_from_ledger(formatted_ledger, orders)
    eps = get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
    print('For delta=1e-5, the current epsilon is: %.2f' % eps)
def cnn_model_fn(features, labels, mode):
  """Model function for a CNN.

  Estimator model_fn: builds a small 2-conv-layer network over 28x28x1 MNIST
  images and returns an EstimatorSpec for TRAIN, EVAL or PREDICT mode.
  """
  # Define CNN architecture using tf.keras.layers.
  input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
  y = tf.keras.layers.Conv2D(16, 8,
                             strides=2,
                             padding='same',
                             activation='relu').apply(input_layer)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Conv2D(32, 4,
                             strides=2,
                             padding='valid',
                             activation='relu').apply(y)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Flatten().apply(y)
  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
  logits = tf.keras.layers.Dense(10).apply(y)  # Raw class scores, one per digit
  # Calculate loss as a vector and as its average across minibatch.
  vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                               logits=logits)
  scalar_loss = tf.reduce_mean(vector_loss)
  # Configure the training op (for TRAIN mode).
  if mode == tf.estimator.ModeKeys.TRAIN:
    print("Train data mode")
    #optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
    # NOTE(review): `ledger` is not defined in this file, and `training_hooks`
    # does not look like a constructor argument of the DP optimizer — this
    # call will raise as-is.  Verify against the tensorflow_privacy
    # dp_optimizer API before running.
    optimizer=dp_optimizer.DPGradientDescentGaussianOptimizer(l2_norm_clip=FLAGS.l2_norm_clip,
                                                              noise_multiplier=FLAGS.noise_multiplier,
                                                              num_microbatches=FLAGS.num_microbatches,
                                                              learning_rate=FLAGS.learning_rate,
                                                              population_size=60000,
                                                              training_hooks = [EpsilonPrintingTrainingHook(ledger)])
    # NOTE(review): DP-SGD optimizers normally minimize the per-example
    # vector_loss (so each microbatch can be clipped separately); passing the
    # already-reduced scalar_loss here is likely a bug — confirm.
    opt_loss = scalar_loss
    global_step = tf.train.get_global_step()
    train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
    return( tf.estimator.EstimatorSpec(mode=mode,
                                       loss=scalar_loss,
                                       train_op=train_op))
  # Add evaluation metrics (for EVAL mode).
  elif mode == tf.estimator.ModeKeys.EVAL:
    print("Evaluate data mode")
    eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels,
                predictions=tf.argmax(input=logits, axis=1))
    }
    return (tf.estimator.EstimatorSpec(mode=mode,
                                       loss=scalar_loss,
                                       eval_metric_ops=eval_metric_ops))
  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    print("predicting data mode")
    predictions={
        'class_ids': predicted_classes[:, tf.newaxis],
        'probabilities': tf.nn.softmax(logits),
        'logits': logits,
    }
    predictions.values()  # NOTE(review): no-op — result is discarded
    return(tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=predictions))
def load_mnist():
  """Download MNIST and return normalized train/test arrays.

  Returns:
    Tuple (train_data, train_labels, test_data, test_labels): image arrays
    are float32 scaled into [0, 1]; label arrays are 1-D int32 vectors.
  """
  (train_data, train_labels), (test_data, test_labels) = tf.keras.datasets.mnist.load_data()
  # Scale pixel intensities from [0, 255] down to [0, 1].
  train_data = np.array(train_data, dtype=np.float32) / 255
  test_data = np.array(test_data, dtype=np.float32) / 255
  train_labels = np.array(train_labels, dtype=np.int32)
  test_labels = np.array(test_labels, dtype=np.int32)
  # Sanity-check the preprocessing before handing the data to the estimator.
  for images in (train_data, test_data):
    assert images.min() == 0.
    assert images.max() == 1.
  for labels in (train_labels, test_labels):
    assert labels.ndim == 1
  return train_data, train_labels, test_data, test_labels
#
#
#def main(unused_argv):
# tf.logging.set_verbosity(tf.logging.INFO)
#
# # Load training and test data.
# train_data, train_labels, test_data, test_labels = load_mnist()
#
# # Instantiate the tf.Estimator.
# mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)
#
# # Create tf.Estimator input functions for the training and test data.
# train_input_fn = tf.estimator.inputs.numpy_input_fn(
# x={'x': train_data},
# y=train_labels,
# batch_size=FLAGS.batch_size,
# num_epochs=FLAGS.epochs,
# shuffle=True)
# eval_input_fn = tf.estimator.inputs.numpy_input_fn(
# x={'x': test_data},
# y=test_labels,
# num_epochs=1,
# shuffle=False)
#
# # Training loop.
# steps_per_epoch = 60000 // FLAGS.batch_size
# for epoch in range(1, FLAGS.epochs + 1):
# # Train the model for one epoch.
# mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)
#
# # Evaluate the model and print results
# eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
# test_accuracy = eval_results['accuracy']
# print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))
# Top-level training driver (runs at import; mirrors the commented-out main()).
tf.logging.set_verbosity(tf.logging.INFO)
# Load training and test data.
train_data, train_labels, test_data, test_labels = load_mnist()
# Instantiate the tf.Estimator.
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)
# Create tf.Estimator input functions for the training and test data.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': train_data},
    y=train_labels,
    batch_size=FLAGS.batch_size,
    num_epochs=FLAGS.epochs,
    shuffle=True)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': test_data},
    y=test_labels,
    num_epochs=1,
    shuffle=False)
# Prediction input: a single test image (index 1) — presumably for the
# commented-out predict loop below; currently unused.
pred_input_fn = tf.estimator.inputs.numpy_input_fn( {'x':test_data[1]},shuffle=False,num_epochs=1)
# Training loop.  60000 is the MNIST training-set size, so this is one pass
# over the data per epoch.
steps_per_epoch = 60000 // FLAGS.batch_size
for epoch in range(1, FLAGS.epochs + 1):
  # Train the model for one epoch.
  mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)
  # Evaluate the model and print results
  eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
  test_accuracy = eval_results['accuracy']
  print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))
# predict_results = mnist_classifier.predict( input_fn=pred_input_fn)
#
# for result in predict_results:
# print ('result: {}'.format(result))
#if __name__ == '__main__':
#tf.app.run()
#labels = set(train_labels)
#predVal = mnist_classifier.predict(input_fn=pred_input_fn, predict_keys = labels)
#predict_results = mnist_classifier.predict( input_fn=lambda:eval_input_fn(pred_input_fn, None, 1))
#tf.logging.info("Predictions:") for pred_dict, expec in zip(predict_results, expected): template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
#class_id = pred_dict['class_ids'][0]
#probability = pred_dict['probabilities'][class_id]
#print(template.format(CLASSES[class_id],100 * probability, expec)) | en | 0.573166 | # Copyright 2019, The TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Scratchpad for training a CNN on MNIST with DPSGD. Training hook to print current value of epsilon after an epoch. Initalizes the EpsilonPrintingTrainingHook. Args: ledger: The privacy ledger. Model function for a CNN. # Define CNN architecture using tf.keras.layers. # Calculate loss as a vector and as its average across minibatch. # Configure the training op (for TRAIN mode). #optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate) # Add evaluation metrics (for EVAL mode). Loads MNIST and preprocesses to combine training and validation data. # # #def main(unused_argv): # tf.logging.set_verbosity(tf.logging.INFO) # # # Load training and test data. # train_data, train_labels, test_data, test_labels = load_mnist() # # # Instantiate the tf.Estimator. # mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn) # # # Create tf.Estimator input functions for the training and test data. # train_input_fn = tf.estimator.inputs.numpy_input_fn( # x={'x': train_data}, # y=train_labels, # batch_size=FLAGS.batch_size, # num_epochs=FLAGS.epochs, # shuffle=True) # eval_input_fn = tf.estimator.inputs.numpy_input_fn( # x={'x': test_data}, # y=test_labels, # num_epochs=1, # shuffle=False) # # # Training loop. 
# steps_per_epoch = 60000 // FLAGS.batch_size # for epoch in range(1, FLAGS.epochs + 1): # # Train the model for one epoch. # mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch) # # # Evaluate the model and print results # eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn) # test_accuracy = eval_results['accuracy'] # print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy)) # Load training and test data. # Instantiate the tf.Estimator. # Create tf.Estimator input functions for the training and test data. # Training loop. # Train the model for one epoch. # Evaluate the model and print results # predict_results = mnist_classifier.predict( input_fn=pred_input_fn) # # for result in predict_results: # print ('result: {}'.format(result)) #if __name__ == '__main__': #tf.app.run() #labels = set(train_labels) #predVal = mnist_classifier.predict(input_fn=pred_input_fn, predict_keys = labels) #predict_results = mnist_classifier.predict( input_fn=lambda:eval_input_fn(pred_input_fn, None, 1)) #tf.logging.info("Predictions:") for pred_dict, expec in zip(predict_results, expected): template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"') #class_id = pred_dict['class_ids'][0] #probability = pred_dict['probabilities'][class_id] #print(template.format(CLASSES[class_id],100 * probability, expec)) | 2.334743 | 2 |
cs-pound.py | Tesshin/CS-Pound-async | 0 | 6620916 | # -------------------- IMPORTS --------------------
import aiohttp
import asyncio
from datetime import datetime, timedelta
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import hashlib
import io
import json
import logging
import lxml.html
import math
import os
from PIL import Image, ImageFont, ImageDraw
import platform
import psutil
import re
import subprocess
import time as pytime
import urllib.request
# -------------------- VARIABLES --------------------
start_time = datetime.now()  # The time the script started running (used by the statistics command)
prefix = ','  # Prefix to call CS Pound Discord Bot
version = '1.8'  # CS Pound Discord Bot version (also sent in the HTTP User-Agent)
tokens = [token.replace('\n', '') for token in list(open('tokens.txt'))]  # Credentials, one per line in tokens.txt
cooldown = False  # Cooldown of Auto Remind
help_hash = ''  # MD5 of help.json last time it was read (cache key for process_help)
help_list = {}  # Parsed contents of help.json, refreshed when help_hash changes
autoremind_hash = ''  # Current hash of autoremind.txt
autoremind_times = []  # Unique Auto Remind times
# -------------------- HELP TEXT --------------------
# Pre-built bodies for the embed fields of the ,help command.
warning_help = '''\
CS Pound website (Where you also get the invite link)
http://tailstar.us
-'''  # Title help
# Full command list including under-development commands (currently unused).
chickensmoothie_help2 = '''\
`,archive <query>` - Search the ChickenSmoothie archives (Under Development)
`,fair <link>` - Determine whether a trade is fair (Under Development)
`,image <link>` - Displays pet image only
`,oekaki <link>` - Displays Oekaki drawing
`,pet <link>` - Displays pet information
`,time` - Tells you how long until the pound opens
`,trade <link>` - Displays trade information (Under Development)
_'''
chickensmoothie_help = '''\
`,image <link>` - Displays pet image only
`,oekaki <link>` - Displays Oekaki drawing
`,pet <link>` - Displays pet information
`,time` - Tells you how long until the pound opens
_'''  # Chicken Smoothie related commands help
general_help = '''\
`,autoremind <on/off> <time>` - Turns on or off global auto reminding
`,remindme <time>` - Pings you after specified amount of time
_'''  # General commands help
informational_help = '''\
`,help` - Displays this message
`,support` - PM's you the link to the CS Pound Development Server
`,statistics` - Displays bot statistics
'''  # Informational commands help
# -------------------- FUNCTIONS --------------------
def time_extractor(time):  # Convert given time into seconds
    """Parse a duration string such as '1h30m20s' into seconds.

    Args:
        time: Duration text using 'h', 'm' and 's' unit markers
            (case-insensitive), e.g. '2h', '45m', '1h5s'.  Every unit is
            optional; internal whitespace is tolerated.

    Returns:
        A tuple (total_seconds, hours, minutes, seconds).  All four values
        are 0 when the string contains no parsable duration.
    """
    # A single fullmatch replaces the previous chain of str.split() calls,
    # which raised ValueError on malformed input such as '1hrs'.  Well-formed
    # inputs produce exactly the same results as before; malformed ones now
    # report a zero duration instead of crashing the command.
    match = re.fullmatch(r'\s*(?:(\d+)\s*h)?\s*(?:(\d+)\s*m)?\s*(?:(\d+)\s*s)?\s*', time.lower())
    if match is None:  # Input did not look like a duration at all
        return 0, 0, 0, 0
    htotal = int(match.group(1) or 0)  # Hours component (0 when absent)
    mtotal = int(match.group(2) or 0)  # Minutes component (0 when absent)
    stotal = int(match.group(3) or 0)  # Seconds component (0 when absent)
    finaltotal = (htotal * 60 * 60) + (mtotal * 60) + stotal  # Total time in seconds
    return finaltotal, htotal, mtotal, stotal  # Return a tuple
def resolver(day, hour, minute, second):  # Pretty format time layout given days, hours, minutes and seconds
    """Format day/hour/minute/second counts into English prose.

    E.g. resolver(1, 2, 3, 4) -> '1 day, 2 hours, 3 minutes and 4 seconds'.
    Zero-valued units are omitted; ',' and 'and' joiners are chosen based on
    which units are present.  Returns the formatted string.
    """
    day_section = ''
    hour_section = ''
    minute_section = ''
    second_section = ''
    def pluralise(string, value, and_placement=''):  # Correctly prefix or suffix ',' or 'and' placements
        if value == 0:  # If given time has no value
            return ''
        else:  # If given time has value
            return (' and ' if and_placement == 'pre' else '') + str(value) + ' ' + string + ('s' if value > 1 else '') + (' and ' if and_placement == 'suf' else (', ' if and_placement == 'com' else ''))
            # If 'and_placement' is set to prefix add 'and' otherwise leave blank
            # The value of the time
            # The type of time (day, hour, minute, second)
            # If value is larger than 1 then pluralise the time
            # If 'and_placement' is set to suffix add 'and' otherwise add a ',' instead if 'and_placement' is set to comma otherwise leave blank
    if day != 0 and ((hour == 0 and minute == 0) or (hour == 0 and second == 0) or (minute == 0 and second == 0)):
        # If there are day(s) but at most one of hour/minute/second is non-zero:
        # the day joins directly onto that single remaining unit with 'and'.
        day_section = pluralise('day', day, 'suf')  # Pluralise the day section with a suffixed 'and' placement
    elif day != 0 and ((hour != 0 and minute != 0 and second != 0) or (hour != 0 and minute == 0) or (hour != 0 and second == 0) or (minute != 0 and second == 0) or (hour == 0 and minute != 0) or (hour == 0 and second != 0) or (minute == 0 and second != 0)):
        # If there are day(s) and two or more smaller units follow:
        # the day is separated from them with a comma.
        day_section = pluralise('day', day, 'com')  # Pluralise the day section with a suffixed ',' placement
    if minute == 0:  # If there are no minutes
        hour_section = pluralise('hour', hour)  # Pluralise the hour section
    elif minute != 0 and second == 0:  # If there are minute(s) but no seconds
        hour_section = pluralise('hour', hour, 'suf')  # Pluralise the hour section with a suffixed 'and' placement
    else:  # If there are minute(s) and second(s)
        hour_section = pluralise('hour', hour, 'com')  # Pluralise the hour section with a suffixed ',' placement
    minute_section = pluralise('minute', minute)  # Pluralise the minute section
    if hour != 0 or minute != 0:  # If there are hour(s) or minute(s)
        second_section = pluralise('second', second, 'pre')  # Pluralise the second section with a prefixed 'and' placement
    else:  # If there are no hours or minutes
        second_section = pluralise('second', second)  # Pluralise the second section
    return day_section + hour_section + minute_section + second_section  # Return the formatted text
async def get_web_data(link, command_source):  # Get web data from link
    """Fetch and parse a ChickenSmoothie page for the pet/oekaki/pound commands.

    Args:
        link: The URL supplied by the user ('' allowed for 'pound').
        command_source: One of 'pet', 'oekaki' or 'pound'; selects the base
            PHP endpoint and the error-message wording.

    Returns:
        (True, lxml DOM) on success, or (False, discord.Embed) describing the
        input error on failure.
    """
    success = False  # Boolean for whether link is valid
    headers = {  # HTTP request headers
        'User-Agent': 'CS Pound Discord Bot Agent ' + version,  # Connecting User-Agent
        'From': '<EMAIL>'  # Contact email
    }
    if link == '' and command_source != 'pound':  # If no link provided
        description = 'You didn\'t provide a ' + command_source + ' link!'
        return success, discord.Embed(title=command_source.capitalize(), description=description, colour=0xff5252)  # Create embed
    else:  # If arguments provided
        try:  # Checking link format
            if command_source != 'pound':  # If command source does not come from ,time
                parameters = link.split('?')[1].split('&')  # Get the PHP $GET values
                success = True  # Link is valid
            else:  # If command source comes from ,time
                success = True  # Pound command needs no query string
        except IndexError:  # If cannot get $GET value
            return success, discord.Embed(title=command_source.capitalize(), description='That is not a valid ' + command_source + ' link!', colour=0xff5252)  # Create embed
    if success:  # If link exists and is valid
        data = {}  # PHP $POST parameters
        if command_source == 'pet':  # If function is being called from the Pet command
            base_link = 'http://www.chickensmoothie.com/viewpet.php'  # Base PHP link for Pet command
            parameters = parameters[0].split('=')  # Split the $POST variable
            data[parameters[0]] = parameters[1]  # Add dictionary item with $POST variable and value
        elif command_source == 'oekaki':  # If function is being called from the Oekaki command
            base_link = 'http://www.chickensmoothie.com/Forum/viewtopic.php'  # Base PHP link for Oekaki command
            for param in range(len(parameters)):  # For each parameter
                temp = parameters[param].split('=')  # Split the $POST variables
                data[temp[0]] = temp[1]  # Add dictionary item with $POST variable and value
        elif command_source == 'pound':  # If function is being called from the Pound command
            base_link = 'http://www.chickensmoothie.com/pound.php'  # Base PHP link for Time command
        async with aiohttp.ClientSession() as session:  # Create an AIOHTTP session
            async with session.post(base_link, data=data, headers=headers) as response:  # POST the variables to the base php link
                connection = await response.text()  # Request HTML page data
        dom = lxml.html.fromstring(connection)  # Extract HTML from site
        return success, dom  # Return whether connection was successful and DOM data
    else:  # If link is not valid
        # NOTE(review): this branch appears unreachable (every path above either
        # sets success=True or returns), and it returns a bare bool while all
        # other paths return a 2-tuple — callers index the result, so this
        # would break if ever reached.
        return success  # Return whether connection was successful
def process_help(command):
    """Build the Discord-markdown help text for one command from help.json.

    The parsed JSON is cached in the module-level ``help_list`` and only
    reloaded when the MD5 of help.json changes.
    """
    global help_hash, help_list

    def mono(text):  # Discord inline-code formatting
        return f'`{text}`'

    def ital(text):  # Discord italics formatting
        return f'*{text}*'

    current_hash = hashlib.md5(open('help.json').read().encode()).hexdigest()
    if help_hash != current_hash:  # Reload the JSON only when the file changed
        help_hash = current_hash
        with open('help.json') as fp:
            help_list = json.load(fp)
    info = help_list[command]
    # `usage` - description, then optional Examples / Aliases lines.
    lines = [mono(info['usage']) + ' - ' + info['description']]
    if info['examples']:
        lines.append(ital('Examples:') + ' ' + ', '.join(mono(value) for value in info['examples'].values()))
    if info['aliases']:
        lines.append(ital('Aliases:') + ' ' + ', '.join(mono(value) for value in info['aliases'].values()))
    return '\n'.join(lines)
# -------------------- DISCORD BOT SETTINGS --------------------
client = Bot(description='CS Pound by Peko#7955', command_prefix=prefix, pm_help=None)  # Set the bot description and prefix
client.remove_command('help')  # Remove default help command so the custom ,help below can be registered
logger = logging.getLogger('discord')  # Logger for the discord library itself
logger.setLevel(logging.DEBUG)  # Set logging level to DEBUG
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')  # mode='w' truncates the log on each start
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))  # Set logging format
logger.addHandler(handler)  # Start logger
@client.event
async def on_ready():  # When Client is loaded
    """Log connection details and set the bot's presence once logged in."""
    print('Logged in as ' + client.user.name + ' (ID: ' + client.user.id + ')')
    print('--------')
    print('Current Discord.py Version: {}'.format(discord.__version__))
    print('--------')
    print('Use this link to invite {}:'.format(client.user.name))
    print('https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=268569600'.format(client.user.id))
    print('--------')
    print('You are running ' + client.user.name + ' v' + version)
    print('Created by Peko#7955')
    await client.change_presence(game=discord.Game(name=',help | By: Peko#7955'), status=discord.Status.online)  # Change Playing status to ,help | By: Peko#7955
# -------------------- HELP COMMAND --------------------
@client.command(pass_context=True)
async def help(ctx, args=''):  # Help Command
    """PM the caller either one command's help topic or the full command list.

    Args:
        ctx: Invocation context (used to detect PM vs channel).
        args: Optional command name to show a single help topic for.
    """
    # Per-command help topics: argument -> embed field title.  Replaces the
    # former 11-way elif chain and fixes the 'fair' entry, which previously
    # rendered as '**Fair' (unbalanced markdown).
    topics = {
        'archive': 'Archive',
        'fair': 'Fair',
        'oekaki': 'Oekaki',
        'pet': 'Pet',
        'time': 'Time',
        'trade': 'Trade',
        'autoremind': 'Auto Remind',
        'remindme': 'Remind Me',
        'help': 'Help',
        'support': 'Support',
        'statistics': 'Statistics',
    }
    embed = discord.Embed(colour=0x4ba139)  # Create empty embed
    if args in topics:  # Single-topic help for a known command
        embed.add_field(name='**' + topics[args] + '**', value=process_help(args))
    else:  # No argument, or an unknown topic: show the full command list
        embed.add_field(name=":pencil: __**To know about command usage or examples, use: ,help <command>**__", value=warning_help)  # add Warning help information to embed
        embed.add_field(name=':dog: __**ChickenSmoothie Commands**__', value=chickensmoothie_help)  # Embed Chicken Smoothie related commands
        embed.add_field(name=':file_folder: __**General Commands**__', value=general_help)  # Embed General commands
        embed.add_field(name=':wrench: __**Informational Commands**__', value=informational_help)  # Embed informational commands
    try:
        await client.whisper(embed=embed)  # PM the embed to the user
        if ctx.message.channel.is_private:  # If the user is calling command from PM
            embed = discord.Embed()  # Replace with new empty embed
        else:  # If the user is calling command from a channel
            embed = discord.Embed(title='Help', description='A PM has been sent to you!', colour=0x4ba139)  # Create embed
        await client.say(embed=embed)  # Send embed
    except discord.errors.Forbidden:  # If cannot send PM to user
        embed = discord.Embed(title='Help', description='A PM couldn\'t be sent to you, it may be that you have \'Allow direct messages from server members\' disabled in your privacy settings.', colour=0xff5252)  # Create embed
        await client.say(embed=embed)  # Send embed
# -------------------- AUTOREMIND COMMAND --------------------
@client.command(pass_context=True, no_pm=True)  # Disallow using this command in PM's
async def autoremind(ctx, args=''):  # Auto Remind command
    """Register (or remove, with 'off') a pound-opening reminder for the caller.

    Reminders are persisted as 'SERVER_ID CHANNEL_ID USER_ID MINUTES' lines in
    autoremind.txt; membership is checked/removed via shelled-out grep/sed.
    NOTE(review): shell=True with interpolated IDs is only safe because
    discord IDs are numeric — consider pure-Python file handling instead.
    """
    grep_statement = 'grep -n \'' + ctx.message.author.id + '\' autoremind.txt | cut -f1 -d:'  # Get line number of ID
    id_exists = subprocess.Popen(grep_statement, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')[:-1]  # Run grep statement; '' means no entry
    server_roles = ctx.message.server.roles  # List of roles in server
    for role in server_roles:  # For each role in the server
        if role.name == "CS Pound":  # If 'CS Pound' role exists
            permission = role.permissions.manage_roles  # Check whether role has 'Manage Roles' permission and set boolean value
            break  # Break out of for loop
    else:  # If role doesn't exist (loop completed without break)
        permission = False
    if permission:  # If bot has permission to 'Manage Roles'
        server_roles = ctx.message.server.roles  # List of roles in server
        for role in server_roles:  # Checks if role already exists in server
            if role.name == "Auto Remind":  # If role exists
                break  # Break out of for loop
        else:  # If role doesn't exist
            await client.create_role(ctx.message.server, name='Auto Remind')  # Create 'Auto Remind' role in server
    if args == 'off':  # If user wants to turn off Auto Remind
        if id_exists == '':  # If user doesn't exist in database
            embed = discord.Embed(title='Auto Remind', description='You don\'t have Auto Remind setup!', colour=0xff5252)  # Create embed
        else:  # If user exists
            sed_statement = 'sed -i.bak ' + id_exists + 'd autoremind.txt'  # sed statement: delete the user's line (leaves a .bak backup)
            subprocess.Popen(sed_statement, shell=True)  # Run sed statement
            if permission:  # If bot has permission to 'Manage Roles'
                await client.remove_roles(ctx.message.author, discord.utils.get(server_roles, name='Auto Remind'))  # Remove role from user
                embed = discord.Embed(title='Auto Remind', description='You have been removed from the Auto Remind role.', colour=0x4ba139)  # Create embed
            else:  # If bot doesn't have permission to 'Manage Roles'
                embed = discord.Embed(title='Auto Remind', description='You have been removed from the Auto Remind.', colour=0x4ba139)  # Create embed
    else:  # If user is setting an Auto Remind
        valid = False
        if args == '':  # If no arguments provided
            embed = discord.Embed(title='Auto Remind', description='You didn\'t input a time!', colour=0xff5252)  # Create embed
        elif args.isdigit():  # If the input is a digit
            valid = True
        else:  # If the input isn't a digit
            args = args[:-1]  # Remove the minute marker (trailing 'm')
            if args.isdigit():  # If the input is a digit now
                valid = True
            else:  # If input is still not digit
                embed = discord.Embed(title='Auto Remind', description='That is not a valid time!', colour=0xff5252)  # Create embed
        if valid:  # If inputted time was valid
            if int(args) > 60:  # If time is bigger than 60 minutes
                embed = discord.Embed(title='Auto Remind', description='That time is too far!', colour=0xff5252)  # Create embed
            else:  # If time is less than 60 minutes
                if id_exists != '':  # If user has already set an Auto Remind
                    embed = discord.Embed(title='Auto Remind', description='You already have Auto Remind setup!', colour=0xff5252)  # Create embed
                else:  # If user doesn't have an Auto Remind setup
                    text = ctx.message.server.id + ' ' + ctx.message.channel.id + ' ' + ctx.message.author.id + ' ' + args + '\n'  # Write in the format 'SERVER_ID CHANNEL_ID USER_ID REMIND_TIME'
                    with open('autoremind.txt', 'a+') as file:  # Open autoremind.txt
                        file.write(text)  # Write the text
                    if permission:  # If bot has 'Manage Roles' permission
                        await client.add_roles(ctx.message.author, discord.utils.get(server_roles, name='Auto Remind'))  # Add user to Auto Remind role
                    message = 'Will ping you ' + args + ' minutes before the pound opens!'
                    embed = discord.Embed(title='Auto Remind', description=message, colour=0x4ba139)  # Create embed
    await client.say(embed=embed)  # Send embed
# -------------------- IMAGE COMMAND --------------------
@client.command(no_pm=True, aliases=['img'], pass_context=True)  # Disallow using this command in PM's
async def image(ctx, link: str = ''):  # Image command
    """Render a pet's image plus name/adoption date/rarity as one PNG upload.

    Scrapes the viewpet page via get_web_data, composites the pet image,
    text lines and the local rarity bar with PIL, and uploads the result.
    """
    data = await get_web_data(link, 'pet')  # Get pet data
    if data[0]:  # If data is valid
        information = {}
        owner_name = data[1].xpath('//td[@class="r"]/a/text()')[0]  # User of pet (NOTE(review): unused)
        titles = data[1].xpath('//td[@class="l"]/text()')  # Titles of pet information
        values = data[1].xpath('//td[@class="r"]')  # Values of pet information
        pet_image = data[1].xpath('//img[@id="petimg"]/@src')[0]  # Pet image link
        if 'trans' in pet_image:  # If pet image is transparent (i.e. Pet has items)
            pet_image = 'http://www.chickensmoothie.com' + pet_image  # Pet image link
            transparent = True
        else:
            transparent = False
        if titles[0] == 'PPS':  # If pet is PPS
            pps = True
        else:  # If pet is not PPS
            pps = False
        if len(titles) + len(values) < 16:  # If the amount of titles and values don't add up
            no_name = True
        else:  # If they add up
            no_name = False
        # case1/case2 select which row labels map to Name/Adopted below;
        # case3/case4 locate the Pet ID row for the output filename.
        if no_name:  # If pet has no name
            case1 = 'Pet\'s name:'
            case2 = 'Adopted:'
            case3 = 1
            case4 = 1
            if pps:  # If pet has no name and is PPS
                case1 = 'Pet\'s name:'
                case2 = 'Pet ID:'
                case3 = 2
                case4 = 1
        elif pps:  # If pet has a name and is PPS
            case1 = 'Pet ID:'
            case2 = 'Pet\'s name:'
            case3 = 2
            case4 = 1
        else:  # If pet has a name but is not PPS
            case1 = 'Pet\'s name:'
            case2 = 'Adopted:'
            case3 = 1
            case4 = 1
        temp = len(titles) - 1 if pps else len(titles)  # Is pet is PPS, remove one title, else all titles
        for i in range(temp):  # For each title in titles
            if titles[i] == (case1):
                information['Name'] = values[i].xpath('text()')[0]  # Add pet name to information dictionary
            elif titles[i] == (case2):
                information['Adopted'] = values[i].xpath('text()')[0]  # Add pet adoption date to information dictionary
            elif titles[i] == ('Growth:' if pps else 'Rarity:'):  # If pet is PPS, if titles[i] matches 'Growth:', otherwise if not PPS, if titles[i] matches with 'Rarity:'
                information['Rarity'] = 'rarities/' + values[i].xpath('img/@src')[0][12:]  # Local link to rarity image
        if titles[case3] == 'Pet ID:':
            filename = values[case4].xpath('text()')[0]  # Get pet ID
        else:  # If ID cannot be found
            filename = 'pet'
        async with aiohttp.ClientSession() as session:  # Create an AIOHTTP session
            async with session.get(pet_image) as response:  # GET HTTP response of pet image link
                connection = await response.read()  # Read the response content
                pet_image = io.BytesIO(connection)  # Convert the content into bytes
        image_files = [pet_image, information['Rarity']]  # [downloaded pet image, local rarity-bar file]
        font = ImageFont.truetype('Verdana.ttf', 12)  # Verdana font size 12
        images = map(Image.open, image_files)  # Map the image files
        widths, heights = zip(*(i.size for i in images))  # Tuple of widths and heights of both images
        images = list(map(Image.open, image_files))  # Re-open: the map above was consumed by zip
        temp_draw = ImageDraw.Draw(Image.new('RGBA', (0, 0)))  # Temporary drawing canvas to calculate text sizes
        max_width = max(widths)  # Max width of images
        total_height = sum(heights) + (15 * len(information))  # Total height: both images plus 15px per text line
        current_width = 0
        for key, value in information.items():  # For each item in information
            if 'rarities/' in value:  # Rarity entries are image paths, not text
                temp_width = 106  # Fixed rarity-bar width — presumably matches the local images; verify
            else:
                temp_width = temp_draw.textsize(value, font=font)[0]  # Width of text
            if current_width < temp_width:  # If current width is less than width of texts
                current_width = temp_width
        if max_width < current_width:
            max_width = current_width * 2  # Widen the canvas when text is wider than the images
        image = Image.new('RGBA', (max_width, total_height), (225, 246, 179, 255))  # Create an RGBA image of max_width x total_height, with colour 225, 246, 179
        pil_image = ImageDraw.Draw(image)  # Draw the image to PIL
        y_offset = 0  # Offset for vertically stacking images
        if transparent:  # If pet has items
            image.paste(images[0], (math.floor((max_width - images[0].size[0])/2), y_offset), images[0])  # Paste first image at ((MAX_WIDTH - IMAGE_WIDTH) / 2) using the mask from images[0]
        else:  # If pet doesn't have items
            image.paste(images[0], (math.floor((max_width - images[0].size[0])/2), y_offset))  # Paste first image at ((MAX_WIDTH - IMAGE_WIDTH) / 2)
        y_offset += images[0].size[1]  # Advance past the pet image
        try:
            pil_image.text((math.floor(((max_width - math.floor(pil_image.textsize(information['Name'], font=font)[0]))/2)), y_offset), information['Name'], fill=(0, 0, 0), font=font)  # Centre the name line horizontally
            y_offset += 15  # Add offset of 15
        except KeyError:  # Pet has no name — skip the line
            pass
        try:
            pil_image.text((math.floor(((max_width - math.floor(pil_image.textsize(information['Adopted'], font=font)[0]))/2)), y_offset), information['Adopted'], fill=(0, 0, 0), font=font)  # Centre the adoption-date line horizontally
            y_offset += 15  # Add offset of 15
        except KeyError:  # No adoption date — skip the line
            pass
        image.paste(images[1], (math.floor((max_width - images[1].size[0])/2), y_offset), images[1])  # Paste the rarity bar centred, using its own alpha as mask
        output_buffer = io.BytesIO()  # Convert the PIL output into bytes
        image.save(output_buffer, 'png')  # Save the bytes as a PNG format
        output_buffer.seek(0)  # Move the 'cursor' back to the start
        filename += '.png'  # Set filename as (Pet ID).png
        await client.send_file(ctx.message.channel, fp=output_buffer, filename=filename)  # Upload the file to the channel where message came from
    else:  # If data is invalid
        await client.say(embed=data[1])  # Send the error embed from get_web_data
# -------------------- OEKAKI COMMAND --------------------
@client.command(no_pm=True)  # Disallow using this command in PM's
async def oekaki(link: str = ''):  # Oekaki command
    """Display a ChickenSmoothie Oekaki drawing as a Discord embed.

    Fetches the forum topic at `link`, extracts the drawing image, the
    artist's name/gallery links and the artist's avatar, and posts them.
    """
    data = await get_web_data(link, 'oekaki')  # Get Oekaki page DOM (or an error embed)
    if data[0]:  # If data is valid
        base_link = 'http://www.chickensmoothie.com/Forum/'
        oekaki_title = data[1].xpath('//h3[@class="first"]/a/text()')[0]  # Title of drawing
        image = 'https://www.chickensmoothie.com' + data[1].xpath('//li[@class="ok-topic-head-image large"]/img/@src')[0]  # Image of drawing
        user_icon = base_link[:-1] + data[1].xpath('//dl[@class="postprofile"]')[0].xpath('dt/a/img/@src')[0][1:]  # The profile picture of the artist
        warning_text = 'Reminder!! Copying another person\'s art without permission to reproduce their work is a form of art-theft!'  # General warning text regarding Oekaki art
        # When the first info row reads 'Click to view' the drawing is based on
        # another drawing, so the artist links live in the SECOND row instead.
        if data[1].xpath('//table[@class="ok-drawing-info"]/tr')[0].xpath('td')[1].xpath('a/text()')[0] == 'Click to view':
            artist_links = data[1].xpath('//table[@class="ok-drawing-info"]/tr')[1].xpath('td')[1].xpath('a/@href')  # Artist link hrefs
            artist_values = data[1].xpath('//table[@class="ok-drawing-info"]/tr')[1].xpath('td')[1].xpath('a/text()')  # Artist link texts
        else:  # If drawing is not based off another drawing
            artist_links = data[1].xpath('//table[@class="ok-drawing-info"]/tr')[0].xpath('td')[1].xpath('a/@href')  # Artist link hrefs
            artist_values = data[1].xpath('//table[@class="ok-drawing-info"]/tr')[0].xpath('td')[1].xpath('a/text()')  # Artist link texts
        # Fixed: the gallery part previously rendered as the literal text
        # '[gallery(link)]' because the closing ']' of the Markdown link was
        # misplaced. Intended display: 'Artist Name [gallery]' with both as links.
        artist_text = ('[' + artist_values[0] + '](' + base_link + artist_links[0][1:] + ') '
                       '[[' + artist_values[1] + '](' + base_link + artist_links[1][1:] + ')]')
        embed = discord.Embed(title=oekaki_title, colour=0x4ba139, url=link)  # Create embed
        embed.add_field(name='Artist', value=artist_text)  # Add Artist field
        embed.set_footer(text=warning_text, icon_url="https://vignette.wikia.nocookie.net/pufflescp/images/6/68/Red_Warning_Triangle.png/revision/latest?cb=20160718024653&format=original")  # Add warning text to footer
        embed.set_image(url=image)  # Add drawing to embed
        embed.set_thumbnail(url=user_icon)  # Set thumbnail as user profile picture
        await client.say(embed=embed)  # Send embed
    else:  # If data is not valid
        await client.say(embed=data[1])  # Send the error embed produced by get_web_data
# -------------------- PET COMMAND --------------------
@client.command(no_pm=True)  # Disallow using this command in PM's
async def pet(link: str = ''):  # Pet command
    """Display a ChickenSmoothie pet's information as a Discord embed.

    Handles three page layouts: plain pets, PPS pets (extra first row), and
    pets that were given by another user (extra 'Given by' last row).
    """
    data = await get_web_data(link, 'pet')  # Get pet page DOM (or an error embed)
    if data[0]:  # If data is valid
        titles = data[1].xpath('//td[@class="l"]/text()')  # Titles of pet information rows
        values = data[1].xpath('//td[@class="r"]')  # Values of pet information rows
        given = True  # Assume pet has been given by another user until proven otherwise
        value_list = []
        petimg = data[1].xpath('//img[@id="petimg"]/@src')[0]  # Pet image link
        if 'trans' in petimg:  # If pet image is transparent (i.e. Pet has items)
            # Fixed: the base URL had been corrupted to 'chickensmoothie.<EMAIL>'.
            petimg = 'http://www.chickensmoothie.com' + petimg  # Absolute pet image link
        owner_name = data[1].xpath('//td[@class="r"]/a/text()')[0]  # Owner of pet
        owner_link = 'http://www.chickensmoothie.com/' + data[1].xpath('//td[@class="r"]/a/@href')[0]  # Link to owner profile
        if titles[0] == 'PPS':  # If pet is PPS
            value_list.append('[This pet has "PPS". What\'s that?](http://www.chickensmoothie.com/help/pets#pps)')  # Append PPS value
            value_list.append('[' + owner_name + '](' + owner_link + ')')  # [Owner Name](Link to Owner)
            pps = True
        else:  # If pet is not PPS
            value_list.append('[' + owner_name + '](' + owner_link + ')')  # [Owner Name](Link to Owner)
            pps = False
        tables = len(titles)  # Number of rows
        temp = tables - 1 if pps else tables  # -1 row if pet is PPS (PPS row already handled)
        for i in range(temp):  # For each remaining title
            if i == 0:  # First value (PPS or Owner name) has already been set
                pass
            elif temp - i == 2 or temp - i == 1:  # Second-last or last row: layout depends on given/PPS
                if titles[i] == ('Age:' if pps else 'Growth:') or not given:  # Not a given pet
                    given = False
                    if temp - i == 2:  # Second-last row is Growth
                        value_list.append(values[i].xpath('text()')[0])  # Append growth of pet
                    elif temp - i == 1:  # Last row is Rarity
                        value_list.append(values[i].xpath('img/@alt')[0])  # Append rarity of pet
                elif titles[i] == ('Growth:' if pps else 'Rarity:') or given:  # Given pet: rows shifted by one
                    given = True
                    if temp - i == 2:  # Second-last row is Rarity
                        value_list.append(values[i].xpath('img/@alt')[0])  # Append rarity of pet
                    elif temp - i == 1:  # Last row is 'Given by'
                        titles[i] = titles[i].replace('\t', '').replace('\n', '')  # Remove extra formatting
                        value_list.append('[' + data[1].xpath('//td[@class="r"]/a/text()')[1] + ']' + '(' + 'http://www.chickensmoothie.com/' + data[1].xpath('//td[@class="r"]/a/@href')[1] + ')')  # Append giver's profile link
            else:  # Any other row is plain text
                value_list.append(values[i].xpath('text()')[0])  # Append text
        embed = discord.Embed(title=owner_name + '\'s Pet', colour=0x4ba139)  # Create embed
        embed.set_image(url=petimg)  # Set image
        for i in range(tables):  # For each title in titles
            if i == 0:  # First value (PPS or Owner name) gets its own line
                embed.add_field(name=titles[i], value=value_list[i], inline=False)
            else:  # Remaining values flow inline
                embed.add_field(name=titles[i], value=value_list[i], inline=True)
        await client.say(embed=embed)  # Send embed
    else:  # If data is not valid
        await client.say(embed=data[1])  # Send the error embed produced by get_web_data
# -------------------- REMINDME COMMAND --------------------
@client.command(pass_context=True, aliases=['rm'], no_pm=True)  # Disallow using this command in PM's
async def remindme(ctx, amount: str):  # Remind Me command
    """Ping the invoking user after the requested delay (max 24 hours)."""
    total_seconds, hours, minutes, seconds = time_extractor(amount)  # Parse the requested duration
    if total_seconds == 0:  # Nothing parseable in the input
        error_text = 'That is not a valid time!'
    elif total_seconds > 86400:  # Longer than 24 hours is rejected
        error_text = 'That time is too long!'
    else:
        error_text = None
    if error_text is not None:  # Report the problem and stop
        await client.say(embed=discord.Embed(title='Remind Me', description=error_text, colour=0xff5252))
    else:  # Confirm, sleep, then ping the author
        confirmation = ('A reminder has been set for {0.mention} in '.format(ctx.message.author)
                        + resolver(0, hours, minutes, seconds) + '.')
        await client.say(embed=discord.Embed(title='Remind Me', description=confirmation, colour=0x4ba139))
        await asyncio.sleep(total_seconds)  # Wait out the requested delay
        await client.say('Reminder for {0.mention}!'.format(ctx.message.author))
# -------------------- STATS COMMAND --------------------
@client.command(no_pm=True, aliases=['stats'])  # Disallow using this command in PM's
async def statistics():  # Statistics command
    """Show memory usage, versions, server/member counts and uptimes."""
    def converter(seconds):
        """Split a seconds count into (days, hours, minutes, seconds).

        Fixed: the previous implementation added the seconds to datetime(1, 1, 1)
        and read `.day - 1`, which silently wraps once the duration exceeds the
        length of January (31 days). Plain divmod arithmetic has no such limit.
        """
        minutes, secs = divmod(int(seconds), 60)
        hours, minutes = divmod(minutes, 60)
        days, hours = divmod(hours, 24)
        return days, hours, minutes, secs
    system_memory_mb = str(round(psutil.virtual_memory()[3] / 1000 / 1024, 2)) + ' MB'  # Used physical memory of the system | X MB
    system_memory_percent = str(psutil.virtual_memory()[2]) + '%'  # Used physical memory of the system | X%
    bot_memory_mb = str(round(psutil.Process(os.getpid()).memory_info()[0] / 1024**2, 2)) + ' MB'  # Memory usage of this process | X MB
    bot_memory_percent = str(round(psutil.Process(os.getpid()).memory_percent(), 2)) + '%'  # Memory usage of this process | X%
    discord_py_version = discord.__version__  # Discord.py version
    server_count = str(len(client.servers))  # Number of servers CS Pound is in
    member_count = str(len(set(client.get_all_members())))  # Number of unique users CS Pound can see
    bot_uptime = converter((datetime.now() - start_time).total_seconds())  # Time this script has been running
    system_uptime = converter(round(pytime.time() - psutil.boot_time()))  # Time the system has been running
    bot_uptime = resolver(*bot_uptime)  # Pretty-format the bot uptime
    system_uptime = resolver(*system_uptime)  # Pretty-format the system uptime
    embed = discord.Embed(title='Stats', description='', colour=0x4ba139)  # Create empty embed
    embed.add_field(name='System Memory Usage', value=system_memory_percent + ' (' + system_memory_mb + ')', inline=False)
    embed.add_field(name=client.user.name + ' Memory Usage', value=bot_memory_percent + ' (' + bot_memory_mb + ')', inline=False)
    embed.add_field(name=client.user.name + ' Version', value=version, inline=False)
    embed.add_field(name='Discord.py Version', value=discord_py_version, inline=False)
    embed.add_field(name='Server Count', value=server_count, inline=False)
    embed.add_field(name='Member Count', value=member_count, inline=False)
    embed.add_field(name=client.user.name + ' Uptime', value=bot_uptime, inline=False)
    embed.add_field(name='System Uptime', value=system_uptime, inline=False)
    await client.say(embed=embed)  # Send embed
# -------------------- SUPPORT COMMAND --------------------
@client.command()
async def support():  # Support command
    """PM the invite link for the CS-Pound Development Server to the caller."""
    try:
        await client.whisper('https://discord.gg/PbzHqm9')  # PM the server invite link
        description = 'A PM has been sent to you!'
        colour = 0x4ba139
    except discord.errors.Forbidden:  # The user's privacy settings block PMs
        description = 'A PM couldn\'t be sent to you, it may be that you have \'Allow direct messages from server members\' disabled in your privacy settings.'
        colour = 0xff5252
    await client.say(embed=discord.Embed(title='Support', description=description, colour=colour))  # Report the outcome in-channel
# -------------------- TIME COMMAND --------------------
@client.command(no_pm=True, aliases=['pound'])  # Disallow using this command in PM's
async def time():  # Time command
    """Report how long until the pound opens."""
    async with aiohttp.ClientSession() as session:  # Create an AIOHTTP session
        async with session.get('http://www.chickensmoothie.com/pound.php') as response:  # GET the pound page
            page = await response.text()  # Request HTML page data
            dom = lxml.html.fromstring(page)  # Parse the HTML
            headings = dom.xpath('//h2/text()')  # Pound opening text lives in the second <h2>
            try:
                opening_text = headings[1]
            except IndexError:  # No opening-time heading means the pound is open now
                output = 'Pound is currently open!'
            else:
                if ':)' in opening_text:  # 'Random times' variant of the message
                    output = opening_text[:-85].replace('\n', '').replace('\t', '') + ' The pound opens at totally random times of day, so check back later to try again :)'
                else:  # Regular 'opens in X' variant
                    output = opening_text.replace('Sorry, the pound is closed at the moment.', '').replace('\n', '').replace('\t', '') + '.'
            await client.say(embed=discord.Embed(title='Time', description=output, colour=0x4ba139))  # Send embed
async def compose_message(time):  # Compose and send mention messages to channels
    """Mention every user who registered an Auto Remind for `time` minutes.

    `time` is the remaining minutes as a string. Each line of autoremind.txt
    is four space-separated fields; field 2 is the channel ID, field 3 the
    user ID, field 4 the remind time in minutes.

    Fixed: the previous implementation shelled out to grep/cut with a
    string-built command; besides being fragile, the unanchored pattern
    `\\s1` also matched times like '10'. Exact field comparison avoids both.
    """
    with open('autoremind.txt') as f:  # Read the registrations once
        records = [line.split() for line in f if line.strip()]
    channels = {}  # channel ID -> user IDs registered at this remind time
    for fields in records:
        if len(fields) >= 4 and fields[3] == time:  # Exact match on the stored time
            channels.setdefault(fields[1], []).append(fields[2])
    unit = 'minute' if time == '1' else 'minutes'  # Singular/plural
    for channel_id, user_ids in channels.items():
        message = time + ' ' + unit + ' until pound opens! '
        message += ''.join('<@' + user_id + '> ' for user_id in user_ids)  # <@USER_ID> mention per user
        try:
            await client.send_message(client.get_channel(channel_id), content=message)  # Send the reminder
        except discord.errors.NotFound:  # Channel was deleted since registration
            pass
async def minute_check(time):  # Check if any user has Auto Remind set up at 'time'
    """Fire Auto Remind messages when any registration matches `time` minutes.

    Caches the set of registered times and re-parses autoremind.txt only when
    its MD5 hash changes. Fixed: the file handle is now closed (it was leaked
    via a bare `open(...).read()`), the file is read once instead of twice,
    and the shell `cut | sort -u` pipeline is replaced with pure Python.
    """
    global autoremind_hash, autoremind_times
    time = str(time)
    with open('autoremind.txt') as f:  # Read once; reused for both hashing and parsing
        contents = f.read()
    new_hash = hashlib.md5(contents.encode()).hexdigest()  # MD5 hash of autoremind.txt
    if autoremind_hash != new_hash:  # File has been modified since last check
        autoremind_hash = new_hash
        # Unique remind times = unique 4th fields of each well-formed line.
        autoremind_times = sorted({fields[3]
                                   for fields in (line.split() for line in contents.splitlines())
                                   if len(fields) >= 4})
    if time in autoremind_times:  # Someone has an Auto Remind set at the current 'time'
        await compose_message(time)  # Send the reminders
async def pound_countdown():  # Background task to count down to when the pound opens
    """Poll the pound page and fire Auto Remind pings each minute before opening.

    While `cooldown` is set, the loop stops polling the website and counts
    minutes down locally; `text`, `value` and `sleep_amount` deliberately
    persist across iterations to carry that state.

    Fixed: three conditions of the form `'hour' and 'minute' in text` only
    tested the second operand ('hour' is always truthy); they now test both
    substrings as the inline comments always described.
    """
    global cooldown  # Use cooldown from global scope
    await client.wait_until_ready()  # Wait until client has loaded before starting background task
    while not client.is_closed:  # While client is still running
        if not cooldown:  # If countdown is not already in progress
            data = await get_web_data('', 'pound')  # Get pound page DOM
            if data[0]:  # If pound data is valid and contains content
                text = data[1].xpath('//h2/text()')  # List all texts with H2 element
                try:  # Try getting pound opening text
                    text = text[1]  # Grab the pound opening time text
                    value = [int(s) for s in text.split() if s.isdigit()]  # Extract the numbers in the text
                    if len(value) == 1:  # Only one number in the text
                        value = value[0]
                        if 'hour' in text:  # e.g. 'opens in 3 hours'
                            if value == 1:  # One hour left: start local countdown
                                cooldown = True
                                value = 60  # Count down from 60 minutes
                                sleep_amount = 0
                            else:  # More than one hour left
                                sleep_amount = (value - 2) * 3600  # Sleep until ~1 hour remains
                        elif 'minute' in text:  # e.g. 'opens in 40 minutes'
                            sleep_amount = 0
                            cooldown = True
                        elif 'second' in text:  # Seconds-only text: keep previous sleep_amount
                            # NOTE(review): sleep_amount may be unbound if this is hit on
                            # the very first iteration — pre-existing behaviour, confirm.
                            pass
                    elif len(value) == 2:  # Two numbers in the text
                        if 'hour' in text and 'minute' in text:  # e.g. '1 hour 30 minutes' (fixed conjunction)
                            sleep_amount = value[1] * 60  # Sleep off the minutes portion
                            value = 60
                            text = 'minute'  # Continue as a minute countdown
                            cooldown = True
                        elif 'minute' in text and 'second' in text:  # e.g. '1 minute 30 seconds' (fixed conjunction)
                            pass
                    elif len(value) == 0:  # No times at all: recently closed / not opening soon
                        sleep_amount = 3600  # 1 hour
                except IndexError:  # No opening heading: pound is currently open
                    sleep_amount = 3600  # 1 hour
            else:  # Pound data isn't valid
                sleep_amount = 11400  # 3 hours 10 minutes
        else:  # Countdown in progress: tick down without polling
            if 'hour' in text:  # Countdown that started from an hours message
                if value != 0:  # Minutes remaining
                    await minute_check(value)  # Fire reminders registered for this minute
                    value -= 1  # One minute elapses per loop
                    sleep_amount = 60  # 1 minute
                else:  # Time ran out: pound is now open
                    cooldown = False
                    sleep_amount = 10800  # 3 hours
            elif 'minute' in text and 'second' in text:  # Mixed minute/second text (fixed conjunction)
                sleep_amount = value[1]
                value = 1
            elif 'minute' in text:  # Countdown that started from a minutes message
                if value != 0:  # Minutes remaining
                    await minute_check(value)  # Fire reminders registered for this minute
                    value -= 1  # One minute elapses per loop
                    sleep_amount = 60  # 1 minute
                else:  # Time ran out: pound is now open
                    cooldown = False
                    sleep_amount = 10800  # 3 hours
            elif 'second' in text:  # Seconds-only: keep previous sleep_amount
                pass
        await asyncio.sleep(sleep_amount)  # Sleep until the next tick
client.loop.create_task(pound_countdown())  # Schedule the pound-countdown background task on the client's event loop
client.run(tokens[0])  # Start the bot (blocking) with the first token from tokens.txt
| # -------------------- IMPORTS --------------------
import aiohttp
import asyncio
from datetime import datetime, timedelta
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import hashlib
import io
import json
import logging
import lxml.html
import math
import os
from PIL import Image, ImageFont, ImageDraw
import platform
import psutil
import subprocess
import time as pytime
import urllib.request
# -------------------- VARIABLES --------------------
start_time = datetime.now()  # The time the script started running (used for the ,statistics uptime field)
prefix = ','  # Prefix to call CS Pound Discord Bot
version = '1.8'  # CS Pound Discord Bot version
tokens = [token.replace('\n', '') for token in list(open('tokens.txt'))]  # Get tokens from tokens.txt (one per line; index 0 is the bot token) — NOTE(review): the file handle is never closed
cooldown = False  # True while the pound-countdown background task is ticking locally
help_hash = ''  # Current MD5 hash of help.json (cache key for process_help)
help_list = {}  # Parsed contents of help.json (cache for process_help)
autoremind_hash = ''  # Current MD5 hash of autoremind.txt (cache key for minute_check)
autoremind_times = []  # Unique Auto Remind times parsed from autoremind.txt
# -------------------- HELP TEXT --------------------
warning_help = '''\
CS Pound website (Where you also get the invite link)
http://tailstar.us
-'''  # Header text for the ,help embed
chickensmoothie_help2 = '''\
`,archive <query>` - Search the ChickenSmoothie archives (Under Development)
`,fair <link>` - Determine whether a trade is fair (Under Development)
`,image <link>` - Displays pet image only
`,oekaki <link>` - Displays Oekaki drawing
`,pet <link>` - Displays pet information
`,time` - Tells you how long until the pound opens
`,trade <link>` - Displays trade information (Under Development)
_'''  # Extended CS command list — NOTE(review): appears unused in the visible code; presumably kept for the in-development commands
chickensmoothie_help = '''\
`,image <link>` - Displays pet image only
`,oekaki <link>` - Displays Oekaki drawing
`,pet <link>` - Displays pet information
`,time` - Tells you how long until the pound opens
_'''  # Chicken Smoothie related commands help
general_help = '''\
`,autoremind <on/off> <time>` - Turns on or off global auto reminding
`,remindme <time>` - Pings you after specified amount of time
_'''  # General commands help
informational_help = '''\
`,help` - Displays this message
`,support` - PM's you the link to the CS Pound Development Server
`,statistics` - Displays bot statistics
'''  # Informational commands help
# -------------------- FUNCTIONS --------------------
def time_extractor(time):  # Convert a duration string into seconds
    """Parse a duration string like '1h30m15s' into seconds plus components.

    Accepts hours/minutes/seconds in any combination, case-insensitively,
    with optional longer unit names ('hr', 'hours', 'min', 'secs', ...).

    Returns a tuple (total_seconds, hours, minutes, seconds); all zeros when
    the string contains no recognisable time component.

    Fixed: the previous split-based parser crashed with ValueError on inputs
    such as '1hours' or '2hr30min' (it fed leftover letters to int()); the
    anchored regex either parses cleanly or returns zeros.
    """
    import re  # Local import: 're' is not in the module's import block
    match = re.fullmatch(
        r'\s*(?:(\d+)\s*h(?:(?:ou)?rs?)?)?'        # hours: h / hr / hrs / hour / hours
        r'\s*(?:(\d+)\s*m(?:in(?:ute)?s?)?)?'      # minutes: m / min / mins / minute / minutes
        r'\s*(?:(\d+)\s*s(?:ec(?:ond)?s?)?)?\s*',  # seconds: s / sec / secs / second / seconds
        time.lower())
    if match is None:  # Nothing recognisable in the input
        return 0, 0, 0, 0
    htotal = int(match.group(1) or 0)  # Hours component (0 if absent)
    mtotal = int(match.group(2) or 0)  # Minutes component (0 if absent)
    stotal = int(match.group(3) or 0)  # Seconds component (0 if absent)
    finaltotal = htotal * 3600 + mtotal * 60 + stotal  # Total time in seconds
    return finaltotal, htotal, mtotal, stotal  # Return a tuple
def resolver(day, hour, minute, second):  # Pretty-format a duration from its components
    """Join day/hour/minute/second counts into readable English text.

    Zero-valued units are omitted; the remaining pieces are joined with
    commas and a final 'and' (e.g. '1 hour, 2 minutes and 3 seconds').
    """
    def pluralise(unit, amount, and_placement=''):
        # A zero amount contributes nothing to the output.
        if amount == 0:
            return ''
        piece = str(amount) + ' ' + unit
        if amount > 1:  # Pluralise the unit name
            piece += 's'
        if and_placement == 'pre':    # ' and ' before the piece
            piece = ' and ' + piece
        elif and_placement == 'suf':  # ' and ' after the piece
            piece += ' and '
        elif and_placement == 'com':  # ', ' after the piece
            piece += ', '
        return piece

    day_section = hour_section = minute_section = second_section = ''
    # True when at most one of hour/minute/second is non-zero.
    at_most_one_smaller = ((hour == 0 and minute == 0)
                           or (hour == 0 and second == 0)
                           or (minute == 0 and second == 0))
    if day != 0 and at_most_one_smaller:
        day_section = pluralise('day', day, 'suf')  # Day joins its single companion with 'and'
    elif day != 0:
        # Two or more smaller units follow, so the day takes a comma.
        day_section = pluralise('day', day, 'com')
    if minute == 0:
        hour_section = pluralise('hour', hour)  # Nothing between hour and second
    elif second == 0:
        hour_section = pluralise('hour', hour, 'suf')  # Hour joins the minute with 'and'
    else:
        hour_section = pluralise('hour', hour, 'com')  # More pieces follow: comma
    minute_section = pluralise('minute', minute)
    if hour == 0 and minute == 0:
        second_section = pluralise('second', second)  # Seconds stand alone
    else:
        second_section = pluralise('second', second, 'pre')  # ' and N seconds'
    return day_section + hour_section + minute_section + second_section
async def get_web_data(link, command_source):  # Get web data from link
    """Fetch and parse a ChickenSmoothie page for a bot command.

    `command_source` is one of 'pet', 'oekaki' or 'pound' and selects the
    base PHP endpoint plus how the link's query string is turned into POST
    data. Returns (True, lxml DOM) on success, or (False, discord.Embed)
    describing the error when the link is missing or malformed.
    """
    success = False  # Set True once the link has been validated
    headers = {  # HTTP request headers
        'User-Agent': 'CS Pound Discord Bot Agent ' + version,  # Connecting User-Agent
        'From': '<EMAIL>'  # Contact email — NOTE(review): redacted placeholder; restore a real address
    }
    if link == '' and command_source != 'pound':  # No link provided ('pound' needs none)
        description = 'You didn\'t provide a ' + command_source + ' link!'
        return success, discord.Embed(title=command_source.capitalize(), description=description, colour=0xff5252)  # Error embed
    else:  # If arguments provided
        try:  # Checking link format
            if command_source != 'pound':  # Commands other than ,time carry a query string
                parameters = link.split('?')[1].split('&')  # Get the PHP $GET values
                success = True  # Link is valid
            else:  # ,time needs no link at all
                success = True
        except IndexError:  # No '?' in the link: cannot extract $GET values
            return success, discord.Embed(title=command_source.capitalize(), description='That is not a valid ' + command_source + ' link!', colour=0xff5252)  # Error embed
        if success:  # Link exists and is valid
            data = {}  # PHP $POST parameters
            if command_source == 'pet':  # Called from the Pet command
                base_link = 'http://www.chickensmoothie.com/viewpet.php'  # Base PHP link for Pet command
                parameters = parameters[0].split('=')  # Split the single $POST variable
                data[parameters[0]] = parameters[1]  # Add dictionary item with $POST variable and value
            elif command_source == 'oekaki':  # Called from the Oekaki command
                base_link = 'http://www.chickensmoothie.com/Forum/viewtopic.php'  # Base PHP link for Oekaki command
                for param in range(len(parameters)):  # Each key=value pair in the query string
                    temp = parameters[param].split('=')  # Split the $POST variables
                    data[temp[0]] = temp[1]  # Add dictionary item with $POST variable and value
            elif command_source == 'pound':  # Called from the Time command / countdown task
                base_link = 'http://www.chickensmoothie.com/pound.php'  # Base PHP link for Time command
            async with aiohttp.ClientSession() as session:  # Create an AIOHTTP session
                async with session.post(base_link, data=data, headers=headers) as response:  # POST the variables to the base PHP link
                    connection = await response.text()  # Request HTML page data
                    dom = lxml.html.fromstring(connection)  # Extract HTML from site
                    return success, dom  # Return whether connection was successful and DOM data
        else:  # NOTE(review): appears unreachable — both try branches set success = True
            return success  # Return whether connection was successful
def process_help(command):  # Get the help text for one command from help.json
    """Return the formatted Discord help text for `command`.

    The parsed help.json is cached in module globals and re-read only when
    the file's MD5 hash changes. Fixed: the file handle used for hashing was
    leaked (bare `open(...).read()`), and the file was read twice on a cache
    miss — it is now opened once with a context manager.
    """
    global help_hash, help_list

    def monospace(string):  # Discord monospace format: `string`
        return '`' + string + '`'

    def italic(string):  # Discord italics format: *string*
        return '*' + string + '*'

    with open('help.json') as f:  # Read once; reused for both hashing and parsing
        raw = f.read()
    new_help_hash = hashlib.md5(raw.encode()).hexdigest()  # MD5 hash of help.json
    if help_hash != new_help_hash:  # help.json has been changed since the last call
        help_hash = new_help_hash  # Remember the new hash
        help_list = json.loads(raw)  # Re-parse the JSON data
    command_information = help_list[command]  # Information block for this command
    message = monospace(command_information['usage']) + ' - ' + command_information['description']  # `usage` - description
    if command_information['examples']:  # If there are examples for the command
        message += '\n' + italic('Examples:') + ' ' + ', '.join(monospace(value) for value in command_information['examples'].values())  # *Examples:* `example1`, `example2`
    if command_information['aliases']:  # If there are aliases for the command
        message += '\n' + italic('Aliases:') + ' ' + ', '.join(monospace(value) for value in command_information['aliases'].values())  # *Aliases:* `alias1`, `alias2`
    return message
# -------------------- DISCORD BOT SETTINGS --------------------
client = Bot(description='CS Pound by Peko#7955', command_prefix=prefix, pm_help=None)  # Create the bot with its description and command prefix
client.remove_command('help')  # Remove the default help command so the custom ,help below can be registered
logger = logging.getLogger('discord')  # Logger for the discord library
logger.setLevel(logging.DEBUG)  # Set logging level to DEBUG
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')  # Log to discord.log, truncated on every startup (mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))  # Set logging format
logger.addHandler(handler)  # Attach the file handler to the logger
@client.event
async def on_ready():  # Runs once the client has connected and loaded
    """Log startup details and set the bot's playing status."""
    bot = client.user  # Local alias for the bot's own user object
    print(f'Logged in as {bot.name} (ID: {bot.id})')
    print('--------')
    print(f'Current Discord.py Version: {discord.__version__}')
    print('--------')
    print(f'Use this link to invite {bot.name}:')
    print(f'https://discordapp.com/oauth2/authorize?client_id={bot.id}&scope=bot&permissions=268569600')
    print('--------')
    print(f'You are running {bot.name} v{version}')
    print('Created by Peko#7955')
    # Set the Playing status to ',help | By: Peko#7955'
    await client.change_presence(game=discord.Game(name=',help | By: Peko#7955'), status=discord.Status.online)
# -------------------- HELP COMMAND --------------------
@client.command(pass_context=True)
async def help(ctx, args=''):  # Help command
    """PM the help text to the caller; `,help <command>` shows one command.

    Fixed: the 'fair' topic title was missing its closing '**' ('**Fair');
    the long if/elif chain is replaced by a topic -> title dispatch table.
    """
    embed = discord.Embed(colour=0x4ba139)  # Create empty embed
    # Map each help topic argument to its embed field title.
    topics = {
        # ChickenSmoothie commands
        'archive': '**Archive**',
        'fair': '**Fair**',  # Fixed: closing ** was missing
        'oekaki': '**Oekaki**',
        'pet': '**Pet**',
        'time': '**Time**',
        'trade': '**Trade**',
        # General commands
        'autoremind': '**Auto Remind**',
        'remindme': '**Remind Me**',
        # Informational commands
        'help': '**Help**',
        'support': '**Support**',
        'statistics': '**Statistics**',
    }
    if args in topics:  # A specific command's help was requested
        embed.add_field(name=topics[args], value=process_help(args))  # Add that command's help information
    else:  # No argument, or an unknown topic: show the full overview
        embed.add_field(name=":pencil: __**To know about command usage or examples, use: ,help <command>**__", value=warning_help)  # Header/warning section
        embed.add_field(name=':dog: __**ChickenSmoothie Commands**__', value=chickensmoothie_help)  # Chicken Smoothie related commands
        embed.add_field(name=':file_folder: __**General Commands**__', value=general_help)  # General commands
        embed.add_field(name=':wrench: __**Informational Commands**__', value=informational_help)  # Informational commands
    try:
        await client.whisper(embed=embed)  # PM the embed to the user
        if ctx.message.channel.is_private:  # Called from a PM: nothing more to say
            embed = discord.Embed()  # Replace with new empty embed
        else:  # Called from a channel: confirm the PM was sent
            embed = discord.Embed(title='Help', description='A PM has been sent to you!', colour=0x4ba139)
        await client.say(embed=embed)  # Send embed
    except discord.errors.Forbidden:  # The user's privacy settings block PMs
        embed = discord.Embed(title='Help', description='A PM couldn\'t be sent to you, it may be that you have \'Allow direct messages from server members\' disabled in your privacy settings.', colour=0xff5252)
        await client.say(embed=embed)  # Send embed
# -------------------- AUTOREMIND COMMAND --------------------
def _autoremind_has_entry(user_id):
    """Return True when `user_id` already appears on some line of autoremind.txt."""
    try:
        with open('autoremind.txt') as f:
            # Substring match per line mirrors the old `grep -n 'ID'` behaviour.
            return any(user_id in line for line in f)
    except FileNotFoundError:
        return False


def _autoremind_remove_entry(user_id):
    """Delete every autoremind.txt line containing `user_id`.

    Keeps a backup of the previous contents in autoremind.txt.bak, mirroring
    the old `sed -i.bak` behaviour (which also broke when a user matched more
    than one line — this removes all of them).
    """
    with open('autoremind.txt') as f:
        lines = f.readlines()
    with open('autoremind.txt.bak', 'w') as f:
        f.writelines(lines)
    with open('autoremind.txt', 'w') as f:
        f.writelines(line for line in lines if user_id not in line)


@client.command(pass_context=True, no_pm=True)  # Disallow using this command in PM's
async def autoremind(ctx, args=''):
    """Set up (`,autoremind <minutes>`) or tear down (`,autoremind off`) a
    per-user reminder pinged before the pound opens.

    Entries are stored one per line in autoremind.txt as
    'SERVER_ID CHANNEL_ID USER_ID REMIND_TIME'.  When the bot has the
    'Manage Roles' permission (via a 'CS Pound' role) it also maintains an
    'Auto Remind' role on the server.

    Previously implemented by interpolating message data into `grep`/`sed`
    shell commands run with shell=True — non-portable and injection-prone;
    now pure Python with identical user-visible behaviour.
    """
    id_exists = _autoremind_has_entry(ctx.message.author.id)  # Does this user already have a reminder?
    server_roles = ctx.message.server.roles
    for role in server_roles:
        if role.name == "CS Pound":
            permission = role.permissions.manage_roles  # Bot may manage roles only via its 'CS Pound' role
            break
    else:  # Role doesn't exist at all
        permission = False
    if permission:
        server_roles = ctx.message.server.roles
        for role in server_roles:  # Ensure the 'Auto Remind' role exists
            if role.name == "Auto Remind":
                break
        else:
            await client.create_role(ctx.message.server, name='Auto Remind')
    if args == 'off':  # User wants to turn off Auto Remind
        if not id_exists:
            embed = discord.Embed(title='Auto Remind', description='You don\'t have Auto Remind setup!', colour=0xff5252)
        else:
            _autoremind_remove_entry(ctx.message.author.id)
            if permission:
                await client.remove_roles(ctx.message.author, discord.utils.get(server_roles, name='Auto Remind'))
                embed = discord.Embed(title='Auto Remind', description='You have been removed from the Auto Remind role.', colour=0x4ba139)
            else:
                embed = discord.Embed(title='Auto Remind', description='You have been removed from the Auto Remind.', colour=0x4ba139)
    else:  # User is setting an Auto Remind
        valid = False
        if args == '':
            embed = discord.Embed(title='Auto Remind', description='You didn\'t input a time!', colour=0xff5252)
        elif args.isdigit():
            valid = True
        else:
            args = args[:-1]  # Strip a trailing unit marker such as 'm'
            if args.isdigit():
                valid = True
            else:
                embed = discord.Embed(title='Auto Remind', description='That is not a valid time!', colour=0xff5252)
        if valid:
            if int(args) > 60:  # Reminders only cover the final hour before opening
                embed = discord.Embed(title='Auto Remind', description='That time is too far!', colour=0xff5252)
            else:
                if id_exists:
                    embed = discord.Embed(title='Auto Remind', description='You already have Auto Remind setup!', colour=0xff5252)
                else:
                    text = ctx.message.server.id + ' ' + ctx.message.channel.id + ' ' + ctx.message.author.id + ' ' + args + '\n'  # 'SERVER_ID CHANNEL_ID USER_ID REMIND_TIME'
                    with open('autoremind.txt', 'a+') as file:
                        file.write(text)
                    if permission:
                        await client.add_roles(ctx.message.author, discord.utils.get(server_roles, name='Auto Remind'))
                    message = 'Will ping you ' + args + ' minutes before the pound opens!'
                    embed = discord.Embed(title='Auto Remind', description=message, colour=0x4ba139)
    await client.say(embed=embed)
# -------------------- IMAGE COMMAND --------------------
@client.command(no_pm=True, aliases=['img'], pass_context=True)  # Disallow using this command in PM's
async def image(ctx, link: str = ''):
    """Build and upload a composite image for a ChickenSmoothie pet link.

    The composite stacks vertically: the pet picture, the pet's name and
    adoption date drawn as text (when present), and the rarity-bar image.

    :param ctx: command invocation context (reply channel comes from it).
    :param link: ChickenSmoothie pet page URL; validated by get_web_data().
    """
    data = await get_web_data(link, 'pet')  # (valid?, parsed-DOM-or-error-embed) tuple
    if data[0]:  # Link was valid and the page was fetched
        information = {}  # Rows to render; may hold 'Name', 'Adopted', 'Rarity'
        owner_name = data[1].xpath('//td[@class="r"]/a/text()')[0]  # Pet owner's name; NOTE(review): unused in this command
        titles = data[1].xpath('//td[@class="l"]/text()')  # Left-column labels of the pet-info table
        values = data[1].xpath('//td[@class="r"]')  # Right-column cells of the pet-info table
        pet_image = data[1].xpath('//img[@id="petimg"]/@src')[0]  # Pet image URL (site-relative for 'trans' variants)
        if 'trans' in pet_image:  # Transparent variant => pet has items on; absolutise the URL
            pet_image = 'http://www.chickensmoothie.com' + pet_image
            transparent = True
        else:
            transparent = False
        if titles[0] == 'PPS':  # PPS pets get an extra first row
            pps = True
        else:
            pps = False
        if len(titles) + len(values) < 16:  # Fewer cells than a fully-named pet's table — heuristic for "no name"; TODO confirm against page layout
            no_name = True
        else:
            no_name = False
        # case1/case2 select which labels feed 'Name'/'Adopted' below;
        # case3/case4 are the row indexes later used to find the 'Pet ID:' row.
        if no_name:
            case1 = 'Pet\'s name:'
            case2 = 'Adopted:'
            case3 = 1
            case4 = 1
            if pps:  # No name and PPS
                case1 = 'Pet\'s name:'
                case2 = 'Pet ID:'
                case3 = 2
                case4 = 1
        elif pps:  # Named and PPS
            case1 = 'Pet ID:'
            case2 = 'Pet\'s name:'
            case3 = 2
            case4 = 1
        else:  # Named, not PPS
            case1 = 'Pet\'s name:'
            case2 = 'Adopted:'
            case3 = 1
            case4 = 1
        temp = len(titles) - 1 if pps else len(titles)  # Skip the trailing PPS-only row
        for i in range(temp):
            if titles[i] == (case1):
                information['Name'] = values[i].xpath('text()')[0]  # Pet name text
            elif titles[i] == (case2):
                information['Adopted'] = values[i].xpath('text()')[0]  # Adoption-date text
            elif titles[i] == ('Growth:' if pps else 'Rarity:'):  # Label of the rarity-bar row differs for PPS pets
                information['Rarity'] = 'rarities/' + values[i].xpath('img/@src')[0][12:]  # Local path to the bundled rarity-bar image
        if titles[case3] == 'Pet ID:':
            filename = values[case4].xpath('text()')[0]  # Use the pet ID as the upload filename
        else:  # ID row not where expected — fall back to a generic name
            filename = 'pet'
        async with aiohttp.ClientSession() as session:
            async with session.get(pet_image) as response:  # Download the pet picture
                connection = await response.read()
                pet_image = io.BytesIO(connection)  # Re-bind: pet_image is now in-memory image bytes, not a URL
                image_files = [pet_image, information['Rarity']]
                font = ImageFont.truetype('Verdana.ttf', 12)  # Verdana, size 12; requires Verdana.ttf in the working directory
                images = map(Image.open, image_files)
                widths, heights = zip(*(i.size for i in images))  # Consumes the map above
                images = list(map(Image.open, image_files))  # NOTE(review): re-opens both images because the previous map was exhausted
                temp_draw = ImageDraw.Draw(Image.new('RGBA', (0, 0)))  # Throwaway canvas used only to measure text widths
                max_width = max(widths)  # Canvas width starts at the widest image
                total_height = sum(heights) + (15 * len(information))  # 15 px of height per text row
                current_width = 0  # Widest text row found so far
                for key, value in information.items():
                    if 'rarities/' in value:  # Rarity entry is an image path, not drawable text
                        temp_width = 106  # Fixed rarity-bar width in px — TODO confirm
                    else:
                        temp_width = temp_draw.textsize(value, font=font)[0]  # Rendered width of this text row
                    if current_width < temp_width:
                        current_width = temp_width
                if max_width < current_width:  # Text wider than the images: widen the canvas
                    max_width = current_width * 2
                image = Image.new('RGBA', (max_width, total_height), (225, 246, 179, 255))  # Pale-green CS background colour
                pil_image = ImageDraw.Draw(image)
                y_offset = 0  # Running vertical position while stacking parts
                if transparent:  # Third paste arg = alpha mask, so item layers keep transparency
                    image.paste(images[0], (math.floor((max_width - images[0].size[0])/2), y_offset), images[0])
                else:
                    image.paste(images[0], (math.floor((max_width - images[0].size[0])/2), y_offset))
                y_offset += images[0].size[1]
                try:
                    pil_image.text((math.floor(((max_width - math.floor(pil_image.textsize(information['Name'], font=font)[0]))/2)), y_offset), information['Name'], fill=(0, 0, 0), font=font)  # Horizontally-centred pet name in black
                    y_offset += 15
                except KeyError:  # Pet has no name row
                    pass
                try:
                    pil_image.text((math.floor(((max_width - math.floor(pil_image.textsize(information['Adopted'], font=font)[0]))/2)), y_offset), information['Adopted'], fill=(0, 0, 0), font=font)  # Horizontally-centred adoption date in black
                    y_offset += 15
                except KeyError:  # No adoption-date row
                    pass
                image.paste(images[1], (math.floor((max_width - images[1].size[0])/2), y_offset), images[1])  # Rarity bar, centred, pasted with its own alpha mask
                output_buffer = io.BytesIO()
                image.save(output_buffer, 'png')  # Serialise the composite as PNG
                output_buffer.seek(0)  # Rewind so send_file reads from the start
                filename += '.png'
                await client.send_file(ctx.message.channel, fp=output_buffer, filename=filename)  # Upload to the invoking channel
    else:  # Invalid link: data[1] is a ready-made error embed
        await client.say(embed=data[1])
# -------------------- OEKAKI COMMAND --------------------
@client.command(no_pm=True)  # Disallow using this command in PM's
async def oekaki(link: str = ''):
    """Embed a forum Oekaki drawing: title, image, artist credit (with gallery
    link) and an art-theft warning footer.

    :param link: ChickenSmoothie forum Oekaki topic URL; validated by
        get_web_data().
    """
    data = await get_web_data(link, 'oekaki')  # (valid?, parsed-DOM-or-error-embed) tuple
    if data[0]:
        base_link = 'http://www.chickensmoothie.com/Forum/'
        oekaki_title = data[1].xpath('//h3[@class="first"]/a/text()')[0]  # Title of the drawing
        image = 'https://www.chickensmoothie.com' + data[1].xpath('//li[@class="ok-topic-head-image large"]/img/@src')[0]  # Drawing image URL
        user_icon = base_link[:-1] + data[1].xpath('//dl[@class="postprofile"]')[0].xpath('dt/a/img/@src')[0][1:]  # Artist's profile picture
        warning_text = 'Reminder!! Copying another person\'s art without permission to reproduce their work is a form of art-theft!'
        # When the drawing is based on another one, the first info row is the
        # 'based on' row and the artist row is the second; otherwise the first.
        rows = data[1].xpath('//table[@class="ok-drawing-info"]/tr')
        if rows[0].xpath('td')[1].xpath('a/text()')[0] == 'Click to view':
            artist_cell = rows[1].xpath('td')[1]
        else:
            artist_cell = rows[0].xpath('td')[1]
        artist_links = artist_cell.xpath('a/@href')  # [artist profile href, gallery href] (site-relative, leading '.')
        artist_values = artist_cell.xpath('a/text()')  # [artist name, 'gallery']
        # BUG FIX: the gallery part was previously built as '[gallery(url)]'
        # (missing '](' ), which Discord rendered as literal text rather than a
        # link.  Renders as: ArtistName [gallery], both parts clickable.
        artist_text = ('[' + artist_values[0] + '](' + base_link + artist_links[0][1:] + ') '
                       '[[' + artist_values[1] + '](' + base_link + artist_links[1][1:] + ')]')
    
        embed = discord.Embed(title=oekaki_title, colour=0x4ba139, url=link)
        embed.add_field(name='Artist', value=artist_text)
        embed.set_footer(text=warning_text, icon_url="https://vignette.wikia.nocookie.net/pufflescp/images/6/68/Red_Warning_Triangle.png/revision/latest?cb=20160718024653&format=original")
        embed.set_image(url=image)
        embed.set_thumbnail(url=user_icon)
        await client.say(embed=embed)
    else:  # Invalid link: data[1] is a ready-made error embed
        await client.say(embed=data[1])
# -------------------- PET COMMAND --------------------
@client.command(no_pm=True)  # Disallow using this command in PM's
async def pet(link: str = ''):
    """Embed a ChickenSmoothie pet's details: image, owner, adoption date,
    growth/rarity and (when applicable) a PPS notice and the 'given by' user.

    :param link: ChickenSmoothie pet page URL; validated by get_web_data().
    """
    data = await get_web_data(link, 'pet')  # (valid?, parsed-DOM-or-error-embed) tuple
    if data[0]:
        titles = data[1].xpath('//td[@class="l"]/text()')  # Left-column labels of the pet-info table
        values = data[1].xpath('//td[@class="r"]')  # Right-column cells of the pet-info table
        given = True  # Tracks the alternate table layout used when the pet was given by another user
        value_list = []  # Field values, appended in table-row order
        petimg = data[1].xpath('//img[@id="petimg"]/@src')[0]  # Pet image URL (site-relative for 'trans' variants)
        if 'trans' in petimg:  # Transparent variant => pet has items on; absolutise the URL
            # BUG FIX: the base URL literal had been corrupted to
            # 'http://www.chickensmoothie.<EMAIL>', which produced a broken
            # image link for every pet with items.
            petimg = 'http://www.chickensmoothie.com' + petimg
        owner_name = data[1].xpath('//td[@class="r"]/a/text()')[0]  # Pet owner's name
        owner_link = 'http://www.chickensmoothie.com/' + data[1].xpath('//td[@class="r"]/a/@href')[0]  # Link to the owner's profile
        if titles[0] == 'PPS':  # PPS pets get an extra explanatory first row
            value_list.append('[This pet has "PPS". What\'s that?](http://www.chickensmoothie.com/help/pets#pps)')
            value_list.append('[' + owner_name + '](' + owner_link + ')')  # [Owner Name](profile link)
            pps = True
        else:
            value_list.append('[' + owner_name + '](' + owner_link + ')')  # [Owner Name](profile link)
            pps = False
        tables = len(titles)  # Number of table rows
        temp = tables - 1 if pps else tables  # Skip the trailing PPS-only row
        for i in range(temp):
            if i == 0:
                pass  # First value already appended above (PPS notice or owner)
            elif temp - i == 2 or temp - i == 1:  # Second-to-last / last rows need layout-dependent handling
                if titles[i] == ('Age:' if pps else 'Growth:') or not given:
                    given = False
                    if temp - i == 2:
                        value_list.append(values[i].xpath('text()')[0])  # Growth text
                    elif temp - i == 1:
                        value_list.append(values[i].xpath('img/@alt')[0])  # Rarity (alt text of the rarity bar)
                elif titles[i] == ('Growth:' if pps else 'Rarity:') or given:
                    given = True
                    if temp - i == 2:
                        value_list.append(values[i].xpath('img/@alt')[0])  # Rarity (alt text of the rarity bar)
                    elif temp - i == 1:
                        titles[i] = titles[i].replace('\t', '').replace('\n', '')  # Strip the label's extra formatting
                        value_list.append('[' + data[1].xpath('//td[@class="r"]/a/text()')[1] + ']' + '(' + 'http://www.chickensmoothie.com/' + data[1].xpath('//td[@class="r"]/a/@href')[1] + ')')  # 'Given by' user profile link
            else:  # Ordinary middle rows are plain text
                value_list.append(values[i].xpath('text()')[0])
        embed = discord.Embed(title=owner_name + '\'s Pet', colour=0x4ba139)
        embed.set_image(url=petimg)
        for i in range(tables):
            if i == 0:  # First field on its own line
                embed.add_field(name=titles[i], value=value_list[i], inline=False)
            else:
                embed.add_field(name=titles[i], value=value_list[i], inline=True)
        await client.say(embed=embed)
    else:  # Invalid link: data[1] is a ready-made error embed
        await client.say(embed=data[1])
# -------------------- REMINDME COMMAND --------------------
@client.command(pass_context=True, aliases=['rm'], no_pm=True)  # Disallow using this command in PM's
async def remindme(ctx, amount: str):
    """Ping the invoking user after the requested delay (at most 24 hours).

    :param amount: free-form duration string, parsed by time_extractor().
    """
    total, hours, minutes, seconds = time_extractor(amount)  # (total seconds, h, m, s)
    if total == 0:  # Nothing parseable in the input
        error = 'That is not a valid time!'
    elif total > 86400:  # Longer than 24 hours is refused
        error = 'That time is too long!'
    else:
        error = None
    if error is not None:
        await client.say(embed=discord.Embed(title='Remind Me', description=error, colour=0xff5252))
        return
    pretty = resolver(0, hours, minutes, seconds)  # Human-readable 'X hours, Y minutes and Z seconds'
    confirmation = 'A reminder has been set for {0.mention} in '.format(ctx.message.author) + pretty + '.'
    await client.say(embed=discord.Embed(title='Remind Me', description=confirmation, colour=0x4ba139))
    await asyncio.sleep(total)  # Wait out the requested delay
    await client.say('Reminder for {0.mention}!'.format(ctx.message.author))
# -------------------- STATS COMMAND --------------------
@client.command(no_pm=True, aliases=['stats'])  # Disallow using this command in PM's
async def statistics():
    """Report memory usage, versions, server/member counts and uptimes."""
    def to_dhms(total_seconds):
        # Piggy-back on datetime arithmetic to split seconds into days/h/m/s.
        moment = datetime(1, 1, 1) + timedelta(seconds=int(total_seconds))
        return moment.day - 1, moment.hour, moment.minute, moment.second

    process = psutil.Process(os.getpid())  # This bot process
    virtual = psutil.virtual_memory()  # System-wide memory snapshot
    system_memory = f'{virtual[2]}% ({round(virtual[3] / 1000 / 1024, 2)} MB)'  # Used physical memory
    bot_memory = f'{round(process.memory_percent(), 2)}% ({round(process.memory_info()[0] / 1024**2, 2)} MB)'  # This process's memory
    bot_up = to_dhms((datetime.now() - start_time).total_seconds())  # Since the script started
    system_up = to_dhms(round(pytime.time() - psutil.boot_time()))  # Since the machine booted
    embed = discord.Embed(title='Stats', description='', colour=0x4ba139)
    for name, value in (
        ('System Memory Usage', system_memory),
        (client.user.name + ' Memory Usage', bot_memory),
        (client.user.name + ' Version', version),
        ('Discord.py Version', discord.__version__),
        ('Server Count', str(len(client.servers))),
        ('Member Count', str(len(set(client.get_all_members())))),
        (client.user.name + ' Uptime', resolver(*bot_up)),
        ('System Uptime', resolver(*system_up)),
    ):
        embed.add_field(name=name, value=value, inline=False)
    await client.say(embed=embed)
# -------------------- SUPPORT COMMAND --------------------
@client.command()
async def support():
    """PM the invite link for the CS-Pound Development Server to the caller."""
    title = 'Support'
    try:
        await client.whisper('https://discord.gg/PbzHqm9')  # Send the invite by PM
        description = 'A PM has been sent to you!'
        colour = 0x4ba139
    except discord.errors.Forbidden:  # User's privacy settings block PMs from the bot
        description = ('A PM couldn\'t be sent to you, it may be that you have \'Allow direct messages '
                       'from server members\' disabled in your privacy settings.')
        colour = 0xff5252
    await client.say(embed=discord.Embed(title=title, description=description, colour=colour))
# -------------------- TIME COMMAND --------------------
@client.command(no_pm=True, aliases=['pound'])  # Disallow using this command in PM's
async def time():
    """Report how long until the CS pound opens, scraped from the pound page."""
    async with aiohttp.ClientSession() as session:
        async with session.get('http://www.chickensmoothie.com/pound.php') as response:
            page = await response.text()  # Raw HTML of the pound page
    headings = lxml.html.fromstring(page).xpath('//h2/text()')  # The opening-time notice lives in an <h2>
    try:
        notice = headings[1]
        if ':)' in notice:  # 'Random opening times' variant of the notice
            output = (notice[:-85].replace('\n', '').replace('\t', '')
                      + ' The pound opens at totally random times of day, so check back later to try again :)')
        else:  # Ordinary 'opens in X' variant
            output = notice.replace('Sorry, the pound is closed at the moment.', '').replace('\n', '').replace('\t', '') + '.'
    except IndexError:  # No notice present: the pound is open right now
        output = 'Pound is currently open!'
    await client.say(embed=discord.Embed(title='Time', description=output, colour=0x4ba139))
async def compose_message(time):
    """Mention, per channel, every user whose Auto Remind fires at `time`
    minutes before the pound opens.

    Reads autoremind.txt ('SERVER_ID CHANNEL_ID USER_ID REMIND_TIME' per line),
    groups user IDs by channel and sends one mention message per channel.

    Previously implemented by interpolating `time` into grep/cut/sort shell
    pipelines run with shell=True — non-portable and injection-prone; now pure
    Python with the same grouping (channels processed in sorted order, like
    the old `sort -u`) and identical message text.

    :param time: remind time in minutes, as a string.
    """
    reminders = {}  # channel_id -> [user_id, ...] for this remind time
    with open('autoremind.txt') as f:
        for line in f:
            fields = line.split()
            if len(fields) == 4 and fields[3] == time:
                reminders.setdefault(fields[1], []).append(fields[2])
    for channel_id in sorted(reminders):  # Sorted like the old `sort -u` channel list
        if time == '1':  # Singular form for the final minute
            message = time + ' minute until pound opens! '
        else:
            message = time + ' minutes until pound opens! '
        for user_id in reminders[channel_id]:
            message += '<@' + user_id + '> '  # Discord mention format
        try:
            await client.send_message(client.get_channel(channel_id), content=message)
        except discord.errors.NotFound:  # Channel was deleted since the reminder was set
            pass
async def minute_check(time):
    """Dispatch reminders if any user has an Auto Remind set at `time` minutes.

    Caches the set of distinct remind times (`autoremind_times`) and refreshes
    it only when autoremind.txt's MD5 hash changes.  Previously the unique
    times were extracted with a `cut | sort -u` shell pipeline and the file
    handle used for hashing was never closed; both replaced with pure Python.

    :param time: minutes remaining until the pound opens (int or str).
    """
    global autoremind_hash, autoremind_times
    time = str(time)
    with open('autoremind.txt') as f:  # Context manager: no leaked handle
        contents = f.read()
    new_hash = hashlib.md5(contents.encode()).hexdigest()
    if autoremind_hash != new_hash:  # File changed since the last check — rebuild the cache
        autoremind_hash = new_hash
        # Unique 4th fields, sorted — same result as the old `cut -f4 | sort -u`.
        autoremind_times = sorted({line.split()[3] for line in contents.splitlines() if len(line.split()) == 4})
    if time in autoremind_times:  # Someone wants a ping at this minute mark
        await compose_message(time)
async def pound_countdown():
    """Background task counting down to the pound opening.

    While `cooldown` is False it polls the pound page, parses the "opens in …"
    text and either sleeps until closer to opening or enters cooldown mode;
    while `cooldown` is True it ticks once per minute, calling minute_check()
    so Auto Reminds fire at the right minute marks.

    NOTE(review): in cooldown mode this relies on `text` and `value` surviving
    from a previous loop iteration — fragile but intentional state carry-over;
    some branches leave `sleep_amount` unset on paths the site's wording
    should make unreachable.  Behaviour kept as-is apart from the bug fix
    below.

    BUG FIX: three conditions were written as `'hour' and 'minute' in text`
    (and the minute/second analogues), which Python parses as
    `'hour' and ('minute' in text)` — i.e. only the second word was ever
    tested.  They now test both substrings as clearly intended.
    """
    global cooldown  # Shared with the command layer; True while the per-minute countdown runs
    await client.wait_until_ready()  # Don't poll before the client has logged in
    while not client.is_closed:
        if not cooldown:  # Polling mode: figure out how far away opening is
            data = await get_web_data('', 'pound')  # (valid?, parsed-DOM-or-error-embed) tuple
            if data[0]:
                text = data[1].xpath('//h2/text()')  # All <h2> texts on the pound page
                try:
                    text = text[1]  # The "pound opens in …" sentence
                    value = [int(s) for s in text.split() if s.isdigit()]  # Numbers mentioned in the sentence
                    if len(value) == 1:  # Single quantity, e.g. "3 hours" or "40 minutes"
                        value = value[0]
                        if 'hour' in text:
                            if value == 1:  # Final hour: start the minute countdown
                                cooldown = True
                                value = 60  # Count down from 60 minutes
                                sleep_amount = 0
                            else:  # More than an hour away: sleep until ~1 hour remains
                                sleep_amount = (value - 2) * 3600
                        elif 'minute' in text:  # Already inside the final hour
                            sleep_amount = 0
                            cooldown = True
                        elif 'second' in text:
                            pass
                    elif len(value) == 2:  # Two quantities, e.g. "1 hour and 20 minutes"
                        if 'hour' in text and 'minute' in text:  # FIXED: previously `'hour' and 'minute' in text`
                            sleep_amount = value[1] * 60  # Sleep off the odd minutes first
                            value = 60
                            text = 'minute'  # Forge state so the cooldown branch ticks in minutes
                            cooldown = True
                        elif 'minute' in text and 'second' in text:  # FIXED: previously `'minute' and 'second' in text`
                            pass
                    elif len(value) == 0:  # No numbers: recently closed / no opening announced
                        sleep_amount = 3600  # 1 hour
                except IndexError:  # No notice at all: the pound is currently open
                    sleep_amount = 3600  # 1 hour
            else:  # Couldn't fetch the pound page
                sleep_amount = 11400  # 3 hours 10 minutes
        else:  # Cooldown mode: tick once per minute using state from the last poll
            if 'hour' in text:
                if value != 0:
                    await minute_check(value)  # Fire any Auto Reminds due at this minute mark
                    value -= 1
                    sleep_amount = 60  # 1 minute
                else:  # Countdown finished: pound is now open
                    cooldown = False
                    sleep_amount = 10800  # 3 hours
            elif 'minute' in text and 'second' in text:  # FIXED: previously `'minute' and 'second' in text`
                sleep_amount = value[1]
                value = 1
            elif 'minute' in text:
                if value != 0:
                    await minute_check(value)  # Fire any Auto Reminds due at this minute mark
                    value -= 1
                    sleep_amount = 60  # 1 minute
                else:  # Countdown finished: pound is now open
                    cooldown = False
                    sleep_amount = 10800  # 3 hours
            elif 'second' in text:
                pass
        await asyncio.sleep(sleep_amount)
client.loop.create_task(pound_countdown())  # Schedule the pound-countdown background task on the bot's event loop
client.run(tokens[0])  # Start the bot with the first token from tokens.txt; blocks until the client disconnects
| en | 0.714358 | # -------------------- IMPORTS -------------------- # -------------------- VARIABLES -------------------- # The time the script started running # Prefix to call CS Pound Discord Bot # CS Pound Discord Bot version # Get tokens from tokens.txt file # Cooldown of Auto Remind # Current hash of help.json # Current hash of autoremind.txt # Unique Auto Remind times # -------------------- HELP TEXT -------------------- \ CS Pound website (Where you also get the invite link) http://tailstar.us - # Title help \ `,archive <query>` - Search the ChickenSmoothie archives (Under Development) `,fair <link>` - Determine whether a trade is fair (Under Development) `,image <link>` - Displays pet image only `,oekaki <link>` - Displays Oekaki drawing `,pet <link>` - Displays pet information `,time` - Tells you how long until the pound opens `,trade <link>` - Displays trade information (Under Development) _ \ `,image <link>` - Displays pet image only `,oekaki <link>` - Displays Oekaki drawing `,pet <link>` - Displays pet information `,time` - Tells you how long until the pound opens _ # Chicken Smoothie related commands help \ `,autoremind <on/off> <time>` - Turns on or off global auto reminding `,remindme <time>` - Pings you after specified amount of time _ # General commands help \ `,help` - Displays this message `,support` - PM's you the link to the CS Pound Development Server `,statistics` - Displays bot statistics # Informational commands help # -------------------- FUNCTIONS -------------------- # Convert given time into seconds # Change all letters to lowercase # If there is no time at all # If hours in input # Split input and get number of hours # If minutes in input # Split input and get leftover time (minutes and seconds) # Split temp and get number of minutes # If seconds in input # Split input and get leftover time (minutes and seconds) # Split temp and get leftover time (seconds) # Split temp2 and get number of seconds # If no minutes in input # If seconds 
in input # Split input and get leftover time (seconds) # Split temp and get number of seconds # If no hours in input # If minutes in input # Split input and get number of minutes # If seconds in input # Split input and get leftover time (seconds) # Split temp and get number of seconds # If no minutes in input # If seconds in input # Split input and get number of seconds # Convert 'htotal' into integer # Convert 'mtotal' into integer # Convert 'stotal' into integer # If hours, minutes and seconds is 0 # If values in hours, minutes or seconds # Total time in seconds # Return a tuple # Pretty format time layout given days, hours, minutes and seconds # Correctly prefix or suffix ',' or 'and' placements # If given time has no value # If given time has value # If 'and_placement' is set to prefix add 'and' otherwise leave blank # The value of the time # The type of time (day, hour, minute, second) # If value is larger than 1 then pluralise the time # If 'and_placement' is set to suffix add 'and' otherwise add a ',' instead if 'and_placement' is set to comma otherwise leave blank # If there are day(s) but: # No hours or minutes # No hours or seconds # No minutes or seconds # Pluralise the day section with a suffixed 'and' placement # If there are day(s) but: # There are hour(s) and minute(s) and second(s) # There are hour(s) but no minutes # There are hour(s) but no seconds # There are minute(s) but no hours # There are minute(s) but no seconds # There are second(s) but no hours # There are second(s) but no minutes # Pluralise the day section with a suffixed ',' placement # If there are no minutes # Pluralise the hour section # If there are minute(s) but no seconds # Pluralise the hour section with a suffixed 'and' placement # If there are minute(s) and second(s) # Pluralise the hour section with a suffixed ',' placement # Pluralise the minute section # If there are hour(s) or minute(s) # Pluralise the second section with a prefixed 'and' placement # If there are no hours 
or minutes # Pluralise the second section # Return the formatted text # Get web data from link # Boolean for whether link is valid # HTTP request headers # Connecting User-Agent # Contact email # If no link provided # Create embed # If arguments provided # Checking link format # If command source does not come from ,time # Get the PHP $GET values # Link is valid # If command source comes from ,time # If cannot get $GET value # Create embed # If link exists and is valid # PHP $POST parameters # If function is being called from the Pet command # Base PHP link for Pet command # Split the $POST variable # Add dictionary item with $POST variable and value # If function is being called from the Oekaki command # Base PHP link for Oekaki command # For each parameter # Split the $POST variables # Add dictionary item with $POST variable and value # If function is being called from the Pound command # Base PHP link for Time command # Create an AIOHTTP session # POST the variables to the base php link # Request HTML page data # Extract HTML from site # Return whether connection was successful and DOM data # If link is not valid # Return whether connection was successful # Get the help text from help.json # Returns string in Discord monospace format # `string` # Returns string in Discord italics format # *string* # MD5 hash of help.json # If help.json has been changed # Set hash to the new changes # Open help.json # Load the JSON data # Get the command information of the command # `usage` - description # If there are examples for the command # *Examples:* `example1`, `example2`, `example3` # If there are aliases for the command # *Aliases:* `alias1`, `alias2`, `alias3` # -------------------- DISCORD BOT SETTINGS -------------------- #7955', command_prefix=prefix, pm_help=None) # Set the bot description and prefix # Remove default help command to add custom help # Create logger # Set logging level to DEBUG # Set logging file # Set logging format # Start logger # When Client is 
loaded #7955') #7955'), status=discord.Status.online) # Change Playing status to ,help | By: Peko#7955 # -------------------- HELP COMMAND -------------------- # Help Command # Create empty embed # -------------------- CHICKENSMOOTHIE HELP -------------------- # If requested Archive command help # Add Archive help information to embed # If requested Fair command help # Add Fair help information to embed # If requested Oekaki command help # Add Oekaki help information to embed # If included 'pet' argument # Embed Pet help information # If included 'time' argument # Embed Time help information # If included 'trade' argument # Embed Trade help information # -------------------- GENERAL HELP -------------------- # If included 'autoremind' argument # Embed Auto Remind help information # If included 'remineme' argument # Embed Remind Me help information # -------------------- INFORMATIONAL HELP -------------------- # If included 'help' argument # Embed Help help information # Embed Support help information # Embed Statistics help information # If provided no arguments or requested a help topic that doesn't exist # add Warning help information to embed # Embed Chicken Smoothie related commands # Embed General commands # Embed informational commands # PM the embed to the user # If the user is calling command from PM # Replace with new empty embed # If the user is calling command from a channel # Create embed # Send embed # If cannot send PM to user # Create embed # Send embed # -------------------- AUTOREMIND COMMAND -------------------- # Disallow using this command in PM's # Auto Remind command # Get line number of ID # Run grep statement # List of roles in server # For each role in the server # If 'CS Pound' role exists # Check whether role has 'Manage Roles' permission and set boolean value # Break out of for loop # If role doesn't exist # If bot has permission to 'Manage Roles' # List of roles in server # Checks if role already exists in server # If role exists # 
Break out of for loop # If role doesn't exist # Create 'Auto Remind' role in server # If user wants to turn off Auto Remind # If user doesn't exist in database # Create embed # If user exists # sed statement # Run sed statement # If bot has permission to 'Manage Roles' # Remove role from user # Create embed # If bot doesn't have permission to 'Manage Roles' # Create embed # If user is setting an Auto Remind # If no arguments provided # Create embed # If the input is a digit # If the input isn't a digit # Remove the minute marker # If the input is a digit now # If input is still not digit # Create embed # If inputted time was valid # If time is bigger than 60 minutes # Create embed # If time is less than 60 minutes # If user has already set an Auto Remind # Create embed # If user doesn't have an Auto Remind setup # Write in the format 'SERVER_ID CHANNEL_ID USER_ID REMIND_TIME' # Open autoremind.txt # Write the text # If bot has 'Manage Roles' permission # Add user to Auto Remind role # Create embed # Send embed # -------------------- IMAGE COMMAND -------------------- # Disallow using this command in PM's # Autoremind command # Get pet data # If data is valid # User of pet # Titles of pet information # Values of pet information # Pet image link # If pet image is transparent (i.e. 
Pet has items) # Pet image link # If pet is PPS # If pet is not PPS # If the amount of titles and values don't add up # If they add up # If pet has no name # If pet has no name and is PPS # If pet has a name and is PPS # If pet has a name but is not PPS # Is pet is PPS, remove one title, else all titles # For each title in titles # Add pet name to information dictionary # Add pet adoption date to information dictionary # If pet is PPS, if titles[i] matches 'Growth:', otherwise if not PPS, if titles[i] matches with 'Rarity:' # Local link to rarity image # Get pet ID # If ID cannot be found # Create an AIOHTTP session # GET HTTP response of pet image link # Read the response content # Convert the content into bytes # Verdana font size 15 # Map the image files # Tuple of widths and heights of both images # List of image file name # Temporary drawing canvas to calculate text sizes # Max width of images # Total height of images # For each item in information # Width of text # If current width is less than width of texts # Create an RGBA image of max_width x total_height, with colour 225, 246, 179 # Draw the image to PIL # Offset for vertically stacking images # If pet has items # Paste first image at ((MAX_WIDTH - IMAGE_WIDTH) / 2) using the mask from images[0] # If pet doesn't have items # Paste first image at ((MAX_WIDTH - IMAGE_WIDTH) / 2) # Add height of image + 10 to offset # Paste text at (((MAX_WIDTH - (TEXT_WIDTH) / 2)) - (TEXT_WIDTH / 2) - 5, y_offset) with colour (0, 0, 0) and font # Add offset of 15 # Paste text at (((MAX_WIDTH - (TEXT_WIDTH) / 2)) - (TEXT_WIDTH / 2) - 5, y_offset) with colour (0, 0, 0) and font # Add offset of 15 # Paste first image at ((MAX_WIDTH - IMAGE_WIDTH) / 2) using the mask from images[1] # Convert the PIL output into bytes # Save the bytes as a PNG format # Move the 'cursor' back to the start # Set filename as (Pet ID).png # Upload the file to the channel where message came from # If data is invalid # Send embed # 
-------------------- OEKAKI COMMAND -------------------- # Disallow using this command in PM's # Oekaki command # Get Oekaki data # If data is valid # Title of drawing # Image of drawing # The profile picture of the artist # Get titles of drawing information # General warning text regarding Oekaki art # If drawing is based off another drawing # Drawing information titles # Drawing information values # If drawing is not based off another drawing # Drawing information titles # Drawing information values # [Artist Name](Link to Artist) [gallery](Link to Artist gallery) | Formats to Artist Name [gallery] # Create embed # Add Artist field # Add warning text to footer # Add drawing to embed # Set thumbnail as user profile picture # Send embed # If data is not valid # Send embed # -------------------- PET COMMAND -------------------- # Disallow using this command in PM's # Pet command # Get pet data # If data is valid # Titles of pet information # Values of pet information # Pet has been given by another user # Pet image link # If pet image is transparent (i.e. Pet has items) # Pet image link # User of pet # Link to user profile # If pet is PPS #pps)') # Append PPS value # [Owner Name](Link to Owner) | Formats to Owner Name # If pet is not PPS # [Owner Name](Link to Owner) | Formats to Owner Name # Number of rows # -1 Rows if pet is PPS # For each title in titles # If 'i' is at first value (PPS or Owner name) # Pass as first value has already been set # If 'i' is at second last or last value # If text of titles at 'i' is 'Age:' if pet is PPS otherwise 'Growth:' or pet not given # If 'i' is second last value (i.e. Growth) # Append growth of pet # If 'i' is last value (i.e. Rarity) # Append rarity of pet # If text of titles at 'i' is 'Growth:' is pet is PPS otherwise 'Rarity:' or pet is given # If 'i' is second last value (i.e. Rarity) # Append rarity of pet # If 'i' is last value (i.e. 
Given by) # Remove extra formatting # Append given user profile # Any other 'i' # Append text # Create embed # Set image # For each title in titles # If 'i' is first value (PPS or Owner name) # Add field with no inline # Any other 'i' # Add field with inline # Send embed # If data is not valid # Send embed # -------------------- REMINDME COMMAND -------------------- # Disallow using this command in PM's # Remind Me command # Get formatted times # If no time specified # Create embed # Send embed # If time is longer than 24 hours # Create embed # Send embed # If time is valid # A reminder has been set for USER in X hours, Y minutes, and Z seconds. # Create embed # Send embed # Reminder for USER! # Sleep for set time # Send message # -------------------- STATS COMMAND -------------------- # Disallow using this command in PM's # Statistics command # Convert seconds into days, hours, minutes and seconds # Create tuple of date values # Return tuple of date values # Get the used virtual memory (physical memory) of the system | X MB # Get the available virtual memory (physical memory) of the system | X% # Get the memory usage of the bot (i.e. This script) | X MB # Get used memory percentage of the bot (i.e. 
This script) | X% # Discord.py version # The number of servers this CS Pound is in # The number of unique users the CS Pound is connected to # The time the bot (script) has been running # The time the system has been running # Pretty format the bot uptime # Pretty format the system uptime # Create empty embed # Add system memory usage to embed # Add bot memory usage to embed # Add bot version to embed # Add Discord.py version to embed # Add server count to embed # Add member count to embed # Add bot uptime to embed # Add system uptime to embed # Send embed # -------------------- SUPPORT COMMAND -------------------- # Support command # PM Discord link to the CS-Pound Development Server to user # Create embed # If cannot send PM to user # Create embed # Send embed # -------------------- TIME COMMAND -------------------- # Disallow using this command in PM's # Time command # Create an AIOHTTP session # GET HTTP response of pound page # Request HTML page data # Extract HTML from site # Pound opening text # If :) in text # Remove excess formatting text # If any other text in text # If text doesn't exist # Create embed # Send embed # Function to compose and send mention messages to channels # Get channels with Auto Remind set at 'time' # Run grep statement # For each Discord channel ID # Grab all unique Discord user ID's with that channel ID # Run grep statement # If there is only one minute left # If there is more than 1 minute left # For each Discord user # Message format for mentioning users | <@USER_ID> # Send message to Discord channel with mention message # Function to check if any user has Auto Remind setup at 'time' # MD5 hash of autoremind.txt # If file has been modified since last check # Grab all unique reminding times from autoremind.txt # Run cut statement # If someone has a Auto Remind set at current 'time' # Run compose message # Background task to countdown to when the pound opens # Use cooldown from global scope # Wait until client has loaded before 
starting background task # While client is still running # If command is not on cooldown # Get pound data # If pound data is valid and contains content # List all texts with H2 element # Try getting pound opening text # Grab the pound opening time text # Extract the numbers in the text # If there is only one number # If hour in pound opening time # If there is one hour left # Start countdown from 60 minutes # If there is more than one hour # -1 hour and convert into seconds # If minute in pound opening time # If second in pound opening time # If there are two numbers # Get the minutes and convert to seconds # If there are no times i.e. Pound recently closed or not opening anytime soon # 1 hour # Pound is currently open # 1 hour # If pound data isn't valid # 3 hours 10 minutes # If command is on cooldown # If hour in text # If minutes left is not zero # Run minute check # Remove one minute # 1 minute # If time ran out (i.e. Pound is now open) # 3 hours # If minute and second in text # If minute in text # If minutes left is not zero # Run minute check # Remove one minute # 1 minute # If time ran out (i.e. Pound is now open) # 3 hours # If second in text # Sleep for sleep amount # Run 'pound_countdown' background task # Start bot | 2.542197 | 3 |
tests/test_randvars/test_arithmetic/test_constant.py | fxbriol/probnum | 1 | 6620917 | """Tests for random variable arithmetic for constants."""
import operator
from typing import Callable
import numpy as np
import pytest
from probnum import randvars
@pytest.mark.parametrize(
    "op",
    [
        operator.add,
        operator.sub,
        operator.mul,
        operator.truediv,
        # NOTE: the original list contained operator.floordiv twice, which only
        # re-ran an identical case (possibly operator.mod was intended).
        operator.floordiv,
        operator.pow,
    ],
)
@pytest.mark.parametrize("shape_const", [2, (3,), (2, 3)])
def test_constant_constant_entrywise_op(op: Callable, constant: randvars.Constant):
    """Entrywise binary ops on two Constants apply the op to their supports."""
    rv = op(constant, constant)
    np.testing.assert_allclose(rv.support, op(constant.support, constant.support))
@pytest.mark.parametrize("shape_const", [(3,), (2, 2)])
def test_constant_constant_matmul(constant: randvars.Constant):
    """Matrix product of two Constants equals the matrix product of their supports."""
    rv = constant @ constant
    np.testing.assert_allclose(rv.support, constant.support @ constant.support)
| """Tests for random variable arithmetic for constants."""
import operator
from typing import Callable
import numpy as np
import pytest
from probnum import randvars
@pytest.mark.parametrize(
    "op",
    [
        operator.add,
        operator.sub,
        operator.mul,
        operator.truediv,
        # NOTE: the original list contained operator.floordiv twice, which only
        # re-ran an identical case (possibly operator.mod was intended).
        operator.floordiv,
        operator.pow,
    ],
)
@pytest.mark.parametrize("shape_const", [2, (3,), (2, 3)])
def test_constant_constant_entrywise_op(op: Callable, constant: randvars.Constant):
    """Entrywise binary ops on two Constants apply the op to their supports."""
    rv = op(constant, constant)
    np.testing.assert_allclose(rv.support, op(constant.support, constant.support))
@pytest.mark.parametrize("shape_const", [(3,), (2, 2)])
def test_constant_constant_matmul(constant: randvars.Constant):
    """Matrix product of two Constants equals the matrix product of their supports."""
    rv = constant @ constant
    np.testing.assert_allclose(rv.support, constant.support @ constant.support)
| en | 0.74962 | Tests for random variable arithmetic for constants. | 2.829098 | 3 |
tests/testapp/permissions.py | yezyilomo/drf-guard | 7 | 6620918 | <reponame>yezyilomo/drf-guard<gh_stars>1-10
from rest_framework import permissions
class IsSelfUser(permissions.BasePermission):
    """
    Object-level permission: only the user themself may access the object.
    Assumes ``obj`` is a user instance (compared by equality with request.user).
    """
    def has_object_permission(self, request, view, obj):
        return obj == request.user
class IsAdminUser(permissions.BasePermission):
    """
    Custom permission to check if the user is an authenticated admin.
    Applies at both view level and object level; relies on the custom
    ``is_admin`` flag of the user model.
    """
    def has_permission(self, request, view):
        return request.user.is_authenticated and request.user.is_admin
    def has_object_permission(self, request, view, obj):
        return request.user.is_authenticated and request.user.is_admin
class IsTeacherAccessingStudent(permissions.BasePermission):
    """
    Object-level permission allowing a teacher to access a student user object.
    (The original docstring was a copy-paste of the admin permission.)
    """
    def has_object_permission(self, request, view, obj):
        return request.user.is_teacher and obj.is_student
| from rest_framework import permissions
class IsSelfUser(permissions.BasePermission):
    """
    Object-level permission: only the user themself may access the object.
    Assumes ``obj`` is a user instance (compared by equality with request.user).
    """
    def has_object_permission(self, request, view, obj):
        return obj == request.user
class IsAdminUser(permissions.BasePermission):
    """
    Custom permission to check if the user is an authenticated admin.
    Applies at both view level and object level; relies on the custom
    ``is_admin`` flag of the user model.
    """
    def has_permission(self, request, view):
        return request.user.is_authenticated and request.user.is_admin
    def has_object_permission(self, request, view, obj):
        return request.user.is_authenticated and request.user.is_admin
class IsTeacherAccessingStudent(permissions.BasePermission):
    """
    Object-level permission allowing a teacher to access a student user object.
    (The original docstring was a copy-paste of the admin permission.)
    """
    def has_object_permission(self, request, view, obj):
        return request.user.is_teacher and obj.is_student
src/main/python/hdfs_storage_client.py | eark-project/dm-hdfs-storage-client | 0 | 6620919 | import requests
SERVER_PROTOCOL_PREFIX = 'http://'
SERVER_NAME = '192.168.3.11/dm-hdfs-storage'  # host + base path of the dm-hdfs-storage service
SERVER_HDFS = SERVER_PROTOCOL_PREFIX + SERVER_NAME + '/hsink/fileresource'  # HSink file-resource endpoint
FILE_RESOURCE = SERVER_HDFS + '/files/{0}'  # per-file REST resource; {0} is the remote file name
def copy_to_hdfs(local_path):
    """Upload *local_path* to HDFS via the HSink REST file resource.

    Returns the remote path component (relative to /files/) on success,
    or an empty string on failure.
    """
    # Open in binary mode: uploads may be arbitrary files (e.g. tar archives);
    # text mode would corrupt or reject non-text content.
    with open(local_path, 'rb') as f:
        filename = local_path.rpartition('/')[2]
        r = requests.put(FILE_RESOURCE.format(filename), data=f)
    if r.status_code == 201:
        # The Location header points at the created resource; return its
        # path relative to the /files/ collection.
        return r.headers['location'].rpartition('/files/')[2]
    return ""
def copy_from_hdfs(hdfs_path, local_dir):
    """Download *hdfs_path* from HDFS and store it under *local_dir*.

    The file keeps its basename; content is streamed in 1 MiB chunks.
    """
    r = requests.get(FILE_RESOURCE.format(hdfs_path), stream=True)
    filename = hdfs_path.rpartition('/')[2]
    # Binary mode: chunks from iter_content are bytes; writing them to a
    # text-mode handle fails (Python 3) or corrupts binary data.
    with open(local_dir + '/' + filename, 'wb') as f:
        for chunk in r.iter_content(1024 * 1024):
            f.write(chunk)
#hdfs_path = copy_to_hdfs('some_aip.tar')
#print(hdfs_path)
#if (hdfs_path != ""):
# copy_from_hdfs(hdfs_path, 'local')
| import requests
SERVER_PROTOCOL_PREFIX = 'http://'
SERVER_NAME = '192.168.3.11/dm-hdfs-storage'  # host + base path of the dm-hdfs-storage service
SERVER_HDFS = SERVER_PROTOCOL_PREFIX + SERVER_NAME + '/hsink/fileresource'  # HSink file-resource endpoint
FILE_RESOURCE = SERVER_HDFS + '/files/{0}'  # per-file REST resource; {0} is the remote file name
def copy_to_hdfs(local_path):
    """Upload *local_path* to HDFS via the HSink REST file resource.

    Returns the remote path component (relative to /files/) on success,
    or an empty string on failure.
    """
    # Open in binary mode: uploads may be arbitrary files (e.g. tar archives);
    # text mode would corrupt or reject non-text content.
    with open(local_path, 'rb') as f:
        filename = local_path.rpartition('/')[2]
        r = requests.put(FILE_RESOURCE.format(filename), data=f)
    if r.status_code == 201:
        # The Location header points at the created resource; return its
        # path relative to the /files/ collection.
        return r.headers['location'].rpartition('/files/')[2]
    return ""
def copy_from_hdfs(hdfs_path, local_dir):
    """Download *hdfs_path* from HDFS and store it under *local_dir*.

    The file keeps its basename; content is streamed in 1 MiB chunks.
    """
    r = requests.get(FILE_RESOURCE.format(hdfs_path), stream=True)
    filename = hdfs_path.rpartition('/')[2]
    # Binary mode: chunks from iter_content are bytes; writing them to a
    # text-mode handle fails (Python 3) or corrupts binary data.
    with open(local_dir + '/' + filename, 'wb') as f:
        for chunk in r.iter_content(1024 * 1024):
            f.write(chunk)
#hdfs_path = copy_to_hdfs('some_aip.tar')
#print(hdfs_path)
#if (hdfs_path != ""):
# copy_from_hdfs(hdfs_path, 'local')
| en | 0.27024 | #hdfs_path = copy_to_hdfs('some_aip.tar') #print(hdfs_path) #if (hdfs_path != ""): # copy_from_hdfs(hdfs_path, 'local') | 2.972229 | 3 |
imread/tests/test_jpeg.py | luispedro/imread | 51 | 6620920 | <reponame>luispedro/imread
import pytest
import numpy as np
from imread import imread, imsave
from . import file_path
import glob
_filename = 'imread_testing_file.jpg'
@pytest.fixture(autouse=True)
def _remove_files():
    """Remove any *.jpg files created by a test (cleanup runs after each test)."""
    yield
    from os import unlink
    from glob import glob
    filelist = glob("*.jpg")
    for f in filelist:
        try:
            unlink(f)
        except OSError:
            # Best-effort cleanup: the file may already be gone or be
            # unremovable; never fail the test run over cleanup.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
def test_jpeg():
    """Round-trip a small uint8 image through JPEG; mean lossy error must stay below 1 grey level."""
    f = np.arange(64*16).reshape((64,16))
    f %= 16
    f = f.astype(np.uint8)
    imsave(_filename, f, 'jpeg')
    g = imread(_filename).squeeze()
    assert np.mean(np.abs(f.astype(float)-g)) < 1.
def test_error():
    """A corrupt JPEG must raise RuntimeError."""
    with pytest.raises(RuntimeError):
        imread(file_path('error.jpg'))
def test_error_noent():
    """A missing file must raise OSError, not a decoder error."""
    with pytest.raises(OSError):
        imread(file_path('this-file-does-not-exist.jpeg'))
def test_quality():
    """Higher jpeg:quality must yield a smaller mean pixel error than the default."""
    def pixel_diff(a):
        # Mean absolute per-pixel deviation from the original data.
        return np.mean(np.abs(a.astype(float) - data))
    data = np.arange(256*256*3)
    data %= 51
    data = data.reshape((256,256,3))
    data = data.astype(np.uint8)
    imsave('imread_def.jpg', data)
    imsave('imread_def91.jpg', data, opts={'jpeg:quality': 91} )
    readback = imread('imread_def.jpg')
    readback91 = imread('imread_def91.jpg')
    assert pixel_diff(readback91) < pixel_diff(readback)
| import pytest
import numpy as np
from imread import imread, imsave
from . import file_path
import glob
_filename = 'imread_testing_file.jpg'
@pytest.fixture(autouse=True)
def _remove_files():
    """Remove any *.jpg files created by a test (cleanup runs after each test)."""
    yield
    from os import unlink
    from glob import glob
    filelist = glob("*.jpg")
    for f in filelist:
        try:
            unlink(f)
        except OSError:
            # Best-effort cleanup: the file may already be gone or be
            # unremovable; never fail the test run over cleanup.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
def test_jpeg():
    """Round-trip a small uint8 image through JPEG; mean lossy error must stay below 1 grey level."""
    f = np.arange(64*16).reshape((64,16))
    f %= 16
    f = f.astype(np.uint8)
    imsave(_filename, f, 'jpeg')
    g = imread(_filename).squeeze()
    assert np.mean(np.abs(f.astype(float)-g)) < 1.
def test_error():
    """A corrupt JPEG must raise RuntimeError."""
    with pytest.raises(RuntimeError):
        imread(file_path('error.jpg'))
def test_error_noent():
    """A missing file must raise OSError, not a decoder error."""
    with pytest.raises(OSError):
        imread(file_path('this-file-does-not-exist.jpeg'))
def test_quality():
    """Higher jpeg:quality must yield a smaller mean pixel error than the default."""
    def pixel_diff(a):
        # Mean absolute per-pixel deviation from the original data.
        return np.mean(np.abs(a.astype(float) - data))
    data = np.arange(256*256*3)
    data %= 51
    data = data.reshape((256,256,3))
    data = data.astype(np.uint8)
    imsave('imread_def.jpg', data)
    imsave('imread_def91.jpg', data, opts={'jpeg:quality': 91} )
    readback = imread('imread_def.jpg')
    readback91 = imread('imread_def91.jpg')
    assert pixel_diff(readback91) < pixel_diff(readback)
assignment_02/src/q5.py | BhekimpiloNdhlela/TW324NumericalMethods | 1 | 6620921 | #!/usr/bin/python
def newton_method_sys(fxy, j0, j1, debug=True):
    """Solve the 2x2 nonlinear system f(x, y) = 0 with Newton's method (8 fixed iterations from (0, 0)).

    fxy -- callable returning (f1(x, y), f2(x, y))
    j0, j1 -- callables returning row 0 / row 1 of the Jacobian at (x, y)
    NOTE(review): Python 2 code; relies on numpy names (zeros, linalg,
    negative) imported into the module globals by the __main__ block below.
    """
    xn = zeros((2, 9)) #iteration history of x^[n + 1]; column 0 is the (0, 0) start
    jx = zeros((2, 2)) #current iteration Jacobian (NOT its inverse; the linear system is solved below)
    fx = zeros((2, 1)) #f(x^[n]) for the current iteration
    sx = zeros((2, 1))
    for i in xrange(len(xn[1]) - 1):
        jx[0][0], jx[0][1] = j0(xn[0][i], xn[1][i])
        jx[1][0], jx[1][1] = j1(xn[0][i], xn[1][i])
        fx[0][0], fx[1][0] = fxy(xn[0][i], xn[1][i])
        sx = linalg.solve(negative(jx), fx)  # Newton step: solve -J s = f
        xn[0][i + 1] = xn[0][i] + sx[0]
        xn[1][i + 1] = xn[1][i] + sx[1]
    if debug is True:
        print "xn = |", xn[0][-1], xn[1][-1], "|"
if __name__ == "__main__":
    # numpy/math names land in the module globals here and are relied on
    # by newton_method_sys above.
    from numpy import (array, zeros, exp, linalg, negative)
    from math import (cos, sin)
    # System from the assignment: f1 = x*e^y + y - 7, f2 = sin(x) - cos(y)
    fxy = lambda x, y: (x * exp(y) + y - 7, sin(x) - cos(y))
    # Jacobian rows: [df1/dx, df1/dy] and [df2/dx, df2/dy]
    j0 = lambda x, y: (exp(y), x * exp(y) + 1)
    j1 = lambda x, y: (cos(x), sin(y))
    newton_method_sys(fxy, j0, j1)
else:
    # Importing this file as a module aborts the interpreter on purpose.
    import sys
    sys.exit("please run as client...")
| #!/usr/bin/python
def newton_method_sys(fxy, j0, j1, debug=True):
    """Solve the 2x2 nonlinear system f(x, y) = 0 with Newton's method (8 fixed iterations from (0, 0)).

    fxy -- callable returning (f1(x, y), f2(x, y))
    j0, j1 -- callables returning row 0 / row 1 of the Jacobian at (x, y)
    NOTE(review): Python 2 code; relies on numpy names (zeros, linalg,
    negative) imported into the module globals by the __main__ block below.
    """
    xn = zeros((2, 9)) #iteration history of x^[n + 1]; column 0 is the (0, 0) start
    jx = zeros((2, 2)) #current iteration Jacobian (NOT its inverse; the linear system is solved below)
    fx = zeros((2, 1)) #f(x^[n]) for the current iteration
    sx = zeros((2, 1))
    for i in xrange(len(xn[1]) - 1):
        jx[0][0], jx[0][1] = j0(xn[0][i], xn[1][i])
        jx[1][0], jx[1][1] = j1(xn[0][i], xn[1][i])
        fx[0][0], fx[1][0] = fxy(xn[0][i], xn[1][i])
        sx = linalg.solve(negative(jx), fx)  # Newton step: solve -J s = f
        xn[0][i + 1] = xn[0][i] + sx[0]
        xn[1][i + 1] = xn[1][i] + sx[1]
    if debug is True:
        print "xn = |", xn[0][-1], xn[1][-1], "|"
if __name__ == "__main__":
    # numpy/math names land in the module globals here and are relied on
    # by newton_method_sys above.
    from numpy import (array, zeros, exp, linalg, negative)
    from math import (cos, sin)
    # System from the assignment: f1 = x*e^y + y - 7, f2 = sin(x) - cos(y)
    fxy = lambda x, y: (x * exp(y) + y - 7, sin(x) - cos(y))
    # Jacobian rows: [df1/dx, df1/dy] and [df2/dx, df2/dy]
    j0 = lambda x, y: (exp(y), x * exp(y) + 1)
    j1 = lambda x, y: (cos(x), sin(y))
    newton_method_sys(fxy, j0, j1)
else:
    # Importing this file as a module aborts the interpreter on purpose.
    import sys
    sys.exit("please run as client...")
| en | 0.611834 | #!/usr/bin/python #store itteration results for x^[n + 1] #store currant itteration jacobian inverse #store the results of the f(x^[n]) for a particular itteration | 3.259489 | 3 |
fonts/analysisunigram.py | anuragaroraaa/Sentiment-Analysis-of-Online-Reviews | 0 | 6620922 | import pickle
import re
import nltk
pos=negat=neu=0
def get_words_in_tweet(tweet):
    """Tokenise a tweet: every non-word character becomes a space, then split on whitespace."""
    return re.sub("[^\w]", " ", tweet).split()
def get_word_features(wordlist):
    """Return the distinct words of *wordlist* (keys of its nltk frequency distribution)."""
    wordlist = nltk.FreqDist(wordlist)
    word_features = wordlist.keys()
    return word_features
def extract_features(document):
    """Map each known feature word to whether it occurs in *document*.

    NOTE(review): reads the module-global ``word_features`` (rebound per
    tweet in the classification loop), not a parameter - verify that this
    coupling is intended.
    """
    document_words = set(document)
    features = {}
    for word in word_features:
        features['contains(%s)' % word] = (word in document_words)
    return features
# Load the pre-trained sentiment classifier from disk.
f = open('my_classifier.pickle0089607', 'rb')
classifier = pickle.load(f)
# Truncate the output files left over from any previous run.
open('classified.txt', 'w').close()
open('pre_processing.txt', 'w').close()
# Pre-process the raw tweets (writes pre_processing.txt).  Python 2 only.
execfile('pre_processing.py')
File=open("twitDB46.txt")  # raw tweets
File1=open("pre_processing.txt")  # pre-processed tweets (parallel lines)
N=50  # number of tweets to classify
for i in range(N):
    original_line=File.next().strip()
    original_tweet=original_line
    processed_line=File1.next().strip()
    processed_tweet=processed_line
    # Rebinds the module-global word_features read by extract_features().
    word_features = get_word_features(get_words_in_tweet(processed_tweet))
    classified_score=classifier.classify(extract_features(processed_tweet.split()))
    score=classified_score
    # Labels '4'/'0'/'2' are mapped to positive/negative/neutral tallies
    # (presumably Sentiment140-style labels - confirm with the training data).
    if(score=='4'):
        pos=pos+1
        classified_score='POSITIVE'
    if(score=='0'):
        negat=negat+1
        classified_score='NEGATIVE'
    if(score=='2'):
        neu=neu+1
        classified_score='NEUTRAL '
    classified_Tweet= classified_score+ ": "+original_tweet
    saveFile=open('classified.txt','a')
    saveFile.write(classified_Tweet)
    saveFile.write('\n')
# Persist the per-class counts for the downstream plotting/analysis step.
posfile=open('poscount.txt','w')
posfile.write(str(pos))
posfile.close()
negfile=open('negcount.txt','w')
negfile.write(str(negat))
negfile.close()
neutralfile=open('neutralcount.txt','w')
neutralfile.write(str(neu))
neutralfile.close()
f.close();
File.close();
File1.close();
saveFile.close()
| import pickle
import re
import nltk
pos=negat=neu=0
def get_words_in_tweet(tweet):
    """Tokenise a tweet: every non-word character becomes a space, then split on whitespace."""
    return re.sub("[^\w]", " ", tweet).split()
def get_word_features(wordlist):
    """Return the distinct words of *wordlist* (keys of its nltk frequency distribution)."""
    wordlist = nltk.FreqDist(wordlist)
    word_features = wordlist.keys()
    return word_features
def extract_features(document):
    """Map each known feature word to whether it occurs in *document*.

    NOTE(review): reads the module-global ``word_features`` (rebound per
    tweet in the classification loop), not a parameter - verify that this
    coupling is intended.
    """
    document_words = set(document)
    features = {}
    for word in word_features:
        features['contains(%s)' % word] = (word in document_words)
    return features
# Load the pre-trained sentiment classifier from disk.
f = open('my_classifier.pickle0089607', 'rb')
classifier = pickle.load(f)
# Truncate the output files left over from any previous run.
open('classified.txt', 'w').close()
open('pre_processing.txt', 'w').close()
# Pre-process the raw tweets (writes pre_processing.txt).  Python 2 only.
execfile('pre_processing.py')
File=open("twitDB46.txt")  # raw tweets
File1=open("pre_processing.txt")  # pre-processed tweets (parallel lines)
N=50  # number of tweets to classify
for i in range(N):
    original_line=File.next().strip()
    original_tweet=original_line
    processed_line=File1.next().strip()
    processed_tweet=processed_line
    # Rebinds the module-global word_features read by extract_features().
    word_features = get_word_features(get_words_in_tweet(processed_tweet))
    classified_score=classifier.classify(extract_features(processed_tweet.split()))
    score=classified_score
    # Labels '4'/'0'/'2' are mapped to positive/negative/neutral tallies
    # (presumably Sentiment140-style labels - confirm with the training data).
    if(score=='4'):
        pos=pos+1
        classified_score='POSITIVE'
    if(score=='0'):
        negat=negat+1
        classified_score='NEGATIVE'
    if(score=='2'):
        neu=neu+1
        classified_score='NEUTRAL '
    classified_Tweet= classified_score+ ": "+original_tweet
    saveFile=open('classified.txt','a')
    saveFile.write(classified_Tweet)
    saveFile.write('\n')
# Persist the per-class counts for the downstream plotting/analysis step.
posfile=open('poscount.txt','w')
posfile.write(str(pos))
posfile.close()
negfile=open('negcount.txt','w')
negfile.write(str(negat))
negfile.close()
neutralfile=open('neutralcount.txt','w')
neutralfile.write(str(neu))
neutralfile.close()
f.close();
File.close();
File1.close();
saveFile.close()
| en | 0.41179 | # print wordList | 2.981697 | 3 |
test/test_plug.py | piotrostr/disco_py | 0 | 6620923 | from discum import Plug
def test_plug_works():
    """Plug initialises with an API key and URL, and hello() answers "eyo"."""
    plug = Plug()
    assert plug.api_key
    assert plug.url
    res = plug.hello()
    assert res == "eyo"
def test_plug_gets_and_returns_users():
    """A fetched user carries a token and can be returned (HTTP 201)."""
    plug = Plug()
    user = plug.get_user()
    assert user
    assert user["token"]
    res_code = plug.return_user(user)
    assert res_code == 201
| from discum import Plug
def test_plug_works():
    """Plug initialises with an API key and URL, and hello() answers "eyo"."""
    plug = Plug()
    assert plug.api_key
    assert plug.url
    res = plug.hello()
    assert res == "eyo"
def test_plug_gets_and_returns_users():
    """A fetched user carries a token and can be returned (HTTP 201)."""
    plug = Plug()
    user = plug.get_user()
    assert user
    assert user["token"]
    res_code = plug.return_user(user)
    assert res_code == 201
| none | 1 | 2.277151 | 2 | |
src/rogerthat/bizz/maps/poi/models.py | goubertbrent/oca-backend | 0 | 6620924 | # -*- coding: utf-8 -*-
# Copyright 2021 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from babel import Locale
from google.appengine.ext import ndb
from google.appengine.ext.ndb.model import GeoPtProperty, GeoPt, StringProperty, TextProperty, IntegerProperty, \
StructuredProperty, BooleanProperty
from typing import List
from mcfw.utils import Enum
from rogerthat.models import NdbModel, OpeningHours
from rogerthat.models.settings import MediaItem
class POILocation(NdbModel):
    """Geographic location of a point of interest (embedded in PointOfInterest)."""
    coordinates = GeoPtProperty(required=True)  # type: GeoPt
    google_maps_place_id = StringProperty()  # optional Google Maps place id
    country = TextProperty()  # country code, e.g. BE
    locality = TextProperty()  # e.g. Nazareth
    postal_code = TextProperty()  # e.g. 9810
    street = TextProperty()  # e.g. Steenweg Deinze
    street_number = TextProperty()  # e.g. 154
    timezone = TextProperty(required=True)  # timezone name - TODO confirm exact format (IANA?)
class POIStatus(Enum):
    """Server-managed visibility status of a point of interest."""
    # Not visible because incomplete (e.g. missing place type or location)
    INCOMPLETE = 0
    # Visible on map
    VISIBLE = 1
    # Not visible on map (explicitly hidden)
    INVISIBLE = 2
class PointOfInterest(NdbModel):
    """A point of interest shown on a community map."""
    community_id = IntegerProperty(required=True)
    title = TextProperty(required=True)
    description = TextProperty()
    location = StructuredProperty(POILocation, required=True, indexed=False)  # type: POILocation
    main_place_type = TextProperty()
    place_types = TextProperty(repeated=True)
    opening_hours = StructuredProperty(OpeningHours, required=True, indexed=False)
    media = StructuredProperty(MediaItem, repeated=True, indexed=False)  # type: List[MediaItem]
    visible = BooleanProperty()
    status = IntegerProperty(choices=POIStatus.all())  # status is only set by server (not by client/dashboard)
    @property
    def id(self):
        # type: () -> int
        """Datastore integer id of this entity."""
        return self.key.integer_id()
    @property
    def has_complete_info(self):
        """True when every field needed to show the POI on the map is filled in."""
        return all((self.title, self.location, self.location.coordinates, self.main_place_type, self.place_types))
    def get_address_line(self, locale):
        """Format the address as one line, with the country name localised for *locale*."""
        country_name = Locale(locale).territories[self.location.country]
        parts = []
        if self.location.street:
            parts.append(self.location.street)
        if self.location.street_number:
            parts.append(self.location.street_number)
        if parts:
            # Comma after the street (+ number) part only when one is present.
            parts[-1] += ','
        parts.append(self.location.postal_code)
        parts.append(self.location.locality + ',')
        parts.append(country_name)
        return ' '.join(parts)
    @classmethod
    def create_key(cls, poi_id):
        # type: (int) -> ndb.Key
        assert isinstance(poi_id, (int, long))
        return ndb.Key(cls, poi_id)
    @classmethod
    def list_by_community(cls, community_id):
        """Query all points of interest belonging to *community_id*."""
        return cls.query().filter(cls.community_id == community_id)
| # -*- coding: utf-8 -*-
# Copyright 2021 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from babel import Locale
from google.appengine.ext import ndb
from google.appengine.ext.ndb.model import GeoPtProperty, GeoPt, StringProperty, TextProperty, IntegerProperty, \
StructuredProperty, BooleanProperty
from typing import List
from mcfw.utils import Enum
from rogerthat.models import NdbModel, OpeningHours
from rogerthat.models.settings import MediaItem
class POILocation(NdbModel):
    """Geographic location of a point of interest (embedded in PointOfInterest)."""
    coordinates = GeoPtProperty(required=True)  # type: GeoPt
    google_maps_place_id = StringProperty()  # optional Google Maps place id
    country = TextProperty()  # country code, e.g. BE
    locality = TextProperty()  # e.g. Nazareth
    postal_code = TextProperty()  # e.g. 9810
    street = TextProperty()  # e.g. Steenweg Deinze
    street_number = TextProperty()  # e.g. 154
    timezone = TextProperty(required=True)  # timezone name - TODO confirm exact format (IANA?)
class POIStatus(Enum):
    """Server-managed visibility status of a point of interest."""
    # Not visible because incomplete (e.g. missing place type or location)
    INCOMPLETE = 0
    # Visible on map
    VISIBLE = 1
    # Not visible on map (explicitly hidden)
    INVISIBLE = 2
class PointOfInterest(NdbModel):
    """A point of interest shown on a community map."""
    community_id = IntegerProperty(required=True)
    title = TextProperty(required=True)
    description = TextProperty()
    location = StructuredProperty(POILocation, required=True, indexed=False)  # type: POILocation
    main_place_type = TextProperty()
    place_types = TextProperty(repeated=True)
    opening_hours = StructuredProperty(OpeningHours, required=True, indexed=False)
    media = StructuredProperty(MediaItem, repeated=True, indexed=False)  # type: List[MediaItem]
    visible = BooleanProperty()
    status = IntegerProperty(choices=POIStatus.all())  # status is only set by server (not by client/dashboard)
    @property
    def id(self):
        # type: () -> int
        """Datastore integer id of this entity."""
        return self.key.integer_id()
    @property
    def has_complete_info(self):
        """True when every field needed to show the POI on the map is filled in."""
        return all((self.title, self.location, self.location.coordinates, self.main_place_type, self.place_types))
    def get_address_line(self, locale):
        """Format the address as one line, with the country name localised for *locale*."""
        country_name = Locale(locale).territories[self.location.country]
        parts = []
        if self.location.street:
            parts.append(self.location.street)
        if self.location.street_number:
            parts.append(self.location.street_number)
        if parts:
            # Comma after the street (+ number) part only when one is present.
            parts[-1] += ','
        parts.append(self.location.postal_code)
        parts.append(self.location.locality + ',')
        parts.append(country_name)
        return ' '.join(parts)
    @classmethod
    def create_key(cls, poi_id):
        # type: (int) -> ndb.Key
        assert isinstance(poi_id, (int, long))
        return ndb.Key(cls, poi_id)
    @classmethod
    def list_by_community(cls, community_id):
        """Query all points of interest belonging to *community_id*."""
        return cls.query().filter(cls.community_id == community_id)
| en | 0.787287 | # -*- coding: utf-8 -*- # Copyright 2021 Green Valley NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.5@@ # type: GeoPt # BE # Nazareth # 9810 # Steenweg Deinze # 154 # Not visible because incomplete (e.g. missing place type or location) # Visible on map # Not visible on map # type: POILocation # type: List[MediaItem] # status is only set by server (not by client/dashboard) # type: (int) -> ndb.Key | 2.016236 | 2 |
src/pytk/factory/model/softmax.py | bigblindbais/pytk | 0 | 6620925 | <gh_stars>0
from .model import Model
import numpy as np
import numpy.random as rnd
from scipy.misc import logsumexp
import string
class Softmax(Model):
    def __init__(self, *yfactories, cond=None):
        """Softmax (log-linear) conditional model p(y | x) over factored discrete variables.

        yfactories -- factories for the output (y) variables
        cond -- conditioning (x) factories, handled by the Model base class
        """
        super().__init__(*yfactories, cond=cond)
        self.xshape = tuple(f.nitems for f in self.xfactories)
        self.yshape = tuple(f.nitems for f in self.yfactories)
        self.shape = self.xshape + self.yshape
        # NOTE np.prod would return float 1.0 if xshape is empty
        self.xsize = np.prod(self.xshape, dtype=np.int64)
        self.ysize = np.prod(self.yshape)
        self.size = self.xsize * self.ysize
        self.xaxis = tuple(range(self.nx))  # axis indices of the x variables
        self.yaxis = tuple(range(self.nx, self.nxy))  # axis indices of the y variables
        # subscripts for np.einsum
        self.xss = string.ascii_lowercase[:self.nx]
        self.yss = string.ascii_lowercase[self.nx:self.nxy]
        self.xyss = string.ascii_lowercase[:self.nxy]
        # one-hot feature tensor: identity of size `size`, reshaped so each
        # (x, y) assignment indexes its own indicator; precomputed once
        self.__phi = np.eye(self.size).reshape(2 * self.shape)
        self.reset()
    def reset(self):
        """Re-initialise the preference parameters with standard-normal draws."""
        # self.params = np.zeros(self.shape)
        self.params = rnd.normal(size=self.shape)
        # alternative initialisations kept for experimentation:
        # self.params = 2 * rnd.normal(size=self.shape)
        # self.params = 3 * (.5 - rnd.random_sample(self.shape))
@staticmethod
def index(item, *, keepdims=False):
if item is None:
return slice(None)
if item is Ellipsis:
return slice(None)
if isinstance(item, slice):
return item
if keepdims: # NOTE Not currently being used
return slice(item.i, item.i+1) # keeps dimensions when indexing
# Assumes item is an Item
return item.i
def indices(self, *items, keepdims=False):
items += (None,) * (self.nxy - len(items))
return tuple(self.index(item, keepdims=keepdims) for item in items)
def xyindices(self, *items):
idx = self.indices(*items)
return idx[:self.nx], idx[self.nx:]
def xyitems(self, *items):
return items[:self.nx], items[self.nx:]
def prefs(self, *items):
idx = self.indices(*items)
return self.params[idx]
def logprobs(self, *items, normalized=False):
idx = self.indices(*items)
prefs = self.params
logprobs = prefs
if normalized:
logprobs -= logsumexp(prefs, axis=self.yaxis, keepdims=True)
return logprobs[idx]
def probs(self, *items):
logprobs = self.logprobs(*items)
probs = np.exp(logprobs - logprobs.max())
# TODO future bug! only normalize the y axes which were not given!!
# TODO bug already happening...
# TODO BUG BUG BUG BUG BUG BUG BUG BUG BUG BUG BUG BUG BUG BUG BUG BUG
return probs / probs.sum()
# all the next methods assume that all items are given!!
# generalize! this will simplify logprob!
def phi(self, *items):
idx = self.indices(*items)
return self.__phi[idx]
def dprefs(self, *items):
return self.phi(*items)
# def dlogprobs(self, *items):
# xidx, _ = self.xyindices(*items)
# idx = self.indices(*items)
# dprefs = self.dprefs()
# probs = self.probs()
# # subscripts = f'{self.xyss},{self.xyss}...->{self.xss}...'
# # Edprefs = np.einsum(subscripts, probs, dprefs)
# # dlogprobs = dprefs[idx] - Edprefs[xidx]
# # return dlogprobs
# subscripts = f'{self.yss},{self.yss}...->...'
# Edprefs = np.einsum(subscripts, probs[xidx], dprefs[xidx])
# dlogprobs = dprefs[idx] - Edprefs
# return dlogprobs
def dlogprobs(self, *items):
xitems, _ = self.xyitems(*items)
xidx, yidx = self.xyindices(*items)
idx = self.indices(*items)
dprefs = self.dprefs(*xitems)
probs = self.probs(*xitems)
dlogprobs = dprefs[yidx] - np.tensordot(probs, dprefs, axes=self.ny)
return dlogprobs
# TODO better interface; *items is not great...
def dprobs(self, *items):
probs = self.probs(*items))
dlogprobs = self.dlogprobs(*items)
# TODO just multiply these...
# def ddprefs(self, *items):
# idx = self.indices(*items)
# ddprefs = np.zeros(3 * self.shape)
# return ddprefs[idx]
# def ddlogprobs(self, *items):
# dprefs = self.dprefs()
# probs = self.probs()
# dprefs2 =
# Edprefs = np.tensordot(probs, dprefs, axes=(self.yaxis, self.yaxis))
# # collapsing xidx indices
# subscripts = f'{self.xss}{self.xss}...->{self.xss}...'
# Edprefs = np.einsum(subscripts, Edprefs)
# Edprefs_2 =
# E_dprefs2 =
# ddlogprobs = Edprefs * Edprefs - Edprefs_2
# return ddlogprobs[idx]
def dist(self, *xitems):
assert len(xitems) == self.nx
probs = self.probs(*xitems)
for yi in range(self.ysize):
yidx = np.unravel_index(yi, self.yshape)
yitems = tuple(f.item(i) for f, i in zip(self.yfactories, yidx))
yield yitems + (probs[yidx],)
def pr(self, *items):
assert len(items) == self.nxy
return self.probs(*items)
def sample(self, *xitems):
assert len(xitems) == self.nx
# TODO kinda like a JointFactory but without names; just indices?
probs = self.probs(*xitems).ravel()
# yi = rnd.choice(self.ysize, p=probs)
yi = rnd.multinomial(1, probs).argmax()
yidx = np.unravel_index(yi, self.yshape)
yitems = tuple(f.item(i) for f, i in zip(self.yfactories, yidx))
if len(yitems) == 1:
return yitems[0]
return yitems
| from .model import Model
import numpy as np
import numpy.random as rnd
from scipy.misc import logsumexp
import string
class Softmax(Model):
    """Conditional softmax model p(y | x) with one preference per (x, y) cell.

    The conditioning factories define the x axes; ``yfactories`` define the
    outcome axes.  ``self.params`` holds unnormalized preferences with shape
    ``xshape + yshape``; probabilities are the softmax of the preferences
    over the y axes.

    NOTE(review): ``logsumexp`` comes from the file-level
    ``scipy.misc`` import, which was removed in modern SciPy — prefer
    ``scipy.special.logsumexp`` when the imports are next touched.
    """

    def __init__(self, *yfactories, cond=None):
        super().__init__(*yfactories, cond=cond)
        self.xshape = tuple(f.nitems for f in self.xfactories)
        self.yshape = tuple(f.nitems for f in self.yfactories)
        self.shape = self.xshape + self.yshape
        # NOTE np.prod would return float 1.0 if xshape is empty
        self.xsize = np.prod(self.xshape, dtype=np.int64)
        self.ysize = np.prod(self.yshape)
        self.size = self.xsize * self.ysize
        self.xaxis = tuple(range(self.nx))
        self.yaxis = tuple(range(self.nx, self.nxy))
        # subscripts for np.einsum
        self.xss = string.ascii_lowercase[:self.nx]
        self.yss = string.ascii_lowercase[self.nx:self.nxy]
        self.xyss = string.ascii_lowercase[:self.nxy]
        # One-hot feature tensor (identity over flattened cells), precomputed
        # once; phi() just indexes into it.
        self.__phi = np.eye(self.size).reshape(2 * self.shape)
        self.reset()

    def reset(self):
        """Reinitialize preferences with standard-normal noise."""
        self.params = rnd.normal(size=self.shape)

    @staticmethod
    def index(item, *, keepdims=False):
        """Translate an Item / None / Ellipsis / slice into an ndarray index."""
        if item is None or item is Ellipsis:
            return slice(None)
        if isinstance(item, slice):
            return item
        if keepdims:  # NOTE not currently used
            return slice(item.i, item.i + 1)  # keeps dimensions when indexing
        # Assumes item is an Item
        return item.i

    def indices(self, *items, keepdims=False):
        """Pad ``items`` with None up to nxy and convert each to an index."""
        items += (None,) * (self.nxy - len(items))
        return tuple(self.index(item, keepdims=keepdims) for item in items)

    def xyindices(self, *items):
        """Split the full index tuple into (x indices, y indices)."""
        idx = self.indices(*items)
        return idx[:self.nx], idx[self.nx:]

    def xyitems(self, *items):
        """Split the raw item tuple into (x items, y items)."""
        return items[:self.nx], items[self.nx:]

    def prefs(self, *items):
        """Raw (unnormalized) preferences for the selected cells."""
        idx = self.indices(*items)
        return self.params[idx]

    def logprobs(self, *items, normalized=False):
        """Log-preferences, optionally normalized over the y axes.

        BUGFIX: the original subtracted the log-partition *in place*
        (``logprobs -= ...``) on an alias of ``self.params``, silently
        corrupting the parameters whenever ``normalized=True`` was used.
        """
        idx = self.indices(*items)
        logprobs = self.params
        if normalized:
            # Out-of-place subtraction: must not mutate self.params.
            logprobs = logprobs - logsumexp(logprobs, axis=self.yaxis,
                                            keepdims=True)
        return logprobs[idx]

    def probs(self, *items):
        """Softmax probabilities for the selected cells (max-shifted exp)."""
        logprobs = self.logprobs(*items)
        probs = np.exp(logprobs - logprobs.max())
        # TODO future bug! only normalize the y axes which were not given!!
        # TODO bug already happening...
        return probs / probs.sum()

    # The gradient methods below assume that *all* items are given.
    def phi(self, *items):
        """One-hot feature tensor for the selected cells."""
        idx = self.indices(*items)
        return self.__phi[idx]

    def dprefs(self, *items):
        """Gradient of preferences w.r.t. params (== the one-hot features)."""
        return self.phi(*items)

    def dlogprobs(self, *items):
        """Gradient of log p(y|x) w.r.t. params: phi(x, y) - E_y[phi(x, y)]."""
        xitems, _ = self.xyitems(*items)
        xidx, yidx = self.xyindices(*items)
        idx = self.indices(*items)
        dprefs = self.dprefs(*xitems)
        probs = self.probs(*xitems)
        dlogprobs = dprefs[yidx] - np.tensordot(probs, dprefs, axes=self.ny)
        return dlogprobs

    def dprobs(self, *items):
        """Gradient of p(y|x) w.r.t. params via the chain rule dp = p * dlogp.

        BUGFIX: the original had a syntax error (stray closing paren) and
        never returned a value.
        """
        probs = self.probs(*items)
        dlogprobs = self.dlogprobs(*items)
        return probs * dlogprobs

    def dist(self, *xitems):
        """Yield (yitems..., probability) for every outcome given x."""
        assert len(xitems) == self.nx
        probs = self.probs(*xitems)
        for yi in range(self.ysize):
            yidx = np.unravel_index(yi, self.yshape)
            yitems = tuple(f.item(i) for f, i in zip(self.yfactories, yidx))
            yield yitems + (probs[yidx],)

    def pr(self, *items):
        """Probability of a fully specified (x, y) assignment."""
        assert len(items) == self.nxy
        return self.probs(*items)

    def sample(self, *xitems):
        """Sample y ~ p(y | x); returns a single item or a tuple of items."""
        assert len(xitems) == self.nx
        # TODO kinda like a JointFactory but without names; just indices?
        probs = self.probs(*xitems).ravel()
        yi = rnd.multinomial(1, probs).argmax()
        yidx = np.unravel_index(yi, self.yshape)
        yitems = tuple(f.item(i) for f, i in zip(self.yfactories, yidx))
        if len(yitems) == 1:
            return yitems[0]
        return yitems
src/db_util.py | restaumatic/scheduled-psql-script-runner | 3 | 6620926 | import psycopg2
def make_conn(host, db, user, password):
    """Open a psycopg2 connection to `db` on `host`; return None on failure.

    BUGFIX: the original bare ``except:`` swallowed *every* exception
    (including KeyboardInterrupt and programming errors); narrowed to
    psycopg2's own error hierarchy.
    """
    try:
        return psycopg2.connect(
            "dbname='%s' user='%s' host='%s' password='%s'" % (db, user, host, password))
    except psycopg2.Error:
        print("ERROR: make_conn - unable to connect to the database")
        return None
def fetch_data(conn, query):
    """Execute `query` on `conn` and return all rows as a list.

    Logs the query to stdout before running it (matches the file's other
    helpers).
    """
    print("Running query: {}".format(query))
    cursor = conn.cursor()
    cursor.execute(query)
    # fetchall() already yields every row; copy into a fresh list to keep
    # the original return type/ownership semantics.
    return list(cursor.fetchall())
def fetch_data_to_file(conn, query, output_filename):
    """Stream `query` results into `output_filename` as CSV (with header)."""
    print("Running query: {}".format(query))
    copy_sql = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(query)
    cur = conn.cursor()
    with open(output_filename, 'w') as out:
        cur.copy_expert(copy_sql, out)
def make_conn(host, db, user, password):
    """Open a psycopg2 connection to `db` on `host`; return None on failure.

    BUGFIX: the original bare ``except:`` swallowed *every* exception
    (including KeyboardInterrupt and programming errors); narrowed to
    psycopg2's own error hierarchy.
    """
    try:
        return psycopg2.connect(
            "dbname='%s' user='%s' host='%s' password='%s'" % (db, user, host, password))
    except psycopg2.Error:
        print("ERROR: make_conn - unable to connect to the database")
        return None
def fetch_data(conn, query):
    """Execute `query` on `conn` and return all rows as a list.

    Logs the query to stdout before running it (matches the file's other
    helpers).
    """
    print("Running query: {}".format(query))
    cursor = conn.cursor()
    cursor.execute(query)
    # fetchall() already yields every row; copy into a fresh list to keep
    # the original return type/ownership semantics.
    return list(cursor.fetchall())
def fetch_data_to_file(conn, query, output_filename):
    """Stream `query` results into `output_filename` as CSV (with header)."""
    print("Running query: {}".format(query))
    copy_sql = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(query)
    cur = conn.cursor()
    with open(output_filename, 'w') as out:
        cur.copy_expert(copy_sql, out)
netbox_ipmi_ovh/forms.py | sanecz/netbox-ipmi-ovh-plugin | 7 | 6620927 | from django import forms
from utilities.forms import BootstrapMixin
from netbox_ipmi_ovh.models import Ipmi
class UserIpmiCfgForm(BootstrapMixin, forms.ModelForm):
ssh_key_name = forms.CharField(
label="SSH key name",
help_text="Name of the ssh key added in the OVH Manager",
max_length=100,
required=False
)
ip_to_allow = forms.CharField(
label="IP to allow",
help_text=(
"Leave this value to empty if you want to ip from your"
"http request to be allowed for the IPMI connection. "
"If you're using a proxy or VPN, please set the correct IP"
"to be sent to allowed ip for the ipmi login."
),
max_length=100,
required=False
)
class Meta:
model = Ipmi
fields = ["ssh_key_name", "ip_to_allow"]
| from django import forms
from utilities.forms import BootstrapMixin
from netbox_ipmi_ovh.models import Ipmi
class UserIpmiCfgForm(BootstrapMixin, forms.ModelForm):
    """Per-user IPMI settings: OVH SSH key name and optional source-IP override.

    BUGFIX: the implicit string concatenation in ``ip_to_allow.help_text``
    was missing trailing spaces, rendering as "yourhttp request" and
    "IPto be sent"; the sentence was also garbled and has been reworded.
    """

    ssh_key_name = forms.CharField(
        label="SSH key name",
        help_text="Name of the ssh key added in the OVH Manager",
        max_length=100,
        required=False
    )
    ip_to_allow = forms.CharField(
        label="IP to allow",
        # Adjacent string literals concatenate: keep the trailing spaces so
        # the rendered sentence has word breaks.
        help_text=(
            "Leave this value empty if you want the IP from your "
            "HTTP request to be allowed for the IPMI connection. "
            "If you're using a proxy or VPN, please set the correct IP "
            "to be sent as the allowed IP for the IPMI login."
        ),
        max_length=100,
        required=False
    )

    class Meta:
        model = Ipmi
        fields = ["ssh_key_name", "ip_to_allow"]
| none | 1 | 2.401164 | 2 | |
009_Motor/main.py | DaliSummer/MindstormsEV3_py | 0 | 6620928 | <gh_stars>0
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import Motor, InfraredSensor
from pybricks.parameters import Port, Stop, Direction, Button
from pybricks.tools import wait
from pybricks.media.ev3dev import Font
def printMotor(motor, screen):
    """Show the motor's current speed and angle on the EV3 screen."""
    screen.print('speed: ', motor.speed(), 'deg/s')
    screen.print('angle: ', motor.angle(), 'deg')
def waiter(ir_sensor):
    """Block until LEFT_UP is pressed on the IR remote (channel 1)."""
    while True:
        pressed = ir_sensor.buttons(1)
        wait(150)  # poll every 150 ms
        if Button.LEFT_UP in pressed:
            return
# Demo flow: print a banner, issue one motor command, then block in waiter()
# until LEFT_UP is pressed on the IR remote, so the user paces each step.
big_font = Font(size=16, bold=True)
ev3 = EV3Brick()
ev3.screen.set_font(big_font)
# Initialize IR sensor
ir = InfraredSensor(Port.S4)
# Initialize motors
# NOTE(review): motorC is declared with a [36, 12] gear train -- presumably
# speed/angle commands are scaled by that 3:1 ratio; confirm with the
# pybricks Motor documentation.
motorB = Motor(port=Port.B, positive_direction=Direction.CLOCKWISE, gears=None)
motorC = Motor(port=Port.C, positive_direction=Direction.COUNTERCLOCKWISE, gears=[36, 12])
# Free-running speed demo on motor C.
ev3.screen.print('=Motor C=')
printMotor(motorC, ev3.screen)
waiter(ir)
ev3.screen.print('=run 500=')
motorC.run(500)
waiter(ir)
printMotor(motorC, ev3.screen)
motorC.stop()
waiter(ir)
# Same free-running demo on motor B.
ev3.screen.print('=Motor B=')
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=run 500=')
motorB.run(500)
waiter(ir)
printMotor(motorB, ev3.screen)
motorB.stop()
waiter(ir)
# Timed runs comparing the three stop behaviours (brake / coast / hold).
ev3.screen.print('=run brake=')
motorB.run_time(speed=500, time=3000, then=Stop.BRAKE, wait=True)
waiter(ir)
ev3.screen.print('=run coast=')
motorB.run_time(speed=500, time=3000, then=Stop.COAST, wait=True)
waiter(ir)
ev3.screen.print('=run hold=')
motorB.run_time(speed=500, time=3000, then=Stop.HOLD, wait=True)
waiter(ir)
# Position control: zero the encoder, then relative and absolute moves.
ev3.screen.print('=reset angle 0=')
motorB.reset_angle(0)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=angle 180=')
motorB.run_angle(speed=500, rotation_angle=180, then=Stop.HOLD, wait=True)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=run target 90=')
motorB.run_target(speed=500, target_angle=90, then=Stop.HOLD, wait=True)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=angle 90=')
motorB.run_angle(speed=500, rotation_angle=90, then=Stop.HOLD, wait=True)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=track target 90=')
motorB.track_target(target_angle=90)
waiter(ir)
# Stall detection: run slowly with a low duty limit until the motor stalls.
ev3.screen.print('=until stalled=')
motorB.run_until_stalled(15, then=Stop.COAST, duty_limit=10)
printMotor(motorB, ev3.screen)
waiter(ir)
# Open-loop duty-cycle control in both directions.
ev3.screen.print('=duty 100=')
motorB.dc(100)
waiter(ir)
printMotor(motorB, ev3.screen)
motorB.stop()
waiter(ir)
ev3.screen.print('=duty -100=')
motorB.dc(-100)
waiter(ir)
printMotor(motorB, ev3.screen)
motorB.stop()
waiter(ir) | #!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import Motor, InfraredSensor
from pybricks.parameters import Port, Stop, Direction, Button
from pybricks.tools import wait
from pybricks.media.ev3dev import Font
def printMotor(motor, screen):
    """Show the motor's current speed and angle on the EV3 screen."""
    screen.print('speed: ', motor.speed(), 'deg/s')
    screen.print('angle: ', motor.angle(), 'deg')
def waiter(ir_sensor):
    """Block until LEFT_UP is pressed on the IR remote (channel 1)."""
    while True:
        pressed = ir_sensor.buttons(1)
        wait(150)  # poll every 150 ms
        if Button.LEFT_UP in pressed:
            return
# Demo flow: print a banner, issue one motor command, then block in waiter()
# until LEFT_UP is pressed on the IR remote, so the user paces each step.
big_font = Font(size=16, bold=True)
ev3 = EV3Brick()
ev3.screen.set_font(big_font)
# Initialize IR sensor
ir = InfraredSensor(Port.S4)
# Initialize motors
# NOTE(review): motorC is declared with a [36, 12] gear train -- presumably
# speed/angle commands are scaled by that 3:1 ratio; confirm with the
# pybricks Motor documentation.
motorB = Motor(port=Port.B, positive_direction=Direction.CLOCKWISE, gears=None)
motorC = Motor(port=Port.C, positive_direction=Direction.COUNTERCLOCKWISE, gears=[36, 12])
# Free-running speed demo on motor C.
ev3.screen.print('=Motor C=')
printMotor(motorC, ev3.screen)
waiter(ir)
ev3.screen.print('=run 500=')
motorC.run(500)
waiter(ir)
printMotor(motorC, ev3.screen)
motorC.stop()
waiter(ir)
# Same free-running demo on motor B.
ev3.screen.print('=Motor B=')
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=run 500=')
motorB.run(500)
waiter(ir)
printMotor(motorB, ev3.screen)
motorB.stop()
waiter(ir)
# Timed runs comparing the three stop behaviours (brake / coast / hold).
ev3.screen.print('=run brake=')
motorB.run_time(speed=500, time=3000, then=Stop.BRAKE, wait=True)
waiter(ir)
ev3.screen.print('=run coast=')
motorB.run_time(speed=500, time=3000, then=Stop.COAST, wait=True)
waiter(ir)
ev3.screen.print('=run hold=')
motorB.run_time(speed=500, time=3000, then=Stop.HOLD, wait=True)
waiter(ir)
# Position control: zero the encoder, then relative and absolute moves.
ev3.screen.print('=reset angle 0=')
motorB.reset_angle(0)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=angle 180=')
motorB.run_angle(speed=500, rotation_angle=180, then=Stop.HOLD, wait=True)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=run target 90=')
motorB.run_target(speed=500, target_angle=90, then=Stop.HOLD, wait=True)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=angle 90=')
motorB.run_angle(speed=500, rotation_angle=90, then=Stop.HOLD, wait=True)
printMotor(motorB, ev3.screen)
waiter(ir)
ev3.screen.print('=track target 90=')
motorB.track_target(target_angle=90)
waiter(ir)
# Stall detection: run slowly with a low duty limit until the motor stalls.
ev3.screen.print('=until stalled=')
motorB.run_until_stalled(15, then=Stop.COAST, duty_limit=10)
printMotor(motorB, ev3.screen)
waiter(ir)
# Open-loop duty-cycle control in both directions.
ev3.screen.print('=duty 100=')
motorB.dc(100)
waiter(ir)
printMotor(motorB, ev3.screen)
motorB.stop()
waiter(ir)
ev3.screen.print('=duty -100=')
motorB.dc(-100)
waiter(ir)
printMotor(motorB, ev3.screen)
motorB.stop()
waiter(ir) | en | 0.518597 | #!/usr/bin/env pybricks-micropython # Initialize IR sensor # Initialize motors | 2.653939 | 3 |
DH2URDF/dh2urdf.py | UTHAI-Humanoid/UTHAI-Tools | 1 | 6620929 | #!/usr/bin/env python3
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree
import numpy as np
from math import pi, atan2, sqrt
# <NAME> to URDF converter
def DHMat(theta, d, a, alpha):
    """Homogeneous 4x4 transform for one Denavit-Hartenberg link.

    Parameters follow the classic DH convention: joint angle ``theta``,
    link offset ``d``, link length ``a``, link twist ``alpha``.
    """
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.matrix([
        [ct, -st * ca, st * sa, a * ct],
        [st, ct * ca, -ct * sa, a * st],
        [0.0, sa, ca, d],
        [0.0, 0.0, 0.0, 1.0],
    ])
def homo2euler(Homo):
r = atan2(Homo[2, 1], Homo[2, 2])
p = atan2(-Homo[2, 0], sqrt(Homo[2, 1]**2 + Homo[2, 2]**2))
w = atan2(Homo[1, 0], Homo[0, 0])
x = Homo[0, 3]
y = Homo[1, 3]
z = Homo[2, 3]
return (" ".join([str(x), str(y), str(z)]), " ".join([str(r), str(p), str(w)]))
class URDF(object):
    """Thin builder for a URDF/xacro XML tree, serialized via write().

    Documentation-only pass: the element/attribute strings below are consumed
    verbatim by the downstream xacro toolchain, so code is left byte-identical.
    """

    def __init__(self, name):
        self.name = name
        # Root <robot> element with the xacro namespace declared.
        self.robot = Element(
            'robot', {'name': self.name, 'xmlns:xacro': 'http://www.ros.org/wiki/xacro'})
        self.comment(self.robot, "Uthai Humanoid, FIBO, KMUTT, Thailand, 2017")
        self.comment(self.robot, "This file generated by <NAME>")

    def addProps(self, props):
        """Append xacro:property elements from (name, value) pairs."""
        for prop in props:
            SubElement(self.robot, 'xacro:property', {
                'name': prop[0], 'value': prop[1]})

    @staticmethod
    def Link(name, stl=None):
        """Build a <link>; if `stl` is given, attach the mesh for both
        visual and collision geometry."""
        link = Element('link', {'name': name})
        if stl != None:
            visual = SubElement(link, 'visual')
            geometry = SubElement(visual, 'geometry')
            SubElement(
                geometry, 'mesh', {'filename': "package://uthai_description/meshes/" + stl})
            collision = SubElement(link, 'collision')
            geometry = SubElement(collision, 'geometry')
            SubElement(
                geometry, 'mesh', {'filename': "package://uthai_description/meshes/" + stl})
        return link

    @staticmethod
    def Joint(name, jtype, parent, child, xyz=None, rpy=None, axis="0 0 1", limit=None):
        """Build a <joint>; revolute joints get an axis and (default) limits."""
        joint = Element('joint', {'name': name, 'type': jtype})
        SubElement(joint, 'parent', {'link': parent})
        SubElement(joint, 'child', {'link': child})
        if (xyz != None) and (rpy != None):
            SubElement(joint, 'origin', {'xyz': xyz, 'rpy': rpy})
        if jtype == 'revolute':
            SubElement(joint, 'axis', {'xyz': axis})
            if limit is None:
                # Default limits: +/- 2.618 rad (150 deg), effort 10,
                # velocity 5.655 rad/s.
                defaut_limit = {
                    'effort': '10',
                    'lower': '-2.6179939',
                    'upper': '2.6179939',
                    'velocity': '5.6548668'
                }
                SubElement(joint, 'limit', defaut_limit)
            else:
                SubElement(joint, 'limit', limit)
        return joint

    def addLink(self, name, stl=None):
        """Append a new link element to the robot tree."""
        self.robot.append(self.Link(name, stl))

    def addJoint(self, name, jtype, parent, child, xyz=None, rpy=None, axis="0 0 1", limit=None):
        """Append a new joint element to the robot tree."""
        self.robot.append(
            self.Joint(name, jtype, parent, child, xyz, rpy, axis, limit))

    @staticmethod
    def Macro(name, params):
        """Build a macro element.

        NOTE(review): plain 'macro' (no xacro: prefix) looks like the
        deprecated spelling -- presumably modern xacro expects
        'xacro:macro'; confirm against the xacro docs before changing.
        """
        macro = Element('macro', {'name': name, 'params': params})
        return macro

    @staticmethod
    def comment(element, text):
        """Append an XML comment node to the given element."""
        element.append(Comment(text))

    def write(self):
        """Serialize the tree to uthai_utils/DH2URDF/<name>.xacro."""
        ElementTree(self.robot).write(
            "uthai_utils/DH2URDF/" + self.name + ".xacro")
class DH2URDF(object):
    """Read a .ldh DH-parameter file and emit a URDF/xacro file via URDF.

    Documentation-only pass: parsing below builds a dynamic local scope with
    exec()/eval(), so statement order and local variable names are
    load-bearing; code left byte-identical.
    """

    def __init__(self, name, filename):
        self.urdf = URDF(name)
        # Emit the reusable xacro macros before any DH rows reference them.
        self.DH_fixed_macro()
        self.DH_F_macro()
        self.DH_R_macro()
        self.urdf.comment(self.urdf.robot, "End Macro to make dummy link")
        # [revolute-joint counter, fixed-joint counter] used for macro ids.
        self.num_joint = [1, 1]
        self.urdf.addLink('base_link')
        self.urdf.addLink('fd_base_link')
        self.urdf.addJoint('fixed_fd', 'fixed', 'base_link', 'fd_base_link')
        print("======================================")
        print("Generating...........Loading Parameter")
        print("======================================")
        # .ldh layout: blank-line-separated sections; section 0 holds
        # parameter assignments, sections 2+ hold DH tables.
        with open(filename, 'r') as data_file:
            dh_file = data_file.read().split('\n\n')
        dh_props = dh_file[0].split('\n')
        for dh_prop in dh_props:
            # SECURITY NOTE(review): exec() on file content runs arbitrary
            # code -- only feed trusted .ldh files to this tool.  The exec'd
            # assignments populate locals that the eval() below resolves.
            exec(dh_prop)
            print(dh_prop)
        print("")
        print("======================================")
        print("Generating......Calculate DH Parameter")
        print("======================================")
        dh_tables = dh_file[2:]
        for dh_table in dh_tables:
            dh_datas = []
            # Row format: "parent,child,type|theta, d, a, alpha"
            for dhs in dh_table.split('\n'):
                dh = dhs.split('|')
                dh_datas.append([dh[0].split(','), dh[1]])
            for i, dhs in enumerate(dh_datas):
                # SECURITY NOTE(review): eval() of file-sourced expressions;
                # see the exec() note above.
                tf = eval('homo2euler(DHMat(' + dhs[1] + '))')
                # Last row of a chain is forced to type 'F' (fixed);
                # otherwise take the *next* row's declared type.
                dh_data = {
                    'type': 'F' if (i + 1) == len(dh_datas) else dh_datas[i + 1][0][2],
                    'xyz': tf[0],
                    'rpy': tf[1],
                    'parent': dhs[0][0],
                    'child': dhs[0][1]
                }
                self.DH_macro(dh_data)
                print(dh_data)
        print("")
        print("======================================")
        print("Generate URDF From DH Table Complete..")
        print("======================================\n")
        self.urdf.write()

    def DH_macro(self, dh):
        """Append a DH_F or DH_R macro invocation, consuming dh['type'].

        NOTE: mutates the passed dict (del/insert) before appending it as
        the element's attribute map.
        """
        if dh['type'] == 'F':
            del dh['type']
            dh['id'] = str(self.num_joint[1])
            self.num_joint[1] += 1
            self.urdf.robot.append(Element('DH_F', dh))
        elif dh['type'] == 'R':
            del dh['type']
            dh['id'] = str(self.num_joint[0])
            self.num_joint[0] += 1
            self.urdf.robot.append(Element('DH_R', dh))

    def DH_fixed_macro(self):
        """Define the DH_fixed macro: a fixed joint plus two dummy links."""
        macro = self.urdf.Macro('DH_fixed', 'parent child xyz rpy')
        joint = self.urdf.Joint(
            'jd_${child}', 'fixed', 'fd_${parent}', 'f_${child}', '${xyz}', '${rpy}')
        macro.append(joint)
        link = self.urdf.Link('f_${child}')
        macro.append(link)
        link = self.urdf.Link('fd_${child}')
        macro.append(link)
        self.urdf.robot.append(macro)

    def DH_F_macro(self):
        """Define the DH_F macro: DH_fixed plus a fixed joint."""
        macro = self.urdf.Macro('DH_F', 'parent child xyz rpy id')
        data = {
            'parent': '${parent}',
            'child': '${child}',
            'xyz': '${xyz}',
            'rpy': '${rpy}'
        }
        macro.append(Element('DH_fixed', data))
        joint = self.urdf.Joint(
            'jf_${id}', 'fixed', 'f_${child}', 'fd_${child}')
        macro.append(joint)
        self.urdf.robot.append(macro)

    def DH_R_macro(self):
        """Define the DH_R macro: DH_fixed plus a revolute joint."""
        macro = self.urdf.Macro('DH_R', 'parent child xyz rpy id')
        data = {
            'parent': '${parent}',
            'child': '${child}',
            'xyz': '${xyz}',
            'rpy': '${rpy}'
        }
        macro.append(Element('DH_fixed', data))
        joint = self.urdf.Joint(
            'j_${id}', 'revolute', 'f_${child}', 'fd_${child}')
        macro.append(joint)
        self.urdf.robot.append(macro)
if __name__ == '__main__':
    # CLI entry: generate myRobot.xacro from the bundled DH parameter file.
    DH2URDF('myRobot', 'uthai_utils/DH2URDF/dh_param_file_uthai.ldh')
| #!/usr/bin/env python3
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree
import numpy as np
from math import pi, atan2, sqrt
# <NAME> to URDF converter
def DHMat(theta, d, a, alpha):
    """Homogeneous 4x4 transform for one Denavit-Hartenberg link.

    Parameters follow the classic DH convention: joint angle ``theta``,
    link offset ``d``, link length ``a``, link twist ``alpha``.
    """
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.matrix([
        [ct, -st * ca, st * sa, a * ct],
        [st, ct * ca, -ct * sa, a * st],
        [0.0, sa, ca, d],
        [0.0, 0.0, 0.0, 1.0],
    ])
def homo2euler(Homo):
r = atan2(Homo[2, 1], Homo[2, 2])
p = atan2(-Homo[2, 0], sqrt(Homo[2, 1]**2 + Homo[2, 2]**2))
w = atan2(Homo[1, 0], Homo[0, 0])
x = Homo[0, 3]
y = Homo[1, 3]
z = Homo[2, 3]
return (" ".join([str(x), str(y), str(z)]), " ".join([str(r), str(p), str(w)]))
class URDF(object):
    """Thin builder for a URDF/xacro XML tree, serialized via write().

    Documentation-only pass: the element/attribute strings below are consumed
    verbatim by the downstream xacro toolchain, so code is left byte-identical.
    """

    def __init__(self, name):
        self.name = name
        # Root <robot> element with the xacro namespace declared.
        self.robot = Element(
            'robot', {'name': self.name, 'xmlns:xacro': 'http://www.ros.org/wiki/xacro'})
        self.comment(self.robot, "Uthai Humanoid, FIBO, KMUTT, Thailand, 2017")
        self.comment(self.robot, "This file generated by <NAME>")

    def addProps(self, props):
        """Append xacro:property elements from (name, value) pairs."""
        for prop in props:
            SubElement(self.robot, 'xacro:property', {
                'name': prop[0], 'value': prop[1]})

    @staticmethod
    def Link(name, stl=None):
        """Build a <link>; if `stl` is given, attach the mesh for both
        visual and collision geometry."""
        link = Element('link', {'name': name})
        if stl != None:
            visual = SubElement(link, 'visual')
            geometry = SubElement(visual, 'geometry')
            SubElement(
                geometry, 'mesh', {'filename': "package://uthai_description/meshes/" + stl})
            collision = SubElement(link, 'collision')
            geometry = SubElement(collision, 'geometry')
            SubElement(
                geometry, 'mesh', {'filename': "package://uthai_description/meshes/" + stl})
        return link

    @staticmethod
    def Joint(name, jtype, parent, child, xyz=None, rpy=None, axis="0 0 1", limit=None):
        """Build a <joint>; revolute joints get an axis and (default) limits."""
        joint = Element('joint', {'name': name, 'type': jtype})
        SubElement(joint, 'parent', {'link': parent})
        SubElement(joint, 'child', {'link': child})
        if (xyz != None) and (rpy != None):
            SubElement(joint, 'origin', {'xyz': xyz, 'rpy': rpy})
        if jtype == 'revolute':
            SubElement(joint, 'axis', {'xyz': axis})
            if limit is None:
                # Default limits: +/- 2.618 rad (150 deg), effort 10,
                # velocity 5.655 rad/s.
                defaut_limit = {
                    'effort': '10',
                    'lower': '-2.6179939',
                    'upper': '2.6179939',
                    'velocity': '5.6548668'
                }
                SubElement(joint, 'limit', defaut_limit)
            else:
                SubElement(joint, 'limit', limit)
        return joint

    def addLink(self, name, stl=None):
        """Append a new link element to the robot tree."""
        self.robot.append(self.Link(name, stl))

    def addJoint(self, name, jtype, parent, child, xyz=None, rpy=None, axis="0 0 1", limit=None):
        """Append a new joint element to the robot tree."""
        self.robot.append(
            self.Joint(name, jtype, parent, child, xyz, rpy, axis, limit))

    @staticmethod
    def Macro(name, params):
        """Build a macro element.

        NOTE(review): plain 'macro' (no xacro: prefix) looks like the
        deprecated spelling -- presumably modern xacro expects
        'xacro:macro'; confirm against the xacro docs before changing.
        """
        macro = Element('macro', {'name': name, 'params': params})
        return macro

    @staticmethod
    def comment(element, text):
        """Append an XML comment node to the given element."""
        element.append(Comment(text))

    def write(self):
        """Serialize the tree to uthai_utils/DH2URDF/<name>.xacro."""
        ElementTree(self.robot).write(
            "uthai_utils/DH2URDF/" + self.name + ".xacro")
class DH2URDF(object):
    """Read a .ldh DH-parameter file and emit a URDF/xacro file via URDF.

    Documentation-only pass: parsing below builds a dynamic local scope with
    exec()/eval(), so statement order and local variable names are
    load-bearing; code left byte-identical.
    """

    def __init__(self, name, filename):
        self.urdf = URDF(name)
        # Emit the reusable xacro macros before any DH rows reference them.
        self.DH_fixed_macro()
        self.DH_F_macro()
        self.DH_R_macro()
        self.urdf.comment(self.urdf.robot, "End Macro to make dummy link")
        # [revolute-joint counter, fixed-joint counter] used for macro ids.
        self.num_joint = [1, 1]
        self.urdf.addLink('base_link')
        self.urdf.addLink('fd_base_link')
        self.urdf.addJoint('fixed_fd', 'fixed', 'base_link', 'fd_base_link')
        print("======================================")
        print("Generating...........Loading Parameter")
        print("======================================")
        # .ldh layout: blank-line-separated sections; section 0 holds
        # parameter assignments, sections 2+ hold DH tables.
        with open(filename, 'r') as data_file:
            dh_file = data_file.read().split('\n\n')
        dh_props = dh_file[0].split('\n')
        for dh_prop in dh_props:
            # SECURITY NOTE(review): exec() on file content runs arbitrary
            # code -- only feed trusted .ldh files to this tool.  The exec'd
            # assignments populate locals that the eval() below resolves.
            exec(dh_prop)
            print(dh_prop)
        print("")
        print("======================================")
        print("Generating......Calculate DH Parameter")
        print("======================================")
        dh_tables = dh_file[2:]
        for dh_table in dh_tables:
            dh_datas = []
            # Row format: "parent,child,type|theta, d, a, alpha"
            for dhs in dh_table.split('\n'):
                dh = dhs.split('|')
                dh_datas.append([dh[0].split(','), dh[1]])
            for i, dhs in enumerate(dh_datas):
                # SECURITY NOTE(review): eval() of file-sourced expressions;
                # see the exec() note above.
                tf = eval('homo2euler(DHMat(' + dhs[1] + '))')
                # Last row of a chain is forced to type 'F' (fixed);
                # otherwise take the *next* row's declared type.
                dh_data = {
                    'type': 'F' if (i + 1) == len(dh_datas) else dh_datas[i + 1][0][2],
                    'xyz': tf[0],
                    'rpy': tf[1],
                    'parent': dhs[0][0],
                    'child': dhs[0][1]
                }
                self.DH_macro(dh_data)
                print(dh_data)
        print("")
        print("======================================")
        print("Generate URDF From DH Table Complete..")
        print("======================================\n")
        self.urdf.write()

    def DH_macro(self, dh):
        """Append a DH_F or DH_R macro invocation, consuming dh['type'].

        NOTE: mutates the passed dict (del/insert) before appending it as
        the element's attribute map.
        """
        if dh['type'] == 'F':
            del dh['type']
            dh['id'] = str(self.num_joint[1])
            self.num_joint[1] += 1
            self.urdf.robot.append(Element('DH_F', dh))
        elif dh['type'] == 'R':
            del dh['type']
            dh['id'] = str(self.num_joint[0])
            self.num_joint[0] += 1
            self.urdf.robot.append(Element('DH_R', dh))

    def DH_fixed_macro(self):
        """Define the DH_fixed macro: a fixed joint plus two dummy links."""
        macro = self.urdf.Macro('DH_fixed', 'parent child xyz rpy')
        joint = self.urdf.Joint(
            'jd_${child}', 'fixed', 'fd_${parent}', 'f_${child}', '${xyz}', '${rpy}')
        macro.append(joint)
        link = self.urdf.Link('f_${child}')
        macro.append(link)
        link = self.urdf.Link('fd_${child}')
        macro.append(link)
        self.urdf.robot.append(macro)

    def DH_F_macro(self):
        """Define the DH_F macro: DH_fixed plus a fixed joint."""
        macro = self.urdf.Macro('DH_F', 'parent child xyz rpy id')
        data = {
            'parent': '${parent}',
            'child': '${child}',
            'xyz': '${xyz}',
            'rpy': '${rpy}'
        }
        macro.append(Element('DH_fixed', data))
        joint = self.urdf.Joint(
            'jf_${id}', 'fixed', 'f_${child}', 'fd_${child}')
        macro.append(joint)
        self.urdf.robot.append(macro)

    def DH_R_macro(self):
        """Define the DH_R macro: DH_fixed plus a revolute joint."""
        macro = self.urdf.Macro('DH_R', 'parent child xyz rpy id')
        data = {
            'parent': '${parent}',
            'child': '${child}',
            'xyz': '${xyz}',
            'rpy': '${rpy}'
        }
        macro.append(Element('DH_fixed', data))
        joint = self.urdf.Joint(
            'j_${id}', 'revolute', 'f_${child}', 'fd_${child}')
        macro.append(joint)
        self.urdf.robot.append(macro)
if __name__ == '__main__':
    # CLI entry: generate myRobot.xacro from the bundled DH parameter file.
    DH2URDF('myRobot', 'uthai_utils/DH2URDF/dh_param_file_uthai.ldh')
| en | 0.21347 | #!/usr/bin/env python3 # <NAME> to URDF converter | 3.040612 | 3 |
simp_py_examples/course/t301.py | kcfkwok2003/Simp_py | 0 | 6620930 | # t301.py
from machine import Pin
from simp_py import tft
p21= Pin(21, Pin.IN)
while True:
if p21.value()==0:
tft.tft.text(0,100," on")
else:
tft.tft.text(0,100,"off")
time.sleep(0.1)
| # t301.py
from machine import Pin
from simp_py import tft
p21= Pin(21, Pin.IN)
while True:
if p21.value()==0:
tft.tft.text(0,100," on")
else:
tft.tft.text(0,100,"off")
time.sleep(0.1)
| none | 1 | 2.50604 | 3 | |
fake.py | ECMGIU/ReportSubmission | 0 | 6620931 | import random
# Seed the database with fake teams and reports for development/testing.
import random  # BUGFIX: random.randrange is used below but was not imported

from faker import Faker
from app import db, Report, Team

NUM_REPORTS = 40

fake = Faker()

teams = [
    Team(name='Domestic Equity', manager='pwgould', color='red'),
    Team(name='International Equity', manager='spsahoo', color='yellow'),
    Team(name='Sustainable Investing', manager='madamann', color='green'),
    Team(name='Commodities', manager='sahmehta', color='purple'),
    Team(name='Real Estate', manager='evanhunt', color='pink'),
    Team(name='Macro Research', manager='ypande', color='blue')
]
db.session.add_all(teams)
db.session.commit()

# Teams must be committed first so the random-team query below sees them.
reports = [
    Report(
        username=fake.pystr(8, 8).lower(),
        title=fake.sentence(7),
        ticker=fake.pystr(4, 4).upper(),
        date=fake.date_this_month(),
        downloads=fake.pyint(0, 100),
        team=db.session.query(Team)[random.randrange(0, db.session.query(Team).count())]
    )
    for x in range(NUM_REPORTS)
]
db.session.add_all(reports)
db.session.commit()
| import random
# Seed the database with fake teams and reports for development/testing.
import random  # BUGFIX: random.randrange is used below but was not imported

from faker import Faker
from app import db, Report, Team

NUM_REPORTS = 40

fake = Faker()

teams = [
    Team(name='Domestic Equity', manager='pwgould', color='red'),
    Team(name='International Equity', manager='spsahoo', color='yellow'),
    Team(name='Sustainable Investing', manager='madamann', color='green'),
    Team(name='Commodities', manager='sahmehta', color='purple'),
    Team(name='Real Estate', manager='evanhunt', color='pink'),
    Team(name='Macro Research', manager='ypande', color='blue')
]
db.session.add_all(teams)
db.session.commit()

# Teams must be committed first so the random-team query below sees them.
reports = [
    Report(
        username=fake.pystr(8, 8).lower(),
        title=fake.sentence(7),
        ticker=fake.pystr(4, 4).upper(),
        date=fake.date_this_month(),
        downloads=fake.pyint(0, 100),
        team=db.session.query(Team)[random.randrange(0, db.session.query(Team).count())]
    )
    for x in range(NUM_REPORTS)
]
db.session.add_all(reports)
db.session.commit()
| none | 1 | 2.574309 | 3 | |
dds/core/migrations/0005_systemuser_is_active.py | htrueman/data-delivery-system | 0 | 6620932 | # Generated by Django 2.0.1 on 2018-01-31 20:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``is_active`` flag (default ``True``) to ``SystemUser``."""
    dependencies = [
        ('core', '0004_auto_20180131_2007'),
    ]
    operations = [
        migrations.AddField(
            model_name='systemuser',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
    ]
| # Generated by Django 2.0.1 on 2018-01-31 20:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``is_active`` flag (default ``True``) to ``SystemUser``."""
    dependencies = [
        ('core', '0004_auto_20180131_2007'),
    ]
    operations = [
        migrations.AddField(
            model_name='systemuser',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
    ]
| en | 0.79464 | # Generated by Django 2.0.1 on 2018-01-31 20:31 | 1.654783 | 2 |
models/TextCNN.py | VascoLopes/Text-Classification | 1 | 6620933 | <gh_stars>1-10
# _*_ coding: utf-8 _*_
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from sklearn.metrics import f1_score
class TextCNN(nn.Module):
    """Convolutional sentence classifier after Kim (2014).

    Based on "Convolutional Neural Networks for Sentence Classification":
    https://arxiv.org/abs/1408.5882
    """

    def __init__(self, batch_size, output_size, hidden_size, vocab_size, embedding_length, weights):
        super(TextCNN, self).__init__()
        kernel_heights = [1, 2, 3, 5]   # n-gram window sizes
        out_channels = 36               # feature maps per window size
        # Frozen embedding table initialised from the pretrained vectors.
        self.embedding = nn.Embedding(vocab_size, embedding_length)
        self.embedding.weight = nn.Parameter(torch.tensor(weights, dtype=torch.float32), requires_grad=False)
        # One Conv2d per window size, each spanning the full embedding width.
        self.convs1 = nn.ModuleList(
            nn.Conv2d(1, out_channels, (height, embedding_length)) for height in kernel_heights
        )
        self.dropout = nn.Dropout(0.1)
        self.fc1 = nn.Linear(len(kernel_heights) * out_channels, output_size)

    def forward(self, x):
        embedded = self.embedding(x)        # (batch, seq, emb)
        embedded = embedded.unsqueeze(1)    # channel dim: (batch, 1, seq, emb)
        conv_maps = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs1]
        pooled = [F.max_pool1d(fmap, fmap.size(2)).squeeze(2) for fmap in conv_maps]
        features = self.dropout(torch.cat(pooled, 1))
        return self.fc1(features)
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from sklearn.metrics import f1_score
class TextCNN(nn.Module):
    """Convolutional sentence classifier after Kim (2014).

    Based on "Convolutional Neural Networks for Sentence Classification":
    https://arxiv.org/abs/1408.5882
    """

    def __init__(self, batch_size, output_size, hidden_size, vocab_size, embedding_length, weights):
        super(TextCNN, self).__init__()
        kernel_heights = [1, 2, 3, 5]   # n-gram window sizes
        out_channels = 36               # feature maps per window size
        # Frozen embedding table initialised from the pretrained vectors.
        self.embedding = nn.Embedding(vocab_size, embedding_length)
        self.embedding.weight = nn.Parameter(torch.tensor(weights, dtype=torch.float32), requires_grad=False)
        # One Conv2d per window size, each spanning the full embedding width.
        self.convs1 = nn.ModuleList(
            nn.Conv2d(1, out_channels, (height, embedding_length)) for height in kernel_heights
        )
        self.dropout = nn.Dropout(0.1)
        self.fc1 = nn.Linear(len(kernel_heights) * out_channels, output_size)

    def forward(self, x):
        embedded = self.embedding(x)        # (batch, seq, emb)
        embedded = embedded.unsqueeze(1)    # channel dim: (batch, 1, seq, emb)
        conv_maps = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs1]
        pooled = [F.max_pool1d(fmap, fmap.size(2)).squeeze(2) for fmap in conv_maps]
        features = self.dropout(torch.cat(pooled, 1))
        return self.fc1(features)
minigest/fisco/apps.py | ctrlmaniac/minigest | 0 | 6620934 | <gh_stars>0
from django.apps import AppConfig
class FiscoConfig(AppConfig):
    """Django application configuration for the ``minigest.fisco`` app."""
    default_auto_field = "django.db.models.BigAutoField"  # default PK type for app models
    name = "minigest.fisco"
    verbose_name = "Fisco"
| from django.apps import AppConfig
class FiscoConfig(AppConfig):
    """Django application configuration for the ``minigest.fisco`` app."""

    # Default primary-key field type for models in this app.
    default_auto_field = "django.db.models.BigAutoField"
    # Dotted path identifying the application.
    name = "minigest.fisco"
    # Human-readable name shown in the admin.
    verbose_name = "Fisco"
exporters/export_formatter/base_export_formatter.py | scrapinghub/exporters | 41 | 6620935 | <filename>exporters/export_formatter/base_export_formatter.py
from exporters.pipeline.base_pipeline_item import BasePipelineItem
class BaseExportFormatter(BasePipelineItem):
    """Base class for export formatters.

    Subclasses serialize individual items by overriding :meth:`format`,
    and may wrap the output with a header/footer.
    """
    # File extension for the produced output; None means unspecified.
    file_extension = None
    # String written between consecutive formatted items.
    item_separator = '\n'

    def __init__(self, options, metadata=None):
        super(BaseExportFormatter, self).__init__(options, metadata)

    def format(self, item):
        # Subclasses must implement the per-item serialization.
        raise NotImplementedError

    def format_header(self):
        # Optional text emitted before the first item.
        return ''

    def format_footer(self):
        # Optional text emitted after the last item.
        return ''
| <filename>exporters/export_formatter/base_export_formatter.py
from exporters.pipeline.base_pipeline_item import BasePipelineItem
class BaseExportFormatter(BasePipelineItem):
    """Base class for export formatters.

    Subclasses serialize individual items by overriding :meth:`format`,
    and may wrap the output with a header/footer.
    """
    # File extension for the produced output; None means unspecified.
    file_extension = None
    # String written between consecutive formatted items.
    item_separator = '\n'

    def __init__(self, options, metadata=None):
        super(BaseExportFormatter, self).__init__(options, metadata)

    def format(self, item):
        # Subclasses must implement the per-item serialization.
        raise NotImplementedError

    def format_header(self):
        # Optional text emitted before the first item.
        return ''

    def format_footer(self):
        # Optional text emitted after the last item.
        return ''
| none | 1 | 2.274693 | 2 | |
tjtestingpythonpackaging/bird.py | tjlee0909/tj-testing-python-packaging | 0 | 6620936 | class Bird(object):
    def __init__(self):
        """No state to initialise."""
        pass
def tweet(self):
print "Bird sound!"
| class Bird(object):
    def __init__(self):
        """No state to initialise."""
        pass
def tweet(self):
print "Bird sound!"
| none | 1 | 2.527458 | 3 | |
tests/test_empty.py | ondevice/ondevice-client | 2 | 6620937 | from unittest import TestCase
class EmptyTest(TestCase):
    """Placeholder suite: keeps the test runner green until real tests exist."""

    def testMinimalRockerfile(self):
        # Intentionally trivial; unittest ignores the return value.
        return True
| from unittest import TestCase
class EmptyTest(TestCase):
    """Placeholder suite: keeps the test runner green until real tests exist."""

    def testMinimalRockerfile(self):
        # Intentionally trivial; unittest ignores the return value.
        return True
| none | 1 | 2.151135 | 2 | |
tools/orb_stats.py | clarencedglee/circleci-orbs | 44 | 6620938 | <filename>tools/orb_stats.py
from typing import Dict, Optional
import json
import datetime
import urllib.request
def datadog_metric(name: str, value, tags: Optional[Dict] = None) -> str:
    """Serialize one metric observation as a Datadog-style JSON string.

    Fixed return annotation: the function returns the ``json.dumps`` string,
    not a dict as previously annotated.
    """
    if tags is None:
        tags = {}
    return json.dumps({
        'm': name,                                      # metric name
        'v': value,                                     # metric value
        'e': int(datetime.datetime.now().timestamp()),  # epoch seconds
        't': [f'{k}:{v}' for k, v in tags.items()]      # "key:value" tag strings
    })
def graphql_request(url: str, query: str, operationName: str='', variables: Dict[str, str]=None) -> Dict:
    """POST a GraphQL query to *url* and return the decoded JSON response.

    Raises ``urllib.error.URLError``/``HTTPError`` on transport failures.
    """
    body = {
        'query': query,
        # Fixed key: was misspelled 'operationnName', which GraphQL servers
        # silently ignore, so the operation name never reached the server.
        'operationName': operationName,
        'variables': variables if variables is not None else {}
    }
    request = urllib.request.Request(
        url,
        data=json.dumps(body).encode(),
        headers={
            'Content-Type': 'application/json'
        },
        method='POST'
    )
    response = urllib.request.urlopen(request)
    return json.loads(response.read())
def get_stats(orb: str) -> Dict[str, int]:
    """Fetch last-30-days build/org/project counts for *orb* from the
    (unstable) CircleCI GraphQL API and return the statistics mapping."""
    query = '{orb(name: "' + orb + '''") {
    name,
    statistics {
        last30DaysBuildCount,
        last30DaysOrganizationCount,
        last30DaysProjectCount
    }
    }
    }'''
    response = graphql_request('https://circleci.com/graphql-unstable', query)
    return response['data']['orb']['statistics']
def log_orb_stats(orb):
    """Print each statistic for *orb* as a Datadog metric line tagged with the orb name."""
    stats = get_stats(orb)
    for name, value in stats.items():
        print(datadog_metric(f'circleci.orb.{name}', value, tags={'orb': orb}))
def handler(event, context):
    """AWS-Lambda-style entry point: log stats for the tracked orbs."""
    log_orb_stats('ovotech/terraform')
    log_orb_stats('ovotech/clair-scanner')
if __name__ == '__main__':
handler({}, None)
| <filename>tools/orb_stats.py
from typing import Dict, Optional
import json
import datetime
import urllib.request
def datadog_metric(name: str, value, tags: Optional[Dict] = None) -> str:
    """Serialize one metric observation as a Datadog-style JSON string.

    Fixed return annotation: the function returns the ``json.dumps`` string,
    not a dict as previously annotated.
    """
    if tags is None:
        tags = {}
    return json.dumps({
        'm': name,                                      # metric name
        'v': value,                                     # metric value
        'e': int(datetime.datetime.now().timestamp()),  # epoch seconds
        't': [f'{k}:{v}' for k, v in tags.items()]      # "key:value" tag strings
    })
def graphql_request(url: str, query: str, operationName: str='', variables: Dict[str, str]=None) -> Dict:
    """POST a GraphQL query to *url* and return the decoded JSON response.

    Raises ``urllib.error.URLError``/``HTTPError`` on transport failures.
    """
    body = {
        'query': query,
        # Fixed key: was misspelled 'operationnName', which GraphQL servers
        # silently ignore, so the operation name never reached the server.
        'operationName': operationName,
        'variables': variables if variables is not None else {}
    }
    request = urllib.request.Request(
        url,
        data=json.dumps(body).encode(),
        headers={
            'Content-Type': 'application/json'
        },
        method='POST'
    )
    response = urllib.request.urlopen(request)
    return json.loads(response.read())
def get_stats(orb: str) -> Dict[str, int]:
    """Fetch last-30-days build/org/project counts for *orb* from the
    (unstable) CircleCI GraphQL API and return the statistics mapping."""
    query = '{orb(name: "' + orb + '''") {
    name,
    statistics {
        last30DaysBuildCount,
        last30DaysOrganizationCount,
        last30DaysProjectCount
    }
    }
    }'''
    response = graphql_request('https://circleci.com/graphql-unstable', query)
    return response['data']['orb']['statistics']
def log_orb_stats(orb):
    """Print each statistic for *orb* as a Datadog metric line tagged with the orb name."""
    stats = get_stats(orb)
    for name, value in stats.items():
        print(datadog_metric(f'circleci.orb.{name}', value, tags={'orb': orb}))
def handler(event, context):
    """AWS-Lambda-style entry point: log stats for the tracked orbs."""
    log_orb_stats('ovotech/terraform')
    log_orb_stats('ovotech/clair-scanner')
if __name__ == '__main__':
handler({}, None)
| en | 0.273735 | ") { name, statistics { last30DaysBuildCount, last30DaysOrganizationCount, last30DaysProjectCount } } } | 2.460191 | 2 |
scripts/vector.py | Hopson97/AppleFall | 14 | 6620939 | <gh_stars>10-100
import graphics as gfx
import math
'''
Vector mathematics
'''
def distance(x1, y1, x2, y2):
    '''Returns the Euclidean distance between (x1, y1) and (x2, y2)'''
    # Squaring removes the sign, so no abs() is required.
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
def distanceBetween(p1, p2):
    '''Returns the Euclidean distance between two point-like objects'''
    return math.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)
def normalise(vect):
    '''Returns a unit-length copy of vect, negated (points the opposite way)'''
    # NOTE(review): raises ZeroDivisionError for the zero vector.
    length = math.sqrt(vect.x * vect.x + vect.y * vect.y)
    return gfx.Point(-(vect.x / length), -(vect.y / length))
def getPointDifference(p1, p2):
    '''Returns dx, dy between two points'''
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    return dx, dy
import math
'''
Vector mathematics
'''
def distance(x1, y1, x2, y2):
    '''Returns the Euclidean distance between (x1, y1) and (x2, y2)'''
    # Squaring removes the sign, so no abs() is required.
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
def distanceBetween(p1, p2):
    '''Returns the Euclidean distance between two point-like objects'''
    return math.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)
def normalise(vect):
    '''Returns a unit-length copy of vect, negated (points the opposite way)'''
    # NOTE(review): raises ZeroDivisionError for the zero vector.
    length = math.sqrt(vect.x * vect.x + vect.y * vect.y)
    return gfx.Point(-(vect.x / length), -(vect.y / length))
def getPointDifference(p1, p2):
    '''Returns dx, dy between two points'''
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    return dx, dy
envdsys/envdatasystem/migrations/0003_auto_20210226_1835.py | NOAA-PMEL/envDataSystem | 1 | 6620940 | <reponame>NOAA-PMEL/envDataSystem<gh_stars>1-10
# Generated by Django 3.1.7 on 2021-02-26 18:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Set human-readable verbose names on the core models and re-declare
    several FK/M2M fields with explicit ``related_name``/``verbose_name``
    options; no columns are added or removed."""
    dependencies = [
        ('envdatasystem', '0002_auto_20210226_1815'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='datasystem',
            options={'verbose_name': 'Data System', 'verbose_name_plural': 'Data Systems'},
        ),
        migrations.AlterModelOptions(
            name='platform',
            options={'verbose_name': 'Platform', 'verbose_name_plural': 'Platforms'},
        ),
        migrations.AlterModelOptions(
            name='project',
            options={'verbose_name': 'Project', 'verbose_name_plural': 'Projects'},
        ),
        migrations.AlterModelOptions(
            name='samplingsystem',
            options={'verbose_name': 'Sampling System', 'verbose_name_plural': 'Sampling Systems'},
        ),
        migrations.AlterModelOptions(
            name='samplingsystemlocation',
            options={'verbose_name': 'Sampling Location', 'verbose_name_plural': 'Sampling Locations'},
        ),
        migrations.AlterModelOptions(
            name='samplingsystemsamplepoint',
            options={'verbose_name': 'Sample Point', 'verbose_name_plural': 'Sample Points'},
        ),
        migrations.AlterField(
            model_name='datasystem',
            name='platform',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='datasystems', to='envdatasystem.platform', verbose_name='Platform'),
        ),
        migrations.AlterField(
            model_name='datasystem',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='datasystems', to='envdatasystem.project', verbose_name='Project'),
        ),
        migrations.AlterField(
            model_name='platform',
            name='parent_platform',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='platforms', to='envdatasystem.platform'),
        ),
        migrations.AlterField(
            model_name='samplingsystem',
            name='locations',
            field=models.ManyToManyField(blank=True, related_name='samplingsystems', to='envdatasystem.SamplingSystemLocation', verbose_name='sampling Locations'),
        ),
        migrations.AlterField(
            model_name='samplingsystem',
            name='sample_points',
            field=models.ManyToManyField(blank=True, related_name='samplingsystems', to='envdatasystem.SamplingSystemSamplePoint', verbose_name='Sampling Points'),
        ),
        migrations.AlterField(
            model_name='samplingsystemlocation',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplelocations', to='envdatasystem.samplingsystemlocation', verbose_name='Parent Location'),
        ),
        migrations.AlterField(
            model_name='samplingsystemlocation',
            name='sampling_system',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplelocations', to='envdatasystem.samplingsystem', verbose_name='Sampling System'),
        ),
        migrations.AlterField(
            model_name='samplingsystemsamplepoint',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplepoints', to='envdatasystem.samplingsystemsamplepoint', verbose_name='Parent Sampling Point'),
        ),
        migrations.AlterField(
            model_name='samplingsystemsamplepoint',
            name='sampling_system',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplepoints', to='envdatasystem.samplingsystem', verbose_name='Sample System'),
        ),
    ]
| # Generated by Django 3.1.7 on 2021-02-26 18:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('envdatasystem', '0002_auto_20210226_1815'),
]
operations = [
migrations.AlterModelOptions(
name='datasystem',
options={'verbose_name': 'Data System', 'verbose_name_plural': 'Data Systems'},
),
migrations.AlterModelOptions(
name='platform',
options={'verbose_name': 'Platform', 'verbose_name_plural': 'Platforms'},
),
migrations.AlterModelOptions(
name='project',
options={'verbose_name': 'Project', 'verbose_name_plural': 'Projects'},
),
migrations.AlterModelOptions(
name='samplingsystem',
options={'verbose_name': 'Sampling System', 'verbose_name_plural': 'Sampling Systems'},
),
migrations.AlterModelOptions(
name='samplingsystemlocation',
options={'verbose_name': 'Sampling Location', 'verbose_name_plural': 'Sampling Locations'},
),
migrations.AlterModelOptions(
name='samplingsystemsamplepoint',
options={'verbose_name': 'Sample Point', 'verbose_name_plural': 'Sample Points'},
),
migrations.AlterField(
model_name='datasystem',
name='platform',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='datasystems', to='envdatasystem.platform', verbose_name='Platform'),
),
migrations.AlterField(
model_name='datasystem',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='datasystems', to='envdatasystem.project', verbose_name='Project'),
),
migrations.AlterField(
model_name='platform',
name='parent_platform',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='platforms', to='envdatasystem.platform'),
),
migrations.AlterField(
model_name='samplingsystem',
name='locations',
field=models.ManyToManyField(blank=True, related_name='samplingsystems', to='envdatasystem.SamplingSystemLocation', verbose_name='sampling Locations'),
),
migrations.AlterField(
model_name='samplingsystem',
name='sample_points',
field=models.ManyToManyField(blank=True, related_name='samplingsystems', to='envdatasystem.SamplingSystemSamplePoint', verbose_name='Sampling Points'),
),
migrations.AlterField(
model_name='samplingsystemlocation',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplelocations', to='envdatasystem.samplingsystemlocation', verbose_name='Parent Location'),
),
migrations.AlterField(
model_name='samplingsystemlocation',
name='sampling_system',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplelocations', to='envdatasystem.samplingsystem', verbose_name='Sampling System'),
),
migrations.AlterField(
model_name='samplingsystemsamplepoint',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplepoints', to='envdatasystem.samplingsystemsamplepoint', verbose_name='Parent Sampling Point'),
),
migrations.AlterField(
model_name='samplingsystemsamplepoint',
name='sampling_system',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samplepoints', to='envdatasystem.samplingsystem', verbose_name='Sample System'),
),
] | en | 0.820508 | # Generated by Django 3.1.7 on 2021-02-26 18:35 | 1.679599 | 2 |
fast.py | mavi0/upgraded-rotary-telephone | 1 | 6620941 | import time
import configparser
import iperf3
import json
import logging
from subprocess import check_output
from datetime import datetime
from time import sleep

## TO DO: also save json files to unique file, for logs. ALSO retry iperf when can't connect

config = configparser.ConfigParser()
# NOTE(review): this rebinds 'time', shadowing the 'time' module imported at
# the top of the file -- rename if the module is needed later.
time = datetime.now()
config.sections()
config.read('main.conf')
config.sections()

logging.basicConfig(format='[%(levelname)s] %(asctime)s %(message)s ',filename='ping.log',level=logging.DEBUG)

# Connection settings from main.conf.
base_port = int(config['DEFAULT']['Port'])
server_hostname = config['DEFAULT']['Hostname']

logging.info("Performing fast.com test....")

download = 0
try:
    download = check_output("fast")
except Exception:
    # Fixed: the bare 'except:' assigned an unused variable 'd' instead of
    # handling the failure; keep 'download' at its 0 default and log it.
    logging.exception("fast.com test failed")

print("{download:", download, "}")
logging.info("Complete!\n")
| import time
import configparser
import iperf3
import json
import logging
from subprocess import check_output
from datetime import datetime
from time import sleep

## TO DO: also save json files to unique file, for logs. ALSO retry iperf when can't connect

config = configparser.ConfigParser()
# NOTE(review): this rebinds 'time', shadowing the 'time' module imported at
# the top of the file -- rename if the module is needed later.
time = datetime.now()
config.sections()
config.read('main.conf')
config.sections()

logging.basicConfig(format='[%(levelname)s] %(asctime)s %(message)s ',filename='ping.log',level=logging.DEBUG)

# Connection settings from main.conf.
base_port = int(config['DEFAULT']['Port'])
server_hostname = config['DEFAULT']['Hostname']

logging.info("Performing fast.com test....")

download = 0
try:
    download = check_output("fast")
except Exception:
    # Fixed: the bare 'except:' assigned an unused variable 'd' instead of
    # handling the failure; keep 'download' at its 0 default and log it.
    logging.exception("fast.com test failed")

print("{download:", download, "}")
logging.info("Complete!\n")
| en | 0.672012 | ## TO DO: also save json files to unique file, for logs. ALSO retry iperf when can't connect # print(check_output("fast", shell=True, stderr=subprocess.STDOUT)) # download = check_output("fast") | 2.441532 | 2 |
Apr_17.py | keiraaaaa/Leetcode | 0 | 6620942 | '''
#########################
# 64. Minimum Path Sum
#########################
class Solution:
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid:
return 0
for i in range(len(grid)):
for j in range (len(grid[0])):
if i==0 and j!=0:
grid[i][j] = grid[i][j-1]+grid[i][j]
if j==0 and i!=0:
grid[i][j] = grid[i-1][j]+grid[i][j]
if i!=0 and j!=0:
grid[i][j] = min(grid[i-1][j],grid[i][j-1])+grid[i][j]
return grid[-1][-1]
grid = [[1,3,1], [1,5,1], [4,2,1]]
solu = Solution()
print (solu.minPathSum(grid))
'''
#########################
# 75. Sort Colors
#########################
class Solution:
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Dutch national flag: single pass partitioning into 0s / 1s / 2s.
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 2:
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1
            else:
                mid += 1
        return nums
nums = [2,0,1]
solu = Solution()
print (solu.sortColors(nums))
| '''
#########################
# 64. Minimum Path Sum
#########################
class Solution:
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid:
return 0
for i in range(len(grid)):
for j in range (len(grid[0])):
if i==0 and j!=0:
grid[i][j] = grid[i][j-1]+grid[i][j]
if j==0 and i!=0:
grid[i][j] = grid[i-1][j]+grid[i][j]
if i!=0 and j!=0:
grid[i][j] = min(grid[i-1][j],grid[i][j-1])+grid[i][j]
return grid[-1][-1]
grid = [[1,3,1], [1,5,1], [4,2,1]]
solu = Solution()
print (solu.minPathSum(grid))
'''
#########################
# 75. Sort Colors
#########################
class Solution:
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Dutch national flag: single pass partitioning into 0s / 1s / 2s.
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 2:
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1
            else:
                mid += 1
        return nums
solu = Solution()
print (solu.sortColors(nums))
| en | 0.46745 | ######################### # 64. Minimum Path Sum ######################### class Solution: def minPathSum(self, grid): """ :type grid: List[List[int]] :rtype: int """ if not grid: return 0 for i in range(len(grid)): for j in range (len(grid[0])): if i==0 and j!=0: grid[i][j] = grid[i][j-1]+grid[i][j] if j==0 and i!=0: grid[i][j] = grid[i-1][j]+grid[i][j] if i!=0 and j!=0: grid[i][j] = min(grid[i-1][j],grid[i][j-1])+grid[i][j] return grid[-1][-1] grid = [[1,3,1], [1,5,1], [4,2,1]] solu = Solution() print (solu.minPathSum(grid)) ######################### # 75. Sort Colors ######################### :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. | 3.651648 | 4 |
dayone2joplin.py | brandonlou/DayOne2Joplin | 7 | 6620943 | #!/usr/local/bin/python3
import datetime, json, os, pathlib, sys, textwrap, uuid
# Check for less than two arguments passed.
def check_arguments() -> None:
    """Exit with a usage message unless source and target dirs were given."""
    if len(sys.argv) < 3:
        print("Usage: python3 dayone2joplin.py [source dir] [target dir]")
        # sys.exit(1): exit() is an interactive-site convenience that may be
        # absent under -S, and a usage error should yield non-zero status.
        sys.exit(1)
def get_source_json() -> str:
    """Return the path to Journal.json inside the source dir (sys.argv[1])."""
    return os.getcwd() + "/" + sys.argv[1] + "/Journal.json"
def get_target_dir() -> str:
    """Return the target directory path (sys.argv[2]), creating it if needed."""
    path = os.getcwd() + "/" + sys.argv[2] + "/"
    # Create the directory (and any missing parents) on first use.
    pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    return path
def generate_uuid() -> str:
    """Return a random UUID4 as a 32-character lowercase hex string."""
    # .hex is exactly the dashless form of str(uuid4()).
    return uuid.uuid4().hex
def write_file(filename: str, content: str) -> None:
    """Create (or truncate) *filename* and write *content* to it."""
    # A context manager guarantees the handle is closed even if write() raises.
    with open(filename, "w+") as new_file:
        new_file.write(content)
def get_location(entry: dict) -> (float, float):
    """Return (longitude, latitude) for the entry, defaulting to (0.0, 0.0)."""
    fallback = {'longitude': 0.0, 'latitude': 0.0}
    loc = entry.get('location', fallback)
    return loc['longitude'], loc['latitude']
def get_dates(entry: dict) -> (str, str):
    """Return (creationDate, modifiedDate), both defaulting to 'now'."""
    now = datetime.datetime.now()
    return entry.get('creationDate', now), entry.get('modifiedDate', now)
def get_self_uuid(entry: dict) -> str:
    """Return the entry's uuid lowercased, or a freshly generated fallback."""
    if 'uuid' in entry:
        return entry['uuid'].lower()
    # Only generate a fallback when one is actually needed; the original
    # generated (and discarded) a UUID even when the entry carried one.
    return generate_uuid()
def get_content(entry: dict) -> str:
    """Return the entry text with every backslash removed ('' if missing)."""
    return entry.get('text', "").replace("\\", "")
def get_title(text: str) -> str:
    """Return the first line, minus '#' marks and leading space, plus a blank line."""
    first_line = text.partition('\n')[0]
    return first_line.replace("#", "").lstrip() + "\n\n"
def get_metainfo(self_id: str, parent_id: str, latitude: float, longitude: float, creation_date: str, modified_date: str) -> str:
    """Render the Joplin note metadata block appended after each note body."""
    metainfo = """\n
    id: {self_id}
    parent_id: {parent_id}
    created_time: {creation_date}
    updated_time: {modified_date}
    is_conflict: 0
    latitude: {latitude}
    longitude: {longitude}
    altitude: 0.0000
    author: 
    source_url: 
    is_todo: 0
    todo_due: 0
    todo_completed: 0
    source: joplin_desktop
    source_application: net.cozic.joplin-desktop
    application_data: 
    order: 0
    user_created_time: {creation_date}
    user_updated_time: {modified_date}
    encryption_cipher_text: 
    encryption_applied: 0
    markup_language: 1
    is_shared: 0
    type_: 1""".format(self_id=self_id, parent_id=parent_id, creation_date=creation_date, modified_date=modified_date, longitude=longitude, latitude=latitude)
    metainfo = textwrap.dedent(metainfo) # Remove added indentation from multiline string.
    return metainfo
def convert_to_markdown(entry: dict, target_dir: str, parent_id: str) -> None:
    """Write one Day One entry as a Joplin-style markdown note in *target_dir*.

    The note file is named ``<uuid>.md`` and contains the title line, the
    entry text, and the metadata trailer referencing *parent_id*.
    """
    longitude, latitude = get_location(entry) # Get location.
    creation_date, modified_date = get_dates(entry) # Get dates.
    self_id = get_self_uuid(entry) # Get ID.
    text = get_content(entry) # Get text content.
    title = get_title(text) # Get title.
    metainfo = get_metainfo(self_id, parent_id, latitude, longitude, creation_date, modified_date) # Get metainfo
    text = title + text + metainfo # Prepend title and append metainfo to text
    # Write to new markdown file.
    new_md = target_dir + self_id + ".md"
    write_file(new_md, text)
if __name__ == '__main__':
check_arguments()
source_json = get_source_json()
target_dir = get_target_dir()
# Generate parent UUID.
parent_id = generate_uuid()
# Open and load Journal.json.
with open(source_json) as json_file:
data = json.load(json_file)
entries = data.get('entries')
if not entries:
sys.exit("Error: No entries found in Journal.json")
for entry in entries: # Go through all journal entries.
convert_to_markdown(entry, target_dir, parent_id)
# Create resources folder.
# Future: Enable automatic convertion of images.
resource_dir = target_dir + "resources"
pathlib.Path(resource_dir).mkdir(parents=True, exist_ok=True)
print("Success!")
| #!/usr/local/bin/python3
import datetime, json, os, pathlib, sys, textwrap, uuid
# Check for less than two arguments passed.
def check_arguments() -> None:
    """Exit with a usage message unless source and target dirs were given."""
    if len(sys.argv) < 3:
        print("Usage: python3 dayone2joplin.py [source dir] [target dir]")
        # sys.exit(1): exit() is an interactive-site convenience that may be
        # absent under -S, and a usage error should yield non-zero status.
        sys.exit(1)
def get_source_json() -> str:
    """Return the path to Journal.json inside the source dir (sys.argv[1])."""
    return os.getcwd() + "/" + sys.argv[1] + "/Journal.json"
def get_target_dir() -> str:
    """Return the target directory path (sys.argv[2]), creating it if needed."""
    path = os.getcwd() + "/" + sys.argv[2] + "/"
    # Create the directory (and any missing parents) on first use.
    pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    return path
def generate_uuid() -> str:
    """Return a random UUID4 as a 32-character lowercase hex string."""
    # .hex is exactly the dashless form of str(uuid4()).
    return uuid.uuid4().hex
def write_file(filename: str, content: str) -> None:
    """Create (or truncate) *filename* and write *content* to it."""
    # A context manager guarantees the handle is closed even if write() raises.
    with open(filename, "w+") as new_file:
        new_file.write(content)
def get_location(entry: dict) -> (float, float):
    """Return (longitude, latitude) for the entry, defaulting to (0.0, 0.0)."""
    fallback = {'longitude': 0.0, 'latitude': 0.0}
    loc = entry.get('location', fallback)
    return loc['longitude'], loc['latitude']
def get_dates(entry: dict) -> (str, str):
    """Return (creationDate, modifiedDate), both defaulting to 'now'."""
    now = datetime.datetime.now()
    return entry.get('creationDate', now), entry.get('modifiedDate', now)
def get_self_uuid(entry: dict) -> str:
    """Return the entry's uuid lowercased, or a freshly generated fallback."""
    if 'uuid' in entry:
        return entry['uuid'].lower()
    # Only generate a fallback when one is actually needed; the original
    # generated (and discarded) a UUID even when the entry carried one.
    return generate_uuid()
def get_content(entry: dict) -> str:
    """Return the entry text with every backslash removed ('' if missing)."""
    return entry.get('text', "").replace("\\", "")
def get_title(text: str) -> str:
    """Return the first line, minus '#' marks and leading space, plus a blank line."""
    first_line = text.partition('\n')[0]
    return first_line.replace("#", "").lstrip() + "\n\n"
def get_metainfo(self_id: str, parent_id: str, latitude: float, longitude: float, creation_date: str, modified_date: str) -> str:
    """Render the Joplin note metadata block appended after each note body."""
    metainfo = """\n
    id: {self_id}
    parent_id: {parent_id}
    created_time: {creation_date}
    updated_time: {modified_date}
    is_conflict: 0
    latitude: {latitude}
    longitude: {longitude}
    altitude: 0.0000
    author: 
    source_url: 
    is_todo: 0
    todo_due: 0
    todo_completed: 0
    source: joplin_desktop
    source_application: net.cozic.joplin-desktop
    application_data: 
    order: 0
    user_created_time: {creation_date}
    user_updated_time: {modified_date}
    encryption_cipher_text: 
    encryption_applied: 0
    markup_language: 1
    is_shared: 0
    type_: 1""".format(self_id=self_id, parent_id=parent_id, creation_date=creation_date, modified_date=modified_date, longitude=longitude, latitude=latitude)
    metainfo = textwrap.dedent(metainfo) # Remove added indentation from multiline string.
    return metainfo
def convert_to_markdown(entry: dict, target_dir: str, parent_id: str) -> None:
    """Write one Day One entry as a Joplin-style markdown note in *target_dir*.

    The note file is named ``<uuid>.md`` and contains the title line, the
    entry text, and the metadata trailer referencing *parent_id*.
    """
    longitude, latitude = get_location(entry) # Get location.
    creation_date, modified_date = get_dates(entry) # Get dates.
    self_id = get_self_uuid(entry) # Get ID.
    text = get_content(entry) # Get text content.
    title = get_title(text) # Get title.
    metainfo = get_metainfo(self_id, parent_id, latitude, longitude, creation_date, modified_date) # Get metainfo
    text = title + text + metainfo # Prepend title and append metainfo to text
    # Write to new markdown file.
    new_md = target_dir + self_id + ".md"
    write_file(new_md, text)
if __name__ == '__main__':
check_arguments()
source_json = get_source_json()
target_dir = get_target_dir()
# Generate parent UUID.
parent_id = generate_uuid()
# Open and load Journal.json.
with open(source_json) as json_file:
data = json.load(json_file)
entries = data.get('entries')
if not entries:
sys.exit("Error: No entries found in Journal.json")
for entry in entries: # Go through all journal entries.
convert_to_markdown(entry, target_dir, parent_id)
# Create resources folder.
# Future: Enable automatic convertion of images.
resource_dir = target_dir + "resources"
pathlib.Path(resource_dir).mkdir(parents=True, exist_ok=True)
print("Success!")
| en | 0.626028 | #!/usr/local/bin/python3 # Check for less than two arguments passed. # Get current working directory. # Use first argument. # Get current working directory. # Use second argument. # Create directory if it doesn't exist. # Remove dashes # Get current date and time. # Set entry contents to empty string if not available. # Remove all single backslashes displayed. # Get the first line as the title. # Remove all pound signs and leading whitespace. \n id: {self_id} parent_id: {parent_id} created_time: {creation_date} updated_time: {modified_date} is_conflict: 0 latitude: {latitude} longitude: {longitude} altitude: 0.0000 author: source_url: is_todo: 0 todo_due: 0 todo_completed: 0 source: joplin_desktop source_application: net.cozic.joplin-desktop application_data: order: 0 user_created_time: {creation_date} user_updated_time: {modified_date} encryption_cipher_text: encryption_applied: 0 markup_language: 1 is_shared: 0 type_: 1 # Remove added indentation from multiline string. # Get location. # Get dates. # Get ID. # Get text content. # Get title. # Get metainfo # Prepend title and append metainfo to text # Write to new markdown file. # Generate parent UUID. # Open and load Journal.json. # Go through all journal entries. # Create resources folder. # Future: Enable automatic convertion of images. | 2.91457 | 3 |
resumeparser/api/migrations/0003_auto_20170410_1053.py | jaffyadhav/django-resume-parser | 0 | 6620944 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-10 10:53
from __future__ import unicode_literals
from django.db import migrations, models
import resumeparser.utils.validator
class Migration(migrations.Migration):
    """Re-declare ``ResumeArchive.datafile`` with a dated upload path and a
    file-extension validator."""
    dependencies = [
        ('api', '0002_resumearchive'),
    ]
    operations = [
        migrations.AlterField(
            model_name='resumearchive',
            name='datafile',
            field=models.FileField(upload_to='resumes/%Y/%m/%d', validators=[resumeparser.utils.validator.validate_file_extension]),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-10 10:53
from __future__ import unicode_literals
from django.db import migrations, models
import resumeparser.utils.validator
class Migration(migrations.Migration):
dependencies = [
('api', '0002_resumearchive'),
]
operations = [
migrations.AlterField(
model_name='resumearchive',
name='datafile',
field=models.FileField(upload_to='resumes/%Y/%m/%d', validators=[resumeparser.utils.validator.validate_file_extension]),
),
]
| en | 0.806483 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-04-10 10:53 | 1.721109 | 2 |
shrdr/types.py | Skielex/shrdr | 2 | 6620945 | """Module with shrdr template type mappings."""
capacity_types_lookup = {
'int16': 'CapInt16',
'int32': 'CapInt32',
'int64': 'CapInt64',
'float32': 'CapFloat32',
'float64': 'CapFloat64',
}
arc_index_types_lookup = {
'uint32': 'ArcIdxUInt32',
'uint64': 'ArcIdxUInt64',
}
node_index_types_lookup = {
'uint32': 'NodeIdxUInt32',
'uint64': 'NodeIdxUInt64',
}
| """Module with shrdr template type mappings."""
capacity_types_lookup = {
'int16': 'CapInt16',
'int32': 'CapInt32',
'int64': 'CapInt64',
'float32': 'CapFloat32',
'float64': 'CapFloat64',
}
arc_index_types_lookup = {
'uint32': 'ArcIdxUInt32',
'uint64': 'ArcIdxUInt64',
}
node_index_types_lookup = {
'uint32': 'NodeIdxUInt32',
'uint64': 'NodeIdxUInt64',
}
| en | 0.708946 | Module with shrdr template type mappings. | 1.609921 | 2 |
recognize.py | maxwnewcomer/OpenCVFacialRecognition | 0 | 6620946 | <reponame>maxwnewcomer/OpenCVFacialRecognition
import numpy as np
import argparse
import imutils
import pickle
import cv2
import os
def recognize(imagePath, detectorPath, embeddingsPath, recognizerPath, label, confidenceLim):
print('[INFO] loading face detector...')
protoPath = os.path.sep.join([detectorPath, "deploy.prototxt"])
modelPath = os.path.sep.join([detectorPath, 'res10_300x300_ssd_iter_140000.caffemodel'])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
print('[INFO] loading face recognizer...')
embedder = cv2.dnn.readNetFromTorch(embeddingsPath)
recognizer = pickle.loads(open(recognizerPath, "rb").read())
l = pickle.loads(open(label, "rb").read())
image = cv2.imread(imagePath)
image = imutils.resize(image, width=600)
(h, w) = image.shape[:2]
imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(imageBlob)
detections = detector.forward()
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > confidenceLim:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = image[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
if fW < 20 or fH < 20:
continue
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = l.classes_[j]
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 100, 255), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
print('[DONE]')
| import numpy as np
import argparse
import imutils
import pickle
import cv2
import os
def recognize(imagePath, detectorPath, embeddingsPath, recognizerPath, label, confidenceLim):
print('[INFO] loading face detector...')
protoPath = os.path.sep.join([detectorPath, "deploy.prototxt"])
modelPath = os.path.sep.join([detectorPath, 'res10_300x300_ssd_iter_140000.caffemodel'])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
print('[INFO] loading face recognizer...')
embedder = cv2.dnn.readNetFromTorch(embeddingsPath)
recognizer = pickle.loads(open(recognizerPath, "rb").read())
l = pickle.loads(open(label, "rb").read())
image = cv2.imread(imagePath)
image = imutils.resize(image, width=600)
(h, w) = image.shape[:2]
imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(imageBlob)
detections = detector.forward()
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > confidenceLim:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = image[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
if fW < 20 or fH < 20:
continue
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = l.classes_[j]
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 100, 255), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
print('[DONE]') | en | 0.57371 | # show the output image | 2.631193 | 3 |
formatic/walkers/class_injection_walker.py | welchbj/formatic | 2 | 6620947 | """Implementation of ClassInjectionWalker."""
from __future__ import annotations
from typing import (
Iterator,
List,
Optional,
Set)
from .abstract_injection_walker import (
AbstractInjectionWalker)
from .attribute_injection_walker import (
AttributeInjectionWalker)
from .doc_string_injection_walker import (
DocStringInjectionWalker)
from .failed_injection_walker import (
FailedInjectionWalker)
from .function_injection_walker import (
FunctionInjectionWalker)
from .module_injection_walker import (
ModuleInjectionWalker)
from .name_injection_walker import (
NameInjectionWalker)
from ..utils import (
indent_lines,
parse_dict_top_level_keys)
class ClassInjectionWalker(AbstractInjectionWalker):
"""An injection walker for recovering class source code and other data."""
INJECTION_RE = None
RESPONSE_RE = r'^<class .+>'
def __extra_init__(
self
) -> None:
super().__extra_init__()
self._raw_dict_str: Optional[str] = None
self._docstring_walker = self.empty_instance(DocStringInjectionWalker)
self._name_walker = self.empty_instance(NameInjectionWalker)
self._module_name_walker = self.empty_instance(NameInjectionWalker)
self._base_class_walkers: List[ClassInjectionWalker] = []
self._attribute_walkers: List[AttributeInjectionWalker] = []
self._function_walkers: List[FunctionInjectionWalker] = []
self._src_code: Optional[str] = None
def walk(
self
) -> Iterator[AbstractInjectionWalker]:
yield from self._walk_name()
if not self._name_walker.is_default:
if self._name_walker.value in self._engine.class_blacklist:
return
self._engine.class_blacklist.add(self._name_walker.value)
yield from self._walk_module_name()
if not self._module_name_walker.is_default:
if self._module_name_walker.value in self._engine.module_blacklist:
return
yield from self._walk_doc()
yield from self._walk_base_classes()
yield from self._walk_dict()
self._gen_src_code()
yield self
yield from self._walk_globals()
def _walk_name(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the class's __name__."""
name_injection = f'{self._injection_str}.__name__!r'
result = self._harness.send_injection(name_injection)
if result is None:
yield FailedInjectionWalker.msg(
f'Unable to read response from injection {name_injection}')
return
walker = self.next_walker(name_injection, result)
if not isinstance(walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
f'Expected a name response when sending {name_injection}; '
f'got {walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._name_walker = walker
def _walk_module_name(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the class's __module__ name."""
module_name_injection = f'{self._injection_str}.__module__!r'
result = self._harness.send_injection(module_name_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to inject __module__ name for class '
f'{self._name_walker.value} with injection '
f'{module_name_injection}')
return
walker = self.next_walker(module_name_injection, result)
if not isinstance(walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected a name injection walker response when injecting '
f'__module__ of class {self._name_walker.value} with '
f'injection {module_name_injection} but got instance of '
f'{walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._module_name_walker = walker
def _walk_doc(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the class's __doc__."""
docstring_injection = f'{self._injection_str}.__doc__!r'
result = self._harness.send_injection(docstring_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to retrieve injection response from string '
f'{docstring_injection}')
return
walker = self.next_walker(docstring_injection, result)
if not isinstance(walker, DocStringInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected a docstring response when sending injection '
f'{docstring_injection}; got '
f'{walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._docstring_walker = walker
def _walk_base_classes(
self
) -> Iterator[AbstractInjectionWalker]:
"""Walk the class's base classes via __bases__."""
base_classes_injection = f'{self._injection_str}.__bases__'
result = self._harness.send_injection(base_classes_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to retrieve injection response from string '
f'{base_classes_injection}')
return
i = 0
while True:
base_class_indexed_injection = (
f'{self._injection_str}.__bases__[{i}]')
result = self._harness.send_injection(base_class_indexed_injection)
if result is None:
break
base_class_walker = self.next_walker(
base_class_indexed_injection, result)
if not isinstance(base_class_walker, ClassInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected class injection walker from response but got '
f'{base_class_walker.__class__.__qualname__} instead')
return
base_class_name_injection = (
f'{base_class_indexed_injection}.__name__!r')
result = self._harness.send_injection(base_class_name_injection)
if result is None:
break
base_class_name_walker = self.next_walker(
base_class_name_injection, result)
if not isinstance(base_class_name_walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected name injection walker from injection '
f'{base_class_name_injection} but got'
f'{base_class_name_walker.__class__.__qualname__} instead')
return
yield from base_class_name_walker.walk()
base_class_name = base_class_name_walker.value
if (base_class_name is None or
base_class_name in self._engine.class_blacklist):
i += 1
continue
yield from base_class_walker.walk()
self._base_class_walkers.append(base_class_walker)
i += 1
def _walk_dict(
self
) -> Iterator[AbstractInjectionWalker]:
"""Walk the class's attrs, funcs, and other fields via __dict__."""
key_blacklist: Set[str] = set(self._engine.attribute_blacklist)
# below fields are visited manually
key_blacklist |= {
'__name__',
'__doc__',
'__bases__',
'__dict__',
'__module__',
}
dict_injection = f'{self._injection_str}.__dict__'
result = self._harness.send_injection(dict_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to recover __dict__ from class with '
f'injection {dict_injection}')
return
self._raw_dict_str = result
# now that we have the top-level __dict__ keys, we will iterate over
# them and inspect any interesting attributes that we get back
top_level_keys = parse_dict_top_level_keys(self._raw_dict_str)
for key in top_level_keys:
if key in key_blacklist:
continue
injection_str = f'{self._injection_str}.{key}!r'
result = self._harness.send_injection(injection_str)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to read injection response with string '
f'{injection_str}')
continue
snipped_injection_str = injection_str.rstrip('!r')
next_walker = self.next_walker(snipped_injection_str, result)
if next_walker is None:
next_walker = AttributeInjectionWalker(
self._harness,
snipped_injection_str,
result,
self._bytecode_version,
self._engine)
yield from next_walker.walk()
if isinstance(next_walker, FunctionInjectionWalker):
self._function_walkers.append(next_walker)
elif isinstance(next_walker, AttributeInjectionWalker):
self._attribute_walkers.append(next_walker)
def _gen_src_code(
self
) -> None:
"""Populate this class's :data:`src_code` property."""
cls_name = self._name_walker.value
self._src_code = 'class '
if cls_name is None:
self._src_code += '<UNKNOWN>'
else:
self._src_code += cls_name
base_cls_names = [
base_cls_walker.name_walker.value for
base_cls_walker in self._base_class_walkers if
base_cls_walker.name_walker is not None and
base_cls_walker.name_walker.value is not None]
self._src_code += '('
self._src_code += ', '.join(base_cls_names)
self._src_code += '):\n'
doc_string = self._docstring_walker.value
if doc_string:
self._src_code += f' """{doc_string}"""\n\n'
for attr_walker in self._attribute_walkers:
self._src_code += f' {attr_walker.name} = {attr_walker.value}\n'
for func_walker in self._function_walkers:
if func_walker.src_code is not None:
self._src_code += f'\n{indent_lines(func_walker.src_code)}\n'
def _walk_globals(
self
) -> Iterator[AbstractInjectionWalker]:
"""Walk the __globals__ dict, escaping into the above module."""
if not self._function_walkers:
return
# any of our function walkers should give us access to __globals__
func_walker: FunctionInjectionWalker = self._function_walkers[-1]
globals_injection_str = (
f'{func_walker.injection_str.rstrip("!r")}.__globals__')
result = self._harness.send_injection(globals_injection_str)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to recover injection response with string '
f'{globals_injection_str}')
return
top_level_dict_keys = parse_dict_top_level_keys(result)
if not top_level_dict_keys:
yield FailedInjectionWalker.msg(
'Expected dump of global namespace as dict, but got '
f'{top_level_dict_keys} instead')
return
module_injection_walker = ModuleInjectionWalker(
self._harness,
globals_injection_str,
result,
self._bytecode_version,
self._engine)
yield from module_injection_walker.walk()
@property
def raw_dict_str(
self
) -> Optional[str]:
"""The raw __dict__ injection response for the injected class."""
return self._raw_dict_str
@property
def docstring_walker(
self
) -> DocStringInjectionWalker:
"""The walker used to recover the injected class's docstring."""
return self._docstring_walker
@property
def name_walker(
self
) -> NameInjectionWalker:
"""The walker used to recover the injected class's __name__."""
return self._name_walker
@property
def module_name_walker(
self
) -> NameInjectionWalker:
"""The walker used to recover the injected class's __module__."""
return self._module_name_walker
@property
def base_class_walkers(
self
) -> List[ClassInjectionWalker]:
"""The walkers used to enumerate this class's bases."""
return self._base_class_walkers
@property
def attribute_walkers(
self
) -> List[AttributeInjectionWalker]:
"""The walkers used to recover this class's attributes."""
return self._attribute_walkers
@property
def function_walkers(
self
) -> List[FunctionInjectionWalker]:
"""The walkers used to recover this class's functions."""
return self._function_walkers
@property
def src_code(
self
) -> Optional[str]:
"""The recovered source code from the injected class."""
return self._src_code
def __str__(
self
) -> str:
return f'Injected class with string {self._injection_str}'
| """Implementation of ClassInjectionWalker."""
from __future__ import annotations
from typing import (
Iterator,
List,
Optional,
Set)
from .abstract_injection_walker import (
AbstractInjectionWalker)
from .attribute_injection_walker import (
AttributeInjectionWalker)
from .doc_string_injection_walker import (
DocStringInjectionWalker)
from .failed_injection_walker import (
FailedInjectionWalker)
from .function_injection_walker import (
FunctionInjectionWalker)
from .module_injection_walker import (
ModuleInjectionWalker)
from .name_injection_walker import (
NameInjectionWalker)
from ..utils import (
indent_lines,
parse_dict_top_level_keys)
class ClassInjectionWalker(AbstractInjectionWalker):
"""An injection walker for recovering class source code and other data."""
INJECTION_RE = None
RESPONSE_RE = r'^<class .+>'
def __extra_init__(
self
) -> None:
super().__extra_init__()
self._raw_dict_str: Optional[str] = None
self._docstring_walker = self.empty_instance(DocStringInjectionWalker)
self._name_walker = self.empty_instance(NameInjectionWalker)
self._module_name_walker = self.empty_instance(NameInjectionWalker)
self._base_class_walkers: List[ClassInjectionWalker] = []
self._attribute_walkers: List[AttributeInjectionWalker] = []
self._function_walkers: List[FunctionInjectionWalker] = []
self._src_code: Optional[str] = None
def walk(
self
) -> Iterator[AbstractInjectionWalker]:
yield from self._walk_name()
if not self._name_walker.is_default:
if self._name_walker.value in self._engine.class_blacklist:
return
self._engine.class_blacklist.add(self._name_walker.value)
yield from self._walk_module_name()
if not self._module_name_walker.is_default:
if self._module_name_walker.value in self._engine.module_blacklist:
return
yield from self._walk_doc()
yield from self._walk_base_classes()
yield from self._walk_dict()
self._gen_src_code()
yield self
yield from self._walk_globals()
def _walk_name(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the class's __name__."""
name_injection = f'{self._injection_str}.__name__!r'
result = self._harness.send_injection(name_injection)
if result is None:
yield FailedInjectionWalker.msg(
f'Unable to read response from injection {name_injection}')
return
walker = self.next_walker(name_injection, result)
if not isinstance(walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
f'Expected a name response when sending {name_injection}; '
f'got {walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._name_walker = walker
def _walk_module_name(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the class's __module__ name."""
module_name_injection = f'{self._injection_str}.__module__!r'
result = self._harness.send_injection(module_name_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to inject __module__ name for class '
f'{self._name_walker.value} with injection '
f'{module_name_injection}')
return
walker = self.next_walker(module_name_injection, result)
if not isinstance(walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected a name injection walker response when injecting '
f'__module__ of class {self._name_walker.value} with '
f'injection {module_name_injection} but got instance of '
f'{walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._module_name_walker = walker
def _walk_doc(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the class's __doc__."""
docstring_injection = f'{self._injection_str}.__doc__!r'
result = self._harness.send_injection(docstring_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to retrieve injection response from string '
f'{docstring_injection}')
return
walker = self.next_walker(docstring_injection, result)
if not isinstance(walker, DocStringInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected a docstring response when sending injection '
f'{docstring_injection}; got '
f'{walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._docstring_walker = walker
def _walk_base_classes(
self
) -> Iterator[AbstractInjectionWalker]:
"""Walk the class's base classes via __bases__."""
base_classes_injection = f'{self._injection_str}.__bases__'
result = self._harness.send_injection(base_classes_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to retrieve injection response from string '
f'{base_classes_injection}')
return
i = 0
while True:
base_class_indexed_injection = (
f'{self._injection_str}.__bases__[{i}]')
result = self._harness.send_injection(base_class_indexed_injection)
if result is None:
break
base_class_walker = self.next_walker(
base_class_indexed_injection, result)
if not isinstance(base_class_walker, ClassInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected class injection walker from response but got '
f'{base_class_walker.__class__.__qualname__} instead')
return
base_class_name_injection = (
f'{base_class_indexed_injection}.__name__!r')
result = self._harness.send_injection(base_class_name_injection)
if result is None:
break
base_class_name_walker = self.next_walker(
base_class_name_injection, result)
if not isinstance(base_class_name_walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
'Expected name injection walker from injection '
f'{base_class_name_injection} but got'
f'{base_class_name_walker.__class__.__qualname__} instead')
return
yield from base_class_name_walker.walk()
base_class_name = base_class_name_walker.value
if (base_class_name is None or
base_class_name in self._engine.class_blacklist):
i += 1
continue
yield from base_class_walker.walk()
self._base_class_walkers.append(base_class_walker)
i += 1
def _walk_dict(
self
) -> Iterator[AbstractInjectionWalker]:
"""Walk the class's attrs, funcs, and other fields via __dict__."""
key_blacklist: Set[str] = set(self._engine.attribute_blacklist)
# below fields are visited manually
key_blacklist |= {
'__name__',
'__doc__',
'__bases__',
'__dict__',
'__module__',
}
dict_injection = f'{self._injection_str}.__dict__'
result = self._harness.send_injection(dict_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to recover __dict__ from class with '
f'injection {dict_injection}')
return
self._raw_dict_str = result
# now that we have the top-level __dict__ keys, we will iterate over
# them and inspect any interesting attributes that we get back
top_level_keys = parse_dict_top_level_keys(self._raw_dict_str)
for key in top_level_keys:
if key in key_blacklist:
continue
injection_str = f'{self._injection_str}.{key}!r'
result = self._harness.send_injection(injection_str)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to read injection response with string '
f'{injection_str}')
continue
snipped_injection_str = injection_str.rstrip('!r')
next_walker = self.next_walker(snipped_injection_str, result)
if next_walker is None:
next_walker = AttributeInjectionWalker(
self._harness,
snipped_injection_str,
result,
self._bytecode_version,
self._engine)
yield from next_walker.walk()
if isinstance(next_walker, FunctionInjectionWalker):
self._function_walkers.append(next_walker)
elif isinstance(next_walker, AttributeInjectionWalker):
self._attribute_walkers.append(next_walker)
def _gen_src_code(
self
) -> None:
"""Populate this class's :data:`src_code` property."""
cls_name = self._name_walker.value
self._src_code = 'class '
if cls_name is None:
self._src_code += '<UNKNOWN>'
else:
self._src_code += cls_name
base_cls_names = [
base_cls_walker.name_walker.value for
base_cls_walker in self._base_class_walkers if
base_cls_walker.name_walker is not None and
base_cls_walker.name_walker.value is not None]
self._src_code += '('
self._src_code += ', '.join(base_cls_names)
self._src_code += '):\n'
doc_string = self._docstring_walker.value
if doc_string:
self._src_code += f' """{doc_string}"""\n\n'
for attr_walker in self._attribute_walkers:
self._src_code += f' {attr_walker.name} = {attr_walker.value}\n'
for func_walker in self._function_walkers:
if func_walker.src_code is not None:
self._src_code += f'\n{indent_lines(func_walker.src_code)}\n'
def _walk_globals(
self
) -> Iterator[AbstractInjectionWalker]:
"""Walk the __globals__ dict, escaping into the above module."""
if not self._function_walkers:
return
# any of our function walkers should give us access to __globals__
func_walker: FunctionInjectionWalker = self._function_walkers[-1]
globals_injection_str = (
f'{func_walker.injection_str.rstrip("!r")}.__globals__')
result = self._harness.send_injection(globals_injection_str)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to recover injection response with string '
f'{globals_injection_str}')
return
top_level_dict_keys = parse_dict_top_level_keys(result)
if not top_level_dict_keys:
yield FailedInjectionWalker.msg(
'Expected dump of global namespace as dict, but got '
f'{top_level_dict_keys} instead')
return
module_injection_walker = ModuleInjectionWalker(
self._harness,
globals_injection_str,
result,
self._bytecode_version,
self._engine)
yield from module_injection_walker.walk()
@property
def raw_dict_str(
self
) -> Optional[str]:
"""The raw __dict__ injection response for the injected class."""
return self._raw_dict_str
@property
def docstring_walker(
self
) -> DocStringInjectionWalker:
"""The walker used to recover the injected class's docstring."""
return self._docstring_walker
@property
def name_walker(
self
) -> NameInjectionWalker:
"""The walker used to recover the injected class's __name__."""
return self._name_walker
@property
def module_name_walker(
self
) -> NameInjectionWalker:
"""The walker used to recover the injected class's __module__."""
return self._module_name_walker
@property
def base_class_walkers(
self
) -> List[ClassInjectionWalker]:
"""The walkers used to enumerate this class's bases."""
return self._base_class_walkers
@property
def attribute_walkers(
self
) -> List[AttributeInjectionWalker]:
"""The walkers used to recover this class's attributes."""
return self._attribute_walkers
@property
def function_walkers(
self
) -> List[FunctionInjectionWalker]:
"""The walkers used to recover this class's functions."""
return self._function_walkers
@property
def src_code(
self
) -> Optional[str]:
"""The recovered source code from the injected class."""
return self._src_code
def __str__(
self
) -> str:
return f'Injected class with string {self._injection_str}'
| en | 0.795145 | Implementation of ClassInjectionWalker. An injection walker for recovering class source code and other data. Recover the class's __name__. Recover the class's __module__ name. Recover the class's __doc__. Walk the class's base classes via __bases__. Walk the class's attrs, funcs, and other fields via __dict__. # below fields are visited manually # now that we have the top-level __dict__ keys, we will iterate over # them and inspect any interesting attributes that we get back Populate this class's :data:`src_code` property. {doc_string} Walk the __globals__ dict, escaping into the above module. # any of our function walkers should give us access to __globals__ The raw __dict__ injection response for the injected class. The walker used to recover the injected class's docstring. The walker used to recover the injected class's __name__. The walker used to recover the injected class's __module__. The walkers used to enumerate this class's bases. The walkers used to recover this class's attributes. The walkers used to recover this class's functions. The recovered source code from the injected class. | 2.644445 | 3 |
cvax/models/yolo/blocks.py | toru34/cvax | 0 | 6620948 | import jax.nn as nn
from nmax import Module
from cvax.modules import Conv2d, BatchNorm2d
class YOLOConvBlock(Module):
conv: Module
bn: Module
def __init__(self,
key,
kernel_shape: tuple[int, int, int, int],
stride: int = 1,
batch_norm: bool = True,
activation: bool = True,
):
self.conv = Conv2d(key, kernel_shape=kernel_shape, stride=stride)
if batch_norm:
self.bn = BatchNorm2d(n_channels=kernel_shape[0])
else:
self.bn = None
self.activation = activation
def forward(self, x):
x = self.conv(x)
if self.bn:
x = self.bn(x)
if self.activation:
x = nn.leaky_relu(x, negative_slope=0.1) # TODO: add activation option
return x
class DarknetResBlock(Module):
conv_in: Module
conv_out: Module
def __init__(self,
key,
in_channels: int,
):
keys = jax.random.split(key)
self.conv_in = YOLOConvBlock(keys[0], kernel_shape=(in_channels, in_channels, 1, 1))
self.conv_out = YOLOConvBlock(keys[1], kernel_shape=(in_channels, in_channels, 3, 3))
def forward(self, x):
residual = x
x = self.conv_in(x)
x = self.conv_out(x)
x += residual
return x | import jax.nn as nn
from nmax import Module
from cvax.modules import Conv2d, BatchNorm2d
class YOLOConvBlock(Module):
conv: Module
bn: Module
def __init__(self,
key,
kernel_shape: tuple[int, int, int, int],
stride: int = 1,
batch_norm: bool = True,
activation: bool = True,
):
self.conv = Conv2d(key, kernel_shape=kernel_shape, stride=stride)
if batch_norm:
self.bn = BatchNorm2d(n_channels=kernel_shape[0])
else:
self.bn = None
self.activation = activation
def forward(self, x):
x = self.conv(x)
if self.bn:
x = self.bn(x)
if self.activation:
x = nn.leaky_relu(x, negative_slope=0.1) # TODO: add activation option
return x
class DarknetResBlock(Module):
conv_in: Module
conv_out: Module
def __init__(self,
key,
in_channels: int,
):
keys = jax.random.split(key)
self.conv_in = YOLOConvBlock(keys[0], kernel_shape=(in_channels, in_channels, 1, 1))
self.conv_out = YOLOConvBlock(keys[1], kernel_shape=(in_channels, in_channels, 3, 3))
def forward(self, x):
residual = x
x = self.conv_in(x)
x = self.conv_out(x)
x += residual
return x | ru | 0.116921 | # TODO: add activation option | 2.349079 | 2 |
utils.py | Leroll/torch-models | 6 | 6620949 | from time import time
import torch
import random
import numpy as np
import os
from pypinyin import lazy_pinyin
import jieba
def time_cost(func):
def wrapper(*arg, **kargs):
t0 = time()
res = func(*arg, **kargs)
t1 = time()
print(f'[{func.__name__}] cost {t1 - t0:.2f}s')
return res
return wrapper
def reverse_dict(x: dict):
"""交换字典的 key-value, 得到 value-key 的新字典
需保证value无重复项
"""
if isinstance(x, dict):
k, v = list(zip(*list(x.items())))
x_reverse = {}
for i in range(len(k)):
x_reverse[v[i]] = k[i] # k-v 反转字典
return x_reverse
else:
raise TypeError('arg needs to be dict')
class ModelConfig(dict):
"""config类
"""
def __init__(self, name):
super().__init__()
self['name'] = name
def __getattr__(self, item):
if item in self:
return self[item]
else:
raise AttributeError(f'No such attribute: {item}')
def __setattr__(self, key, value):
self[key] = value
def set_deterministic(seed=42):
"""
让pytorch训练过程可复现,固定各种随机化的操作。
"""
# seed
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
# 禁止hash随机化
os.environ['PYTHONHASHSEED'] = str(seed)
# https://pytorch.org/docs/stable/notes/randomness.html
# cudnn那边,使用相同的算法,而不是经过benchmark后在几个中选取最快的。
torch.backends.cudnn.benchmark = False
# torch内部一些方法使用确定型的算法,具体list见函数文档
# torch.use_deterministic_algorithms(True)
# 当上述cudnn使用同一算法时,有可能算法本身不是确定性的,因此需要下述设定
# 但是该设定已经被上面的设定包含了。
torch.backends.cudnn.deterministic = True
# dataloader在多进程时也会有reproducibility的问题
# 这部分暂时不涉及。
def compare_pinyi(s1: str, s2: str):
"""
对比两句话是否拼音完全一致。
"""
s1_pinyin = ""
s2_pinyin = ""
for w in jieba.cut(s1):
s1_pinyin += ''.join(lazy_pinyin(w))
for w in jieba.cut(s2):
s2_pinyin += ''.join(lazy_pinyin(w))
return s1_pinyin == s2_pinyin
# TODO 腾讯word vector, 后续考虑放到别的地方
from gensim.models.word2vec import KeyedVectors
def load_word2vec(path=None):
if path is None:
path = '../nlp_resource/Tencent_AILab_ChineseEmbedding/Tencent_AILab_ChineseEmbedding.txt'
word2vec = KeyedVectors.load_word2vec_format(path,
binary=False)
return word2vec
def sentence_vector_by_word2vec(q):
"""
average word2vec
"""
word2vec = load_word2vec() # TODO
res = np.zeros(200)
cnt = 0
for w in q:
res += word2vec[w] if w in word2vec else 0
cnt += 1
res /= cnt
return res | from time import time
import torch
import random
import numpy as np
import os
from pypinyin import lazy_pinyin
import jieba
def time_cost(func):
def wrapper(*arg, **kargs):
t0 = time()
res = func(*arg, **kargs)
t1 = time()
print(f'[{func.__name__}] cost {t1 - t0:.2f}s')
return res
return wrapper
def reverse_dict(x: dict):
"""交换字典的 key-value, 得到 value-key 的新字典
需保证value无重复项
"""
if isinstance(x, dict):
k, v = list(zip(*list(x.items())))
x_reverse = {}
for i in range(len(k)):
x_reverse[v[i]] = k[i] # k-v 反转字典
return x_reverse
else:
raise TypeError('arg needs to be dict')
class ModelConfig(dict):
    """Configuration container: a dict whose entries double as attributes.

    ``cfg.key`` reads ``cfg['key']`` and ``cfg.key = v`` writes
    ``cfg['key'] = v``; a missing key surfaces as AttributeError.
    """

    def __init__(self, name):
        super().__init__()
        self['name'] = name

    def __getattr__(self, item):
        # Only invoked when normal attribute lookup fails, so regular dict
        # methods are unaffected.
        try:
            return self[item]
        except KeyError:
            raise AttributeError(f'No such attribute: {item}') from None

    def __setattr__(self, key, value):
        # Route every attribute assignment into the mapping itself.
        self[key] = value
def set_deterministic(seed=42):
    """Pin every RNG used during training so runs are reproducible.

    Seeds random / numpy / torch, sets PYTHONHASHSEED, and forces cuDNN
    into its deterministic, non-benchmarking mode.

    Args:
        seed: integer seed applied everywhere (default 42).
    """
    # Python-level generators and hash randomization.
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Torch RNG.
    torch.manual_seed(seed)
    # https://pytorch.org/docs/stable/notes/randomness.html
    # Always reuse the same cuDNN algorithm (no benchmarking), and insist
    # the chosen algorithm itself is deterministic.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # torch.use_deterministic_algorithms(True) would go further (see its
    # docs for the covered ops). Multi-worker DataLoader reproducibility
    # is intentionally not handled here.
def compare_pinyi(s1: str, s2: str):
    """Return True if the two sentences have exactly the same pinyin.

    Each sentence is segmented with jieba, each word is converted to
    tone-less pinyin (``lazy_pinyin``), and the concatenated pinyin
    strings are compared.
    """
    s1_pinyin = ""
    s2_pinyin = ""
    for w in jieba.cut(s1):
        s1_pinyin += ''.join(lazy_pinyin(w))
    for w in jieba.cut(s2):
        s2_pinyin += ''.join(lazy_pinyin(w))
    return s1_pinyin == s2_pinyin
# TODO 腾讯word vector, 后续考虑放到别的地方
from gensim.models.word2vec import KeyedVectors
def load_word2vec(path=None):
    """Load the Tencent AILab Chinese embeddings as gensim KeyedVectors.

    Args:
        path: text-format word2vec file. Defaults to the bundled Tencent
            embedding; the default is a relative path, so it presumably
            assumes a specific working directory — TODO confirm.
    Returns:
        A gensim ``KeyedVectors`` instance.
    """
    if path is None:
        path = '../nlp_resource/Tencent_AILab_ChineseEmbedding/Tencent_AILab_ChineseEmbedding.txt'
    word2vec = KeyedVectors.load_word2vec_format(path,
                                                 binary=False)
    return word2vec
def sentence_vector_by_word2vec(q):
"""
average word2vec
"""
word2vec = load_word2vec() # TODO
res = np.zeros(200)
cnt = 0
for w in q:
res += word2vec[w] if w in word2vec else 0
cnt += 1
res /= cnt
return res | zh | 0.936917 | 交换字典的 key-value, 得到 value-key 的新字典 需保证value无重复项 # k-v 反转字典 config类 让pytorch训练过程可复现,固定各种随机化的操作。 # seed # 禁止hash随机化 # https://pytorch.org/docs/stable/notes/randomness.html # cudnn那边,使用相同的算法,而不是经过benchmark后在几个中选取最快的。 # torch内部一些方法使用确定型的算法,具体list见函数文档 # torch.use_deterministic_algorithms(True) # 当上述cudnn使用同一算法时,有可能算法本身不是确定性的,因此需要下述设定 # 但是该设定已经被上面的设定包含了。 # dataloader在多进程时也会有reproducibility的问题 # 这部分暂时不涉及。 对比两句话是否拼音完全一致。 # TODO 腾讯word vector, 后续考虑放到别的地方 average word2vec # TODO | 2.522186 | 3 |
exo_changelog/loader.py | marfyl/django-changelog | 3 | 6620950 | from __future__ import unicode_literals
import os
import sys
from importlib import import_module
from django.apps import apps
from django.utils import six
from .graph import ChangeGraph
from .recorder import ChangeRecorder
from .exceptions import (
AmbiguityError, BadChangeError, InconsistentChangeHistory,
NodeNotFoundError,
)
CHANGELOG_MODULE_NAME = 'changelog'
class ChangeLoader(object):
    """
    Loads changes files from disk, and their status from the database.

    Change files are expected to live in the "changelog" directory of
    an app. Their names are entirely unimportant from a code perspective,
    but will probably follow the 1234_name.py convention.

    On initialization, this class will scan those directories, and open and
    read the python files, looking for a class called Change, which should
    inherit from exo_changelog.change.Change. See
    exo_changelog.change for what that looks like.

    This does mean that this class MUST also talk to the database as well as
    to disk, but this is probably fine. We're already not just operating
    in memory.
    """
    def __init__(self, connection, load=True, ignore_no_changes=False):
        # connection: DB connection used to read the applied-changes table
        # (may be None, in which case nothing counts as applied).
        self.connection = connection
        self.disk_changes = None
        self.applied_changes = None
        self.ignore_no_changes = ignore_no_changes
        if load:
            # Eagerly scan disk + DB and build the dependency graph.
            self.build_graph()
    @classmethod
    def changes_module(cls, app_label):
        """
        Return the path to the changes module for the specified app_label
        and a boolean indicating if the module is specified in
        settings.CHANGE_MODULE.
        """
        # There is no per-app settings override here, so the "explicit"
        # flag is always False.
        app_package_name = apps.get_app_config(app_label).name
        return '%s.%s' % (app_package_name, CHANGELOG_MODULE_NAME), False
    def load_disk(self):
        """
        Loads the changes from all INSTALLED_APPS from disk.

        Populates:
          * self.disk_changes: {(app_label, change_name): Change instance}
          * self.changed_apps / self.unchanged_apps: app labels with /
            without a usable changelog package.
        """
        self.disk_changes = {}
        self.unchanged_apps = set()
        self.changed_apps = set()
        for app_config in apps.get_app_configs():
            # Get the migrations module directory
            module_name, explicit = self.changes_module(app_config.label)
            if module_name is None:
                self.unchanged_apps.add(app_config.label)
                continue
            was_loaded = module_name in sys.modules
            try:
                module = import_module(module_name)
            except ImportError as e:
                # I hate doing this, but I don't want to squash other import errors.
                # Might be better to try a directory check directly.
                if ((explicit and self.ignore_no_changes) or (
                        not explicit and 'No module named' in str(e) and CHANGELOG_MODULE_NAME in str(e))):
                    self.unchanged_apps.add(app_config.label)
                    continue
                raise
            else:
                # PY3 will happily import empty dirs as namespaces.
                if not hasattr(module, '__file__'):
                    self.unchanged_apps.add(app_config.label)
                    continue
                # Module is not a package (e.g. migrations.py).
                if not hasattr(module, '__path__'):
                    self.unchanged_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    six.moves.reload_module(module)
            self.changed_apps.add(app_config.label)
            directory = os.path.dirname(module.__file__)
            # Scan for .py files
            change_names = set()
            for name in os.listdir(directory):
                if name.endswith('.py'):
                    import_name = name.rsplit('.', 1)[0]
                    # Skip modules whose name starts with '_', '.' or '~'
                    # (private modules, hidden files, editor backups).
                    if import_name[0] not in '_.~':
                        change_names.add(import_name)
            # Load them
            for change_name in change_names:
                change_module = import_module('%s.%s' % (module_name, change_name))
                if not hasattr(change_module, 'Change'):
                    raise BadChangeError(
                        'Change %s in app %s has no Change class' % (change_name, app_config.label)
                    )
                self.disk_changes[app_config.label, change_name] = change_module.Change(
                    change_name,
                    app_config.label,
                )
    def get_change(self, app_label, name_prefix):
        """Get the change exactly named, or raise graph.NodeNotFoundError."""
        return self.graph.nodes[app_label, name_prefix]
    def get_change_by_prefix(self, app_label, name_prefix):
        """Return the single change matching the app label and name prefix.

        Raises AmbiguityError for multiple matches, KeyError for none.
        """
        # Do the search
        results = []
        for change_app_label, change_name in self.disk_changes:
            if change_app_label == app_label and change_name.startswith(name_prefix):
                results.append((change_app_label, change_name))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one change for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif len(results) == 0:
            raise KeyError("There no change for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_changes[results[0]]
    def check_key(self, key, current_app):
        """Resolve a dependency key, expanding the special "__first__" /
        "__latest__" markers to a concrete (app_label, name) node.

        Returns the resolved key, or None when the dependency should be
        ignored for this app.
        """
        if (key[1] != '__first__' and key[1] != '__latest__') or key in self.graph:
            return key
        # Special-case __first__, which means "the first change" for
        # changed apps, and is ignored for unchanged apps. It allows
        # makechanges to declare dependencies on apps before they even have
        # changes.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unchanged_apps:
            # This app isn't changes, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.changed_apps:
            try:
                if key[1] == '__first__':
                    return list(self.graph.root_nodes(key[0]))[0]
                else:  # "__latest__"
                    return list(self.graph.leaf_nodes(key[0]))[0]
            except IndexError:
                # The app has a changelog package but no change files in it.
                if self.ignore_no_changes:
                    return None
                else:
                    raise ValueError('Dependency on app with no changes: %s' % key[0])
        raise ValueError('Dependency on unknown app: %s' % key[0])
    def add_internal_dependencies(self, key, change):
        """
        Internal dependencies need to be added first to ensure `__first__`
        dependencies find the correct root node.
        """
        for parent in change.dependencies:
            if parent[0] != key[0] or parent[1] == '__first__':
                # Ignore __first__ references to the same app (#22325).
                continue
            self.graph.add_dependency(change, key, parent, skip_validation=True)
    def add_external_dependencies(self, key, change):
        """Wire cross-app `dependencies` and `run_before` edges for one change."""
        for parent in change.dependencies:
            # Skip internal dependencies
            if key[0] == parent[0]:
                continue
            parent = self.check_key(parent, key[0])
            if parent is not None:
                self.graph.add_dependency(change, key, parent, skip_validation=True)
        for child in change.run_before:
            # run_before is the inverse edge: this change must apply first.
            child = self.check_key(child, key[0])
            if child is not None:
                self.graph.add_dependency(change, child, key, skip_validation=True)
    def build_graph(self):
        """
        Builds a change dependency graph using both the disk and database.
        You'll need to rebuild the graph if you apply change. This isn't
        usually a problem as generally change stuff runs in a one-shot process.
        """
        # Load disk data
        self.load_disk()
        # Load database data
        if self.connection is None:
            self.applied_changes = set()
        else:
            recorder = ChangeRecorder(self.connection)
            self.applied_changes = recorder.applied_changes()
        # To start, populate the migration graph with nodes for ALL migrations
        # and their dependencies. Also make note of replacing migrations at this step.
        self.graph = ChangeGraph()
        self.replacements = {}
        for key, change in self.disk_changes.items():
            self.graph.add_node(key, change)
            # Internal (aka same-app) dependencies.
            self.add_internal_dependencies(key, change)
        # Add external dependencies now that the internal ones have been resolved.
        for key, change in self.disk_changes.items():
            self.add_external_dependencies(key, change)
        # Ensure the graph is consistent.
        try:
            self.graph.validate_consistency()
        except NodeNotFoundError as exc:
            # NOTE(review): re-raised unchanged; kept as an explicit hook
            # for attaching extra context to missing-node errors.
            raise exc
    def check_consistent_history(self, connection):
        """
        Raise InconsistentChangeHistory if any applied changes have
        unapplied dependencies.
        """
        recorder = ChangeRecorder(connection)
        applied = recorder.applied_changes()
        for change in applied:
            # If the migration is unknown, skip it.
            if change not in self.graph.nodes:
                continue
            for parent in self.graph.node_map[change].parents:
                if parent not in applied:
                    raise InconsistentChangeHistory(
                        'Change {}.{} is applied before its dependency '
                        "{}.{} on database '{}'.".format(
                            change[0], change[1], parent[0], parent[1],
                            connection.alias,
                        )
                    )
    def detect_conflicts(self):
        """
        Looks through the loaded graph and detects any conflicts - apps
        with more than one leaf migration. Returns a dict of the app labels
        that conflict with the migration names that conflict.
        """
        seen_apps = {}
        conflicting_apps = set()
        for app_label, change_name in self.graph.leaf_nodes():
            if app_label in seen_apps:
                conflicting_apps.add(app_label)
            seen_apps.setdefault(app_label, set()).add(change_name)
        return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
    def project_state(self, nodes=None, at_end=True):
        """
        Returns a ProjectState object representing the most recent state
        that the migrations we loaded represent.
        See graph.make_state for the meaning of "nodes" and "at_end"
        """
        return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unchanged_apps))
| from __future__ import unicode_literals
import os
import sys
from importlib import import_module
from django.apps import apps
from django.utils import six
from .graph import ChangeGraph
from .recorder import ChangeRecorder
from .exceptions import (
AmbiguityError, BadChangeError, InconsistentChangeHistory,
NodeNotFoundError,
)
CHANGELOG_MODULE_NAME = 'changelog'
class ChangeLoader(object):
"""
Loads changes files from disk, and their status from the database.
Change files are expected to live in the "changelog" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the python files, looking for a class called Change, which should
inherit from exo_changelog.change.Change. See
exo_changelog.change for what that looks like.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_changes=False):
self.connection = connection
self.disk_changes = None
self.applied_changes = None
self.ignore_no_changes = ignore_no_changes
if load:
self.build_graph()
@classmethod
def changes_module(cls, app_label):
"""
Return the path to the changes module for the specified app_label
and a boolean indicating if the module is specified in
settings.CHANGE_MODULE.
"""
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, CHANGELOG_MODULE_NAME), False
def load_disk(self):
"""
Loads the changes from all INSTALLED_APPS from disk.
"""
self.disk_changes = {}
self.unchanged_apps = set()
self.changed_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name, explicit = self.changes_module(app_config.label)
if module_name is None:
self.unchanged_apps.add(app_config.label)
continue
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if ((explicit and self.ignore_no_changes) or (
not explicit and 'No module named' in str(e) and CHANGELOG_MODULE_NAME in str(e))):
self.unchanged_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
self.unchanged_apps.add(app_config.label)
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
self.unchanged_apps.add(app_config.label)
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
six.moves.reload_module(module)
self.changed_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py files
change_names = set()
for name in os.listdir(directory):
if name.endswith('.py'):
import_name = name.rsplit('.', 1)[0]
if import_name[0] not in '_.~':
change_names.add(import_name)
# Load them
for change_name in change_names:
change_module = import_module('%s.%s' % (module_name, change_name))
if not hasattr(change_module, 'Change'):
raise BadChangeError(
'Change %s in app %s has no Change class' % (change_name, app_config.label)
)
self.disk_changes[app_config.label, change_name] = change_module.Change(
change_name,
app_config.label,
)
def get_change(self, app_label, name_prefix):
'Gets the change exactly named, or raises `graph.NodeNotFoundError`'
return self.graph.nodes[app_label, name_prefix]
def get_change_by_prefix(self, app_label, name_prefix):
'Returns the change(s) which match the given app label and name _prefix_'
# Do the search
results = []
for change_app_label, change_name in self.disk_changes:
if change_app_label == app_label and change_name.startswith(name_prefix):
results.append((change_app_label, change_name))
if len(results) > 1:
raise AmbiguityError(
"There is more than one change for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif len(results) == 0:
raise KeyError("There no change for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_changes[results[0]]
def check_key(self, key, current_app):
if (key[1] != '__first__' and key[1] != '__latest__') or key in self.graph:
return key
# Special-case __first__, which means "the first change" for
# changed apps, and is ignored for unchanged apps. It allows
# makechanges to declare dependencies on apps before they even have
# changes.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unchanged_apps:
# This app isn't changes, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.changed_apps:
try:
if key[1] == '__first__':
return list(self.graph.root_nodes(key[0]))[0]
else: # "__latest__"
return list(self.graph.leaf_nodes(key[0]))[0]
except IndexError:
if self.ignore_no_changes:
return None
else:
raise ValueError('Dependency on app with no changes: %s' % key[0])
raise ValueError('Dependency on unknown app: %s' % key[0])
def add_internal_dependencies(self, key, change):
"""
Internal dependencies need to be added first to ensure `__first__`
dependencies find the correct root node.
"""
for parent in change.dependencies:
if parent[0] != key[0] or parent[1] == '__first__':
# Ignore __first__ references to the same app (#22325).
continue
self.graph.add_dependency(change, key, parent, skip_validation=True)
def add_external_dependencies(self, key, change):
for parent in change.dependencies:
# Skip internal dependencies
if key[0] == parent[0]:
continue
parent = self.check_key(parent, key[0])
if parent is not None:
self.graph.add_dependency(change, key, parent, skip_validation=True)
for child in change.run_before:
child = self.check_key(child, key[0])
if child is not None:
self.graph.add_dependency(change, child, key, skip_validation=True)
def build_graph(self):
"""
Builds a change dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply change. This isn't
usually a problem as generally change stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_changes = set()
else:
recorder = ChangeRecorder(self.connection)
self.applied_changes = recorder.applied_changes()
# To start, populate the migration graph with nodes for ALL migrations
# and their dependencies. Also make note of replacing migrations at this step.
self.graph = ChangeGraph()
self.replacements = {}
for key, change in self.disk_changes.items():
self.graph.add_node(key, change)
# Internal (aka same-app) dependencies.
self.add_internal_dependencies(key, change)
# Add external dependencies now that the internal ones have been resolved.
for key, change in self.disk_changes.items():
self.add_external_dependencies(key, change)
# Ensure the graph is consistent.
try:
self.graph.validate_consistency()
except NodeNotFoundError as exc:
raise exc
def check_consistent_history(self, connection):
"""
Raise InconsistentChangeHistory if any applied changes have
unapplied dependencies.
"""
recorder = ChangeRecorder(connection)
applied = recorder.applied_changes()
for change in applied:
# If the migration is unknown, skip it.
if change not in self.graph.nodes:
continue
for parent in self.graph.node_map[change].parents:
if parent not in applied:
raise InconsistentChangeHistory(
'Change {}.{} is applied before its dependency '
"{}.{} on database '{}'.".format(
change[0], change[1], parent[0], parent[1],
connection.alias,
)
)
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, change_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(change_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
def project_state(self, nodes=None, at_end=True):
"""
Returns a ProjectState object representing the most recent state
that the migrations we loaded represent.
See graph.make_state for the meaning of "nodes" and "at_end"
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unchanged_apps))
| en | 0.900939 | Loads changes files from disk, and their status from the database. Change files are expected to live in the "changelog" directory of an app. Their names are entirely unimportant from a code perspective, but will probably follow the 1234_name.py convention. On initialization, this class will scan those directories, and open and read the python files, looking for a class called Change, which should inherit from exo_changelog.change.Change. See exo_changelog.change for what that looks like. This does mean that this class MUST also talk to the database as well as to disk, but this is probably fine. We're already not just operating in memory. Return the path to the changes module for the specified app_label and a boolean indicating if the module is specified in settings.CHANGE_MODULE. Loads the changes from all INSTALLED_APPS from disk. # Get the migrations module directory # I hate doing this, but I don't want to squash other import errors. # Might be better to try a directory check directly. # PY3 will happily import empty dirs as namespaces. # Module is not a package (e.g. migrations.py). # Force a reload if it's already loaded (tests need this) # Scan for .py files # Load them # Do the search # Special-case __first__, which means "the first change" for # changed apps, and is ignored for unchanged apps. It allows # makechanges to declare dependencies on apps before they even have # changes. # Ignore __first__ references to the same app (#22325) # This app isn't changes, but something depends on it. # The models will get auto-added into the state, though # so we're fine. # "__latest__" Internal dependencies need to be added first to ensure `__first__` dependencies find the correct root node. # Ignore __first__ references to the same app (#22325). # Skip internal dependencies Builds a change dependency graph using both the disk and database. You'll need to rebuild the graph if you apply change. 
This isn't usually a problem as generally change stuff runs in a one-shot process. # Load disk data # Load database data # To start, populate the migration graph with nodes for ALL migrations # and their dependencies. Also make note of replacing migrations at this step. # Internal (aka same-app) dependencies. # Add external dependencies now that the internal ones have been resolved. # Ensure the graph is consistent. Raise InconsistentChangeHistory if any applied changes have unapplied dependencies. # If the migration is unknown, skip it. Looks through the loaded graph and detects any conflicts - apps with more than one leaf migration. Returns a dict of the app labels that conflict with the migration names that conflict. Returns a ProjectState object representing the most recent state that the migrations we loaded represent. See graph.make_state for the meaning of "nodes" and "at_end" | 2.237036 | 2 |