max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
results/nim_1b_table_of_bad_lm.py
|
SourCherries/auto-face-align
| 13
|
6628851
|
<reponame>SourCherries/auto-face-align
import os
import csv
expression = ["an", "ca", "di", "fe", "ha", "ne", "sa", "sp"]
mouth = ["o", "c"]
databases = []
my_project_path = os.path.dirname(os.path.abspath(__file__))
# Output to long format CSV with everything broken down.
# MOUTH [o c]
# EXPRESSION [an ... sp]
# STRICT [YES, NO]
# NUMBER The number of excluded
with open('table-DLIB-bad-landmarks-NIM.csv', 'w') as writer:
# writer.write("Mouth,Expression,Strict,Number\n")
writer.write("Strict,Mouth,Expression,Number\n")
for mo in mouth:
for ex in expression:
dbase = "NIM-" + ex + "-" + mo
my_faces_path = my_project_path + os.path.sep + dbase + os.path.sep
with open(my_faces_path + "bad-landmarks-strict.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
print("")
num_exc_strict = len(row)
writer.write('%s,%s,%s,%d\n' % ("yes", mo, ex, num_exc_strict))
with open(my_faces_path + "bad-landmarks.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
print("")
num_exc_strict = len(row)
writer.write('%s,%s,%s,%d\n' % ("no", mo, ex, num_exc_strict))
# END -------------------------------------------------------------------------
# -----------------------------------------------------------------------------
|
import os
import csv
expression = ["an", "ca", "di", "fe", "ha", "ne", "sa", "sp"]
mouth = ["o", "c"]
databases = []
my_project_path = os.path.dirname(os.path.abspath(__file__))
# Output to long format CSV with everything broken down.
# MOUTH [o c]
# EXPRESSION [an ... sp]
# STRICT [YES, NO]
# NUMBER The number of excluded
with open('table-DLIB-bad-landmarks-NIM.csv', 'w') as writer:
# writer.write("Mouth,Expression,Strict,Number\n")
writer.write("Strict,Mouth,Expression,Number\n")
for mo in mouth:
for ex in expression:
dbase = "NIM-" + ex + "-" + mo
my_faces_path = my_project_path + os.path.sep + dbase + os.path.sep
with open(my_faces_path + "bad-landmarks-strict.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
print("")
num_exc_strict = len(row)
writer.write('%s,%s,%s,%d\n' % ("yes", mo, ex, num_exc_strict))
with open(my_faces_path + "bad-landmarks.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
print("")
num_exc_strict = len(row)
writer.write('%s,%s,%s,%d\n' % ("no", mo, ex, num_exc_strict))
# END -------------------------------------------------------------------------
# -----------------------------------------------------------------------------
|
en
| 0.51467
|
# Output to long format CSV with everything broken down. # MOUTH [o c] # EXPRESSION [an ... sp] # STRICT [YES, NO] # NUMBER The number of excluded # writer.write("Mouth,Expression,Strict,Number\n") # END ------------------------------------------------------------------------- # -----------------------------------------------------------------------------
| 3.152941
| 3
|
src/controles.py
|
neviim/ytbviews
| 0
|
6628852
|
"""
Modulo com a classe Canais
Esta classe é para instanciar e ler cada canal no youtube
o qual sera especificado em um arquivo json de reverencia
de leitura, para o processamento
Os dados desta instancia sera usado para gerar e gravar um
arquivo de saida com as novas referncias capturadas no youtube.
"""
import datetime
import requests
import json
class Canais():
""" Classe Canais"""
def __init__(self, tvid="", views=0, subscritos=0):
"""
Entra com os valores iniciais. váriaveis de sistema:
tvid => nome do canal no youtube
views => quantidade de views do canal
subscritos => quantidade de inscritos no canal.
"""
super(Canais, self).__init__()
self.tvid = tvid
self.views = views
self.subscritos = subscritos
def ler_tvid(self):
""" Retorna o nome do canal do youtube """
return self.tvid
def __str__(self):
""" Retorna uma descrição amigável do objeto """
return "{}/{}/{}".format(self.tvid, self.views, self.subscritos)
def __repr__(self):
""" Retorna uma descrição precisa e única do objeto """
return "tvid()={} views()=int({}) sudscritos()=int({})".format(self.tvid, self.views, self.subscritos)
class Countyt():
"""
Classe para capturar dados do site youtube
Captura de variaveis:
views => visualizações
subscritos => inscritos
"""
html_string = ""
def __init__(self, contaID):
""" Variaveis de uso do sistema """
url = "http://youtube.de/user/" + contaID + "/about"
html = requests.get(url)
self.html_string = html.text
def getSubscritos(self):
""" Captura subscritos de um canal especifico """
abo = self.html_string.find("Abonnenten")
raw = self.html_string[(abo - 10):abo]
abonnenten = ""
for x in raw:
if (x.isdigit()):
abonnenten += x
return int(abonnenten)
def getViews(self):
""" Capitura views de um canal especifico """
abo = self.html_string.find("Aufrufe")
raw = self.html_string[(abo - 16):abo]
aufrufe = ""
for x in raw:
if (x.isdigit()):
aufrufe += x
return int(aufrufe)
class DatetimeEncoder(json.JSONEncoder):
'''
Classe para formatar datatime
Transforma data em:
ano => %Y
mes => %m
dia => %s
Transforma data hora:
ano => %Y
mes => %m
dia => %dT
hora => %H:
minuto => %M:
segundo => %SZ
clamada para uso: json.dumps(dict,cls=DatetimeEncoder)
'''
def default(self, obj):
''' chamada por default, defini se é uma instancia de datatime ou data '''
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
# base class default caso não tenha as opções retorna TypeError
return json.JSONEncoder.default(self, obj)
|
"""
Modulo com a classe Canais
Esta classe é para instanciar e ler cada canal no youtube
o qual sera especificado em um arquivo json de reverencia
de leitura, para o processamento
Os dados desta instancia sera usado para gerar e gravar um
arquivo de saida com as novas referncias capturadas no youtube.
"""
import datetime
import requests
import json
class Canais():
""" Classe Canais"""
def __init__(self, tvid="", views=0, subscritos=0):
"""
Entra com os valores iniciais. váriaveis de sistema:
tvid => nome do canal no youtube
views => quantidade de views do canal
subscritos => quantidade de inscritos no canal.
"""
super(Canais, self).__init__()
self.tvid = tvid
self.views = views
self.subscritos = subscritos
def ler_tvid(self):
""" Retorna o nome do canal do youtube """
return self.tvid
def __str__(self):
""" Retorna uma descrição amigável do objeto """
return "{}/{}/{}".format(self.tvid, self.views, self.subscritos)
def __repr__(self):
""" Retorna uma descrição precisa e única do objeto """
return "tvid()={} views()=int({}) sudscritos()=int({})".format(self.tvid, self.views, self.subscritos)
class Countyt():
"""
Classe para capturar dados do site youtube
Captura de variaveis:
views => visualizações
subscritos => inscritos
"""
html_string = ""
def __init__(self, contaID):
""" Variaveis de uso do sistema """
url = "http://youtube.de/user/" + contaID + "/about"
html = requests.get(url)
self.html_string = html.text
def getSubscritos(self):
""" Captura subscritos de um canal especifico """
abo = self.html_string.find("Abonnenten")
raw = self.html_string[(abo - 10):abo]
abonnenten = ""
for x in raw:
if (x.isdigit()):
abonnenten += x
return int(abonnenten)
def getViews(self):
""" Capitura views de um canal especifico """
abo = self.html_string.find("Aufrufe")
raw = self.html_string[(abo - 16):abo]
aufrufe = ""
for x in raw:
if (x.isdigit()):
aufrufe += x
return int(aufrufe)
class DatetimeEncoder(json.JSONEncoder):
'''
Classe para formatar datatime
Transforma data em:
ano => %Y
mes => %m
dia => %s
Transforma data hora:
ano => %Y
mes => %m
dia => %dT
hora => %H:
minuto => %M:
segundo => %SZ
clamada para uso: json.dumps(dict,cls=DatetimeEncoder)
'''
def default(self, obj):
''' chamada por default, defini se é uma instancia de datatime ou data '''
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
# base class default caso não tenha as opções retorna TypeError
return json.JSONEncoder.default(self, obj)
|
pt
| 0.942461
|
Modulo com a classe Canais Esta classe é para instanciar e ler cada canal no youtube o qual sera especificado em um arquivo json de reverencia de leitura, para o processamento Os dados desta instancia sera usado para gerar e gravar um arquivo de saida com as novas referncias capturadas no youtube. Classe Canais Entra com os valores iniciais. váriaveis de sistema: tvid => nome do canal no youtube views => quantidade de views do canal subscritos => quantidade de inscritos no canal. Retorna o nome do canal do youtube Retorna uma descrição amigável do objeto Retorna uma descrição precisa e única do objeto Classe para capturar dados do site youtube Captura de variaveis: views => visualizações subscritos => inscritos Variaveis de uso do sistema Captura subscritos de um canal especifico Capitura views de um canal especifico Classe para formatar datatime Transforma data em: ano => %Y mes => %m dia => %s Transforma data hora: ano => %Y mes => %m dia => %dT hora => %H: minuto => %M: segundo => %SZ clamada para uso: json.dumps(dict,cls=DatetimeEncoder) chamada por default, defini se é uma instancia de datatime ou data # base class default caso não tenha as opções retorna TypeError
| 4.07272
| 4
|
class2/exercises/exercise1.py
|
twin-bridges/netmiko_course
| 11
|
6628853
|
import os
from getpass import getpass
from netmiko import ConnectHandler
import logging
logging.basicConfig(filename="test.log", level=logging.DEBUG)
logger = logging.getLogger("netmiko")
# Code so automated tests will run properly
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
arista1 = {
"device_type": "arista_eos",
"host": "arista1.lasthop.io",
"username": "pyclass",
"password": password,
}
arista2 = {
"device_type": "arista_eos",
"host": "arista2.lasthop.io",
"username": "pyclass",
"password": password,
}
arista3 = {
"device_type": "arista_eos",
"host": "arista3.lasthop.io",
"username": "pyclass",
"password": password,
}
arista4 = {
"device_type": "arista_eos",
"host": "arista4.lasthop.io",
"username": "pyclass",
"password": password,
}
for device in (arista1, arista2, arista3, arista4):
with ConnectHandler(**device) as net_connect:
device_name = net_connect.find_prompt()
output = net_connect.send_command("show ip arp")
print(f"\nDevice: {device_name}:")
print(output)
print()
|
import os
from getpass import getpass
from netmiko import ConnectHandler
import logging
logging.basicConfig(filename="test.log", level=logging.DEBUG)
logger = logging.getLogger("netmiko")
# Code so automated tests will run properly
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
arista1 = {
"device_type": "arista_eos",
"host": "arista1.lasthop.io",
"username": "pyclass",
"password": password,
}
arista2 = {
"device_type": "arista_eos",
"host": "arista2.lasthop.io",
"username": "pyclass",
"password": password,
}
arista3 = {
"device_type": "arista_eos",
"host": "arista3.lasthop.io",
"username": "pyclass",
"password": password,
}
arista4 = {
"device_type": "arista_eos",
"host": "arista4.lasthop.io",
"username": "pyclass",
"password": password,
}
for device in (arista1, arista2, arista3, arista4):
with ConnectHandler(**device) as net_connect:
device_name = net_connect.find_prompt()
output = net_connect.send_command("show ip arp")
print(f"\nDevice: {device_name}:")
print(output)
print()
|
en
| 0.656469
|
# Code so automated tests will run properly
| 2.319171
| 2
|
time_tree.py
|
pyensemble/wildwood
| 22
|
6628854
|
import logging
import numpy as np
import pandas as pd
from sklearn.datasets import make_circles, make_moons
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier as SkDecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier as SkExtraTreeClassifier
from wildwood._classes import DecisionTreeClassifier
from time import time
import cProfile
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
np.set_printoptions(precision=2)
print("JIT compilation...")
tic = time()
X, y = make_circles(n_samples=5, noise=0.2, factor=0.5, random_state=1)
clf = DecisionTreeClassifier(min_samples_split=3)
clf.fit(X, y)
clf.predict_proba(X)
toc = time()
print("Spent {time} compiling.".format(time=toc - tic))
n_samples = 200_000
# n_samples = 100_000
random_state = 42
datasets = [
(
"circles",
make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1),
),
("moons", make_moons(n_samples=n_samples, noise=0.3, random_state=0)),
]
clf_kwargs = {"min_samples_split": 2, "random_state": random_state}
# classifiers = [
# ("tree", DecisionTreeClassifier(**clf_kwargs)),
# ("sk_tree", SkDecisionTreeClassifier(**clf_kwargs)),
# ("sk_extra", SkExtraTreeClassifier(**clf_kwargs))
# ]
classifiers = [
("tree", DecisionTreeClassifier),
("sk_tree", SkDecisionTreeClassifier),
("sk_extra", SkExtraTreeClassifier)
]
dataset = []
classifier = []
timings = []
task = []
# cprofile
# n_samples = 1_000_000
# X, y = make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1)
#
# cProfile.run("clf.fit(X, y)", "main_tree2")
#
# exit(0)
for data_name, (X, y) in datasets:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
for clf_name, Clf in classifiers:
clf = Clf(**clf_kwargs)
tic = time()
clf.fit(X_train, y_train)
toc = time()
logging.info("%s had %d nodes" % (clf_name, clf.tree_.node_count))
dataset.append(data_name)
classifier.append(clf_name)
timings.append(toc - tic)
task.append("fit")
tic = time()
clf.predict_proba(X_test)
toc = time()
dataset.append(data_name)
classifier.append(clf_name)
timings.append(toc - tic)
task.append("predict")
results = pd.DataFrame(
{"datasets": dataset, "task": task, "classifier": classifier, "timings": timings}
)
print(results.pivot(index=["datasets", "task"], columns="classifier"))
|
import logging
import numpy as np
import pandas as pd
from sklearn.datasets import make_circles, make_moons
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier as SkDecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier as SkExtraTreeClassifier
from wildwood._classes import DecisionTreeClassifier
from time import time
import cProfile
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
np.set_printoptions(precision=2)
print("JIT compilation...")
tic = time()
X, y = make_circles(n_samples=5, noise=0.2, factor=0.5, random_state=1)
clf = DecisionTreeClassifier(min_samples_split=3)
clf.fit(X, y)
clf.predict_proba(X)
toc = time()
print("Spent {time} compiling.".format(time=toc - tic))
n_samples = 200_000
# n_samples = 100_000
random_state = 42
datasets = [
(
"circles",
make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1),
),
("moons", make_moons(n_samples=n_samples, noise=0.3, random_state=0)),
]
clf_kwargs = {"min_samples_split": 2, "random_state": random_state}
# classifiers = [
# ("tree", DecisionTreeClassifier(**clf_kwargs)),
# ("sk_tree", SkDecisionTreeClassifier(**clf_kwargs)),
# ("sk_extra", SkExtraTreeClassifier(**clf_kwargs))
# ]
classifiers = [
("tree", DecisionTreeClassifier),
("sk_tree", SkDecisionTreeClassifier),
("sk_extra", SkExtraTreeClassifier)
]
dataset = []
classifier = []
timings = []
task = []
# cprofile
# n_samples = 1_000_000
# X, y = make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1)
#
# cProfile.run("clf.fit(X, y)", "main_tree2")
#
# exit(0)
for data_name, (X, y) in datasets:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
for clf_name, Clf in classifiers:
clf = Clf(**clf_kwargs)
tic = time()
clf.fit(X_train, y_train)
toc = time()
logging.info("%s had %d nodes" % (clf_name, clf.tree_.node_count))
dataset.append(data_name)
classifier.append(clf_name)
timings.append(toc - tic)
task.append("fit")
tic = time()
clf.predict_proba(X_test)
toc = time()
dataset.append(data_name)
classifier.append(clf_name)
timings.append(toc - tic)
task.append("predict")
results = pd.DataFrame(
{"datasets": dataset, "task": task, "classifier": classifier, "timings": timings}
)
print(results.pivot(index=["datasets", "task"], columns="classifier"))
|
en
| 0.318606
|
# n_samples = 100_000 # classifiers = [ # ("tree", DecisionTreeClassifier(**clf_kwargs)), # ("sk_tree", SkDecisionTreeClassifier(**clf_kwargs)), # ("sk_extra", SkExtraTreeClassifier(**clf_kwargs)) # ] # cprofile # n_samples = 1_000_000 # X, y = make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1) # # cProfile.run("clf.fit(X, y)", "main_tree2") # # exit(0)
| 2.473091
| 2
|
lambda/run-step-functions-lambda.py
|
sg09/aws
| 0
|
6628855
|
<reponame>sg09/aws
import json
import boto3
import json
import os
stepfunctions_client = boto3.client('stepfunctions')
def lambda_handler(event, context):
s3event = event['Records'][0]['s3']
s3bucket = s3event['bucket']['name']
s3key = s3event['object']['key']
step_state = {
"s3_bucket": s3bucket,
"s3_video_key": s3key
}
response = stepfunctions_client.start_execution(
stateMachineArn=os.environ['STATEMACHINEARN'],
input=json.dumps(step_state)
)
return json.dumps(response, default=str)
|
import json
import boto3
import json
import os
stepfunctions_client = boto3.client('stepfunctions')
def lambda_handler(event, context):
s3event = event['Records'][0]['s3']
s3bucket = s3event['bucket']['name']
s3key = s3event['object']['key']
step_state = {
"s3_bucket": s3bucket,
"s3_video_key": s3key
}
response = stepfunctions_client.start_execution(
stateMachineArn=os.environ['STATEMACHINEARN'],
input=json.dumps(step_state)
)
return json.dumps(response, default=str)
|
none
| 1
| 1.937354
| 2
|
|
paddlex/cv/transforms/seg_transforms.py
|
SunAhong1993/PaddleX
| 1
|
6628856
|
# coding: utf8
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ops import *
from .imgaug_support import execute_imgaug
import random
import os.path as osp
import numpy as np
from PIL import Image
import cv2
import imghdr
import six
import sys
from collections import OrderedDict
import paddlex.utils.logging as logging
class SegTransform:
""" 分割transform基类
"""
def __init__(self):
pass
class Compose(SegTransform):
"""根据数据预处理/增强算子对输入数据进行操作。
所有操作的输入图像流形状均是[H, W, C],其中H为图像高,W为图像宽,C为图像通道数。
Args:
transforms (list): 数据预处理/增强算子。
Raises:
TypeError: transforms不是list对象
ValueError: transforms元素个数小于1。
"""
def __init__(self, transforms):
if not isinstance(transforms, list):
raise TypeError('The transforms must be a list!')
if len(transforms) < 1:
raise ValueError('The length of transforms ' + \
'must be equal or larger than 1!')
self.transforms = transforms
self.batch_transforms = None
self.to_rgb = False
# 检查transforms里面的操作,目前支持PaddleX定义的或者是imgaug操作
for op in self.transforms:
if not isinstance(op, SegTransform):
import imgaug.augmenters as iaa
if not isinstance(op, iaa.Augmenter):
raise Exception(
"Elements in transforms should be defined in 'paddlex.seg.transforms' or class of imgaug.augmenters.Augmenter, see docs here: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/"
)
@staticmethod
def read_img(img_path, input_channel=3):
img_format = imghdr.what(img_path)
name, ext = osp.splitext(img_path)
if img_format == 'tiff' or ext == '.img':
try:
import gdal
except:
six.reraise(*sys.exc_info())
raise Exception(
"Please refer to https://github.com/PaddlePaddle/PaddleX/tree/develop/examples/multi-channel_remote_sensing/README.md to install gdal"
)
dataset = gdal.Open(img_path)
if dataset == None:
raise Exception('Can not open', img_path)
im_data = dataset.ReadAsArray()
return im_data.transpose((1, 2, 0))
elif img_format in ['jpeg', 'bmp', 'png']:
if input_channel == 3:
return cv2.imread(img_path)
else:
return cv2.imread(im_file, cv2.IMREAD_UNCHANGED)
elif ext == '.npy':
return np.load(img_path)
else:
raise Exception('Image format {} is not supported!'.format(ext))
@staticmethod
def decode_image(im_path, label, input_channel=3):
if isinstance(im_path, np.ndarray):
if len(im_path.shape) != 3:
raise Exception(
"im should be 3-dimensions, but now is {}-dimensions".
format(len(im_path.shape)))
im = im_path
else:
try:
im = Compose.read_img(im_path, input_channel).astype('float32')
except:
raise ValueError('Can\'t read The image file {}!'.format(
im_path))
im = im.astype('float32')
if label is not None:
if isinstance(label, np.ndarray):
if len(label.shape) != 2:
raise Exception(
"label should be 2-dimensions, but now is {}-dimensions".
format(len(label.shape)))
else:
try:
label = np.asarray(Image.open(label))
except:
ValueError('Can\'t read The label file {}!'.format(label))
im_height, im_width, _ = im.shape
label_height, label_width = label.shape
if im_height != label_height or im_width != label_width:
raise Exception(
"The height or width of the image is not same as the label")
return (im, label)
def __call__(self, im, im_info=None, label=None):
"""
Args:
im (str/np.ndarray): 图像路径/图像np.ndarray数据。
im_info (list): 存储图像reisze或padding前的shape信息,如
[('resize', [200, 300]), ('padding', [400, 600])]表示
图像在过resize前shape为(200, 300), 过padding前shape为
(400, 600)
label (str/np.ndarray): 标注图像路径/标注图像np.ndarray数据。
Returns:
tuple: 根据网络所需字段所组成的tuple;字段由transforms中的最后一个数据预处理操作决定。
"""
input_channel = getattr(self, 'input_channel', 3)
im, label = self.decode_image(im, label, input_channel)
if self.to_rgb and input_channel == 3:
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if im_info is None:
im_info = [('origin_shape', im.shape[0:2])]
if label is not None:
origin_label = label.copy()
for op in self.transforms:
if isinstance(op, SegTransform):
outputs = op(im, im_info, label)
im = outputs[0]
if len(outputs) >= 2:
im_info = outputs[1]
if len(outputs) == 3:
label = outputs[2]
else:
im = execute_imgaug(op, im)
if label is not None:
outputs = (im, im_info, label)
else:
outputs = (im, im_info)
if self.transforms[-1].__class__.__name__ == 'ArrangeSegmenter':
if self.transforms[-1].mode == 'eval':
if label is not None:
outputs = (im, im_info, origin_label)
return outputs
def add_augmenters(self, augmenters):
if not isinstance(augmenters, list):
raise Exception(
"augmenters should be list type in func add_augmenters()")
transform_names = [type(x).__name__ for x in self.transforms]
for aug in augmenters:
if type(aug).__name__ in transform_names:
logging.error(
"{} is already in ComposedTransforms, need to remove it from add_augmenters().".
format(type(aug).__name__))
self.transforms = augmenters + self.transforms
class RandomHorizontalFlip(SegTransform):
"""以一定的概率对图像进行水平翻转。当存在标注图像时,则同步进行翻转。
Args:
prob (float): 随机水平翻转的概率。默认值为0.5。
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, im, im_info=None, label=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (list): 存储图像reisze或padding前的shape信息,如
[('resize', [200, 300]), ('padding', [400, 600])]表示
图像在过resize前shape为(200, 300), 过padding前shape为
(400, 600)
label (np.ndarray): 标注图像np.ndarray数据。
Returns:
tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、
存储与图像相关信息的字典和标注图像np.ndarray数据。
"""
if random.random() < self.prob:
im = horizontal_flip(im)
if label is not None:
label = horizontal_flip(label)
if label is None:
return (im, im_info)
else:
return (im, im_info, label)
class RandomVerticalFlip(SegTransform):
"""以一定的概率对图像进行垂直翻转。当存在标注图像时,则同步进行翻转。
Args:
prob (float): 随机垂直翻转的概率。默认值为0.1。
"""
def __init__(self, prob=0.1):
self.prob = prob
def __call__(self, im, im_info=None, label=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (list): 存储图像reisze或padding前的shape信息,如
[('resize', [200, 300]), ('padding', [400, 600])]表示
图像在过resize前shape为(200, 300), 过padding前shape为
(400, 600)
label (np.ndarray): 标注图像np.ndarray数据。
Returns:
tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、
存储与图像相关信息的字典和标注图像np.ndarray数据。
"""
if random.random() < self.prob:
im = vertical_flip(im)
if label is not None:
label = vertical_flip(label)
if label is None:
return (im, im_info)
else:
return (im, im_info, label)
class Resize(SegTransform):
"""调整图像大小(resize),当存在标注图像时,则同步进行处理。
- 当目标大小(target_size)类型为int时,根据插值方式,
将图像resize为[target_size, target_size]。
- 当目标大小(target_size)类型为list或tuple时,根据插值方式,
将图像resize为target_size, target_size的输入应为[w, h]或(w, h)。
Args:
target_size (int|list|tuple): 目标大小。
interp (str): resize的插值方式,与opencv的插值方式对应,
可选的值为['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4'],默认为"LINEAR"。
Raises:
TypeError: target_size不是int/list/tuple。
ValueError: target_size为list/tuple时元素个数不等于2。
AssertionError: interp的取值不在['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4']之内。
"""
# The interpolation mode
interp_dict = {
'NEAREST': cv2.INTER_NEAREST,
'LINEAR': cv2.INTER_LINEAR,
'CUBIC': cv2.INTER_CUBIC,
'AREA': cv2.INTER_AREA,
'LANCZOS4': cv2.INTER_LANCZOS4
}
def __init__(self, target_size, interp='LINEAR'):
self.interp = interp
assert interp in self.interp_dict, "interp should be one of {}".format(
interp_dict.keys())
if isinstance(target_size, list) or isinstance(target_size, tuple):
if len(target_size) != 2:
raise ValueError(
'when target is list or tuple, it should include 2 elements, but it is {}'
.format(target_size))
elif not isinstance(target_size, int):
raise TypeError(
"Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
.format(type(target_size)))
self.target_size = target_size
def __call__(self, im, im_info=None, label=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (list): 存储图像reisze或padding前的shape信息,如
[('resize', [200, 300]), ('padding', [400, 600])]表示
图像在过resize前shape为(200, 300), 过padding前shape为
(400, 600)
label (np.ndarray): 标注图像np.ndarray数据。
Returns:
tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、
存储与图像相关信息的字典和标注图像np.ndarray数据。
其中,im_info跟新字段为:
-shape_before_resize (tuple): 保存resize之前图像的形状(h, w)。
Raises:
ZeroDivisionError: im的短边为0。
TypeError: im不是np.ndarray数据。
ValueError: im不是3维nd.ndarray。
"""
if im_info is None:
im_info = OrderedDict()
im_info.append(('resize', im.shape[:2]))
if not isinstance(im, np.ndarray):
raise TypeError("ResizeImage: image type is not np.ndarray.")
if len(im.shape) != 3:
raise ValueError('ResizeImage: image is not 3-dimensional.')
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
if float(im_size_min) == 0:
raise ZeroDivisionError('ResizeImage: min size of image is 0')
if isinstance(self.target_size, int):
resize_w = self.target_size
resize_h = self.target_size
else:
resize_w = self.target_size[0]
resize_h = self.target_size[1]
im_scale_x = float(resize_w) / float(im_shape[1])
im_scale_y = float(resize_h) / float(im_shape[0])
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp_dict[self.interp])
if im.ndim < 3:
im = np.expand_dims(im, axis=-1)
if label is not None:
label = cv2.resize(
label,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp_dict['NEAREST'])
if label is None:
return (im, im_info)
else:
return (im, im_info, label)
class ResizeByLong(SegTransform):
"""对图像长边resize到固定值,短边按比例进行缩放。当存在标注图像时,则同步进行处理。
Args:
long_size (int): resize后图像的长边大小。
"""
def __init__(self, long_size):
self.long_size = long_size
def __call__(self, im, im_info=None, label=None):
"""
Args:
im (np.ndarray): 图像np.ndarray数据。
im_info (list): 存储图像reisze或padding前的shape信息,如
[('resize', [200, 300]), ('padding', [400, 600])]表示
图像在过resize前shape为(200, 300), 过padding前shape为
(400, 600)
label (np.ndarray): 标注图像np.ndarray数据。
Returns:
tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、
存储与图像相关信息的字典和标注图像np.ndarray数据。
其中,im_info新增字段为:
-shape_before_resize (tuple): 保存resize之前图像的形状(h, w)。
"""
if im_info is None:
im_info = OrderedDict()
im_info.append(('resize', im.shape[:2]))
im = resize_long(im, self.long_size)
if label is not None:
label = resize_long(label, self.long_size, cv2.INTER_NEAREST)
if label is None:
return (im, im_info)
else:
return (im, im_info, label)
class ResizeByShort(SegTransform):
"""根据图像的短边调整图像大小(resize)。
1. 获取图像的长边和短边长度。
2. 根据短边与short_size的比例,计算长边的目标长度,
此时高、宽的resize比例为short_size/原图短边长度。
3. 如果max_size>0,调整resize比例:
如果长边的目标长度>max_size,则高、宽的resize比例为max_size/原图长边长度。
4. 根据调整大小的比例对图像进行resize。
Args:
target_size (int): 短边目标长度。默认为800。
max_size (int): 长边目标长度的最大限制。默认为1333。
Raises:
TypeError: 形参数据类型不满足需求。
"""
def __init__(self, short_size=800, max_size=1333):
self.max_size = int(max_size)
if not isinstance(short_size, int):
raise TypeError(
"Type of short_size is invalid. Must be Integer, now is {}".
format(type(short_size)))
self.short_size = short_size
if not (isinstance(self.max_size, int)):
raise TypeError("max_size: input type is invalid.")
def __call__(self, im, im_info=None, label=None):
"""
Args:
im (numnp.ndarraypy): 图像np.ndarray数据。
im_info (list): 存储图像reisze或padding前的shape信息,如
[('resize', [200, 300]), ('padding', [400, 600])]表示
图像在过resize前shape为(200, 300), 过padding前shape为
(400, 600)
label (np.ndarray): 标注图像np.ndarray数据。
Returns:
tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、
存储与图像相关信息的字典和标注图像np.ndarray数据。
其中,im_info更新字段为:
-shape_before_resize (tuple): 保存resize之前图像的形状(h, w)。
Raises:
TypeError: 形参数据类型不满足需求。
ValueError: 数据长度不匹配。
"""
if im_info is None:
im_info = OrderedDict()
if not isinstance(im, np.ndarray):
raise TypeError("ResizeByShort: image type is not numpy.")
if len(im.shape) != 3:
raise ValueError('ResizeByShort: image is not 3-dimensional.')
im_info.append(('resize', im.shape[:2]))
im_short_size = min(im.shape[0], im.shape[1])
im_long_size = max(im.shape[0], im.shape[1])
scale = float(self.short_size) / im_short_size
if self.max_size > 0 and np.round(scale *
im_long_size) > self.max_size:
scale = float(self.max_size) / float(im_long_size)
resized_width = int(round(im.shape[1] * scale))
resized_height = int(round(im.shape[0] * scale))
im = cv2.resize(
im, (resized_width, resized_height),
interpolation=cv2.INTER_NEAREST)
if im.ndim < 3:
im = np.expand_dims(im, axis=-1)
if label is not None:
im = cv2.resize(
label, (resized_width, resized_height),
interpolation=cv2.INTER_NEAREST)
if label is None:
return (im, im_info)
else:
return (im, im_info, label)
class ResizeRangeScaling(SegTransform):
    """Randomly resize the long side of an image into a given range,
    scaling the short side proportionally. The label image, if any, is
    resized in sync.

    Args:
        min_value (int): lower bound for the resized long side. Default 400.
        max_value (int): upper bound for the resized long side. Default 600.

    Raises:
        ValueError: min_value is greater than max_value.
    """

    def __init__(self, min_value=400, max_value=600):
        if min_value > max_value:
            raise ValueError('min_value must be less than max_value, '
                             'but they are {} and {}.'.format(min_value,
                                                              max_value))
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, im, im_info=None, label=None):
        """Apply the random long-side resize.

        Args:
            im (np.ndarray): image data.
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if self.min_value == self.max_value:
            target = self.max_value
        else:
            # Round-to-nearest integer target inside [min_value, max_value].
            target = int(
                np.random.uniform(self.min_value, self.max_value) + 0.5)
        im = resize_long(im, target, cv2.INTER_LINEAR)
        if label is not None:
            label = resize_long(label, target, cv2.INTER_NEAREST)
        return (im, im_info) if label is None else (im, im_info, label)
class ResizeStepScaling(SegTransform):
    """Resize image (and label) by a random scale factor drawn from
    [min_scale_factor, max_scale_factor] in steps of scale_step_size.

    Args:
        min_scale_factor (float): smallest scale. Default 0.75.
        max_scale_factor (float): largest scale. Default 1.25.
        scale_step_size (float): spacing between candidate scales; 0 means
            sample uniformly from the continuous range. Default 0.25.

    Raises:
        ValueError: min_scale_factor is greater than max_scale_factor.
    """

    def __init__(self,
                 min_scale_factor=0.75,
                 max_scale_factor=1.25,
                 scale_step_size=0.25):
        if min_scale_factor > max_scale_factor:
            raise ValueError(
                'min_scale_factor must be less than max_scale_factor, '
                'but they are {} and {}.'.format(min_scale_factor,
                                                 max_scale_factor))
        self.min_scale_factor = min_scale_factor
        self.max_scale_factor = max_scale_factor
        self.scale_step_size = scale_step_size

    def __call__(self, im, im_info=None, label=None):
        """Apply a random uniform/stepped rescale.

        Args:
            im (np.ndarray): image data.
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        lo = self.min_scale_factor
        hi = self.max_scale_factor
        step = self.scale_step_size
        if lo == hi:
            factor = lo
        elif step == 0:
            factor = np.random.uniform(lo, hi)
        else:
            # Enumerate the evenly-spaced candidate scales and pick one.
            n_candidates = int((hi - lo) / step + 1)
            candidates = np.linspace(lo, hi, n_candidates).tolist()
            np.random.shuffle(candidates)
            factor = candidates[0]
        im = cv2.resize(
            im, (0, 0),
            fx=factor,
            fy=factor,
            interpolation=cv2.INTER_LINEAR)
        if im.ndim < 3:
            im = np.expand_dims(im, axis=-1)
        if label is not None:
            label = cv2.resize(
                label, (0, 0),
                fx=factor,
                fy=factor,
                interpolation=cv2.INTER_NEAREST)
        return (im, im_info) if label is None else (im, im_info, label)
class Normalize(SegTransform):
    """Standardize the image:
    1. subtract min_val from every pixel;
    2. divide by (max_val - min_val);
    3. subtract the dataset mean and divide by the dataset std.

    Args:
        mean (list): per-channel dataset mean. Default [0.5, 0.5, 0.5].
        std (list): per-channel dataset std. Default [0.5, 0.5, 0.5].
        min_val (list): per-channel dataset minimum. Default [0, 0, 0].
        max_val (list): per-channel dataset maximum.
            Default [255.0, 255.0, 255.0].

    Raises:
        ValueError: mean or std is not a list, or std contains a zero.
    """

    def __init__(self,
                 mean=[0.5, 0.5, 0.5],
                 std=[0.5, 0.5, 0.5],
                 min_val=[0, 0, 0],
                 max_val=[255.0, 255.0, 255.0]):
        self.min_val = min_val
        self.max_val = max_val
        self.mean = mean
        self.std = std
        if not (isinstance(self.mean, list) and isinstance(self.std, list)):
            raise ValueError("{}: input type is invalid.".format(self))
        if not (isinstance(self.min_val, list) and
                isinstance(self.max_val, list)):
            raise ValueError("{}: input type is invalid.".format(self))
        from functools import reduce
        # The product of the stds is zero iff some channel std is zero,
        # which would divide by zero during normalization.
        if reduce(lambda a, b: a * b, self.std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))

    def __call__(self, im, im_info=None, label=None):
        """Normalize the image; the label is passed through untouched.

        Args:
            im (np.ndarray): image data.
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        mean_arr = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std_arr = np.array(self.std)[np.newaxis, np.newaxis, :]
        im = normalize(im, mean_arr, std_arr, self.min_val, self.max_val)
        im = im.astype('float32')
        return (im, im_info) if label is None else (im, im_info, label)
class Padding(SegTransform):
    """Pad the image (and label) on the right and bottom up to target_size.

    Args:
        target_size (int|list|tuple): size after padding; a list/tuple is
            interpreted as [w, h] (matching the Resize transform).
        im_padding_value (list): per-channel pad value for the image.
            Default [127.5, 127.5, 127.5].
        label_padding_value (int): pad value for the label. Default 255.

    Raises:
        TypeError: target_size is not int/list/tuple.
        ValueError: target_size is a list/tuple whose length is not 2.
    """

    def __init__(self,
                 target_size,
                 im_padding_value=[127.5, 127.5, 127.5],
                 label_padding_value=255):
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise ValueError(
                    'when target is list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, im_info=None, label=None):
        """Pad to target_size; records a ('padding', (h, w)) entry.

        Images already at least as large as the target are returned
        unchanged (pad amounts are clamped to 0).

        Args:
            im (np.ndarray): image data, shape (H, W, C).
            im_info (list): list of (op, shape-before-op) records.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if im_info is None:
            # im_info is a list of (op, shape) tuples; the previous default
            # of OrderedDict() has no append() and crashed on the next line.
            im_info = list()
        im_info.append(('padding', im.shape[:2]))
        im_height, im_width = im.shape[0], im.shape[1]
        if isinstance(self.target_size, int):
            target_height = self.target_size
            target_width = self.target_size
        else:
            # target_size is [w, h].
            target_height = self.target_size[1]
            target_width = self.target_size[0]
        pad_height = max(target_height - im_height, 0)
        pad_width = max(target_width - im_width, 0)
        if (pad_height > 0 or pad_width > 0):
            im_channel = im.shape[2]
            import copy
            orig_im = copy.deepcopy(im)
            im = np.zeros((im_height + pad_height, im_width + pad_width,
                           im_channel)).astype(orig_im.dtype)
            # NOTE(review): assumes im_padding_value has at least im_channel
            # entries — confirm for multi-band (>3 channel) inputs.
            for i in range(im_channel):
                im[:, :, i] = np.pad(
                    orig_im[:, :, i],
                    pad_width=((0, pad_height), (0, pad_width)),
                    mode='constant',
                    constant_values=(self.im_padding_value[i],
                                     self.im_padding_value[i]))
            if label is not None:
                label = np.pad(label,
                               pad_width=((0, pad_height), (0, pad_width)),
                               mode='constant',
                               constant_values=(self.label_padding_value,
                                                self.label_padding_value))
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class RandomPaddingCrop(SegTransform):
    """Randomly crop a patch from image and label; when the requested crop
    is larger than the input, pad (bottom/right) first.

    Args:
        crop_size (int|list|tuple): size of the crop ([w, h] when a
            list/tuple). Default 512.
        im_padding_value (list): per-channel pad value for the image.
            Default [127.5, 127.5, 127.5].
        label_padding_value (int): pad value for the label. Default 255.

    Raises:
        TypeError: crop_size is not int/list/tuple.
        ValueError: crop_size is a list/tuple whose length is not 2.
    """

    def __init__(self,
                 crop_size=512,
                 im_padding_value=[127.5, 127.5, 127.5],
                 label_padding_value=255):
        if isinstance(crop_size, (list, tuple)):
            if len(crop_size) != 2:
                raise ValueError(
                    'when crop_size is list or tuple, it should include 2 elements, but it is {}'
                    .format(crop_size))
        elif not isinstance(crop_size, int):
            raise TypeError(
                "Type of crop_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(crop_size)))
        self.crop_size = crop_size
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, im_info=None, label=None):
        """Pad (if needed) then randomly crop image and label in sync.

        Args:
            im (np.ndarray): image data, shape (H, W, C).
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if isinstance(self.crop_size, int):
            crop_w = crop_h = self.crop_size
        else:
            crop_w, crop_h = self.crop_size[0], self.crop_size[1]
        h, w = im.shape[0], im.shape[1]

        # Exact match: nothing to do.
        if h == crop_h and w == crop_w:
            return (im, im_info) if label is None else (im, im_info, label)

        pad_h = max(crop_h - h, 0)
        pad_w = max(crop_w - w, 0)
        if pad_h > 0 or pad_w > 0:
            channels = im.shape[2]
            import copy
            src = copy.deepcopy(im)
            im = np.zeros((h + pad_h, w + pad_w,
                           channels)).astype(src.dtype)
            for ch in range(channels):
                im[:, :, ch] = np.pad(
                    src[:, :, ch],
                    pad_width=((0, pad_h), (0, pad_w)),
                    mode='constant',
                    constant_values=(self.im_padding_value[ch],
                                     self.im_padding_value[ch]))
            if label is not None:
                label = np.pad(label,
                               pad_width=((0, pad_h), (0, pad_w)),
                               mode='constant',
                               constant_values=(self.label_padding_value,
                                                self.label_padding_value))
            h = im.shape[0]
            w = im.shape[1]
        if crop_h > 0 and crop_w > 0:
            h_off = np.random.randint(h - crop_h + 1)
            w_off = np.random.randint(w - crop_w + 1)
            im = im[h_off:(crop_h + h_off), w_off:(w_off + crop_w), :]
            if label is not None:
                label = label[h_off:(crop_h + h_off), w_off:(
                    w_off + crop_w)]
        return (im, im_info) if label is None else (im, im_info, label)
class RandomBlur(SegTransform):
    """Apply Gaussian blur to the image with a given probability.

    Args:
        prob (float): probability of blurring. Default 0.1.
    """

    def __init__(self, prob=0.1):
        self.prob = prob

    def __call__(self, im, im_info=None, label=None):
        """Possibly blur the image; the label is never modified.

        Args:
            im (np.ndarray): image data.
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        # Map the probability to an integer n and blur on average once
        # every n calls; n == 0 disables blurring entirely.
        if self.prob <= 0:
            n = 0
        elif self.prob >= 1:
            n = 1
        else:
            n = int(1.0 / self.prob)
        if n > 0 and np.random.randint(0, n) == 0:
            # Random odd kernel size in [3, 9].
            ksize = np.random.randint(3, 10)
            if ksize % 2 != 1:
                ksize = ksize + 1
            ksize = min(ksize, 9)
            im = cv2.GaussianBlur(im, (ksize, ksize), 0, 0)
        return (im, im_info) if label is None else (im, im_info, label)
class RandomRotate(SegTransform):
    """Randomly rotate the image (train-time augmentation).

    The angle is drawn uniformly from [-rotate_range, rotate_range]; the
    canvas is enlarged so the rotated content fits, and the new border
    area is filled with the padding values. The label, if any, is rotated
    in sync.

    Args:
        rotate_range (float): maximum rotation angle in degrees. Default 15.
        im_padding_value (list): per-channel fill value for the image.
            Default [127.5, 127.5, 127.5].
        label_padding_value (int): fill value for the label. Default 255.
    """

    def __init__(self,
                 rotate_range=15,
                 im_padding_value=[127.5, 127.5, 127.5],
                 label_padding_value=255):
        self.rotate_range = rotate_range
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, im_info=None, label=None):
        """Rotate image and label by the same random angle.

        Args:
            im (np.ndarray): image data, shape (H, W, C).
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if self.rotate_range > 0:
            h, w, c = im.shape
            do_rotation = np.random.uniform(-self.rotate_range,
                                            self.rotate_range)
            pc = (w // 2, h // 2)
            r = cv2.getRotationMatrix2D(pc, do_rotation, 1.0)
            cos = np.abs(r[0, 0])
            sin = np.abs(r[0, 1])
            # Enlarged canvas so the whole rotated image remains visible.
            nw = int((h * sin) + (w * cos))
            nh = int((h * cos) + (w * sin))
            (cx, cy) = pc
            r[0, 2] += (nw / 2) - cx
            r[1, 2] += (nh / 2) - cy
            dsize = (nw, nh)
            # Warp three channels at a time so inputs with C > 3 work.
            rot_ims = list()
            for i in range(0, c, 3):
                ori_im = im[:, :, i:i + 3]
                rot_im = cv2.warpAffine(
                    ori_im,
                    r,
                    dsize=dsize,
                    flags=cv2.INTER_LINEAR,
                    borderMode=cv2.BORDER_CONSTANT,
                    borderValue=self.im_padding_value[i:i + 3])
                rot_ims.append(rot_im)
            im = np.concatenate(rot_ims, axis=-1)
            if label is not None:
                # BUG FIX: the label warp used to run unconditionally and
                # crashed inside cv2 whenever label was None.
                label = cv2.warpAffine(
                    label,
                    r,
                    dsize=dsize,
                    flags=cv2.INTER_NEAREST,
                    borderMode=cv2.BORDER_CONSTANT,
                    borderValue=self.label_padding_value)
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class RandomScaleAspect(SegTransform):
    """Crop a sub-region with random area and aspect ratio, then resize it
    back to the original size. The label, if any, is processed in sync.

    Args:
        min_scale (float): minimum area ratio of the crop, in [0, 1];
            0 disables the transform. Default 0.5.
        aspect_ratio (float): lower bound of the crop aspect-ratio range
            (the upper bound is its reciprocal); 0 disables the transform.
            Default 0.33.
    """

    def __init__(self, min_scale=0.5, aspect_ratio=0.33):
        self.min_scale = min_scale
        self.aspect_ratio = aspect_ratio

    def __call__(self, im, im_info=None, label=None):
        """Apply the random crop-and-restore augmentation.

        Args:
            im (np.ndarray): image data, shape (H, W, C).
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if self.min_scale != 0 and self.aspect_ratio != 0:
            img_height = im.shape[0]
            img_width = im.shape[1]
            # Up to 10 attempts to sample a crop that fits inside the image.
            for i in range(0, 10):
                area = img_height * img_width
                target_area = area * np.random.uniform(self.min_scale, 1.0)
                aspectRatio = np.random.uniform(self.aspect_ratio,
                                                1.0 / self.aspect_ratio)
                dw = int(np.sqrt(target_area * 1.0 * aspectRatio))
                dh = int(np.sqrt(target_area * 1.0 / aspectRatio))
                # Swap width/height with probability 0.5.
                if (np.random.randint(10) < 5):
                    tmp = dw
                    dw = dh
                    dh = tmp
                if (dh < img_height and dw < img_width):
                    h1 = np.random.randint(0, img_height - dh)
                    w1 = np.random.randint(0, img_width - dw)
                    im = im[h1:(h1 + dh), w1:(w1 + dw), :]
                    im = cv2.resize(
                        im, (img_width, img_height),
                        interpolation=cv2.INTER_LINEAR)
                    if im.ndim < 3:
                        im = np.expand_dims(im, axis=-1)
                    if label is not None:
                        # BUG FIX: the label crop/resize used to run
                        # unconditionally and crashed when label was None.
                        label = label[h1:(h1 + dh), w1:(w1 + dw)]
                        label = cv2.resize(
                            label, (img_width, img_height),
                            interpolation=cv2.INTER_NEAREST)
                    break
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class RandomDistort(SegTransform):
    """Randomly distort the image.

    1. Shuffle the order of the distortion operations.
    2. In that order, apply each pixel-level distortion with its own
       probability.

    Args:
        brightness_range (float): brightness factor range. Default 0.5.
        brightness_prob (float): probability of adjusting brightness.
            Default 0.5.
        contrast_range (float): contrast factor range. Default 0.5.
        contrast_prob (float): probability of adjusting contrast. Default 0.5.
        saturation_range (float): saturation factor range. Default 0.5.
        saturation_prob (float): probability of adjusting saturation.
            Default 0.5.
        hue_range (int): hue factor range. Default 18.
        hue_prob (float): probability of adjusting hue. Default 0.5.
    """

    def __init__(self,
                 brightness_range=0.5,
                 brightness_prob=0.5,
                 contrast_range=0.5,
                 contrast_prob=0.5,
                 saturation_range=0.5,
                 saturation_prob=0.5,
                 hue_range=18,
                 hue_prob=0.5):
        self.brightness_range = brightness_range
        self.brightness_prob = brightness_prob
        self.contrast_range = contrast_range
        self.contrast_prob = contrast_prob
        self.saturation_range = saturation_range
        self.saturation_prob = saturation_prob
        self.hue_range = hue_range
        self.hue_prob = hue_prob

    def __call__(self, im, im_info=None, label=None):
        """Apply the four distortions in a random order.

        Args:
            im (np.ndarray): image data, shape (H, W, C).
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image, never modified.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        brightness_lower = 1 - self.brightness_range
        brightness_upper = 1 + self.brightness_range
        contrast_lower = 1 - self.contrast_range
        contrast_upper = 1 + self.contrast_range
        saturation_lower = 1 - self.saturation_range
        saturation_upper = 1 + self.saturation_range
        hue_lower = -self.hue_range
        hue_upper = self.hue_range
        # brightness/contrast/saturation/hue are helper functions pulled in
        # from .ops via the star import; they are dispatched by __name__.
        ops = [brightness, contrast, saturation, hue]
        random.shuffle(ops)
        params_dict = {
            'brightness': {
                'brightness_lower': brightness_lower,
                'brightness_upper': brightness_upper
            },
            'contrast': {
                'contrast_lower': contrast_lower,
                'contrast_upper': contrast_upper
            },
            'saturation': {
                'saturation_lower': saturation_lower,
                'saturation_upper': saturation_upper
            },
            'hue': {
                'hue_lower': hue_lower,
                'hue_upper': hue_upper
            }
        }
        prob_dict = {
            'brightness': self.brightness_prob,
            'contrast': self.contrast_prob,
            'saturation': self.saturation_prob,
            'hue': self.hue_prob
        }
        dis_ims = list()
        h, w, c = im.shape
        # Process the channels three at a time so multi-band inputs
        # (C > 3) are distorted group by group.
        for i in range(0, c, 3):
            ori_im = im[:, :, i:i + 3]
            for id in range(4):
                params = params_dict[ops[id].__name__]
                prob = prob_dict[ops[id].__name__]
                # The helper receives the current image as a keyword arg.
                params['im'] = ori_im
                if np.random.uniform(0, 1) < prob:
                    ori_im = ops[id](**params)
            dis_ims.append(ori_im)
        im = np.concatenate(dis_ims, axis=-1)
        im = im.astype('float32')
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class Clip(SegTransform):
    """Clip per-channel pixel values into [min_val, max_val].

    Args:
        min_val (list): per-channel lower bound; smaller values are set to
            it. Default [0, 0, 0].
        max_val (list): per-channel upper bound; larger values are set to
            it. Default [255.0, 255.0, 255.0].

    Raises:
        ValueError: min_val or max_val is not a list.
    """

    def __init__(self, min_val=[0, 0, 0], max_val=[255.0, 255.0, 255.0]):
        self.min_val = min_val
        self.max_val = max_val
        if not (isinstance(self.min_val, list) and
                isinstance(self.max_val, list)):
            raise ValueError("{}: input type is invalid.".format(self))

    def __call__(self, im, im_info=None, label=None):
        # Clip each channel in place against its own bounds.
        for ch in range(im.shape[2]):
            np.clip(
                im[:, :, ch],
                self.min_val[ch],
                self.max_val[ch],
                out=im[:, :, ch])
        return (im, im_info) if label is None else (im, im_info, label)
class ArrangeSegmenter(SegTransform):
    """Arrange the sample into the tuple needed for train/eval/test/quant.

    Args:
        mode (str): one of ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: mode is not one of the values above.
    """

    def __init__(self, mode):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
            )
        self.mode = mode

    def __call__(self, im, im_info, label=None):
        """Permute the image to CHW and build the mode-specific tuple.

        Returns:
            tuple: (im, label) for 'train', (im, im_info, label) for
            'eval', (im, im_info) for 'test', and (im,) for 'quant'.
            For 'train'/'eval' the label gains a leading channel axis.
        """
        im = permute(im, False)
        if self.mode == 'train':
            return (im, label[np.newaxis, :, :])
        if self.mode == 'eval':
            return (im, im_info, label[np.newaxis, :, :])
        if self.mode == 'test':
            return (im, im_info)
        return (im, )
class ComposedSegTransforms(Compose):
    """Standard image pipeline for segmentation models (UNet/DeepLabv3p).

    Training:
        1. optional random horizontal flip (when random_horizontal_flip);
        2. optional ResizeRangeScaling within min_max_size;
        3. RandomPaddingCrop to train_crop_size;
        4. Normalize.
    Evaluation/prediction:
        1. optional ResizeByLong to (min + max) // 2 of min_max_size;
        2. Normalize.

    Args:
        mode (str): pipeline stage, 'train', 'eval' or 'test'.
        min_max_size (list): bounds used by the resize steps; None skips
            resizing. Default [400, 600].
        train_crop_size (list): crop size used during training.
            Default [512, 512].
        mean (list): channel means for Normalize. Default [0.5, 0.5, 0.5].
        std (list): channel stds for Normalize. Default [0.5, 0.5, 0.5].
        random_horizontal_flip (bool): enable flip augmentation
            (train only). Default True.
    """

    def __init__(self,
                 mode,
                 min_max_size=[400, 600],
                 train_crop_size=[512, 512],
                 mean=[0.5, 0.5, 0.5],
                 std=[0.5, 0.5, 0.5],
                 random_horizontal_flip=True):
        norm = Normalize(mean=mean, std=std)
        if mode == 'train':
            # Training transforms, including augmentation.
            steps = []
            if random_horizontal_flip:
                steps.append(RandomHorizontalFlip())
            if min_max_size is not None:
                steps.append(
                    ResizeRangeScaling(
                        min_value=min(min_max_size),
                        max_value=max(min_max_size)))
            steps.append(RandomPaddingCrop(crop_size=train_crop_size))
            steps.append(norm)
        else:
            # Evaluation / prediction transforms.
            if min_max_size is None:
                steps = [norm]
            else:
                long_size = (min(min_max_size) + max(min_max_size)) // 2
                steps = [ResizeByLong(long_size=long_size), norm]
        super(ComposedSegTransforms, self).__init__(steps)
# coding: utf8
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ops import *
from .imgaug_support import execute_imgaug
import random
import os.path as osp
import numpy as np
from PIL import Image
import cv2
import imghdr
import six
import sys
from collections import OrderedDict
import paddlex.utils.logging as logging
class SegTransform:
    """Base class for all segmentation transforms in this module."""

    def __init__(self):
        pass
class Compose(SegTransform):
    """Apply a list of preprocessing/augmentation operators to the input.

    All operators consume images shaped [H, W, C] (height, width,
    channels).

    Args:
        transforms (list): preprocessing/augmentation operators.

    Raises:
        TypeError: transforms is not a list.
        ValueError: transforms has fewer than 1 element.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms
        self.batch_transforms = None
        self.to_rgb = False
        # Validate operators: only PaddleX SegTransforms or imgaug
        # augmenters are supported.
        for op in self.transforms:
            if not isinstance(op, SegTransform):
                import imgaug.augmenters as iaa
                if not isinstance(op, iaa.Augmenter):
                    raise Exception(
                        "Elements in transforms should be defined in 'paddlex.seg.transforms' or class of imgaug.augmenters.Augmenter, see docs here: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/"
                    )

    @staticmethod
    def read_img(img_path, input_channel=3):
        """Read an image from disk.

        tiff/.img files go through GDAL, jpeg/bmp/png through OpenCV and
        .npy through NumPy.
        """
        img_format = imghdr.what(img_path)
        name, ext = osp.splitext(img_path)
        if img_format == 'tiff' or ext == '.img':
            try:
                import gdal
            except:
                six.reraise(*sys.exc_info())
                # NOTE(review): unreachable — six.reraise above re-raises
                # the ImportError, so this install hint is never shown.
                raise Exception(
                    "Please refer to https://github.com/PaddlePaddle/PaddleX/tree/develop/examples/multi-channel_remote_sensing/README.md to install gdal"
                )
            dataset = gdal.Open(img_path)
            if dataset == None:
                raise Exception('Can not open', img_path)
            im_data = dataset.ReadAsArray()
            # GDAL returns (C, H, W); convert to (H, W, C).
            return im_data.transpose((1, 2, 0))
        elif img_format in ['jpeg', 'bmp', 'png']:
            if input_channel == 3:
                return cv2.imread(img_path)
            else:
                # BUG FIX: previously read the undefined name `im_file`,
                # which raised NameError for non-3-channel images.
                return cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        elif ext == '.npy':
            return np.load(img_path)
        else:
            raise Exception('Image format {} is not supported!'.format(ext))

    @staticmethod
    def decode_image(im_path, label, input_channel=3):
        """Decode the image (path or ndarray) and the optional label,
        checking that their spatial sizes match.

        Returns:
            tuple: (im, label) with im as float32 ndarray.
        """
        if isinstance(im_path, np.ndarray):
            if len(im_path.shape) != 3:
                raise Exception(
                    "im should be 3-dimensions, but now is {}-dimensions".
                    format(len(im_path.shape)))
            im = im_path
        else:
            try:
                im = Compose.read_img(im_path,
                                      input_channel).astype('float32')
            except:
                raise ValueError('Can\'t read The image file {}!'.format(
                    im_path))
        im = im.astype('float32')
        if label is not None:
            if isinstance(label, np.ndarray):
                if len(label.shape) != 2:
                    raise Exception(
                        "label should be 2-dimensions, but now is {}-dimensions".
                        format(len(label.shape)))
            else:
                try:
                    label = np.asarray(Image.open(label))
                except:
                    # BUG FIX: the ValueError used to be constructed but
                    # never raised, silently leaving `label` as the path
                    # string and failing later at label.shape.
                    raise ValueError('Can\'t read The label file {}!'.format(
                        label))
            im_height, im_width, _ = im.shape
            label_height, label_width = label.shape
            if im_height != label_height or im_width != label_width:
                raise Exception(
                    "The height or width of the image is not same as the label")
        return (im, label)

    def __call__(self, im, im_info=None, label=None):
        """Run all transforms in order.

        Args:
            im (str/np.ndarray): image path or image array.
            im_info (list): list of (op, shape-before-op) records, e.g.
                [('resize', [200, 300]), ('padding', [400, 600])].
            label (str/np.ndarray): label path or label array.

        Returns:
            tuple: fields determined by the last transform in the list.
        """
        input_channel = getattr(self, 'input_channel', 3)
        im, label = self.decode_image(im, label, input_channel)
        if self.to_rgb and input_channel == 3:
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        if im_info is None:
            im_info = [('origin_shape', im.shape[0:2])]
        if label is not None:
            # Keep an untouched copy; eval mode returns the original label.
            origin_label = label.copy()
        for op in self.transforms:
            if isinstance(op, SegTransform):
                outputs = op(im, im_info, label)
                im = outputs[0]
                if len(outputs) >= 2:
                    im_info = outputs[1]
                if len(outputs) == 3:
                    label = outputs[2]
            else:
                im = execute_imgaug(op, im)
                if label is not None:
                    outputs = (im, im_info, label)
                else:
                    outputs = (im, im_info)
        if self.transforms[-1].__class__.__name__ == 'ArrangeSegmenter':
            if self.transforms[-1].mode == 'eval':
                if label is not None:
                    outputs = (im, im_info, origin_label)
        return outputs

    def add_augmenters(self, augmenters):
        """Prepend extra augmenters, logging an error on duplicates."""
        if not isinstance(augmenters, list):
            raise Exception(
                "augmenters should be list type in func add_augmenters()")
        transform_names = [type(x).__name__ for x in self.transforms]
        for aug in augmenters:
            if type(aug).__name__ in transform_names:
                logging.error(
                    "{} is already in ComposedTransforms, need to remove it from add_augmenters().".
                    format(type(aug).__name__))
        self.transforms = augmenters + self.transforms
class RandomHorizontalFlip(SegTransform):
    """Horizontally flip the image (and label) with a given probability.

    Args:
        prob (float): flip probability. Default 0.5.
    """

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, im, im_info=None, label=None):
        """Flip image and label together when the coin flip succeeds.

        Args:
            im (np.ndarray): image data.
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if random.random() < self.prob:
            im = horizontal_flip(im)
            if label is not None:
                label = horizontal_flip(label)
        return (im, im_info) if label is None else (im, im_info, label)
class RandomVerticalFlip(SegTransform):
    """Vertically flip the image (and label) with a given probability.

    Args:
        prob (float): flip probability. Default 0.1.
    """

    def __init__(self, prob=0.1):
        self.prob = prob

    def __call__(self, im, im_info=None, label=None):
        """Flip image and label together when the coin flip succeeds.

        Args:
            im (np.ndarray): image data.
            im_info (list): list of (op, shape) records, passed through.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if random.random() < self.prob:
            im = vertical_flip(im)
            if label is not None:
                label = vertical_flip(label)
        return (im, im_info) if label is None else (im, im_info, label)
class Resize(SegTransform):
    """Resize the image (and the label, if present).

    - If target_size is an int, the image is resized to
      [target_size, target_size].
    - If target_size is a list/tuple, it must be [w, h] or (w, h).

    Args:
        target_size (int|list|tuple): target size.
        interp (str): interpolation method, one of
            ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4'].
            Default "LINEAR".

    Raises:
        TypeError: target_size is not int/list/tuple.
        ValueError: target_size is a list/tuple whose length is not 2.
        AssertionError: interp is not one of the supported methods.
    """

    # Supported interpolation names mapped to their OpenCV flags.
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }

    def __init__(self, target_size, interp='LINEAR'):
        self.interp = interp
        # BUG FIX: the failure message referenced the bare name
        # `interp_dict`, which raised NameError instead of the intended
        # AssertionError when an unknown interp was passed.
        assert interp in self.interp_dict, "interp should be one of {}".format(
            self.interp_dict.keys())
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise ValueError(
                    'when target is list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size

    def __call__(self, im, im_info=None, label=None):
        """Resize `im` (and `label`) to target_size.

        Args:
            im (np.ndarray): image data, shape (H, W, C).
            im_info (list): list of (op, shape-before-op) records; a
                ('resize', (h, w)) entry is appended.
            label (np.ndarray): optional label image (resized with NEAREST).

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).

        Raises:
            ZeroDivisionError: the short side of `im` is 0.
            TypeError: `im` is not an np.ndarray.
            ValueError: `im` is not 3-dimensional.
        """
        if im_info is None:
            # im_info is a list of (op, shape) tuples; the previous default
            # of OrderedDict() has no append() and crashed on the next line.
            im_info = list()
        im_info.append(('resize', im.shape[:2]))
        if not isinstance(im, np.ndarray):
            raise TypeError("ResizeImage: image type is not np.ndarray.")
        if len(im.shape) != 3:
            raise ValueError('ResizeImage: image is not 3-dimensional.')
        im_shape = im.shape
        im_size_min = np.min(im_shape[0:2])
        if float(im_size_min) == 0:
            raise ZeroDivisionError('ResizeImage: min size of image is 0')
        if isinstance(self.target_size, int):
            resize_w = self.target_size
            resize_h = self.target_size
        else:
            # target_size is [w, h].
            resize_w = self.target_size[0]
            resize_h = self.target_size[1]
        im_scale_x = float(resize_w) / float(im_shape[1])
        im_scale_y = float(resize_h) / float(im_shape[0])
        im = cv2.resize(
            im,
            None,
            None,
            fx=im_scale_x,
            fy=im_scale_y,
            interpolation=self.interp_dict[self.interp])
        if im.ndim < 3:
            im = np.expand_dims(im, axis=-1)
        if label is not None:
            label = cv2.resize(
                label,
                None,
                None,
                fx=im_scale_x,
                fy=im_scale_y,
                interpolation=self.interp_dict['NEAREST'])
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class ResizeByLong(SegTransform):
    """Resize the long side of the image to a fixed value, scaling the
    short side proportionally. The label, if any, is resized in sync.

    Args:
        long_size (int): length of the long side after resizing.
    """

    def __init__(self, long_size):
        self.long_size = long_size

    def __call__(self, im, im_info=None, label=None):
        """Resize image and label; records a ('resize', (h, w)) entry.

        Args:
            im (np.ndarray): image data.
            im_info (list): list of (op, shape-before-op) records.
            label (np.ndarray): optional label image (NEAREST resize).

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).
        """
        if im_info is None:
            # im_info is a list of (op, shape) tuples; the previous default
            # of OrderedDict() has no append() and crashed on the next line.
            im_info = list()
        im_info.append(('resize', im.shape[:2]))
        im = resize_long(im, self.long_size)
        if label is not None:
            label = resize_long(label, self.long_size, cv2.INTER_NEAREST)
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class ResizeByShort(SegTransform):
    """Resize the image based on its short side.

    1. Compute the long and short side lengths.
    2. Scale so the short side equals short_size.
    3. If max_size > 0 and the scaled long side would exceed max_size,
       rescale so the long side equals max_size instead.
    4. Resize image (and label) with the resulting scale.

    Args:
        short_size (int): target length of the short side. Default 800.
        max_size (int): upper bound for the long side. Default 1333.

    Raises:
        TypeError: short_size is not an int.
    """

    def __init__(self, short_size=800, max_size=1333):
        self.max_size = int(max_size)
        if not isinstance(short_size, int):
            raise TypeError(
                "Type of short_size is invalid. Must be Integer, now is {}".
                format(type(short_size)))
        self.short_size = short_size
        if not (isinstance(self.max_size, int)):
            raise TypeError("max_size: input type is invalid.")

    def __call__(self, im, im_info=None, label=None):
        """Resize image and label; records a ('resize', (h, w)) entry.

        Args:
            im (np.ndarray): image data, shape (H, W, C).
            im_info (list): list of (op, shape-before-op) records.
            label (np.ndarray): optional label image.

        Returns:
            tuple: (im, im_info) when label is None,
                otherwise (im, im_info, label).

        Raises:
            TypeError: im is not an np.ndarray.
            ValueError: im is not 3-dimensional.
        """
        if im_info is None:
            # im_info is a list of (op, shape) tuples; the previous default
            # of OrderedDict() has no append() and crashed on the append.
            im_info = list()
        if not isinstance(im, np.ndarray):
            raise TypeError("ResizeByShort: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('ResizeByShort: image is not 3-dimensional.')
        im_info.append(('resize', im.shape[:2]))
        im_short_size = min(im.shape[0], im.shape[1])
        im_long_size = max(im.shape[0], im.shape[1])
        scale = float(self.short_size) / im_short_size
        if self.max_size > 0 and np.round(scale *
                                          im_long_size) > self.max_size:
            scale = float(self.max_size) / float(im_long_size)
        resized_width = int(round(im.shape[1] * scale))
        resized_height = int(round(im.shape[0] * scale))
        im = cv2.resize(
            im, (resized_width, resized_height),
            interpolation=cv2.INTER_NEAREST)
        if im.ndim < 3:
            im = np.expand_dims(im, axis=-1)
        if label is not None:
            # BUG FIX: the resized label was previously assigned to `im`,
            # clobbering the image and leaving `label` unresized.
            label = cv2.resize(
                label, (resized_width, resized_height),
                interpolation=cv2.INTER_NEAREST)
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class ResizeRangeScaling(SegTransform):
    """Randomly resize the long image side into [min_value, max_value],
    scaling the short side proportionally. The label image, if present, is
    resized in sync with nearest-neighbor interpolation.

    Args:
        min_value (int): Lower bound for the resized long side. Default 400.
        max_value (int): Upper bound for the resized long side. Default 600.

    Raises:
        ValueError: If min_value is greater than max_value.
    """

    def __init__(self, min_value=400, max_value=600):
        if min_value > max_value:
            raise ValueError('min_value must be less than max_value, '
                             'but they are {} and {}.'.format(min_value,
                                                              max_value))
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, im, im_info=None, label=None):
        """Apply the random long-side resize.

        Args:
            im (np.ndarray): Image data.
            im_info (list): Shape records; unchanged by this op.
            label (np.ndarray): Annotation image, or None.

        Returns:
            tuple: (im, im_info) when label is None, else (im, im_info, label).
        """
        # Degenerate range needs no sampling; otherwise draw and round.
        if self.min_value == self.max_value:
            target_long = self.max_value
        else:
            target_long = int(
                np.random.uniform(self.min_value, self.max_value) + 0.5)

        im = resize_long(im, target_long, cv2.INTER_LINEAR)
        if label is not None:
            label = resize_long(label, target_long, cv2.INTER_NEAREST)
        return (im, im_info) if label is None else (im, im_info, label)
class ResizeStepScaling(SegTransform):
    """Resize the image by a random scale drawn from
    [min_scale_factor, max_scale_factor] on a grid stepped by
    scale_step_size. The label image, if present, is resized in sync.

    Args:
        min_scale_factor (float): Minimum resize scale. Default 0.75.
        max_scale_factor (float): Maximum resize scale. Default 1.25.
        scale_step_size (float): Step between candidate scales; 0 means
            continuous sampling. Default 0.25.

    Raises:
        ValueError: If min_scale_factor is greater than max_scale_factor.
    """

    def __init__(self,
                 min_scale_factor=0.75,
                 max_scale_factor=1.25,
                 scale_step_size=0.25):
        if min_scale_factor > max_scale_factor:
            raise ValueError(
                'min_scale_factor must be less than max_scale_factor, '
                'but they are {} and {}.'.format(min_scale_factor,
                                                 max_scale_factor))
        self.min_scale_factor = min_scale_factor
        self.max_scale_factor = max_scale_factor
        self.scale_step_size = scale_step_size

    def __call__(self, im, im_info=None, label=None):
        """Apply the random step scaling and return the usual tuple."""
        lo = self.min_scale_factor
        hi = self.max_scale_factor
        step = self.scale_step_size

        if lo == hi:
            factor = lo
        elif step == 0:
            # No grid requested: sample continuously.
            factor = np.random.uniform(lo, hi)
        else:
            # Sample one scale from the evenly spaced candidate grid.
            num_steps = int((hi - lo) / step + 1)
            candidates = np.linspace(lo, hi, num_steps).tolist()
            np.random.shuffle(candidates)
            factor = candidates[0]

        im = cv2.resize(
            im, (0, 0),
            fx=factor,
            fy=factor,
            interpolation=cv2.INTER_LINEAR)
        if im.ndim < 3:
            im = np.expand_dims(im, axis=-1)
        if label is not None:
            label = cv2.resize(
                label, (0, 0),
                fx=factor,
                fy=factor,
                interpolation=cv2.INTER_NEAREST)
        return (im, im_info) if label is None else (im, im_info, label)
class Normalize(SegTransform):
    """Standardize the image.

    1. Subtract min_val from each pixel.
    2. Divide by (max_val - min_val).
    3. Subtract the mean and divide by the standard deviation.

    Args:
        mean (list): Per-channel dataset mean. Default [0.5, 0.5, 0.5].
        std (list): Per-channel dataset std. Default [0.5, 0.5, 0.5].
        min_val (list): Per-channel dataset minimum. Default [0, 0, 0].
        max_val (list): Per-channel dataset maximum.
            Default [255.0, 255.0, 255.0].

    Raises:
        ValueError: If mean/std/min_val/max_val are not lists, or std
            contains a zero.
    """

    def __init__(self,
                 mean=[0.5, 0.5, 0.5],
                 std=[0.5, 0.5, 0.5],
                 min_val=[0, 0, 0],
                 max_val=[255.0, 255.0, 255.0]):
        self.min_val = min_val
        self.max_val = max_val
        self.mean = mean
        self.std = std
        if not (isinstance(self.mean, list) and isinstance(self.std, list)):
            raise ValueError("{}: input type is invalid.".format(self))
        if not (isinstance(self.min_val, list) and isinstance(self.max_val,
                                                              list)):
            raise ValueError("{}: input type is invalid.".format(self))
        from functools import reduce
        # A zero anywhere in std makes the product zero -> division by zero.
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))

    def __call__(self, im, im_info=None, label=None):
        """Normalize the image, cast to float32 and return the usual tuple."""
        # Broadcast mean/std over the (h, w) axes.
        channel_mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        channel_std = np.array(self.std)[np.newaxis, np.newaxis, :]
        im = normalize(im, channel_mean, channel_std, self.min_val,
                       self.max_val)
        im = im.astype('float32')
        return (im, im_info) if label is None else (im, im_info, label)
class Padding(SegTransform):
    """Pad the image (and label, if any) on the right and bottom up to
    target_size. A dimension already at or above the target is left
    unchanged -- no cropping or error is raised.

    Args:
        target_size (int|list|tuple): Output size after padding; a
            list/tuple is interpreted as [w, h].
        im_padding_value (list): Per-channel pad value for the image.
            Default [127.5, 127.5, 127.5].
        label_padding_value (int): Pad value for the label. Default 255.

    Raises:
        TypeError: target_size is not int/list/tuple.
        ValueError: target_size is a list/tuple whose length is not 2.
    """

    def __init__(self,
                 target_size,
                 im_padding_value=[127.5, 127.5, 127.5],
                 label_padding_value=255):
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise ValueError(
                    'when target is list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, im_info=None, label=None):
        """
        Args:
            im (np.ndarray): Image data, shape (h, w, c).
            im_info (list): Shape records; a ('padding', (h, w)) record is
                appended here.
            label (np.ndarray): Annotation image, or None.

        Returns:
            tuple: (im, im_info) when label is None, else (im, im_info, label).
        """
        if im_info is None:
            # Bug fix: must be a list -- OrderedDict has no append(), and the
            # records appended below are (tag, shape) tuples.
            im_info = list()
        im_info.append(('padding', im.shape[:2]))

        im_height, im_width = im.shape[0], im.shape[1]
        if isinstance(self.target_size, int):
            target_height = self.target_size
            target_width = self.target_size
        else:
            # target_size is given as [w, h].
            target_height = self.target_size[1]
            target_width = self.target_size[0]

        # Negative padding (image already larger) is clamped to zero.
        pad_height = max(target_height - im_height, 0)
        pad_width = max(target_width - im_width, 0)
        if (pad_height > 0 or pad_width > 0):
            im_channel = im.shape[2]
            orig_im = im.copy()
            im = np.zeros((im_height + pad_height, im_width + pad_width,
                           im_channel)).astype(orig_im.dtype)
            for i in range(im_channel):
                # Each channel gets its own constant pad value.
                im[:, :, i] = np.pad(
                    orig_im[:, :, i],
                    pad_width=((0, pad_height), (0, pad_width)),
                    mode='constant',
                    constant_values=(self.im_padding_value[i],
                                     self.im_padding_value[i]))
            if label is not None:
                label = np.pad(label,
                               pad_width=((0, pad_height), (0, pad_width)),
                               mode='constant',
                               constant_values=(self.label_padding_value,
                                                self.label_padding_value))
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class RandomPaddingCrop(SegTransform):
    """Randomly crop the image and label, padding first when the requested
    crop is larger than the input.

    Args:
        crop_size (int|list|tuple): Crop size, int or [w, h]. Default 512.
        im_padding_value (list): Per-channel pad value for the image.
            Default [127.5, 127.5, 127.5].
        label_padding_value (int): Pad value for the label. Default 255.

    Raises:
        TypeError: crop_size is not int/list/tuple.
        ValueError: crop_size is a list/tuple whose length is not 2.
    """

    def __init__(self,
                 crop_size=512,
                 im_padding_value=[127.5, 127.5, 127.5],
                 label_padding_value=255):
        if isinstance(crop_size, list) or isinstance(crop_size, tuple):
            if len(crop_size) != 2:
                raise ValueError(
                    'when crop_size is list or tuple, it should include 2 elements, but it is {}'
                    .format(crop_size))
        elif not isinstance(crop_size, int):
            raise TypeError(
                "Type of crop_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(crop_size)))
        self.crop_size = crop_size
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, im_info=None, label=None):
        """Pad (if needed) then crop, returning the usual tuple."""
        if isinstance(self.crop_size, int):
            crop_w = self.crop_size
            crop_h = self.crop_size
        else:
            crop_w = self.crop_size[0]
            crop_h = self.crop_size[1]
        height = im.shape[0]
        width = im.shape[1]

        # Fast path: nothing to do when sizes already match exactly.
        if height == crop_h and width == crop_w:
            return (im, im_info) if label is None else (im, im_info, label)

        # Pad bottom/right when the crop exceeds the current size.
        pad_h = max(crop_h - height, 0)
        pad_w = max(crop_w - width, 0)
        if pad_h > 0 or pad_w > 0:
            channels = im.shape[2]
            source = im.copy()
            im = np.zeros(
                (height + pad_h, width + pad_w, channels)).astype(source.dtype)
            for c in range(channels):
                # Per-channel constant pad value.
                im[:, :, c] = np.pad(
                    source[:, :, c],
                    pad_width=((0, pad_h), (0, pad_w)),
                    mode='constant',
                    constant_values=(self.im_padding_value[c],
                                     self.im_padding_value[c]))
            if label is not None:
                label = np.pad(label,
                               pad_width=((0, pad_h), (0, pad_w)),
                               mode='constant',
                               constant_values=(self.label_padding_value,
                                                self.label_padding_value))
            height = im.shape[0]
            width = im.shape[1]

        if crop_h > 0 and crop_w > 0:
            # Draw the crop origin uniformly. The (row, col) sampling order
            # matches the original implementation (RNG reproducibility).
            h_off = np.random.randint(height - crop_h + 1)
            w_off = np.random.randint(width - crop_w + 1)
            im = im[h_off:h_off + crop_h, w_off:w_off + crop_w, :]
            if label is not None:
                label = label[h_off:h_off + crop_h, w_off:w_off + crop_w]
        return (im, im_info) if label is None else (im, im_info, label)
class RandomBlur(SegTransform):
    """Apply Gaussian blur to the image with probability ``prob``.

    Args:
        prob (float): Blur probability. Default 0.1.
    """

    def __init__(self, prob=0.1):
        self.prob = prob

    def __call__(self, im, im_info=None, label=None):
        """Possibly blur the image; the label is never modified."""
        # Turn the probability into an inverse sampling period, so the
        # blur fires roughly once every `period` calls.
        if self.prob <= 0:
            period = 0
        elif self.prob >= 1:
            period = 1
        else:
            period = int(1.0 / self.prob)

        if period > 0 and np.random.randint(0, period) == 0:
            # Gaussian kernel size must be odd; cap it at 9.
            kernel = np.random.randint(3, 10)
            if kernel % 2 != 1:
                kernel += 1
            if kernel > 9:
                kernel = 9
            im = cv2.GaussianBlur(im, (kernel, kernel), 0, 0)

        return (im, im_info) if label is None else (im, im_info, label)
class RandomRotate(SegTransform):
    """Randomly rotate the image (training-time augmentation).

    A rotation angle is drawn from [-rotate_range, rotate_range]; the label
    image, if present, is rotated in sync, and both are padded to the
    rotated bounding box.

    Args:
        rotate_range (float): Maximum rotation angle in degrees. Default 15.
        im_padding_value (list): Per-channel pad value for the image.
            Default [127.5, 127.5, 127.5].
        label_padding_value (int): Pad value for the label. Default 255.
    """

    def __init__(self,
                 rotate_range=15,
                 im_padding_value=[127.5, 127.5, 127.5],
                 label_padding_value=255):
        self.rotate_range = rotate_range
        self.im_padding_value = im_padding_value
        self.label_padding_value = label_padding_value

    def __call__(self, im, im_info=None, label=None):
        """
        Args:
            im (np.ndarray): Image data, shape (h, w, c); channels are
                rotated in groups of 3 (supports stacked RGB groups).
            im_info (list): Shape records; unchanged by this op.
            label (np.ndarray): Annotation image, or None.

        Returns:
            tuple: (im, im_info) when label is None, else (im, im_info, label).
        """
        if self.rotate_range > 0:
            h, w, c = im.shape
            do_rotation = np.random.uniform(-self.rotate_range,
                                            self.rotate_range)
            pc = (w // 2, h // 2)
            r = cv2.getRotationMatrix2D(pc, do_rotation, 1.0)
            cos = np.abs(r[0, 0])
            sin = np.abs(r[0, 1])
            # Size of the axis-aligned bounding box of the rotated image.
            nw = int((h * sin) + (w * cos))
            nh = int((h * cos) + (w * sin))
            (cx, cy) = pc
            # Shift the transform so the result is centered in the new canvas.
            r[0, 2] += (nw / 2) - cx
            r[1, 2] += (nh / 2) - cy
            dsize = (nw, nh)
            rot_ims = list()
            for i in range(0, c, 3):
                ori_im = im[:, :, i:i + 3]
                rot_im = cv2.warpAffine(
                    ori_im,
                    r,
                    dsize=dsize,
                    flags=cv2.INTER_LINEAR,
                    borderMode=cv2.BORDER_CONSTANT,
                    borderValue=self.im_padding_value[i:i + 3])
                rot_ims.append(rot_im)
            im = np.concatenate(rot_ims, axis=-1)
            if label is not None:
                # Bug fix: the label was previously rotated unconditionally,
                # which failed whenever label was None.
                label = cv2.warpAffine(
                    label,
                    r,
                    dsize=dsize,
                    flags=cv2.INTER_NEAREST,
                    borderMode=cv2.BORDER_CONSTANT,
                    borderValue=self.label_padding_value)
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class RandomScaleAspect(SegTransform):
    """Crop a region of random area ratio and aspect ratio, then resize it
    back to the original size. The label, if present, is processed in sync.

    Args:
        min_scale (float): Minimum cropped-area ratio in [0, 1]; 0 returns
            the input unchanged. Default 0.5.
        aspect_ratio (float): Lower bound of the aspect-ratio range (the
            upper bound is its reciprocal); 0 returns the input unchanged.
            Default 0.33.
    """

    def __init__(self, min_scale=0.5, aspect_ratio=0.33):
        self.min_scale = min_scale
        self.aspect_ratio = aspect_ratio

    def __call__(self, im, im_info=None, label=None):
        """
        Args:
            im (np.ndarray): Image data, shape (h, w, c).
            im_info (list): Shape records; unchanged by this op.
            label (np.ndarray): Annotation image, or None.

        Returns:
            tuple: (im, im_info) when label is None, else (im, im_info, label).
        """
        if self.min_scale != 0 and self.aspect_ratio != 0:
            img_height = im.shape[0]
            img_width = im.shape[1]
            # Up to 10 attempts to draw a crop that fits inside the image.
            for i in range(0, 10):
                area = img_height * img_width
                target_area = area * np.random.uniform(self.min_scale, 1.0)
                aspectRatio = np.random.uniform(self.aspect_ratio,
                                                1.0 / self.aspect_ratio)
                dw = int(np.sqrt(target_area * 1.0 * aspectRatio))
                dh = int(np.sqrt(target_area * 1.0 / aspectRatio))
                if (np.random.randint(10) < 5):
                    # Randomly transpose the crop rectangle.
                    dw, dh = dh, dw
                if (dh < img_height and dw < img_width):
                    h1 = np.random.randint(0, img_height - dh)
                    w1 = np.random.randint(0, img_width - dw)
                    im = im[h1:(h1 + dh), w1:(w1 + dw), :]
                    im = cv2.resize(
                        im, (img_width, img_height),
                        interpolation=cv2.INTER_LINEAR)
                    if im.ndim < 3:
                        im = np.expand_dims(im, axis=-1)
                    if label is not None:
                        # Bug fix: the label was previously sliced/resized
                        # unconditionally, which failed when label was None.
                        label = label[h1:(h1 + dh), w1:(w1 + dw)]
                        label = cv2.resize(
                            label, (img_width, img_height),
                            interpolation=cv2.INTER_NEAREST)
                    break
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class RandomDistort(SegTransform):
    """Randomly distort the image's pixel content.

    1. Shuffle the order of the four distortion operations.
    2. Apply each operation, in that order, with its configured probability.

    Args:
        brightness_range (float): Brightness factor range. Default 0.5.
        brightness_prob (float): Probability of adjusting brightness. Default 0.5.
        contrast_range (float): Contrast factor range. Default 0.5.
        contrast_prob (float): Probability of adjusting contrast. Default 0.5.
        saturation_range (float): Saturation factor range. Default 0.5.
        saturation_prob (float): Probability of adjusting saturation. Default 0.5.
        hue_range (int): Hue factor range. Default 18.
        hue_prob (float): Probability of adjusting hue. Default 0.5.
    """

    def __init__(self,
                 brightness_range=0.5,
                 brightness_prob=0.5,
                 contrast_range=0.5,
                 contrast_prob=0.5,
                 saturation_range=0.5,
                 saturation_prob=0.5,
                 hue_range=18,
                 hue_prob=0.5):
        self.brightness_range = brightness_range
        self.brightness_prob = brightness_prob
        self.contrast_range = contrast_range
        self.contrast_prob = contrast_prob
        self.saturation_range = saturation_range
        self.saturation_prob = saturation_prob
        self.hue_range = hue_range
        self.hue_prob = hue_prob

    def __call__(self, im, im_info=None, label=None):
        """
        Args:
            im (np.ndarray): Image data, shape (h, w, c).
            im_info (list): Shape records; unchanged by this op.
            label (np.ndarray): Annotation image, or None (never modified).

        Returns:
            tuple: (im, im_info) when label is None, else (im, im_info, label).
        """
        # Symmetric factor ranges centered at 1 (hue is an additive range).
        brightness_lower = 1 - self.brightness_range
        brightness_upper = 1 + self.brightness_range
        contrast_lower = 1 - self.contrast_range
        contrast_upper = 1 + self.contrast_range
        saturation_lower = 1 - self.saturation_range
        saturation_upper = 1 + self.saturation_range
        hue_lower = -self.hue_range
        hue_upper = self.hue_range
        # brightness/contrast/saturation/hue are module-level helper
        # functions defined elsewhere in this file; they are dispatched
        # below by function __name__.
        ops = [brightness, contrast, saturation, hue]
        random.shuffle(ops)
        params_dict = {
            'brightness': {
                'brightness_lower': brightness_lower,
                'brightness_upper': brightness_upper
            },
            'contrast': {
                'contrast_lower': contrast_lower,
                'contrast_upper': contrast_upper
            },
            'saturation': {
                'saturation_lower': saturation_lower,
                'saturation_upper': saturation_upper
            },
            'hue': {
                'hue_lower': hue_lower,
                'hue_upper': hue_upper
            }
        }
        prob_dict = {
            'brightness': self.brightness_prob,
            'contrast': self.contrast_prob,
            'saturation': self.saturation_prob,
            'hue': self.hue_prob
        }
        dis_ims = list()
        h, w, c = im.shape
        # Process channels in groups of 3 (supports stacked RGB groups).
        for i in range(0, c, 3):
            ori_im = im[:, :, i:i + 3]
            for id in range(4):
                params = params_dict[ops[id].__name__]
                prob = prob_dict[ops[id].__name__]
                # Each helper takes the image via the 'im' keyword.
                params['im'] = ori_im
                if np.random.uniform(0, 1) < prob:
                    ori_im = ops[id](**params)
            dis_ims.append(ori_im)
        im = np.concatenate(dis_ims, axis=-1)
        im = im.astype('float32')
        if label is None:
            return (im, im_info)
        else:
            return (im, im_info, label)
class Clip(SegTransform):
    """Clamp per-channel pixel values into [min_val, max_val], in place.

    Args:
        min_val (list): Per-channel lower bound; smaller values are raised
            to it. Default [0, 0, 0].
        max_val (list): Per-channel upper bound; larger values are lowered
            to it. Default [255.0, 255.0, 255.0].

    Raises:
        ValueError: If min_val or max_val is not a list.
    """

    def __init__(self, min_val=[0, 0, 0], max_val=[255.0, 255.0, 255.0]):
        self.min_val = min_val
        self.max_val = max_val
        if not (isinstance(self.min_val, list) and isinstance(self.max_val,
                                                              list)):
            raise ValueError("{}: input type is invalid.".format(self))

    def __call__(self, im, im_info=None, label=None):
        """Clip each channel of the image in place; label is untouched."""
        for channel in range(im.shape[2]):
            np.clip(
                im[:, :, channel],
                self.min_val[channel],
                self.max_val[channel],
                out=im[:, :, channel])
        return (im, im_info) if label is None else (im, im_info, label)
class ArrangeSegmenter(SegTransform):
    """Arrange the fields needed for training / evaluation / prediction.

    Args:
        mode (str): One of ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: If mode is not one of the values above.
    """

    def __init__(self, mode):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
            )
        self.mode = mode

    def __call__(self, im, im_info, label=None):
        """Permute the image to CHW order and select fields per mode.

        Args:
            im (np.ndarray): Image data.
            im_info (list): Shape records collected by earlier transforms.
            label (np.ndarray): Annotation image; required for
                'train'/'eval' modes.

        Returns:
            tuple: 'train' -> (im, label); 'eval' -> (im, im_info, label);
                'test' -> (im, im_info); 'quant' -> (im,). The label gains
                a leading channel axis for 'train'/'eval'.
        """
        im = permute(im, False)
        if self.mode == 'train':
            return (im, label[np.newaxis, :, :])
        elif self.mode == 'eval':
            return (im, im_info, label[np.newaxis, :, :])
        elif self.mode == 'test':
            return (im, im_info)
        else:
            return (im, )
class ComposedSegTransforms(Compose):
    """Standard image pipeline for segmentation models (UNet/DeepLabv3p).

    Training ('train' mode):
        1. Optionally flip horizontally with probability 0.5 (skipped when
           random_horizontal_flip is False).
        2. Randomly resize the long side into min_max_size (skipped when
           min_max_size is None).
        3. Randomly crop a train_crop_size patch, padding if needed.
        4. Normalize.
    Evaluation / prediction:
        1. Resize the long side to
           (min(min_max_size) + max(min_max_size)) // 2 (skipped when
           min_max_size is None).
        2. Normalize.

    Args:
        mode (str): Pipeline stage, 'train', 'eval' or 'test'.
        min_max_size (list): Resize bounds, see above. Default [400, 600].
        train_crop_size (list): Random crop size for training; only used
            when mode is 'train'. Default [512, 512].
        mean (list): Normalization mean. Default [0.5, 0.5, 0.5].
        std (list): Normalization std. Default [0.5, 0.5, 0.5].
        random_horizontal_flip (bool): Random horizontal flip in training.
    """

    def __init__(self,
                 mode,
                 min_max_size=[400, 600],
                 train_crop_size=[512, 512],
                 mean=[0.5, 0.5, 0.5],
                 std=[0.5, 0.5, 0.5],
                 random_horizontal_flip=True):
        norm = Normalize(mean=mean, std=std)
        if mode == 'train':
            # Training pipeline (with augmentation).
            steps = [RandomPaddingCrop(crop_size=train_crop_size), norm]
            if min_max_size is not None:
                steps.insert(
                    0,
                    ResizeRangeScaling(
                        min_value=min(min_max_size),
                        max_value=max(min_max_size)))
            if random_horizontal_flip:
                steps.insert(0, RandomHorizontalFlip())
        else:
            # Evaluation / prediction pipeline.
            if min_max_size is None:
                steps = [norm]
            else:
                long_size = (min(min_max_size) + max(min_max_size)) // 2
                steps = [ResizeByLong(long_size=long_size), norm]
        super(ComposedSegTransforms, self).__init__(steps)
|
zh
| 0.789983
|
# coding: utf8 # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 分割transform基类 根据数据预处理/增强算子对输入数据进行操作。 所有操作的输入图像流形状均是[H, W, C],其中H为图像高,W为图像宽,C为图像通道数。 Args: transforms (list): 数据预处理/增强算子。 Raises: TypeError: transforms不是list对象 ValueError: transforms元素个数小于1。 # 检查transforms里面的操作,目前支持PaddleX定义的或者是imgaug操作 Args: im (str/np.ndarray): 图像路径/图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (str/np.ndarray): 标注图像路径/标注图像np.ndarray数据。 Returns: tuple: 根据网络所需字段所组成的tuple;字段由transforms中的最后一个数据预处理操作决定。 以一定的概率对图像进行水平翻转。当存在标注图像时,则同步进行翻转。 Args: prob (float): 随机水平翻转的概率。默认值为0.5。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 以一定的概率对图像进行垂直翻转。当存在标注图像时,则同步进行翻转。 Args: prob (float): 随机垂直翻转的概率。默认值为0.1。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, 
im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 调整图像大小(resize),当存在标注图像时,则同步进行处理。 - 当目标大小(target_size)类型为int时,根据插值方式, 将图像resize为[target_size, target_size]。 - 当目标大小(target_size)类型为list或tuple时,根据插值方式, 将图像resize为target_size, target_size的输入应为[w, h]或(w, h)。 Args: target_size (int|list|tuple): 目标大小。 interp (str): resize的插值方式,与opencv的插值方式对应, 可选的值为['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4'],默认为"LINEAR"。 Raises: TypeError: target_size不是int/list/tuple。 ValueError: target_size为list/tuple时元素个数不等于2。 AssertionError: interp的取值不在['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4']之内。 # The interpolation mode Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 其中,im_info跟新字段为: -shape_before_resize (tuple): 保存resize之前图像的形状(h, w)。 Raises: ZeroDivisionError: im的短边为0。 TypeError: im不是np.ndarray数据。 ValueError: im不是3维nd.ndarray。 对图像长边resize到固定值,短边按比例进行缩放。当存在标注图像时,则同步进行处理。 Args: long_size (int): resize后图像的长边大小。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 其中,im_info新增字段为: -shape_before_resize (tuple): 保存resize之前图像的形状(h, w)。 根据图像的短边调整图像大小(resize)。 1. 获取图像的长边和短边长度。 2. 根据短边与short_size的比例,计算长边的目标长度, 此时高、宽的resize比例为short_size/原图短边长度。 3. 
如果max_size>0,调整resize比例: 如果长边的目标长度>max_size,则高、宽的resize比例为max_size/原图长边长度。 4. 根据调整大小的比例对图像进行resize。 Args: target_size (int): 短边目标长度。默认为800。 max_size (int): 长边目标长度的最大限制。默认为1333。 Raises: TypeError: 形参数据类型不满足需求。 Args: im (numnp.ndarraypy): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 其中,im_info更新字段为: -shape_before_resize (tuple): 保存resize之前图像的形状(h, w)。 Raises: TypeError: 形参数据类型不满足需求。 ValueError: 数据长度不匹配。 对图像长边随机resize到指定范围内,短边按比例进行缩放。当存在标注图像时,则同步进行处理。 Args: min_value (int): 图像长边resize后的最小值。默认值400。 max_value (int): 图像长边resize后的最大值。默认值600。 Raises: ValueError: min_value大于max_value Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 对图像按照某一个比例resize,这个比例以scale_step_size为步长 在[min_scale_factor, max_scale_factor]随机变动。当存在标注图像时,则同步进行处理。 Args: min_scale_factor(float), resize最小尺度。默认值0.75。 max_scale_factor (float), resize最大尺度。默认值1.25。 scale_step_size (float), resize尺度范围间隔。默认值0.25。 Raises: ValueError: min_scale_factor大于max_scale_factor Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 
存储与图像相关信息的字典和标注图像np.ndarray数据。 对图像进行标准化。 1.像素值减去min_val 2.像素值除以(max_val-min_val) 3.对图像进行减均值除以标准差操作。 Args: mean (list): 图像数据集的均值。默认值[0.5, 0.5, 0.5]。 std (list): 图像数据集的标准差。默认值[0.5, 0.5, 0.5]。 min_val (list): 图像数据集的最小值。默认值[0, 0, 0]。 max_val (list): 图像数据集的最大值。默认值[255.0, 255.0, 255.0]。 Raises: ValueError: mean或std不是list对象。std包含0。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 对图像或标注图像进行padding,padding方向为右和下。 根据提供的值对图像或标注图像进行padding操作。 Args: target_size (int|list|tuple): padding后图像的大小。 im_padding_value (list): 图像padding的值。默认为[127.5, 127.5, 127.5]。 label_padding_value (int): 标注图像padding的值。默认值为255。 Raises: TypeError: target_size不是int|list|tuple。 ValueError: target_size为list|tuple时元素个数不等于2。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 其中,im_info新增字段为: -shape_before_padding (tuple): 保存padding之前图像的形状(h, w)。 Raises: ValueError: 输入图像im或label的形状大于目标值 对图像和标注图进行随机裁剪,当所需要的裁剪尺寸大于原图时,则进行padding操作。 Args: crop_size (int|list|tuple): 裁剪图像大小。默认为512。 im_padding_value (list): 图像padding的值。默认为[127.5, 127.5, 127.5]。 label_padding_value (int): 标注图像padding的值。默认值为255。 Raises: TypeError: crop_size不是int/list/tuple。 ValueError: target_size为list/tuple时元素个数不等于2。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 
图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 以一定的概率对图像进行高斯模糊。 Args: prob (float): 图像模糊概率。默认为0.1。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 对图像进行随机旋转, 模型训练时的数据增强操作。 在旋转区间[-rotate_range, rotate_range]内,对图像进行随机旋转,当存在标注图像时,同步进行, 并对旋转后的图像和标注图像进行相应的padding。 Args: rotate_range (float): 最大旋转角度。默认为15度。 im_padding_value (list): 图像padding的值。默认为[127.5, 127.5, 127.5]。 label_padding_value (int): 标注图像padding的值。默认为255。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 裁剪并resize回原始尺寸的图像和标注图像。 按照一定的面积比和宽高比对图像进行裁剪,并reszie回原始图像的图像,当存在标注图时,同步进行。 Args: min_scale (float):裁取图像占原始图像的面积比,取值[0,1],为0时则返回原图。默认为0.5。 aspect_ratio (float): 裁取图像的宽高比范围,非负值,为0时返回原图。默认为0.33。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 
对图像进行随机失真。 1. 对变换的操作顺序进行随机化操作。 2. 按照1中的顺序以一定的概率对图像进行随机像素内容变换。 Args: brightness_range (float): 明亮度因子的范围。默认为0.5。 brightness_prob (float): 随机调整明亮度的概率。默认为0.5。 contrast_range (float): 对比度因子的范围。默认为0.5。 contrast_prob (float): 随机调整对比度的概率。默认为0.5。 saturation_range (float): 饱和度因子的范围。默认为0.5。 saturation_prob (float): 随机调整饱和度的概率。默认为0.5。 hue_range (int): 色调因子的范围。默认为18。 hue_prob (float): 随机调整色调的概率。默认为0.5。 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当label为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当label不为空时,返回的tuple为(im, im_info, label),分别对应图像np.ndarray数据、 存储与图像相关信息的字典和标注图像np.ndarray数据。 对图像上超出一定范围的数据进行截断。 Args: min_val (list): 裁剪的下限,小于min_val的数值均设为min_val. 默认值0. max_val (list): 裁剪的上限,大于max_val的数值均设为max_val. 默认值255.0. 获取训练/验证/预测所需的信息。 Args: mode (str): 指定数据用于何种用途,取值范围为['train', 'eval', 'test', 'quant']。 Raises: ValueError: mode的取值不在['train', 'eval', 'test', 'quant']之内 Args: im (np.ndarray): 图像np.ndarray数据。 im_info (list): 存储图像reisze或padding前的shape信息,如 [('resize', [200, 300]), ('padding', [400, 600])]表示 图像在过resize前shape为(200, 300), 过padding前shape为 (400, 600) label (np.ndarray): 标注图像np.ndarray数据。 Returns: tuple: 当mode为'train'或'eval'时,返回的tuple为(im, label),分别对应图像np.ndarray数据、存储与图像相关信息的字典; 当mode为'test'时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;当mode为 'quant'时,返回的tuple为(im,),为图像np.ndarray数据。 语义分割模型(UNet/DeepLabv3p)的图像处理流程,具体如下 训练阶段: 1. 随机对图像以0.5的概率水平翻转,若random_horizontal_flip为False,则跳过此步骤 2. 按不同的比例随机Resize原图, 处理方式参考[paddlex.seg.transforms.ResizeRangeScaling](#resizerangescaling)。若min_max_size为None,则跳过此步骤 3. 从原图中随机crop出大小为train_crop_size大小的子图,如若crop出来的图小于train_crop_size,则会将图padding到对应大小 4. 图像归一化 预测阶段: 1. 将图像的最长边resize至(min_max_size[0] + min_max_size[1])//2, 短边按比例resize。若min_max_size为None,则跳过此步骤 2. 
图像归一化 Args: mode(str): Transforms所处的阶段,包括`train', 'eval'或'test' min_max_size(list): 用于对图像进行resize,具体作用参见上述步骤。 train_crop_size(list): 训练过程中随机裁剪原图用于训练,具体作用参见上述步骤。此参数仅在mode为`train`时生效。 mean(list): 图像均值, 默认为[0.485, 0.456, 0.406]。 std(list): 图像方差,默认为[0.229, 0.224, 0.225]。 random_horizontal_flip(bool): 数据增强,是否随机水平翻转图像,此参数仅在mode为`train`时生效。 # 训练时的transforms,包含数据增强 # 验证/预测时的transforms
| 2.107641
| 2
|
iseq_prof/__init__.py
|
EBI-Metagenomics/iseq-prof
| 0
|
6628857
|
<filename>iseq_prof/__init__.py
"""Public package interface for ``iseq_prof``: re-exports the main API."""
from importlib import import_module as _import_module
from . import fasta, filedb, pfam, plot, sam, solut_space
from ._accession import Accession
from ._cache import turn_cache_on
from ._cli import cli
from ._confusion import ConfusionMatrix
from ._genbank import GenBank, genbank_catalog
from ._organism_result import OrganismResult
from ._profiling import Profiling
from ._testit import test
# Resolve the package version from the generated ``_version`` module; fall
# back to the placeholder when running from a raw checkout without it.
try:
    __version__ = getattr(_import_module("iseq_prof._version"), "version", "x.x.x")
except ModuleNotFoundError:
    __version__ = "x.x.x"
# NOTE(review): "genbank" is listed below, but no name ``genbank`` is bound
# above (only ``GenBank`` and ``genbank_catalog`` are imported), so
# ``from iseq_prof import *`` would fail on it -- confirm whether a
# ``genbank`` submodule exists or whether the entry should be removed.
__all__ = [
    "Accession",
    "ConfusionMatrix",
    "GenBank",
    "OrganismResult",
    "Profiling",
    "__version__",
    "cli",
    "fasta",
    "filedb",
    "genbank",
    "genbank_catalog",
    "pfam",
    "plot",
    "sam",
    "solut_space",
    "test",
    "turn_cache_on",
]
|
<filename>iseq_prof/__init__.py
from importlib import import_module as _import_module
from . import fasta, filedb, pfam, plot, sam, solut_space
from ._accession import Accession
from ._cache import turn_cache_on
from ._cli import cli
from ._confusion import ConfusionMatrix
from ._genbank import GenBank, genbank_catalog
from ._organism_result import OrganismResult
from ._profiling import Profiling
from ._testit import test
try:
__version__ = getattr(_import_module("iseq_prof._version"), "version", "x.x.x")
except ModuleNotFoundError:
__version__ = "x.x.x"
__all__ = [
"Accession",
"ConfusionMatrix",
"GenBank",
"OrganismResult",
"Profiling",
"__version__",
"cli",
"fasta",
"filedb",
"genbank",
"genbank_catalog",
"pfam",
"plot",
"sam",
"solut_space",
"test",
"turn_cache_on",
]
|
none
| 1
| 1.579161
| 2
|
|
snowfakery/template_funcs.py
|
adamlincoln/Snowfakery
| 0
|
6628858
|
import random
from datetime import date, datetime
import dateutil.parser
from ast import literal_eval
from typing import Callable, Any, Optional, Union, List, Tuple
from faker import Faker
from .data_gen_exceptions import DataGenError
import snowfakery.data_generator_runtime # noqa
RuntimeContext = "snowfakery.data_generator_runtime.RuntimeContext"
FieldDefinition = "snowfakery.data_generator_runtime_dom.FieldDefinition"
ObjectRow = "snowfakery.data_generator_runtime.ObjectRow"
fake = Faker()
# It might make more sense to use context vars for context handling when
# Python 3.6 is out of the support matrix.
def lazy(func: Any) -> Callable:
    """Mark *func* as lazy: it expects its arguments unparsed/unrendered."""
    setattr(func, "lazy", True)
    return func
def random_number(context: RuntimeContext, min: int, max: int) -> int:
    """Pick a random integer in the inclusive range [min, max], like randint."""
    # randint(a, b) is defined as randrange(a, b + 1); same RNG consumption.
    return random.randrange(min, max + 1)
def parse_weight_str(context: RuntimeContext, weight_value) -> int:
    """Render a weight expression such as ``60%`` and return it as the int 60.

    Used for ``probability:`` entries under ``choice:`` items, e.g.::

        - choice:
            probability: 60%
            pick: Closed Won
    """
    rendered = weight_value.render(context)
    if isinstance(rendered, str):
        rendered = rendered.rstrip("%")
    return int(rendered)
def weighted_choice(choices: List[Tuple[int, object]]):
    """Pick one value from (weight, value) pairs, weighted by the weights."""
    weights, options = zip(*choices)
    return random.choices(options, list(weights), k=1)[0]
@lazy
def random_choice(context: RuntimeContext, *choices):
    """Template helper for random choices.

    Accepts either a plain list of values::

        random_choice:
            - a
            - b
            - <<c>>

    or a list of weighted ``choice:`` items with ``pick`` / ``probability``
    keys. Probabilities are treated as weights and need not sum to 100.
    Pick-items can have arbitrary internal complexity and are evaluated
    lazily.
    """
    if not choices:
        raise ValueError("No choices supplied!")
    uses_choice_items = getattr(choices[0], "function_name", None) == "choice"
    if uses_choice_items:
        # Render each choice: item into a (weight, value) pair first.
        rendered = [item.render(context) for item in choices]
        result = weighted_choice(rendered)
    else:
        result = random.choice(choices)
    if hasattr(result, "render"):
        result = result.render(context)
    return result
@lazy
def choice_wrapper(
    context: RuntimeContext,
    pick,
    probability: FieldDefinition = None,
    when: FieldDefinition = None,
):
    """Support the ``choice:`` sub-items used in `random_choice` or `if`."""
    weight = parse_weight_str(context, probability) if probability else None
    # A weighted choice reports its weight; a conditional one its when-clause.
    return weight or when, pick
def parse_date(d: Union[str, datetime, date]) -> Optional[Union[datetime, date]]:
    """Return *d* unchanged if already a date/datetime; otherwise try to
    parse it as a date string, returning None when parsing fails."""
    if not isinstance(d, (datetime, date)):
        try:
            d = dateutil.parser.parse(d)
        except dateutil.parser.ParserError:
            return None
    return d
def date_(
    context: "RuntimeContext",
    *,
    year: Union[str, int],
    month: Union[str, int],
    day: Union[str, int],
):
    """A YAML-embeddable function to construct a date from strings or integers.

    YAML scalars frequently arrive as strings, and ``datetime.date`` itself
    rejects string arguments, so each component is coerced with ``int()``
    first (the original raised TypeError for string inputs despite the
    documented contract).
    """
    return date(int(year), int(month), int(day))
def datetime_(
    context: "RuntimeContext",
    *,
    year: Union[str, int],
    month: Union[str, int],
    day: Union[str, int],
    hour=0,
    minute=0,
    second=0,
    microsecond=0,
):
    """A YAML-embeddable function to construct a datetime from strings or integers.

    All components are coerced with ``int()`` because YAML scalars often
    arrive as strings and ``datetime.datetime`` rejects string arguments
    (the original raised TypeError despite the documented contract).
    """
    return datetime(
        int(year),
        int(month),
        int(day),
        int(hour),
        int(minute),
        int(second),
        int(microsecond),
    )
def date_between(context: RuntimeContext, start_date, end_date):
    """A YAML-embeddable function to pick a date between two ranges."""
    # Normalize string inputs to real dates; fall back to the raw value so
    # Faker can report a meaningful error for unparseable input.
    start = parse_date(start_date) or start_date
    end = parse_date(end_date) or end_date
    try:
        return fake.date_between(start, end)
    except ValueError as exc:
        if "empty range" not in str(exc):
            raise
        # swallow empty range errors per Python conventions (returns None)
def reference(context: "RuntimeContext", x: Union["ObjectRow", str]):
    """YAML-embeddable function to reference another object.

    Accepts either an already-created row (anything with an ``id``
    attribute) or the name of an object to look up in the current context's
    field variables.

    Raises DataGenError for targets of the wrong type.
    """
    if hasattr(x, "id"):  # reference to an object with an id
        return x
    if isinstance(x, str):  # name of an object
        obj = context.field_vars()[x]
        # Use a default so a missing ``id`` raises DataGenError rather than
        # leaking an AttributeError out of getattr.
        if not getattr(obj, "id", None):
            raise DataGenError(f"Reference to incorrect object type {obj}", None, None)
        return obj
    raise DataGenError(
        f"Can't get reference to object of type {type(x)}: {x}", None, None
    )
def render_boolean(context: RuntimeContext, value: FieldDefinition) -> bool:
    """Render *value* and coerce to bool; string results are parsed as
    Python literals first (so the string "False" becomes False)."""
    rendered = value.render(context)
    if isinstance(rendered, str):
        rendered = literal_eval(rendered)
    return bool(rendered)
@lazy
def if_(context: RuntimeContext, *choices: FieldDefinition):
    """Template helper for conditional choices.
    Supports structures like this:
    if:
        - choice:
            when: <<something>>
            pick: A
        - choice:
            when: <<something>>
            pick: B
    Pick-items can have arbitrary internal complexity.
    Pick-items are lazily evaluated.
    """
    if not choices:
        raise ValueError("No choices supplied!")
    # Each rendered choice is a (when, pick) pair produced by choice_wrapper.
    choices = [choice.render(context) for choice in choices]
    # Every branch except the final (default) one must carry a when-clause.
    for when, choice in choices[:-1]:
        if when is None:
            raise SyntaxError(
                "Every choice except the last one should have a when-clause"
            )
    # Lazily scan for the first branch whose when-clause renders truthy.
    true_choices = (
        choice for when, choice in choices if when and render_boolean(context, when)
    )
    rc = next(true_choices, choices[-1][-1])  # default to last choice
    if hasattr(rc, "render"):
        # The picked item may itself be lazy; render it in this context.
        rc = rc.render(context)
    return rc
# Registry mapping YAML template-function names to their implementations;
# consumed by the data generator when evaluating templates.
template_funcs = {
    "int": lambda context, number: int(number),
    "choice": choice_wrapper,
    "random_number": random_number,
    "random_choice": random_choice,
    "date_between": date_between,
    "reference": reference,
    "date": date_,
    "datetime": datetime_,
    "if": if_,
}
|
import random
from datetime import date, datetime
import dateutil.parser
from ast import literal_eval
from typing import Callable, Any, Optional, Union, List, Tuple
from faker import Faker
from .data_gen_exceptions import DataGenError
import snowfakery.data_generator_runtime # noqa
RuntimeContext = "snowfakery.data_generator_runtime.RuntimeContext"
FieldDefinition = "snowfakery.data_generator_runtime_dom.FieldDefinition"
ObjectRow = "snowfakery.data_generator_runtime.ObjectRow"
fake = Faker()
# It might make more sense to use context vars for context handling when
# Python 3.6 is out of the support matrix.
def lazy(func: Any) -> Callable:
"""A lazy function is one that expects its arguments to be unparsed"""
func.lazy = True
return func
def random_number(context: RuntimeContext, min: int, max: int) -> int:
"""Pick a random number between min and max like Python's randint."""
return random.randint(min, max)
def parse_weight_str(context: RuntimeContext, weight_value) -> int:
"""For constructs like:
- choice:
probability: 60%
pick: Closed Won
Render and convert the 60% to just 60.
"""
weight_str = weight_value.render(context)
if isinstance(weight_str, str):
weight_str = weight_str.rstrip("%")
return int(weight_str)
def weighted_choice(choices: List[Tuple[int, object]]):
"""Selects from choices based on their weights"""
weights = [weight for weight, value in choices]
options = [value for weight, value in choices]
return random.choices(options, weights, k=1)[0]
@lazy
def random_choice(context: RuntimeContext, *choices):
"""Template helper for random choices.
Supports structures like this:
random_choice:
- a
- b
- <<c>>
Or like this:
random_choice:
- choice:
pick: A
probability: 50%
- choice:
pick: A
probability: 50%
Probabilities are really just weights and don't need to
add up to 100.
Pick-items can have arbitrary internal complexity.
Pick-items are lazily evaluated.
"""
if not choices:
raise ValueError("No choices supplied!")
if getattr(choices[0], "function_name", None) == "choice":
choices = [choice.render(context) for choice in choices]
rc = weighted_choice(choices)
else:
rc = random.choice(choices)
if hasattr(rc, "render"):
rc = rc.render(context)
return rc
@lazy
def choice_wrapper(
context: RuntimeContext,
pick,
probability: FieldDefinition = None,
when: FieldDefinition = None,
):
"""Supports the choice: sub-items used in `random_choice` or `if`"""
if probability:
probability = parse_weight_str(context, probability)
return probability or when, pick
def parse_date(d: Union[str, datetime, date]) -> Optional[Union[datetime, date]]:
if isinstance(d, (datetime, date)):
return d
try:
return dateutil.parser.parse(d)
except dateutil.parser.ParserError:
pass
def date_(
context: RuntimeContext,
*,
year: Union[str, int],
month: Union[str, int],
day: Union[str, int],
):
"""A YAML-embeddable function to construct a date from strings or integers"""
return date(year, month, day)
def datetime_(
context: RuntimeContext,
*,
year: Union[str, int],
month: Union[str, int],
day: Union[str, int],
hour=0,
minute=0,
second=0,
microsecond=0,
):
"""A YAML-embeddable function to construct a datetime from strings or integers"""
return datetime(year, month, day, hour, minute, second, microsecond)
def date_between(context: RuntimeContext, start_date, end_date):
"""A YAML-embeddable function to pick a date between two ranges"""
start_date = parse_date(start_date) or start_date
end_date = parse_date(end_date) or end_date
try:
return fake.date_between(start_date, end_date)
except ValueError as e:
if "empty range" not in str(e):
raise
# swallow empty range errors per Python conventions
def reference(context: RuntimeContext, x: Union[ObjectRow, str]):
"""YAML-embeddable function to Reference another object."""
if hasattr(x, "id"): # reference to an object with an id
target = x
elif isinstance(x, str): # name of an object
obj = context.field_vars()[x]
if not getattr(obj, "id"):
raise DataGenError(f"Reference to incorrect object type {obj}", None, None)
target = obj
else:
raise DataGenError(
f"Can't get reference to object of type {type(x)}: {x}", None, None
)
return target
def render_boolean(context: RuntimeContext, value: FieldDefinition) -> bool:
val = value.render(context)
if isinstance(val, str):
val = literal_eval(val)
return bool(val)
@lazy
def if_(context: RuntimeContext, *choices: FieldDefinition):
"""Template helper for conditional choices.
Supports structures like this:
if:
- choice:
when: <<something>>
pick: A
- choice:
when: <<something>>
pick: B
Pick-items can have arbitrary internal complexity.
Pick-items are lazily evaluated.
"""
if not choices:
raise ValueError("No choices supplied!")
choices = [choice.render(context) for choice in choices]
for when, choice in choices[:-1]:
if when is None:
raise SyntaxError(
"Every choice except the last one should have a when-clause"
)
true_choices = (
choice for when, choice in choices if when and render_boolean(context, when)
)
rc = next(true_choices, choices[-1][-1]) # default to last choice
if hasattr(rc, "render"):
rc = rc.render(context)
return rc
template_funcs = {
"int": lambda context, number: int(number),
"choice": choice_wrapper,
"random_number": random_number,
"random_choice": random_choice,
"date_between": date_between,
"reference": reference,
"date": date_,
"datetime": datetime_,
"if": if_,
}
|
en
| 0.835846
|
# noqa # It might make more sense to use context vars for context handling when # Python 3.6 is out of the support matrix. A lazy function is one that expects its arguments to be unparsed Pick a random number between min and max like Python's randint. For constructs like: - choice: probability: 60% pick: Closed Won Render and convert the 60% to just 60. Selects from choices based on their weights Template helper for random choices. Supports structures like this: random_choice: - a - b - <<c>> Or like this: random_choice: - choice: pick: A probability: 50% - choice: pick: A probability: 50% Probabilities are really just weights and don't need to add up to 100. Pick-items can have arbitrary internal complexity. Pick-items are lazily evaluated. Supports the choice: sub-items used in `random_choice` or `if` A YAML-embeddable function to construct a date from strings or integers A YAML-embeddable function to construct a datetime from strings or integers A YAML-embeddable function to pick a date between two ranges # swallow empty range errors per Python conventions YAML-embeddable function to Reference another object. # reference to an object with an id # name of an object Template helper for conditional choices. Supports structures like this: if: - choice: when: <<something>> pick: A - choice: when: <<something>> pick: B Pick-items can have arbitrary internal complexity. Pick-items are lazily evaluated. # default to last choice
| 2.707762
| 3
|
astropy/coordinates/earth.py
|
Apoorve73/astropy
| 0
|
6628859
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from warnings import warn
import collections
import socket
import json
import urllib.request
import urllib.error
import urllib.parse
import numpy as np
import erfa
from astropy import units as u
from astropy import constants as consts
from astropy.units.quantity import QuantityInfoBase
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle, Longitude, Latitude
from .representation import CartesianRepresentation, CartesianDifferential
from .errors import UnknownSiteException
from astropy.utils import data
__all__ = ['EarthLocation']
GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height'])
# Available ellipsoids (defined in erfam.h, with numbers exposed in erfa).
ELLIPSOIDS = ('WGS84', 'GRS80', 'WGS72')
OMEGA_EARTH = u.Quantity(7.292115855306589e-5, 1./u.s)
"""
Rotational velocity of Earth. In UT1 seconds, this would be 2 pi / (24 * 3600),
but we need the value in SI seconds.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992),
University Science Books.
"""
def _check_ellipsoid(ellipsoid=None, default='WGS84'):
    """Validate an ellipsoid name, substituting *default* when None."""
    chosen = default if ellipsoid is None else ellipsoid
    if chosen not in ELLIPSOIDS:
        raise ValueError(f'Ellipsoid {chosen} not among known ones ({ELLIPSOIDS})')
    return chosen
def _get_json_result(url, err_str, use_google):
    """Fetch *url* and return the JSON result payload.

    Translates network/timeout failures and empty result sets into
    NameResolveError, formatting *err_str* with a short ``msg``.
    ``use_google`` selects between the Google geocoding response shape
    (dict with 'results'/'status') and OpenStreetMap's (a plain list).
    """
    # need to do this here to prevent a series of complicated circular imports
    from .name_resolve import NameResolveError
    try:
        # Retrieve JSON response from Google maps API
        resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
        resp_data = json.loads(resp.read().decode('utf8'))
    except urllib.error.URLError as e:
        # This catches a timeout error, see:
        # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
        if isinstance(e.reason, socket.timeout):
            raise NameResolveError(err_str.format(msg="connection timed out"))
        else:
            raise NameResolveError(err_str.format(msg=e.reason))
    except socket.timeout:
        # There are some cases where urllib2 does not catch socket.timeout
        # especially while receiving response data on an already previously
        # working request
        raise NameResolveError(err_str.format(msg="connection timed out"))
    if use_google:
        results = resp_data.get('results', [])
        if resp_data.get('status', None) != 'OK':
            raise NameResolveError(err_str.format(msg="unknown failure with "
                                                  "Google API"))
    else:  # OpenStreetMap returns a list
        results = resp_data
    if not results:
        raise NameResolveError(err_str.format(msg="no results returned"))
    return results
class EarthLocationInfo(QuantityInfoBase):
    """
    Container for meta information like name, description, format. This is
    required when the object is used as a mixin column within a table, but can
    be used as a general way to store meta information.
    """
    # Attributes serialized when an EarthLocation is stored in a table.
    _represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid')

    def _construct_from_dict(self, map):
        """Rebuild an EarthLocation from its serialized attribute dict."""
        # Need to pop ellipsoid off and update post-instantiation. This is
        # on the to-fix list in #4261.
        ellipsoid = map.pop('ellipsoid')
        out = self._parent_cls(**map)
        out.ellipsoid = ellipsoid
        return out

    def new_like(self, cols, length, metadata_conflicts='warn', name=None):
        """
        Return a new EarthLocation instance which is consistent with the
        input ``cols`` and has ``length`` rows.
        This is intended for creating an empty column object whose elements can
        be set in-place for table operations like join or vstack.
        Parameters
        ----------
        cols : list
            List of input columns
        length : int
            Length of the output column object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output column name
        Returns
        -------
        col : EarthLocation (or subclass)
            Empty instance of this class consistent with ``cols``
        """
        # Very similar to QuantityInfo.new_like, but the creation of the
        # map is different enough that this needs its own routine.
        # Get merged info attributes shape, dtype, format, description.
        attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
                                           ('meta', 'format', 'description'))
        # The above raises an error if the dtypes do not match, but returns
        # just the string representation, which is not useful, so remove.
        attrs.pop('dtype')
        # Make empty EarthLocation using the dtype and unit of the last column.
        # Use zeros so we do not get problems for possible conversion to
        # geodetic coordinates.
        shape = (length,) + attrs.pop('shape')
        data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype),
                          unit=cols[0].unit, copy=False)
        # Get arguments needed to reconstruct class
        map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key))
               for key in self._represent_as_dict_attrs}
        out = self._construct_from_dict(map)
        # Set remaining info attributes
        for attr, value in attrs.items():
            setattr(out.info, attr, value)
        return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
When using the geodetic forms, Longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = 'WGS84'
_location_dtype = np.dtype({'names': ['x', 'y', 'z'],
'formats': [np.float64]*3})
_array_dtype = np.dtype((np.float64, (3,)))
info = EarthLocationInfo()
    def __new__(cls, *args, **kwargs):
        """Create an EarthLocation, trying geocentric then geodetic parsing."""
        # TODO: needs copy argument and better dealing with inputs.
        # Shortcut: copying an existing EarthLocation keeps its data as-is.
        if (len(args) == 1 and len(kwargs) == 0 and
                isinstance(args[0], EarthLocation)):
            return args[0].copy()
        # Try geocentric (x, y, z) first; on a unit/type mismatch fall back
        # to geodetic (lon, lat, height).  Report both failures if neither
        # interpretation works.
        try:
            self = cls.from_geocentric(*args, **kwargs)
        except (u.UnitsError, TypeError) as exc_geocentric:
            try:
                self = cls.from_geodetic(*args, **kwargs)
            except Exception as exc_geodetic:
                raise TypeError('Coordinates could not be parsed as either '
                                'geocentric or geodetic, with respective '
                                'exceptions "{}" and "{}"'
                                .format(exc_geocentric, exc_geodetic))
        return self
    @classmethod
    def from_geocentric(cls, x, y, z, unit=None):
        """
        Location on Earth, initialized from geocentric coordinates.
        Parameters
        ----------
        x, y, z : `~astropy.units.Quantity` or array_like
            Cartesian coordinates.  If not quantities, ``unit`` should be given.
        unit : `~astropy.units.UnitBase` object or None
            Physical unit of the coordinate values.  If ``x``, ``y``, and/or
            ``z`` are quantities, they will be converted to this unit.
        Raises
        ------
        astropy.units.UnitsError
            If the units on ``x``, ``y``, and ``z`` do not match or an invalid
            unit is given.
        ValueError
            If the shapes of ``x``, ``y``, and ``z`` do not match.
        TypeError
            If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
        """
        # Infer the unit from ``x`` when not given explicitly; otherwise
        # validate that the explicit unit is a length.
        if unit is None:
            try:
                unit = x.unit
            except AttributeError:
                raise TypeError("Geocentric coordinates should be Quantities "
                                "unless an explicit unit is given.")
        else:
            unit = u.Unit(unit)
        if unit.physical_type != 'length':
            raise u.UnitsError("Geocentric coordinates should be in "
                               "units of length.")
        try:
            x = u.Quantity(x, unit, copy=False)
            y = u.Quantity(y, unit, copy=False)
            z = u.Quantity(z, unit, copy=False)
        except u.UnitsError:
            raise u.UnitsError("Geocentric coordinate units should all be "
                               "consistent.")
        # Broadcast to a common shape, then pack into the structured
        # (x, y, z) dtype that backs EarthLocation.
        x, y, z = np.broadcast_arrays(x, y, z)
        struc = np.empty(x.shape, cls._location_dtype)
        struc['x'], struc['y'], struc['z'] = x, y, z
        return super().__new__(cls, struc, unit, copy=False)
    @classmethod
    def from_geodetic(cls, lon, lat, height=0., ellipsoid=None):
        """
        Location on Earth, initialized from geodetic coordinates.
        Parameters
        ----------
        lon : `~astropy.coordinates.Longitude` or float
            Earth East longitude.  Can be anything that initialises an
            `~astropy.coordinates.Angle` object (if float, in degrees).
        lat : `~astropy.coordinates.Latitude` or float
            Earth latitude.  Can be anything that initialises an
            `~astropy.coordinates.Latitude` object (if float, in degrees).
        height : `~astropy.units.Quantity` or float, optional
            Height above reference ellipsoid (if float, in meters; default: 0).
        ellipsoid : str, optional
            Name of the reference ellipsoid to use (default: 'WGS84').
            Available ellipsoids are:  'WGS84', 'GRS80', 'WGS72'.
        Raises
        ------
        astropy.units.UnitsError
            If the units on ``lon`` and ``lat`` are inconsistent with angular
            ones, or that on ``height`` with a length.
        ValueError
            If ``lon``, ``lat``, and ``height`` do not have the same shape, or
            if ``ellipsoid`` is not recognized as among the ones implemented.
        Notes
        -----
        For the conversion to geocentric coordinates, the ERFA routine
        ``gd2gc`` is used.  See https://github.com/liberfa/erfa
        """
        ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
        # We use Angle here since there is no need to wrap the longitude -
        # gd2gc will just take cos/sin anyway.  And wrapping might fail
        # on readonly input.
        lon = Angle(lon, u.degree, copy=False)
        lat = Latitude(lat, u.degree, copy=False)
        # don't convert to m by default, so we can use the height unit below.
        if not isinstance(height, u.Quantity):
            height = u.Quantity(height, u.m, copy=False)
        # get geocentric coordinates.  Have to give one-dimensional array.
        # gd2gc takes radians/meters and returns Cartesian xyz in meters.
        xyz = erfa.gd2gc(getattr(erfa, ellipsoid),
                         lon.to_value(u.radian),
                         lat.to_value(u.radian),
                         height.to_value(u.m))
        # Reinterpret the trailing xyz axis as the structured location dtype.
        self = xyz.ravel().view(cls._location_dtype,
                                cls).reshape(xyz.shape[:-1])
        self._unit = u.meter
        self._ellipsoid = ellipsoid
        # Present the result in the same unit the height was given in.
        return self.to(height.unit)
    @classmethod
    def of_site(cls, site_name):
        """
        Return an object of this class for a known observatory/site by name.
        This is intended as a quick convenience function to get basic site
        information, not a fully-featured exhaustive registry of observatories
        and all their properties.
        Additional information about the site is stored in the ``.info.meta``
        dictionary of sites obtained using this method (see the examples below).
        .. note::
            When this function is called, it will attempt to download site
            information from the astropy data server. If you would like a site
            to be added, issue a pull request to the
            `astropy-data repository <https://github.com/astropy/astropy-data>`_ .
            If a site cannot be found in the registry (i.e., an internet
            connection is not available), it will fall back on a built-in list,
            In the future, this bundled list might include a version-controlled
            list of canonical observatories extracted from the online version,
            but it currently only contains the Greenwich Royal Observatory as an
            example case.
        Parameters
        ----------
        site_name : str
            Name of the observatory (case-insensitive).
        Returns
        -------
        site : This class (a `~astropy.coordinates.EarthLocation` or subclass)
            The location of the observatory.
        Examples
        --------
        >>> from astropy.coordinates import EarthLocation
        >>> keck = EarthLocation.of_site('Keck Observatory')  # doctest: +REMOTE_DATA
        >>> keck.geodetic  # doctest: +REMOTE_DATA +FLOAT_CMP
        GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
        >>> keck.info  # doctest: +REMOTE_DATA
        name = W. M. Keck Observatory
        dtype = void192
        unit = m
        class = EarthLocation
        n_bad = 0
        >>> keck.info.meta  # doctest: +REMOTE_DATA
        {'source': 'IRAF Observatory Database', 'timezone': 'US/Aleutian'}
        See Also
        --------
        get_site_names : the list of sites that this function can access
        """  # noqa
        registry = cls._get_site_registry()
        try:
            el = registry[site_name]
        except UnknownSiteException as e:
            # Re-raise pointing the user at the discovery helper, keeping
            # the close-match suggestions from the registry lookup.
            raise UnknownSiteException(e.site, 'EarthLocation.get_site_names',
                                       close_names=e.close_names)
        if cls is el.__class__:
            return el
        else:
            # Registry entries are plain EarthLocation; rebuild as subclass.
            newel = cls.from_geodetic(*el.to_geodetic())
            newel.info.name = el.info.name
            return newel
    @classmethod
    def of_address(cls, address, get_height=False, google_api_key=None):
        """
        Return an object of this class for a given address by querying either
        the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
        API [2]_, which requires a specified API key.
        This is intended as a quick convenience function to get easy access to
        locations. If you need to specify a precise location, you should use the
        initializer directly and pass in a longitude, latitude, and elevation.
        In the background, this just issues a web query to either of
        the APIs noted above. This is not meant to be abused! Both
        OpenStreetMap and Google use IP-based query limiting and will ban your
        IP if you send more than a few thousand queries per hour [2]_.
        .. warning::
            If the query returns more than one location (e.g., searching on
            ``address='springfield'``), this function will use the **first**
            returned location.
        Parameters
        ----------
        address : str
            The address to get the location for. As per the Google maps API,
            this can be a fully specified street address (e.g., 123 Main St.,
            New York, NY) or a city name (e.g., Danbury, CT), or etc.
        get_height : bool, optional
            This only works when using the Google API! See the ``google_api_key``
            block below. Use the retrieved location to perform a second query to
            the Google maps elevation API to retrieve the height of the input
            address [3]_.
        google_api_key : str, optional
            A Google API key with the Geocoding API and (optionally) the
            elevation API enabled. See [4]_ for more information.
        Returns
        -------
        location : This class (a `~astropy.coordinates.EarthLocation` or subclass)
            The location of the input address.
        References
        ----------
        .. [1] https://nominatim.openstreetmap.org/
        .. [2] https://developers.google.com/maps/documentation/geocoding/start
        .. [3] https://developers.google.com/maps/documentation/elevation/start
        .. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
        """
        use_google = google_api_key is not None
        # Fail fast if invalid options are passed:
        if not use_google and get_height:
            raise ValueError(
                'Currently, `get_height` only works when using '
                'the Google geocoding API, which requires passing '
                'a Google API key with `google_api_key`. See: '
                'https://developers.google.com/maps/documentation/geocoding/get-api-key '
                'for information on obtaining an API key.')
        # Build the geocoding request for the selected backend.
        if use_google:  # Google
            pars = urllib.parse.urlencode({'address': address,
                                           'key': google_api_key})
            geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
        else:  # OpenStreetMap
            pars = urllib.parse.urlencode({'q': address,
                                           'format': 'json'})
            geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"
        # get longitude and latitude location
        err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
        geo_result = _get_json_result(geo_url, err_str=err_str,
                                      use_google=use_google)
        # The two backends return differently shaped payloads.
        if use_google:
            loc = geo_result[0]['geometry']['location']
            lat = loc['lat']
            lon = loc['lng']
        else:
            loc = geo_result[0]
            lat = float(loc['lat'])  # strings are returned by OpenStreetMap
            lon = float(loc['lon'])
        if get_height:
            # Second (Google-only) query: the elevation at this lat/lon.
            pars = {'locations': f'{lat:.8f},{lon:.8f}',
                    'key': google_api_key}
            pars = urllib.parse.urlencode(pars)
            ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"
            err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
            ele_result = _get_json_result(ele_url, err_str=err_str,
                                          use_google=use_google)
            height = ele_result[0]['elevation']*u.meter
        else:
            height = 0.
        return cls.from_geodetic(lon=lon*u.deg, lat=lat*u.deg, height=height)
    @classmethod
    def get_site_names(cls):
        """
        Get list of names of observatories for use with
        `~astropy.coordinates.EarthLocation.of_site`.
        .. note::
            When this function is called, it will first attempt to
            download site information from the astropy data server. If it
            cannot (i.e., an internet connection is not available), it will fall
            back on the list included with astropy (which is a limited and dated
            set of sites). If you think a site should be added, issue a pull
            request to the
            `astropy-data repository <https://github.com/astropy/astropy-data>`_ .
        Returns
        -------
        names : list of str
            List of valid observatory names
        See Also
        --------
        of_site : Gets the actual location object for one of the sites names
            this returns.
        """
        # The registry is cached on the class after the first call.
        return cls._get_site_registry().names
    @classmethod
    def _get_site_registry(cls, force_download=False, force_builtin=False):
        """
        Gets the site registry. The first time this either downloads or loads
        from the data file packaged with astropy. Subsequent calls will use the
        cached version unless explicitly overridden.
        Parameters
        ----------
        force_download : bool or str
            If not False, force replacement of the cached registry with a
            downloaded version. If a str, that will be used as the URL to
            download from (if just True, the default URL will be used).
        force_builtin : bool
            If True, load from the data file bundled with astropy and set the
            cache to that.
        Returns
        -------
        reg : astropy.coordinates.sites.SiteRegistry
        """
        # need to do this here at the bottom to avoid circular dependencies
        from .sites import get_builtin_sites, get_downloaded_sites
        if force_builtin and force_download:
            raise ValueError('Cannot have both force_builtin and force_download True')
        if force_builtin:
            reg = cls._site_registry = get_builtin_sites()
        else:
            # Reuse the cached registry unless a (re)download was requested.
            reg = getattr(cls, '_site_registry', None)
            if force_download or not reg:
                try:
                    if isinstance(force_download, str):
                        reg = get_downloaded_sites(force_download)
                    else:
                        reg = get_downloaded_sites()
                except OSError:
                    # Only swallow download failures when falling back is
                    # acceptable, i.e. the download was not explicitly forced.
                    if force_download:
                        raise
                    msg = ('Could not access the online site list. Falling '
                           'back on the built-in version, which is rather '
                           'limited. If you want to retry the download, do '
                           '{0}._get_site_registry(force_download=True)')
                    warn(AstropyUserWarning(msg.format(cls.__name__)))
                    reg = get_builtin_sites()
                cls._site_registry = reg
        return reg
    @property
    def ellipsoid(self):
        """The default ellipsoid used to convert to geodetic coordinates."""
        return self._ellipsoid

    @ellipsoid.setter
    def ellipsoid(self, ellipsoid):
        # Validate against the known ELLIPSOIDS before storing.
        self._ellipsoid = _check_ellipsoid(ellipsoid)

    @property
    def geodetic(self):
        """Convert to geodetic coordinates for the default ellipsoid."""
        return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
    """Convert to geodetic coordinates.

    Parameters
    ----------
    ellipsoid : str, optional
        Reference ellipsoid to use. Default is the one the coordinates
        were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'

    Returns
    -------
    (lon, lat, height) : tuple
        The tuple contains instances of `~astropy.coordinates.Longitude`,
        `~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`

    Raises
    ------
    ValueError
        if ``ellipsoid`` is not recognized as among the ones implemented.

    Notes
    -----
    For the conversion to geodetic coordinates, the ERFA routine
    ``gc2gd`` is used. See https://github.com/liberfa/erfa
    """
    ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
    # View the structured (x, y, z) array as a plain (..., 3) float array
    # in metres, the layout the ERFA routine expects.
    self_array = self.to(u.meter).view(self._array_dtype, np.ndarray)
    lon, lat, height = erfa.gc2gd(getattr(erfa, ellipsoid), self_array)
    # erfa returns radians and metres; convert the height back to the unit
    # this location was stored in, and wrap longitude to (-180, 180] deg.
    return GeodeticLocation(
        Longitude(lon * u.radian, u.degree,
                  wrap_angle=180.*u.degree, copy=False),
        Latitude(lat * u.radian, u.degree, copy=False),
        u.Quantity(height * u.meter, self.unit, copy=False))
@property
def lon(self):
    """Longitude of the location, for the default ellipsoid."""
    return self.geodetic[0]

@property
def lat(self):
    """Latitude of the location, for the default ellipsoid."""
    return self.geodetic[1]

@property
def height(self):
    """Height of the location, for the default ellipsoid."""
    return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
    """Geocentric (X, Y, Z) coordinates as a tuple of quantities."""
    return self.to_geocentric()

def to_geocentric(self):
    """Return the geocentric (X, Y, Z) coordinates as quantities."""
    x_q, y_q, z_q = self.x, self.y, self.z
    return x_q, y_q, z_q
def get_itrs(self, obstime=None):
    """
    Generates an `~astropy.coordinates.ITRS` object with the location of
    this object at the requested ``obstime``.

    Parameters
    ----------
    obstime : `~astropy.time.Time` or None
        The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
        if None, the default ``obstime`` will be used.

    Returns
    -------
    itrs : `~astropy.coordinates.ITRS`
        The new object in the ITRS frame
    """
    # Broadcast for a single position at multiple times, but don't attempt
    # to be more general here.
    if obstime and self.size == 1 and obstime.shape:
        self = np.broadcast_to(self, obstime.shape, subok=True)
    # do this here to prevent a series of complicated circular imports
    from .builtin_frames import ITRS
    return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)

itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object
                                 for the location of this object at the
                                 default ``obstime``.""")
def get_gcrs(self, obstime):
    """GCRS position with velocity at ``obstime`` as a GCRS coordinate.

    Parameters
    ----------
    obstime : `~astropy.time.Time`
        The ``obstime`` to calculate the GCRS position/velocity at.

    Returns
    -------
    gcrs : `~astropy.coordinates.GCRS` instance
        With velocity included.
    """
    # do this here to prevent a series of complicated circular imports
    from .builtin_frames import GCRS
    itrs = self.get_itrs(obstime)
    # Assume the observatory itself is fixed on the ground.
    # We do a direct assignment rather than an update to avoid validation
    # and creation of a new object.
    zeros = np.broadcast_to(0. * u.km / u.s, (3,) + itrs.shape, subok=True)
    itrs.data.differentials['s'] = CartesianDifferential(zeros)
    return itrs.transform_to(GCRS(obstime=obstime))
def get_gcrs_posvel(self, obstime):
    """
    Calculate the GCRS position and velocity of this object at the
    requested ``obstime``.

    Parameters
    ----------
    obstime : `~astropy.time.Time`
        The ``obstime`` to calculate the GCRS position/velocity at.

    Returns
    -------
    obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
        The GCRS position of the object
    obsgeovel : `~astropy.coordinates.CartesianRepresentation`
        The GCRS velocity of the object
    """
    # The GCRS coordinate carries velocity as an 's' differential; split
    # it into separate position and velocity representations.
    gcrs_repr = self.get_gcrs(obstime).data
    return (gcrs_repr.without_differentials(),
            gcrs_repr.differentials['s'].to_cartesian())
def gravitational_redshift(self, obstime,
                           bodies=('sun', 'jupiter', 'moon'),
                           masses=None):
    """Return the gravitational redshift at this EarthLocation.

    Calculates the gravitational redshift, of order 3 m/s, due to the
    requested solar system bodies.

    Parameters
    ----------
    obstime : `~astropy.time.Time`
        The ``obstime`` to calculate the redshift at.
    bodies : iterable, optional
        The bodies (other than the Earth) to include in the redshift
        calculation. List elements should be any body name
        `get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
        the Moon. Earth is always included (because the class represents
        an *Earth* location).
    masses : dict of str to Quantity, optional
        The mass or gravitational parameters (G * mass) to assume for the
        bodies requested in ``bodies``. Can be used to override the
        defaults for the Sun, Jupiter, the Moon, and the Earth, or to
        pass in masses for other bodies.

    Returns
    -------
    redshift : `~astropy.units.Quantity`
        Gravitational redshift in velocity units at given obstime.
    """
    # needs to be here to avoid circular imports
    from .solar_system import get_body_barycentric

    # NOTE: defaults are an immutable tuple / None sentinel rather than a
    # list / dict, to avoid the shared-mutable-default-argument pitfall;
    # callers may still pass lists and dicts as before.
    bodies = list(bodies)
    # Ensure earth is included and last in the list.
    if 'earth' in bodies:
        bodies.remove('earth')
    bodies.append('earth')
    _masses = {'sun': consts.GM_sun,
               'jupiter': consts.GM_jup,
               'moon': consts.G * 7.34767309e22*u.kg,
               'earth': consts.GM_earth}
    if masses is not None:
        _masses.update(masses)
    GMs = []
    # Equivalency so plain masses can be converted to G * M parameters.
    M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
    for body in bodies:
        try:
            GMs.append(_masses[body].to(u.m**3/u.s**2, [M_GM_equivalency]))
        except KeyError:
            raise KeyError(f'body "{body}" does not have a mass!')
        except u.UnitsError as exc:
            exc.args += ('"masses" argument values must be masses or '
                         'gravitational parameters',)
            raise
    positions = [get_body_barycentric(name, obstime) for name in bodies]
    # Calculate distances to objects other than earth.
    distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
    # Append distance from Earth's center for Earth's contribution.
    distances.append(CartesianRepresentation(self.geocentric).norm())
    # Get redshifts due to all objects.
    redshifts = [-GM / consts.c / distance for (GM, distance) in
                 zip(GMs, distances)]
    # Reverse order of summing, to go from small to big, and to get
    # "earth" first, which gives m/s as unit.
    return sum(redshifts[::-1])
# The x/y/z properties index the fields of the structured dtype via
# __getitem__, which returns them as plain quantities.
@property
def x(self):
    """The X component of the geocentric coordinates."""
    return self['x']

@property
def y(self):
    """The Y component of the geocentric coordinates."""
    return self['y']

@property
def z(self):
    """The Z component of the geocentric coordinates."""
    return self['z']
def __getitem__(self, item):
    """Index like a Quantity, keeping this class only for results that
    retain the structured (x, y, z) dtype."""
    result = super().__getitem__(item)
    # Selecting a single field (e.g. loc['x']) yields a plain float dtype;
    # those results are returned as ordinary quantities instead.
    view_cls = self.__class__ if result.dtype is self.dtype else u.Quantity
    return result.view(view_cls)
def __array_finalize__(self, obj):
    """Propagate the ellipsoid when numpy derives a new array from *obj*."""
    super().__array_finalize__(obj)
    try:
        self._ellipsoid = obj._ellipsoid
    except AttributeError:
        # Source object carries no ellipsoid; keep the class default.
        pass

def __len__(self):
    """Length of the leading axis; 0-d instances cannot be indexed."""
    if self.shape != ():
        return super().__len__()
    raise IndexError('0-d EarthLocation arrays cannot be indexed')
def _to_value(self, unit, equivalencies=[]):
    """Helper method for to and to_value."""
    # Conversion to another unit in both ``to`` and ``to_value`` goes
    # via this routine. To make the regular quantity routines work, we
    # temporarily turn the structured array into a regular one.
    array_view = self.view(self._array_dtype, np.ndarray)
    # An empty list is the sentinel meaning "use this instance's own
    # equivalencies" (deliberate; do not replace with a None check).
    if equivalencies == []:
        equivalencies = self._equivalencies
    new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
    return new_array.view(self.dtype).reshape(self.shape)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from warnings import warn
import collections
import socket
import json
import urllib.request
import urllib.error
import urllib.parse
import numpy as np
import erfa
from astropy import units as u
from astropy import constants as consts
from astropy.units.quantity import QuantityInfoBase
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle, Longitude, Latitude
from .representation import CartesianRepresentation, CartesianDifferential
from .errors import UnknownSiteException
from astropy.utils import data
__all__ = ['EarthLocation']
GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height'])
# Available ellipsoids (defined in erfam.h, with numbers exposed in erfa).
ELLIPSOIDS = ('WGS84', 'GRS80', 'WGS72')
OMEGA_EARTH = u.Quantity(7.292115855306589e-5, 1./u.s)
"""
Rotational velocity of Earth. In UT1 seconds, this would be 2 pi / (24 * 3600),
but we need the value in SI seconds.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992),
University Science Books.
"""
def _check_ellipsoid(ellipsoid=None, default='WGS84'):
    """Validate an ellipsoid name, substituting *default* when None."""
    selected = default if ellipsoid is None else ellipsoid
    if selected not in ELLIPSOIDS:
        raise ValueError(f'Ellipsoid {selected} not among known ones ({ELLIPSOIDS})')
    return selected
def _get_json_result(url, err_str, use_google):
    """Fetch and minimally validate a JSON geocoding response.

    Parameters
    ----------
    url : str
        Fully-formed query URL for the Google or OpenStreetMap API.
    err_str : str
        Error-message template containing a ``{msg}`` placeholder.
    use_google : bool
        If True, interpret the response using the Google API schema
        (dict with 'status'/'results'); otherwise assume the
        OpenStreetMap Nominatim schema (a bare list).

    Returns
    -------
    results : list
        Non-empty list of result records from the service.
    """
    # need to do this here to prevent a series of complicated circular imports
    from .name_resolve import NameResolveError
    try:
        # Retrieve JSON response from Google maps API
        resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
        resp_data = json.loads(resp.read().decode('utf8'))
    except urllib.error.URLError as e:
        # This catches a timeout error, see:
        # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
        if isinstance(e.reason, socket.timeout):
            raise NameResolveError(err_str.format(msg="connection timed out"))
        else:
            raise NameResolveError(err_str.format(msg=e.reason))
    except socket.timeout:
        # There are some cases where urllib2 does not catch socket.timeout
        # especially while receiving response data on an already previously
        # working request
        raise NameResolveError(err_str.format(msg="connection timed out"))
    if use_google:
        results = resp_data.get('results', [])
        if resp_data.get('status', None) != 'OK':
            raise NameResolveError(err_str.format(msg="unknown failure with "
                                                      "Google API"))
    else:  # OpenStreetMap returns a list
        results = resp_data
    if not results:
        raise NameResolveError(err_str.format(msg="no results returned"))
    return results
class EarthLocationInfo(QuantityInfoBase):
    """
    Container for meta information like name, description, format. This is
    required when the object is used as a mixin column within a table, but can
    be used as a general way to store meta information.
    """
    # Attributes serialized when an EarthLocation is stored in a table.
    _represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid')

    def _construct_from_dict(self, map):
        # Rebuild the parent class from serialized attributes.
        # Need to pop ellipsoid off and update post-instantiation. This is
        # on the to-fix list in #4261.
        ellipsoid = map.pop('ellipsoid')
        out = self._parent_cls(**map)
        out.ellipsoid = ellipsoid
        return out

    def new_like(self, cols, length, metadata_conflicts='warn', name=None):
        """
        Return a new EarthLocation instance which is consistent with the
        input ``cols`` and has ``length`` rows.

        This is intended for creating an empty column object whose elements can
        be set in-place for table operations like join or vstack.

        Parameters
        ----------
        cols : list
            List of input columns
        length : int
            Length of the output column object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output column name

        Returns
        -------
        col : EarthLocation (or subclass)
            Empty instance of this class consistent with ``cols``
        """
        # Very similar to QuantityInfo.new_like, but the creation of the
        # map is different enough that this needs its own routine.
        # Get merged info attributes shape, dtype, format, description.
        attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
                                           ('meta', 'format', 'description'))
        # The above raises an error if the dtypes do not match, but returns
        # just the string representation, which is not useful, so remove.
        attrs.pop('dtype')
        # Make empty EarthLocation using the dtype and unit of the last column.
        # Use zeros so we do not get problems for possible conversion to
        # geodetic coordinates.
        shape = (length,) + attrs.pop('shape')
        data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype),
                          unit=cols[0].unit, copy=False)
        # Get arguments needed to reconstruct class
        map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key))
               for key in self._represent_as_dict_attrs}
        out = self._construct_from_dict(map)
        # Set remaining info attributes
        for attr, value in attrs.items():
            setattr(out.info, attr, value)
        return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
When using the geodetic forms, Longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = 'WGS84'
_location_dtype = np.dtype({'names': ['x', 'y', 'z'],
'formats': [np.float64]*3})
_array_dtype = np.dtype((np.float64, (3,)))
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
    """Create an EarthLocation, trying geocentric parsing then geodetic."""
    # TODO: needs copy argument and better dealing with inputs.
    if (len(args) == 1 and len(kwargs) == 0 and
            isinstance(args[0], EarthLocation)):
        # Copy-constructor shortcut for a single EarthLocation argument.
        return args[0].copy()
    # First try geocentric (x, y, z); if the arguments do not fit that
    # signature, fall back to geodetic (lon, lat, height).
    try:
        self = cls.from_geocentric(*args, **kwargs)
    except (u.UnitsError, TypeError) as exc_geocentric:
        try:
            self = cls.from_geodetic(*args, **kwargs)
        except Exception as exc_geodetic:
            raise TypeError('Coordinates could not be parsed as either '
                            'geocentric or geodetic, with respective '
                            'exceptions "{}" and "{}"'
                            .format(exc_geocentric, exc_geodetic))
    return self
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
    """
    Location on Earth, initialized from geocentric coordinates.

    Parameters
    ----------
    x, y, z : `~astropy.units.Quantity` or array_like
        Cartesian coordinates. If not quantities, ``unit`` should be given.
    unit : `~astropy.units.UnitBase` object or None
        Physical unit of the coordinate values. If ``x``, ``y``, and/or
        ``z`` are quantities, they will be converted to this unit.

    Raises
    ------
    astropy.units.UnitsError
        If the units on ``x``, ``y``, and ``z`` do not match or an invalid
        unit is given.
    ValueError
        If the shapes of ``x``, ``y``, and ``z`` do not match.
    TypeError
        If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
    """
    if unit is None:
        # Infer the unit from x; y and z are converted to it below.
        try:
            unit = x.unit
        except AttributeError:
            raise TypeError("Geocentric coordinates should be Quantities "
                            "unless an explicit unit is given.")
    else:
        unit = u.Unit(unit)
    if unit.physical_type != 'length':
        raise u.UnitsError("Geocentric coordinates should be in "
                           "units of length.")
    try:
        x = u.Quantity(x, unit, copy=False)
        y = u.Quantity(y, unit, copy=False)
        z = u.Quantity(z, unit, copy=False)
    except u.UnitsError:
        raise u.UnitsError("Geocentric coordinate units should all be "
                           "consistent.")
    x, y, z = np.broadcast_arrays(x, y, z)
    # Pack the three components into the structured (x, y, z) dtype.
    struc = np.empty(x.shape, cls._location_dtype)
    struc['x'], struc['y'], struc['z'] = x, y, z
    return super().__new__(cls, struc, unit, copy=False)
@classmethod
def from_geodetic(cls, lon, lat, height=0., ellipsoid=None):
    """
    Location on Earth, initialized from geodetic coordinates.

    Parameters
    ----------
    lon : `~astropy.coordinates.Longitude` or float
        Earth East longitude. Can be anything that initialises an
        `~astropy.coordinates.Angle` object (if float, in degrees).
    lat : `~astropy.coordinates.Latitude` or float
        Earth latitude. Can be anything that initialises an
        `~astropy.coordinates.Latitude` object (if float, in degrees).
    height : `~astropy.units.Quantity` or float, optional
        Height above reference ellipsoid (if float, in meters; default: 0).
    ellipsoid : str, optional
        Name of the reference ellipsoid to use (default: 'WGS84').
        Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.

    Raises
    ------
    astropy.units.UnitsError
        If the units on ``lon`` and ``lat`` are inconsistent with angular
        ones, or that on ``height`` with a length.
    ValueError
        If ``lon``, ``lat``, and ``height`` do not have the same shape, or
        if ``ellipsoid`` is not recognized as among the ones implemented.

    Notes
    -----
    For the conversion to geocentric coordinates, the ERFA routine
    ``gd2gc`` is used. See https://github.com/liberfa/erfa
    """
    ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
    # We use Angle here since there is no need to wrap the longitude -
    # gd2gc will just take cos/sin anyway. And wrapping might fail
    # on readonly input.
    lon = Angle(lon, u.degree, copy=False)
    lat = Latitude(lat, u.degree, copy=False)
    # don't convert to m by default, so we can use the height unit below.
    if not isinstance(height, u.Quantity):
        height = u.Quantity(height, u.m, copy=False)
    # get geocentric coordinates. Have to give one-dimensional array.
    xyz = erfa.gd2gc(getattr(erfa, ellipsoid),
                     lon.to_value(u.radian),
                     lat.to_value(u.radian),
                     height.to_value(u.m))
    # View the trailing length-3 axis as the structured (x, y, z) dtype.
    self = xyz.ravel().view(cls._location_dtype,
                            cls).reshape(xyz.shape[:-1])
    self._unit = u.meter
    self._ellipsoid = ellipsoid
    # Convert back to the unit the height was supplied in.
    return self.to(height.unit)
@classmethod
def of_site(cls, site_name):
    """
    Return an object of this class for a known observatory/site by name.

    This is intended as a quick convenience function to get basic site
    information, not a fully-featured exhaustive registry of observatories
    and all their properties.

    Additional information about the site is stored in the ``.info.meta``
    dictionary of sites obtained using this method (see the examples below).

    .. note::
        When this function is called, it will attempt to download site
        information from the astropy data server. If you would like a site
        to be added, issue a pull request to the
        `astropy-data repository <https://github.com/astropy/astropy-data>`_ .
        If a site cannot be found in the registry (i.e., an internet
        connection is not available), it will fall back on a built-in list.
        In the future, this bundled list might include a version-controlled
        list of canonical observatories extracted from the online version,
        but it currently only contains the Greenwich Royal Observatory as an
        example case.

    Parameters
    ----------
    site_name : str
        Name of the observatory (case-insensitive).

    Returns
    -------
    site : This class (a `~astropy.coordinates.EarthLocation` or subclass)
        The location of the observatory.

    Examples
    --------
    >>> from astropy.coordinates import EarthLocation
    >>> keck = EarthLocation.of_site('Keck Observatory')  # doctest: +REMOTE_DATA
    >>> keck.geodetic  # doctest: +REMOTE_DATA +FLOAT_CMP
    GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
    >>> keck.info  # doctest: +REMOTE_DATA
    name = W. M. Keck Observatory
    dtype = void192
    unit = m
    class = EarthLocation
    n_bad = 0
    >>> keck.info.meta  # doctest: +REMOTE_DATA
    {'source': 'IRAF Observatory Database', 'timezone': 'US/Aleutian'}

    See Also
    --------
    get_site_names : the list of sites that this function can access
    """  # noqa
    registry = cls._get_site_registry()
    try:
        el = registry[site_name]
    except UnknownSiteException as e:
        # Re-raise with a hint pointing users at the site-name listing.
        raise UnknownSiteException(e.site, 'EarthLocation.get_site_names',
                                   close_names=e.close_names)
    if cls is el.__class__:
        return el
    else:
        # Registry entries may be a different subclass; rebuild in cls.
        newel = cls.from_geodetic(*el.to_geodetic())
        newel.info.name = el.info.name
        return newel
@classmethod
def of_address(cls, address, get_height=False, google_api_key=None):
    """
    Return an object of this class for a given address by querying either
    the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
    API [2]_, which requires a specified API key.

    This is intended as a quick convenience function to get easy access to
    locations. If you need to specify a precise location, you should use the
    initializer directly and pass in a longitude, latitude, and elevation.

    In the background, this just issues a web query to either of
    the APIs noted above. This is not meant to be abused! Both
    OpenStreetMap and Google use IP-based query limiting and will ban your
    IP if you send more than a few thousand queries per hour [2]_.

    .. warning::
        If the query returns more than one location (e.g., searching on
        ``address='springfield'``), this function will use the **first**
        returned location.

    Parameters
    ----------
    address : str
        The address to get the location for. As per the Google maps API,
        this can be a fully specified street address (e.g., 123 Main St.,
        New York, NY) or a city name (e.g., Danbury, CT), or etc.
    get_height : bool, optional
        This only works when using the Google API! See the ``google_api_key``
        block below. Use the retrieved location to perform a second query to
        the Google maps elevation API to retrieve the height of the input
        address [3]_.
    google_api_key : str, optional
        A Google API key with the Geocoding API and (optionally) the
        elevation API enabled. See [4]_ for more information.

    Returns
    -------
    location : This class (a `~astropy.coordinates.EarthLocation` or subclass)
        The location of the input address.

    References
    ----------
    .. [1] https://nominatim.openstreetmap.org/
    .. [2] https://developers.google.com/maps/documentation/geocoding/start
    .. [3] https://developers.google.com/maps/documentation/elevation/start
    .. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
    """
    use_google = google_api_key is not None
    # Fail fast if invalid options are passed:
    if not use_google and get_height:
        raise ValueError(
            'Currently, `get_height` only works when using '
            'the Google geocoding API, which requires passing '
            'a Google API key with `google_api_key`. See: '
            'https://developers.google.com/maps/documentation/geocoding/get-api-key '
            'for information on obtaining an API key.')
    if use_google:  # Google
        pars = urllib.parse.urlencode({'address': address,
                                       'key': google_api_key})
        geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
    else:  # OpenStreetMap
        pars = urllib.parse.urlencode({'q': address,
                                       'format': 'json'})
        geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"
    # get longitude and latitude location
    err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
    geo_result = _get_json_result(geo_url, err_str=err_str,
                                  use_google=use_google)
    if use_google:
        loc = geo_result[0]['geometry']['location']
        lat = loc['lat']
        lon = loc['lng']
    else:
        loc = geo_result[0]
        lat = float(loc['lat'])  # strings are returned by OpenStreetMap
        lon = float(loc['lon'])
    if get_height:
        # Second query: Google elevation API at the resolved coordinates.
        pars = {'locations': f'{lat:.8f},{lon:.8f}',
                'key': google_api_key}
        pars = urllib.parse.urlencode(pars)
        ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"
        err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
        ele_result = _get_json_result(ele_url, err_str=err_str,
                                      use_google=use_google)
        height = ele_result[0]['elevation']*u.meter
    else:
        height = 0.
    return cls.from_geodetic(lon=lon*u.deg, lat=lat*u.deg, height=height)
@classmethod
def get_site_names(cls):
    """
    Get list of names of observatories for use with
    `~astropy.coordinates.EarthLocation.of_site`.

    .. note::
        When this function is called, it will first attempt to
        download site information from the astropy data server. If it
        cannot (i.e., an internet connection is not available), it will
        fall back on the list included with astropy (which is a limited
        and dated set of sites). If you think a site should be added,
        issue a pull request to the
        `astropy-data repository <https://github.com/astropy/astropy-data>`_ .

    Returns
    -------
    names : list of str
        List of valid observatory names

    See Also
    --------
    of_site : Gets the actual location object for one of the sites names
        this returns.
    """
    registry = cls._get_site_registry()
    return registry.names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
if force_builtin and force_download:
raise ValueError('Cannot have both force_builtin and force_download True')
if force_builtin:
reg = cls._site_registry = get_builtin_sites()
else:
reg = getattr(cls, '_site_registry', None)
if force_download or not reg:
try:
if isinstance(force_download, str):
reg = get_downloaded_sites(force_download)
else:
reg = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = ('Could not access the online site list. Falling '
'back on the built-in version, which is rather '
'limited. If you want to retry the download, do '
'{0}._get_site_registry(force_download=True)')
warn(AstropyUserWarning(msg.format(cls.__name__)))
reg = get_builtin_sites()
cls._site_registry = reg
return reg
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
(lon, lat, height) : tuple
The tuple contains instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`
Raises
------
ValueError
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
self_array = self.to(u.meter).view(self._array_dtype, np.ndarray)
lon, lat, height = erfa.gc2gd(getattr(erfa, ellipsoid), self_array)
return GeodeticLocation(
Longitude(lon * u.radian, u.degree,
wrap_angle=180.*u.degree, copy=False),
Latitude(lat * u.radian, u.degree, copy=False),
u.Quantity(height * u.meter, self.unit, copy=False))
@property
def lon(self):
    """Longitude of the location, for the default ellipsoid."""
    return self.geodetic[0]

@property
def lat(self):
    """Latitude of the location, for the default ellipsoid."""
    return self.geodetic[1]

@property
def height(self):
    """Height of the location, for the default ellipsoid."""
    return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities"""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.shape:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object with
for the location of this object at the
default ``obstime``.""")
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
--------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
itrs = self.get_itrs(obstime)
# Assume the observatory itself is fixed on the ground.
# We do a direct assignment rather than an update to avoid validation
# and creation of a new object.
zeros = np.broadcast_to(0. * u.km / u.s, (3,) + itrs.shape, subok=True)
itrs.data.differentials['s'] = CartesianDifferential(zeros)
return itrs.transform_to(GCRS(obstime=obstime))
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
--------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
"""
# GCRS position
gcrs_data = self.get_gcrs(obstime).data
obsgeopos = gcrs_data.without_differentials()
obsgeovel = gcrs_data.differentials['s'].to_cartesian()
return obsgeopos, obsgeovel
def gravitational_redshift(self, obstime,
                           bodies=('sun', 'jupiter', 'moon'),
                           masses=None):
    """Return the gravitational redshift at this EarthLocation.

    Calculates the gravitational redshift, of order 3 m/s, due to the
    requested solar system bodies.

    Parameters
    ----------
    obstime : `~astropy.time.Time`
        The ``obstime`` to calculate the redshift at.
    bodies : iterable, optional
        The bodies (other than the Earth) to include in the redshift
        calculation. List elements should be any body name
        `get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
        the Moon. Earth is always included (because the class represents
        an *Earth* location).
    masses : dict of str to Quantity, optional
        The mass or gravitational parameters (G * mass) to assume for the
        bodies requested in ``bodies``. Can be used to override the
        defaults for the Sun, Jupiter, the Moon, and the Earth, or to
        pass in masses for other bodies.

    Returns
    -------
    redshift : `~astropy.units.Quantity`
        Gravitational redshift in velocity units at given obstime.
    """
    # needs to be here to avoid circular imports
    from .solar_system import get_body_barycentric

    # NOTE: defaults are an immutable tuple / None sentinel rather than a
    # list / dict, to avoid the shared-mutable-default-argument pitfall;
    # callers may still pass lists and dicts as before.
    bodies = list(bodies)
    # Ensure earth is included and last in the list.
    if 'earth' in bodies:
        bodies.remove('earth')
    bodies.append('earth')
    _masses = {'sun': consts.GM_sun,
               'jupiter': consts.GM_jup,
               'moon': consts.G * 7.34767309e22*u.kg,
               'earth': consts.GM_earth}
    if masses is not None:
        _masses.update(masses)
    GMs = []
    # Equivalency so plain masses can be converted to G * M parameters.
    M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
    for body in bodies:
        try:
            GMs.append(_masses[body].to(u.m**3/u.s**2, [M_GM_equivalency]))
        except KeyError:
            raise KeyError(f'body "{body}" does not have a mass!')
        except u.UnitsError as exc:
            exc.args += ('"masses" argument values must be masses or '
                         'gravitational parameters',)
            raise
    positions = [get_body_barycentric(name, obstime) for name in bodies]
    # Calculate distances to objects other than earth.
    distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
    # Append distance from Earth's center for Earth's contribution.
    distances.append(CartesianRepresentation(self.geocentric).norm())
    # Get redshifts due to all objects.
    redshifts = [-GM / consts.c / distance for (GM, distance) in
                 zip(GMs, distances)]
    # Reverse order of summing, to go from small to big, and to get
    # "earth" first, which gives m/s as unit.
    return sum(redshifts[::-1])
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self['x']
    @property
    def y(self):
        """The Y component of the geocentric coordinates.

        Returned as a `~astropy.units.Quantity` view of the ``y`` field of
        the underlying structured array (see ``__getitem__``).
        """
        return self['y']
    @property
    def z(self):
        """The Z component of the geocentric coordinates.

        Returned as a `~astropy.units.Quantity` view of the ``z`` field of
        the underlying structured array (see ``__getitem__``).
        """
        return self['z']
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, '_ellipsoid'):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError('0-d EarthLocation arrays cannot be indexed')
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
|
en
| 0.726322
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst # Available ellipsoids (defined in erfam.h, with numbers exposed in erfa). Rotational velocity of Earth. In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value in SI seconds. See Explanatory Supplement to the Astronomical Almanac, ed. P. <NAME> (1992), University Science Books. # need to do this here to prevent a series of complicated circular imports # Retrieve JSON response from Google maps API # This catches a timeout error, see: # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python # There are some cases where urllib2 does not catch socket.timeout # especially while receiving response data on an already previously # working request # OpenStreetMap returns a list Container for meta information like name, description, format. This is required when the object is used as a mixin column within a table, but can be used as a general way to store meta information. # Need to pop ellipsoid off and update post-instantiation. This is # on the to-fix list in #4261. Return a new EarthLocation instance which is consistent with the input ``cols`` and has ``length`` rows. This is intended for creating an empty column object whose elements can be set in-place for table operations like join or vstack. Parameters ---------- cols : list List of input columns length : int Length of the output column object metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name Returns ------- col : EarthLocation (or subclass) Empty instance of this class consistent with ``cols`` # Very similar to QuantityInfo.new_like, but the creation of the # map is different enough that this needs its own rouinte. # Get merged info attributes shape, dtype, format, description. # The above raises an error if the dtypes do not match, but returns # just the string representation, which is not useful, so remove. 
# Make empty EarthLocation using the dtype and unit of the last column. # Use zeros so we do not get problems for possible conversion to # geodetic coordinates. # Get arguments needed to reconstruct class # Set remaining info attributes Location on the Earth. Initialization is first attempted assuming geocentric (x, y, z) coordinates are given; if that fails, another attempt is made assuming geodetic coordinates (longitude, latitude, height above a reference ellipsoid). When using the geodetic forms, Longitudes are measured increasing to the east, so west longitudes are negative. Internally, the coordinates are stored as geocentric. To ensure a specific type of coordinates is used, use the corresponding class methods (`from_geocentric` and `from_geodetic`) or initialize the arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``, ``height`` for geodetic). See the class methods for details. Notes ----- This class fits into the coordinates transformation framework in that it encodes a position on the `~astropy.coordinates.ITRS` frame. To get a proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs`` property. # TODO: needs copy argument and better dealing with inputs. Location on Earth, initialized from geocentric coordinates. Parameters ---------- x, y, z : `~astropy.units.Quantity` or array_like Cartesian coordinates. If not quantities, ``unit`` should be given. unit : `~astropy.units.UnitBase` object or None Physical unit of the coordinate values. If ``x``, ``y``, and/or ``z`` are quantities, they will be converted to this unit. Raises ------ astropy.units.UnitsError If the units on ``x``, ``y``, and ``z`` do not match or an invalid unit is given. ValueError If the shapes of ``x``, ``y``, and ``z`` do not match. TypeError If ``x`` is not a `~astropy.units.Quantity` and no unit is given. Location on Earth, initialized from geodetic coordinates. 
Parameters ---------- lon : `~astropy.coordinates.Longitude` or float Earth East longitude. Can be anything that initialises an `~astropy.coordinates.Angle` object (if float, in degrees). lat : `~astropy.coordinates.Latitude` or float Earth latitude. Can be anything that initialises an `~astropy.coordinates.Latitude` object (if float, in degrees). height : `~astropy.units.Quantity` or float, optional Height above reference ellipsoid (if float, in meters; default: 0). ellipsoid : str, optional Name of the reference ellipsoid to use (default: 'WGS84'). Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'. Raises ------ astropy.units.UnitsError If the units on ``lon`` and ``lat`` are inconsistent with angular ones, or that on ``height`` with a length. ValueError If ``lon``, ``lat``, and ``height`` do not have the same shape, or if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geocentric coordinates, the ERFA routine ``gd2gc`` is used. See https://github.com/liberfa/erfa # We use Angle here since there is no need to wrap the longitude - # gd2gc will just take cos/sin anyway. And wrapping might fail # on readonly input. # don't convert to m by default, so we can use the height unit below. # get geocentric coordinates. Have to give one-dimensional array. Return an object of this class for a known observatory/site by name. This is intended as a quick convenience function to get basic site information, not a fully-featured exhaustive registry of observatories and all their properties. Additional information about the site is stored in the ``.info.meta`` dictionary of sites obtained using this method (see the examples below). .. note:: When this function is called, it will attempt to download site information from the astropy data server. If you would like a site to be added, issue a pull request to the `astropy-data repository <https://github.com/astropy/astropy-data>`_ . 
If a site cannot be found in the registry (i.e., an internet connection is not available), it will fall back on a built-in list, In the future, this bundled list might include a version-controlled list of canonical observatories extracted from the online version, but it currently only contains the Greenwich Royal Observatory as an example case. Parameters ---------- site_name : str Name of the observatory (case-insensitive). Returns ------- site : This class (a `~astropy.coordinates.EarthLocation` or subclass) The location of the observatory. Examples -------- >>> from astropy.coordinates import EarthLocation >>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA >>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>) >>> keck.info # doctest: +REMOTE_DATA name = <NAME> Observatory dtype = void192 unit = m class = EarthLocation n_bad = 0 >>> keck.info.meta # doctest: +REMOTE_DATA {'source': 'IRAF Observatory Database', 'timezone': 'US/Aleutian'} See Also -------- get_site_names : the list of sites that this function can access # noqa Return an object of this class for a given address by querying either the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding API [2]_, which requires a specified API key. This is intended as a quick convenience function to get easy access to locations. If you need to specify a precise location, you should use the initializer directly and pass in a longitude, latitude, and elevation. In the background, this just issues a web query to either of the APIs noted above. This is not meant to be abused! Both OpenStreetMap and Google use IP-based query limiting and will ban your IP if you send more than a few thousand queries per hour [2]_. .. warning:: If the query returns more than one location (e.g., searching on ``address='springfield'``), this function will use the **first** returned location. 
Parameters ---------- address : str The address to get the location for. As per the Google maps API, this can be a fully specified street address (e.g., 123 Main St., New York, NY) or a city name (e.g., Danbury, CT), or etc. get_height : bool, optional This only works when using the Google API! See the ``google_api_key`` block below. Use the retrieved location to perform a second query to the Google maps elevation API to retrieve the height of the input address [3]_. google_api_key : str, optional A Google API key with the Geocoding API and (optionally) the elevation API enabled. See [4]_ for more information. Returns ------- location : This class (a `~astropy.coordinates.EarthLocation` or subclass) The location of the input address. References ---------- .. [1] https://nominatim.openstreetmap.org/ .. [2] https://developers.google.com/maps/documentation/geocoding/start .. [3] https://developers.google.com/maps/documentation/elevation/start .. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key # Fail fast if invalid options are passed: # Google # OpenStreetMap # get longitude and latitude location # strings are returned by OpenStreetMap Get list of names of observatories for use with `~astropy.coordinates.EarthLocation.of_site`. .. note:: When this function is called, it will first attempt to download site information from the astropy data server. If it cannot (i.e., an internet connection is not available), it will fall back on the list included with astropy (which is a limited and dated set of sites). If you think a site should be added, issue a pull request to the `astropy-data repository <https://github.com/astropy/astropy-data>`_ . Returns ------- names : list of str List of valid observatory names See Also -------- of_site : Gets the actual location object for one of the sites names this returns. Gets the site registry. The first time this either downloads or loads from the data file packaged with astropy. 
Subsequent calls will use the cached version unless explicitly overridden. Parameters ---------- force_download : bool or str If not False, force replacement of the cached registry with a downloaded version. If a str, that will be used as the URL to download from (if just True, the default URL will be used). force_builtin : bool If True, load from the data file bundled with astropy and set the cache to that. returns ------- reg : astropy.coordinates.sites.SiteRegistry # need to do this here at the bottom to avoid circular dependencies The default ellipsoid used to convert to geodetic coordinates. Convert to geodetic coordinates for the default ellipsoid. Convert to geodetic coordinates. Parameters ---------- ellipsoid : str, optional Reference ellipsoid to use. Default is the one the coordinates were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72' Returns ------- (lon, lat, height) : tuple The tuple contains instances of `~astropy.coordinates.Longitude`, `~astropy.coordinates.Latitude`, and `~astropy.units.Quantity` Raises ------ ValueError if ``ellipsoid`` is not recognized as among the ones implemented. Notes ----- For the conversion to geodetic coordinates, the ERFA routine ``gc2gd`` is used. See https://github.com/liberfa/erfa Longitude of the location, for the default ellipsoid. Longitude of the location, for the default ellipsoid. Height of the location, for the default ellipsoid. # mostly for symmetry with geodetic and to_geodetic. Convert to a tuple with X, Y, and Z as quantities Convert to a tuple with X, Y, and Z as quantities Generates an `~astropy.coordinates.ITRS` object with the location of this object at the requested ``obstime``. Parameters ---------- obstime : `~astropy.time.Time` or None The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or if None, the default ``obstime`` will be used. 
Returns ------- itrs : `~astropy.coordinates.ITRS` The new object in the ITRS frame # Broadcast for a single position at multiple times, but don't attempt # to be more general here. # do this here to prevent a series of complicated circular imports An `~astropy.coordinates.ITRS` object with for the location of this object at the default ``obstime``. GCRS position with velocity at ``obstime`` as a GCRS coordinate. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the GCRS position/velocity at. Returns -------- gcrs : `~astropy.coordinates.GCRS` instance With velocity included. # do this here to prevent a series of complicated circular imports # Assume the observatory itself is fixed on the ground. # We do a direct assignment rather than an update to avoid validation # and creation of a new object. Calculate the GCRS position and velocity of this object at the requested ``obstime``. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the GCRS position/velocity at. Returns -------- obsgeoloc : `~astropy.coordinates.CartesianRepresentation` The GCRS position of the object obsgeovel : `~astropy.coordinates.CartesianRepresentation` The GCRS velocity of the object # GCRS position Return the gravitational redshift at this EarthLocation. Calculates the gravitational redshift, of order 3 m/s, due to the requested solar system bodies. Parameters ---------- obstime : `~astropy.time.Time` The ``obstime`` to calculate the redshift at. bodies : iterable, optional The bodies (other than the Earth) to include in the redshift calculation. List elements should be any body name `get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and the Moon. Earth is always included (because the class represents an *Earth* location). masses : dict of str to Quantity, optional The mass or gravitational parameters (G * mass) to assume for the bodies requested in ``bodies``. 
Can be used to override the defaults for the Sun, Jupiter, the Moon, and the Earth, or to pass in masses for other bodies. Returns -------- redshift : `~astropy.units.Quantity` Gravitational redshift in velocity units at given obstime. # needs to be here to avoid circular imports # Ensure earth is included and last in the list. # Calculate distances to objects other than earth. # Append distance from Earth's center for Earth's contribution. # Get redshifts due to all objects. # Reverse order of summing, to go from small to big, and to get # "earth" first, which gives m/s as unit. The X component of the geocentric coordinates. The Y component of the geocentric coordinates. The Z component of the geocentric coordinates. Helper method for to and to_value. # Conversion to another unit in both ``to`` and ``to_value`` goes # via this routine. To make the regular quantity routines work, we # temporarily turn the structured array into a regular one.
| 2.631964
| 3
|
python/setup.py
|
OpenNMT/CTranslate2
| 259
|
6628860
|
import os
import sys
import pybind11
from setuptools import setup, find_packages, Extension
base_dir = os.path.dirname(os.path.abspath(__file__))
include_dirs = [pybind11.get_include()]
library_dirs = []
def _get_long_description():
    """Return the contents of README.md next to setup.py, or "" if absent."""
    readme_path = os.path.join(base_dir, "README.md")
    if os.path.exists(readme_path):
        with open(readme_path, encoding="utf-8") as readme_file:
            return readme_file.read()
    return ""
def _get_project_version():
    """Extract ``__version__`` from ctranslate2/version.py without importing
    the package (which may not be installable at build time)."""
    version_path = os.path.join(base_dir, "ctranslate2", "version.py")
    namespace = {}
    with open(version_path, encoding="utf-8") as fp:
        exec(fp.read(), namespace)
    return namespace["__version__"]
def _maybe_add_library_root(lib_name):
    """If ``<lib_name>_ROOT`` is set in the environment, register that
    prefix's include/ directory and its first existing lib directory
    (lib or lib64) in the module-level search paths."""
    env_var = "%s_ROOT" % lib_name
    if env_var not in os.environ:
        return
    root = os.environ[env_var]
    include_dirs.append("%s/include" % root)
    for lib_dir in ("lib", "lib64"):
        path = "%s/%s" % (root, lib_dir)
        if os.path.exists(path):
            library_dirs.append(path)
            break
# Pick up headers and libraries from a CTRANSLATE2_ROOT installation
# prefix, if one is configured in the environment.
_maybe_add_library_root("CTRANSLATE2")
cflags = ["-std=c++17"]
ldflags = []
if sys.platform == "darwin":
    # std::visit requires macOS 10.14
    cflags.append("-mmacosx-version-min=10.14")
    ldflags.append("-Wl,-rpath,/usr/local/lib")
# Python extension module: pybind11 bindings (translator.cc) linked
# against the C++ libctranslate2 library.
ctranslate2_module = Extension(
    "ctranslate2.translator",
    sources=["translator.cc"],
    extra_compile_args=cflags,
    extra_link_args=ldflags,
    include_dirs=include_dirs,
    library_dirs=library_dirs,
    libraries=["ctranslate2"],
)
# Package metadata and build configuration for the ctranslate2 wheel.
setup(
    name="ctranslate2",
    version=_get_project_version(),
    license="MIT",
    description="Fast inference engine for Transformer models",
    long_description=_get_long_description(),
    long_description_content_type="text/markdown",
    author="OpenNMT",
    author_email="<EMAIL>",
    url="https://opennmt.net",
    # Classifiers advertise the supported CUDA runtimes and the Python
    # versions matching ``python_requires`` below.
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: GPU :: NVIDIA CUDA :: 11.0",
        "Environment :: GPU :: NVIDIA CUDA :: 11.1",
        "Environment :: GPU :: NVIDIA CUDA :: 11.2",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    project_urls={
        "Forum": "https://forum.opennmt.net",
        "Gitter": "https://gitter.im/OpenNMT/CTranslate2",
        "Source": "https://github.com/OpenNMT/CTranslate2",
    },
    keywords="opennmt nmt neural machine translation cuda mkl inference quantization",
    packages=find_packages(exclude=["bin"]),
    ext_modules=[ctranslate2_module],
    python_requires=">=3.6,<3.11",
    install_requires=[
        "numpy",
    ],
    # Model-converter CLIs exposed as console entry points.
    entry_points={
        "console_scripts": [
            "ct2-fairseq-converter=ctranslate2.converters.fairseq:main",
            "ct2-opennmt-py-converter=ctranslate2.converters.opennmt_py:main",
            "ct2-opennmt-tf-converter=ctranslate2.converters.opennmt_tf:main",
        ],
    },
)
|
import os
import sys
import pybind11
from setuptools import setup, find_packages, Extension
base_dir = os.path.dirname(os.path.abspath(__file__))
include_dirs = [pybind11.get_include()]
library_dirs = []
def _get_long_description():
readme_path = os.path.join(base_dir, "README.md")
if not os.path.exists(readme_path):
return ""
with open(readme_path, encoding="utf-8") as readme_file:
return readme_file.read()
def _get_project_version():
version_path = os.path.join(base_dir, "ctranslate2", "version.py")
version = {}
with open(version_path, encoding="utf-8") as fp:
exec(fp.read(), version)
return version["__version__"]
def _maybe_add_library_root(lib_name):
if "%s_ROOT" % lib_name in os.environ:
root = os.environ["%s_ROOT" % lib_name]
include_dirs.append("%s/include" % root)
for lib_dir in ("lib", "lib64"):
path = "%s/%s" % (root, lib_dir)
if os.path.exists(path):
library_dirs.append(path)
break
_maybe_add_library_root("CTRANSLATE2")
cflags = ["-std=c++17"]
ldflags = []
if sys.platform == "darwin":
# std::visit requires macOS 10.14
cflags.append("-mmacosx-version-min=10.14")
ldflags.append("-Wl,-rpath,/usr/local/lib")
ctranslate2_module = Extension(
"ctranslate2.translator",
sources=["translator.cc"],
extra_compile_args=cflags,
extra_link_args=ldflags,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=["ctranslate2"],
)
setup(
name="ctranslate2",
version=_get_project_version(),
license="MIT",
description="Fast inference engine for Transformer models",
long_description=_get_long_description(),
long_description_content_type="text/markdown",
author="OpenNMT",
author_email="<EMAIL>",
url="https://opennmt.net",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: GPU :: NVIDIA CUDA :: 11.0",
"Environment :: GPU :: NVIDIA CUDA :: 11.1",
"Environment :: GPU :: NVIDIA CUDA :: 11.2",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
project_urls={
"Forum": "https://forum.opennmt.net",
"Gitter": "https://gitter.im/OpenNMT/CTranslate2",
"Source": "https://github.com/OpenNMT/CTranslate2",
},
keywords="opennmt nmt neural machine translation cuda mkl inference quantization",
packages=find_packages(exclude=["bin"]),
ext_modules=[ctranslate2_module],
python_requires=">=3.6,<3.11",
install_requires=[
"numpy",
],
entry_points={
"console_scripts": [
"ct2-fairseq-converter=ctranslate2.converters.fairseq:main",
"ct2-opennmt-py-converter=ctranslate2.converters.opennmt_py:main",
"ct2-opennmt-tf-converter=ctranslate2.converters.opennmt_tf:main",
],
},
)
|
en
| 0.228905
|
# std::visit requires macOS 10.14
| 1.986231
| 2
|
tests/fixes/test_base_class.py
|
dmendelsohn/pytestify
| 12
|
6628861
|
import pytest
from pytestify.fixes.base_class import remove_base_class
@pytest.mark.parametrize(
    'before, after', [
        ('class Cls(unittest.TestCase): pass', 'class TestCls: pass'),
        ('class Cls(TestCase): pass', 'class TestCls: pass'),
        ('class TestCls(unittest.TestCase): pass', 'class TestCls: pass'),
        ('class TestCls(TestCase): pass', 'class TestCls: pass'),
        ('class ClsTests(TestCase): pass', 'class TestCls: pass'),
        ('class ClsTest(TestCase): pass', 'class TestCls: pass'),
        (
            'class ThingTestCase(unittest.TestCase): pass',
            'class TestThing: pass',
        ),
        ('class ThingTestCase(TestCase): pass', 'class TestThing: pass'),
    ],
)
def test_remove_base_class(before, after):
    """Rewriting the class header must not disturb a realistic import
    preamble preceding it."""
    imports = 'import unittest\nfrom unittest import TestCase\n\n'
    assert remove_base_class(imports + before) == imports + after
@pytest.mark.parametrize(
    'line', [
        '# class Cls(unittest.TestClass):',
        'class Cls: pass',
        'class TestCls: pass',
    ],
)
def test_doesnt_remove_base_class(line):
    """Inputs with no unittest base class must pass through unchanged."""
    result = remove_base_class(line)
    assert result == line
|
import pytest
from pytestify.fixes.base_class import remove_base_class
@pytest.mark.parametrize(
'before, after', [
('class Cls(unittest.TestCase): pass', 'class TestCls: pass'),
('class Cls(TestCase): pass', 'class TestCls: pass'),
('class TestCls(unittest.TestCase): pass', 'class TestCls: pass'),
('class TestCls(TestCase): pass', 'class TestCls: pass'),
('class ClsTests(TestCase): pass', 'class TestCls: pass'),
('class ClsTest(TestCase): pass', 'class TestCls: pass'),
(
'class ThingTestCase(unittest.TestCase): pass',
'class TestThing: pass',
),
('class ThingTestCase(TestCase): pass', 'class TestThing: pass'),
],
)
def test_remove_base_class(before, after):
imports = (
'import unittest\n' +
'from unittest import TestCase\n\n'
)
assert remove_base_class(imports + before) == imports + after
@pytest.mark.parametrize(
'line', [
'# class Cls(unittest.TestClass):',
'class Cls: pass',
'class TestCls: pass',
],
)
def test_doesnt_remove_base_class(line):
assert remove_base_class(line) == line
|
none
| 1
| 2.619202
| 3
|
|
4.select/5.Mm.get_hits_dynamic_e.py
|
oaxiom/episcan
| 0
|
6628862
|
#!/usr/bin/env python3
import numpy, pickle
from glbase3 import *
import matplotlib.pyplot as plot
'''
Round 1, For each domain, work out a dynamic threshold for each motif, and discard motifs that are useless
'''
final_results = {}
# Per-domain model built in step 3; each row carries a domain name and the
# dynamic E-value cut-off ('e') chosen for that domain.
model_matrix = glload('../3.model/AUCtable.glb')
dynamicE = {d['domain']: float(d['e']) for d in model_matrix}
# Reference Epifactors set (one entry per ensg) to compare the hits against.
epifactors_filtered = glload('../1.extract_epifactors_FASTA/mm_epifactors.all.glb').removeDuplicates('ensg')
#########
hmmer_search = genelist(filename='Mm.gencode.txt', format=format.hmmer_domtbl)
# NOTE(review): ``shared`` is never imported by name in this script;
# presumably it is pulled in by ``from glbase3 import *`` or a sibling
# module -- confirm, otherwise this line raises NameError.
matches = shared.get_dynamic_e(hmmer_search, dynamicE)
# add whether it is in Epifactors DB, or not;
gl = genelist()
gl.load_list(matches)
gl.saveTSV('Mm.matches.tsv', key_order=['ensg', 'ensp', 'name'])
gl.save('Mm.matches.glb')
# Filtered result: set algebra on gene names to split the hits into
# "episcan only", "both", and "epifactors only" buckets.
episcan = set(gl.removeDuplicates('name')['name'])
epifactors = set(epifactors_filtered.removeDuplicates('name')['name'])
epifactors_only = epifactors.difference(episcan)
both = epifactors.intersection(episcan)
episcan_only = episcan.difference(epifactors)
print('Mm10: Episcan ( {0} ( {1} ) {2} ) Epifactors Filtered'.format(len(episcan_only), len(both), len(epifactors_only)))
# One gene name per line, alphabetically sorted, for each bucket.
with open('mm_result_filtered_epifactors_only.txt', 'wt') as oh:
    for name in sorted(epifactors_only):
        oh.write('{0}\n'.format(name))
with open('mm_result_filtered_both.txt', 'wt') as oh:
    for name in sorted(both):
        oh.write('{0}\n'.format(name))
with open('mm_result_filtered_episcan_only.txt', 'wt') as oh:
    for name in sorted(episcan_only):
        oh.write('{0}\n'.format(name))
|
#!/usr/bin/env python3
import numpy, pickle
from glbase3 import *
import matplotlib.pyplot as plot
'''
Round 1, For each domain, work out a dynamic threshold for each motif, and discard motifs that are useless
'''
final_results = {}
model_matrix = glload('../3.model/AUCtable.glb')
dynamicE = {d['domain']: float(d['e']) for d in model_matrix}
epifactors_filtered = glload('../1.extract_epifactors_FASTA/mm_epifactors.all.glb').removeDuplicates('ensg')
#########
hmmer_search = genelist(filename='Mm.gencode.txt', format=format.hmmer_domtbl)
matches = shared.get_dynamic_e(hmmer_search, dynamicE)
# add wether it is in Epifactors DB, or not;
gl = genelist()
gl.load_list(matches)
gl.saveTSV('Mm.matches.tsv', key_order=['ensg', 'ensp', 'name'])
gl.save('Mm.matches.glb')
# Filtered result
episcan = set(gl.removeDuplicates('name')['name'])
epifactors = set(epifactors_filtered.removeDuplicates('name')['name'])
epifactors_only = epifactors.difference(episcan)
both = epifactors.intersection(episcan)
episcan_only = episcan.difference(epifactors)
print('Mm10: Episcan ( {0} ( {1} ) {2} ) Epifactors Filtered'.format(len(episcan_only), len(both), len(epifactors_only)))
with open('mm_result_filtered_epifactors_only.txt', 'wt') as oh:
for name in sorted(epifactors_only):
oh.write('{0}\n'.format(name))
with open('mm_result_filtered_both.txt', 'wt') as oh:
for name in sorted(both):
oh.write('{0}\n'.format(name))
with open('mm_result_filtered_episcan_only.txt', 'wt') as oh:
for name in sorted(episcan_only):
oh.write('{0}\n'.format(name))
|
en
| 0.659064
|
#!/usr/bin/env python3 Round 1, For each domain, work out a dynamic threshold for each motif, and discard motifs that are useless ######### # add wether it is in Epifactors DB, or not; # Filtered result
| 2.307957
| 2
|
enhanced_rds/metric_maps.py
|
theletterf/enhanced-rds-monitoring
| 5
|
6628863
|
"""
The default structure(s) of the metric payloads delivered to the Lambda.
Note that the Aurora section only contains the structures in which there is
meaningful difference from the standard version.
"""
# Standard set of metric info
METRICS = [
    'cpuUtilization',
    'diskIO',
    'fileSys',
    'loadAverageMinute',
    'memory',
    'network',
    'swap',
    'tasks',
    'OSprocesses',
    'RDSprocesses'
]
# Per-process fields reported under the process metric groups.
PROCESS_METRICS = [
    'vss',
    'rss',
    'memoryUsedPc',
    'cpuUsedPc'
]
# Payload fields that identify individual entries (devices, filesystems,
# interfaces) within each metric group.
METRICS_DIMS = {
    'diskIO': ['device'],
    'fileSys': ['name', 'mountPoint'],
    'network': ['interface']
}
# Metric info for Aurora instances.
METRICS_AURORA_DIMS = {
    'diskIO': [], # Workaround to account for Aurora diskIO metrics
    'fileSys': ['name', 'mountPoint'],
    'network': ['interface']
}
# Metric info for Microsoft SQL instances.
METRICS_MICROSOFT = [
    'cpuUtilization',
    'disks',
    'memory',
    'network',
    'OSprocesses',
    'RDSprocesses',
    'system'
]
# Per-process fields in the Microsoft SQL payload variant.
PROCESS_METRICS_MICROSOFT = [
    'cpuUsedPc',
    'memUsedPc',
    'workingSetKb',
    'workingSetPrivKb',
    'workingSetShareableKb',
    'virtKb'
]
# Identifying fields for Microsoft SQL metric groups.
METRICS_MICROSOFT_DIMS = {
    'disks': ['name'],
    'network': ['interface']
}
|
"""
The default structure(s) of the metric payloads delivered to the Lambda.
Note that the Aurora section only contains the structures in which there is
meaningful difference from the standard version.
"""
# Standard set of metric info
METRICS = [
'cpuUtilization',
'diskIO',
'fileSys',
'loadAverageMinute',
'memory',
'network',
'swap',
'tasks',
'OSprocesses',
'RDSprocesses'
]
PROCESS_METRICS = [
'vss',
'rss',
'memoryUsedPc',
'cpuUsedPc'
]
METRICS_DIMS = {
'diskIO': ['device'],
'fileSys': ['name', 'mountPoint'],
'network': ['interface']
}
# Metric info for Aurora instances.
METRICS_AURORA_DIMS = {
'diskIO': [], # Workaround to account for Aurora diskIO metrics
'fileSys': ['name', 'mountPoint'],
'network': ['interface']
}
# Metric info for Microsoft SQL instances.
METRICS_MICROSOFT = [
'cpuUtilization',
'disks',
'memory',
'network',
'OSprocesses',
'RDSprocesses',
'system'
]
PROCESS_METRICS_MICROSOFT = [
'cpuUsedPc',
'memUsedPc',
'workingSetKb',
'workingSetPrivKb',
'workingSetShareableKb',
'virtKb'
]
METRICS_MICROSOFT_DIMS = {
'disks': ['name'],
'network': ['interface']
}
|
en
| 0.823701
|
The default structure(s) of the metric payloads delivered to the Lambda. Note that the Aurora section only contains the structures in which there is meaningful difference from the standard version. # Standard set of metric info # Metric info for Aurora instances. # Workaround to account for Aurora diskIO metrics # Metric info for Microsoft SQL instances.
| 1.598426
| 2
|
garage/tf/core/mlp.py
|
XavierJingfeng/starter
| 0
|
6628864
|
"""MLP model in TensorFlow."""
import tensorflow as tf
def mlp(input_var,
        output_dim,
        hidden_sizes,
        name,
        hidden_nonlinearity=tf.nn.relu,
        hidden_w_init=tf.glorot_uniform_initializer,
        hidden_b_init=tf.zeros_initializer,
        output_nonlinearity=None,
        output_w_init=tf.glorot_uniform_initializer,
        output_b_init=tf.zeros_initializer,
        layer_normalization=False):
    """Build a multi-layer perceptron (MLP) in TensorFlow 1.x.

    Args:
        input_var: Input ``tf.Tensor`` fed to the network.
        output_dim: Dimension of the network output.
        hidden_sizes: Iterable with the width of each hidden dense layer.
        name: Variable scope wrapping all of the MLP's variables.
        hidden_nonlinearity: Activation applied after each hidden layer.
        hidden_w_init: Initializer for hidden-layer weights.
        hidden_b_init: Initializer for hidden-layer biases.
        output_nonlinearity: Activation applied to the output layer
            (``None`` gives a linear output).
        output_w_init: Initializer for output-layer weights.
        output_b_init: Initializer for output-layer biases.
        layer_normalization: If True, apply layer normalization after each
            hidden dense layer.

    Returns:
        The output ``tf.Tensor`` of the MLP.

    NOTE(review): the ``*_init`` defaults are initializer classes, not
    instances (no trailing ``()``); confirm ``tf.layers.dense`` accepts
    them uncalled in the targeted TF version.
    """
    with tf.variable_scope(name):
        current = input_var
        for layer_idx, width in enumerate(hidden_sizes):
            current = tf.layers.dense(
                inputs=current,
                units=width,
                activation=hidden_nonlinearity,
                kernel_initializer=hidden_w_init,
                bias_initializer=hidden_b_init,
                name="hidden_{}".format(layer_idx))
            if layer_normalization:
                current = tf.contrib.layers.layer_norm(current)
        return tf.layers.dense(
            inputs=current,
            units=output_dim,
            activation=output_nonlinearity,
            kernel_initializer=output_w_init,
            bias_initializer=output_b_init,
            name="output")
|
"""MLP model in TensorFlow."""
import tensorflow as tf
def mlp(input_var,
output_dim,
hidden_sizes,
name,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.glorot_uniform_initializer,
hidden_b_init=tf.zeros_initializer,
output_nonlinearity=None,
output_w_init=tf.glorot_uniform_initializer,
output_b_init=tf.zeros_initializer,
layer_normalization=False):
"""
MLP model.
Args:
input_var: Input tf.Tensor to the MLP.
output_dim: Dimension of the network output.
hidden_sizes: Output dimension of dense layer(s).
name: variable scope of the mlp.
hidden_nonlinearity: Activation function for
intermediate dense layer(s).
hidden_w_init: Initializer function for the weight
of intermediate dense layer(s).
hidden_b_init: Initializer function for the bias
of intermediate dense layer(s).
output_nonlinearity: Activation function for
output dense layer.
output_w_init: Initializer function for the weight
of output dense layer(s).
output_b_init: Initializer function for the bias
of output dense layer(s).
layer_normalization: Bool for using layer normalization or not.
Return:
The output tf.Tensor of the MLP
"""
with tf.variable_scope(name):
l_hid = input_var
for idx, hidden_size in enumerate(hidden_sizes):
l_hid = tf.layers.dense(
inputs=l_hid,
units=hidden_size,
activation=hidden_nonlinearity,
kernel_initializer=hidden_w_init,
bias_initializer=hidden_b_init,
name="hidden_{}".format(idx))
if layer_normalization:
l_hid = tf.contrib.layers.layer_norm(l_hid)
l_out = tf.layers.dense(
inputs=l_hid,
units=output_dim,
activation=output_nonlinearity,
kernel_initializer=output_w_init,
bias_initializer=output_b_init,
name="output")
return l_out
|
en
| 0.541819
|
MLP model in TensorFlow. MLP model. Args: input_var: Input tf.Tensor to the MLP. output_dim: Dimension of the network output. hidden_sizes: Output dimension of dense layer(s). name: variable scope of the mlp. hidden_nonlinearity: Activation function for intermediate dense layer(s). hidden_w_init: Initializer function for the weight of intermediate dense layer(s). hidden_b_init: Initializer function for the bias of intermediate dense layer(s). output_nonlinearity: Activation function for output dense layer. output_w_init: Initializer function for the weight of output dense layer(s). output_b_init: Initializer function for the bias of output dense layer(s). layer_normalization: Bool for using layer normalization or not. Return: The output tf.Tensor of the MLP
| 3.297006
| 3
|
data/external/repositories_2to3/113677/KaggleBillionWordImputation-master/scripts/compare_pos.py
|
Keesiu/meta-kaggle
| 0
|
6628865
|
#!/usr/bin/env python
'''
Compare POS tags to a gold standard.
'''
import sys, argparse, pickle
from collections import defaultdict
from util import tokenize_words, pos_tag
def opts():
    """Build the command-line argument parser."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('sample', type=argparse.FileType('r'),
                        help='POS-tagged sentences')
    parser.add_argument('removed', type=argparse.FileType('r'),
                        help='File with indices of removed words in gold')
    parser.add_argument('gold', type=argparse.FileType('r'),
                        help='Gold-standard POS-tagged sentences')
    # 'wb' (not 'w'): pickle.dump writes bytes and requires a binary file.
    parser.add_argument('errors', type=argparse.FileType('wb'),
                        help='Pickle file with errors broken down by POS tag')
    return parser
if __name__ == "__main__":
    args = opts().parse_args()
    # counts[predicted_tag][gold_tag] -> number of occurrences
    counts = defaultdict(lambda: defaultdict(int))
    nerrors = 0
    nsentences = 0
    for sentence, ref_sentence, i_removed in zip(args.sample, args.gold, args.removed):
        try:
            i_removed = int(i_removed)
            words = tokenize_words(sentence)
            ref_words = tokenize_words(ref_sentence)
            # The gold sentence contains exactly one extra (removed) word.
            assert len(words) == len(ref_words)-1
            pos = list(map(pos_tag, words))
            ref_pos = list(map(pos_tag, ref_words))
            has_error = False
            # Before the removed word, positions align one-to-one ...
            for i in range(i_removed):
                counts[pos[i]][ref_pos[i]] += 1
                has_error |= (pos[i] != ref_pos[i])
            # ... after it, the gold index is shifted by one.
            for i in range(i_removed, len(words)):
                counts[pos[i]][ref_pos[i+1]] += 1
                has_error |= (pos[i] != ref_pos[i+1])
            if has_error: nerrors += 1
            nsentences += 1
        except Exception as e:
            # Best-effort: report the malformed line and keep going.
            print("Error processing: %s" % e, file=sys.stderr)
            sys.stderr.write(ref_sentence)
            print(sentence, file=sys.stderr)
    print("Found %d/%d sentences with POS-tag errors" \
        % (nerrors, nsentences), file=sys.stderr)
    # Convert the inner defaultdicts to plain dicts as well: their lambda
    # default_factory cannot be pickled.
    pickle.dump({tag: dict(refs) for tag, refs in counts.items()}, args.errors)
|
#!/usr/bin/env python
'''
Compare POS tags to a gold standard.
'''
import sys, argparse, pickle
from collections import defaultdict
from util import tokenize_words, pos_tag
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('sample', type=argparse.FileType('r'),
help='POS-tagged sentences')
parser.add_argument('removed', type=argparse.FileType('r'),
help='File with indices of removed words in gold')
parser.add_argument('gold', type=argparse.FileType('r'),
help='Gold-standard POS-tagged sentences')
parser.add_argument('errors', type=argparse.FileType('w'),
help='Pickle file with errors broken down by POS tag')
return parser
if __name__ == "__main__":
args = opts().parse_args()
counts = defaultdict(lambda: defaultdict(int))
nerrors = 0
nsentences = 0
for sentence, ref_sentence, i_removed in zip(args.sample, args.gold, args.removed):
try:
i_removed = int(i_removed)
words = tokenize_words(sentence)
ref_words = tokenize_words(ref_sentence)
assert len(words) == len(ref_words)-1
pos = list(map(pos_tag, words))
ref_pos = list(map(pos_tag, ref_words))
has_error = False
for i in range(i_removed):
counts[pos[i]][ref_pos[i]] += 1
has_error |= (pos[i] != ref_pos[i])
for i in range(i_removed, len(words)):
counts[pos[i]][ref_pos[i+1]] += 1
has_error |= (pos[i] != ref_pos[i+1])
if has_error: nerrors += 1
nsentences += 1
except Exception as e:
print("Error processing: %s" % e, file=sys.stderr)
sys.stderr.write(ref_sentence)
print(sentence, file=sys.stderr)
print("Found %d/%d sentences with POS-tag errors" \
% (nerrors, nsentences), file=sys.stderr)
pickle.dump(dict(counts), args.errors)
|
en
| 0.263353
|
#!/usr/bin/env python Compare POS tags to a gold standard.
| 3.045237
| 3
|
backend/utils/middlewares.py
|
matrixhawk/eastmoney
| 31
|
6628866
|
import json
import math
import time
import functools
from json import JSONDecodeError
from django.urls import resolve
from apps.log.models import Log
def SaveLogMiddleware(func):
    """Decorator that records each request/response pair to the Log model.

    Captures the query-string and JSON-body parameters, client IP, HTTP
    method, response status and elapsed time, then writes one Log row.
    Requests to the log-listing endpoint itself are not recorded.
    """
    @functools.wraps(func)
    def _inner(request, *args, **kwargs):
        start = time.time()
        bdata = request.body
        if bdata:
            try:
                post_params = json.loads(bdata.decode('utf-8'))
            except JSONDecodeError:
                # Non-JSON body (e.g. form data): record no body params.
                post_params = {}
        else:
            post_params = {}
        res = func(request, *args, **kwargs)
        end = time.time()
        x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
        if x_forwarded_for:
            # Behind a proxy: the first entry is the real client IP.
            ip_addr = x_forwarded_for.split(',')[0]
        else:
            # Direct connection: REMOTE_ADDR is the (possibly proxy) peer IP.
            ip_addr = request.META.get('REMOTE_ADDR')
        req_method = request.method
        api_path = request.path
        if api_path == "/log/log/":
            # Do not log the log-viewing endpoint itself.
            return res
        get_params = request.GET
        comments = resolve(api_path).url_name
        params_list = []
        if get_params:
            for k, v in dict(get_params).items():
                params_list.append(k + ":" + str(v))
        if post_params:
            for x, y in post_params.items():
                params_list.append(x + ":" + str(y))
        # Elapsed time in whole milliseconds, rounded up.
        keep_time = math.ceil((end - start) * 1000)
        # Not every response class exposes status_text; default to "" instead
        # of swallowing arbitrary exceptions with a bare except.
        status_content = getattr(res, 'status_text', "")
        Log.objects.create(api_name=api_path,
                           method=req_method,
                           params=",".join(params_list),
                           comments=comments,
                           time=keep_time,
                           ip=ip_addr,
                           username=request.user,
                           status_code=res.status_code,
                           status_text=status_content
                           )
        return res
    return _inner
|
import json
import math
import time
import functools
from json import JSONDecodeError
from django.urls import resolve
from apps.log.models import Log
def SaveLogMiddleware(func):
@functools.wraps(func)
def _inner(request, *args, **kwargs):
start = time.time()
bdata = request.body
if bdata:
try:
post_params = json.loads(bdata.decode('utf-8'))
except JSONDecodeError:
post_params = {}
else:
post_params = {}
res = func(request, *args, **kwargs)
end = time.time()
x_forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
if x_forwarded_for:
# 这里是真实的ip
ip_addr = x_forwarded_for.split(',')[0]
else:
# 这里是代理ip
ip_addr = request.META.get('REMOTE_ADDR')
req_method = request.method
api_path = request.path
if api_path == "/log/log/":
return res
get_params = request.GET
comments = resolve(api_path).url_name
params_list = []
if get_params:
for k, v in dict(get_params).items():
params_list.append(k + ":" + str(v))
if post_params:
for x, y in post_params.items():
params_list.append(x + ":" + str(y))
keep_time = math.ceil((end - start) * 1000)
status_content = ""
try:
status_content = res.status_text
except:
pass
Log.objects.create(api_name=api_path,
method=req_method,
params=",".join(params_list),
comments=comments,
time=keep_time,
ip=ip_addr,
username=request.user,
status_code=res.status_code,
status_text=status_content
)
return res
return _inner
|
zh
| 0.976061
|
# 这里是真实的ip # 这里是代理ip
| 2.146899
| 2
|
mundo2/parte3/parteb/ex068.py
|
fcdennis/CursoPython
| 0
|
6628867
|
from random import randint
# "Par ou Impar" (even-or-odd) game against the computer; the player keeps
# playing and accumulating wins until choosing to stop.
print('=-' * 13)
print('VAMOS JOGAR PAR OU ÍMPAR')
print('=-' * 13)
placar = 0  # number of rounds the player has won
while True:
    computador = randint(0, 9)
    jogador = int(input('Diga um valor? '))
    total = computador + jogador
    pergunta = input('Par ou Ímpar? [P / I] ')
    # Validate against the exact options. The original substring test
    # (pergunta not in 'PpIi') wrongly accepted '' and strings like 'Pp'.
    while pergunta not in ('P', 'p', 'I', 'i'):
        pergunta = input('Par ou Ímpar? [P / I] ')
    print(f'O computador jogou {computador} e você jogou {jogador}.')
    print(f'O total é {total}.')
    # The player wins by calling the parity of the combined total correctly.
    if pergunta in 'Pp' and total % 2 == 0 or pergunta in 'Ii' and total % 2 != 0:
        print('Você VENCEU!')
        placar += 1
    else:
        print('Você PERDEU')
    continuar = input('Quer jogar de novo? [S/N] ')
    # Same exact-option validation for the replay prompt.
    while continuar not in ('S', 's', 'N', 'n'):
        continuar = input('Quer jogar de novo? [S/N] ')
    if continuar in 'Nn':
        break
print(f'GAME OVER! Você venceu {placar} vezes.')
|
from random import randint
print('=-' * 13)
print('VAMOS JOGAR PAR OU ÍMPAR')
print('=-' * 13)
placar = 0
while True:
computador = randint(0, 9)
jogador = int(input('Diga um valor? '))
total = computador + jogador
pergunta = input('Par ou Ímpar? [P / I] ')
while pergunta not in 'PpIi':
pergunta = input('Par ou Ímpar? [P / I] ')
print(f'O computador jogou {computador} e você jogou {jogador}.')
print(f'O total é {total}.')
if pergunta in 'Pp' and total % 2 == 0 or pergunta in 'Ii' and total % 2 != 0:
print('Você VENCEU!')
placar += 1
else:
print('Você PERDEU')
continuar = input('Quer jogar de novo? [S/N] ')
while continuar not in 'SsNn':
continuar = input('Quer jogar de novo? [S/N] ')
if continuar in 'Nn':
break
print(f'GAME OVER! Você venceu {placar} vezes.')
|
none
| 1
| 3.821515
| 4
|
|
work/wamplet1/wamplet1/component1.py
|
haizaar/crossbar-examples
| 97
|
6628868
|
<reponame>haizaar/crossbar-examples
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import datetime
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from calculator import Calculator
# WAMP application component with our app code.
##
class Component1(ApplicationSession):
    """WAMP application component: registers a time-service procedure and
    all decorated methods of a Calculator instance on session join."""
    @inlineCallbacks
    def onJoin(self, details):
        # register a function that can be called remotely
        ##
        def utcnow():
            now = datetime.datetime.utcnow()
            return now.strftime("%Y-%m-%dT%H:%M:%SZ")
        reg = yield self.register(utcnow, 'com.timeservice.now')
        print("Procedure registered with ID {}".format(reg.id))
        # create an application object that exposes methods for remoting
        ##
        self.calculator = Calculator()
        # register all methods on the "calculator" decorated with "@wamp.register"
        ##
        results = yield self.register(self.calculator)
        for success, res in results:
            if success:
                print("Ok, registered procedure with registration ID {}".format(res.id))
            else:
                print("Failed to register procedure: {}".format(res.value))
    def onDisconnect(self):
        # Fix: 'reactor' was referenced without ever being imported, which
        # raised NameError on disconnect. Import it locally and stop the loop.
        from twisted.internet import reactor
        reactor.stop()
def make(config):
    """Component factory.

    Returns a Component1 instance for *config*, or — when no config is
    given — a metadata dict describing this WAMPlet. Called either during
    development via the ApplicationRunner below, or as a plugin hosted in
    a WAMPlet container such as a Crossbar.io worker.
    """
    if not config:
        # No config: just describe this WAMPlet.
        return {'label': 'Awesome WAMPlet 1',
                'description': 'This is just a test WAMPlet that provides some procedures to call.'}
    return Component1(config)
if __name__ == '__main__':
    from autobahn.twisted.wamp import ApplicationRunner
    # test drive the component during development ..
    # NOTE(review): "1172.16.58.3" looks like a mangled private address
    # (172.16.x.x) — 1172 is not a valid octet; confirm the intended host.
    runner = ApplicationRunner(
        url="ws://1172.16.58.3:8080/ws",
        realm="realm1",
        debug=False,  # low-level WebSocket debugging
        debug_wamp=False,  # WAMP protocol-level debugging
        debug_app=True)  # app-level debugging
    runner.run(make)
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import datetime
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from calculator import Calculator
# WAMP application component with our app code.
##
class Component1(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
# register a function that can be called remotely
##
def utcnow():
now = datetime.datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
reg = yield self.register(utcnow, 'com.timeservice.now')
print("Procedure registered with ID {}".format(reg.id))
# create an application object that exposes methods for remoting
##
self.calculator = Calculator()
# register all methods on the "calculator" decorated with "@wamp.register"
##
results = yield self.register(self.calculator)
for success, res in results:
if success:
print("Ok, registered procedure with registration ID {}".format(res.id))
else:
print("Failed to register procedure: {}".format(res.value))
def onDisconnect(self):
reactor.stop()
def make(config):
##
# This component factory creates instances of the
# application component to run.
##
# The function will get called either during development
# using the ApplicationRunner below, or as a plugin running
# hosted in a WAMPlet container such as a Crossbar.io worker.
##
if config:
return Component1(config)
else:
# if no config given, return a description of this WAMPlet ..
return {'label': 'Awesome WAMPlet 1',
'description': 'This is just a test WAMPlet that provides some procedures to call.'}
if __name__ == '__main__':
from autobahn.twisted.wamp import ApplicationRunner
# test drive the component during development ..
runner = ApplicationRunner(
url="ws://1172.16.58.3:8080/ws",
realm="realm1",
debug=False, # low-level WebSocket debugging
debug_wamp=False, # WAMP protocol-level debugging
debug_app=True) # app-level debugging
runner.run(make)
|
en
| 0.731007
|
############################################################################### # # The MIT License (MIT) # # Copyright (c) Tavendo GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### # WAMP application component with our app code. ## # register a function that can be called remotely ## # create an application object that exposes methods for remoting ## # register all methods on the "calculator" decorated with "@wamp.register" ## ## # This component factory creates instances of the # application component to run. ## # The function will get called either during development # using the ApplicationRunner below, or as a plugin running # hosted in a WAMPlet container such as a Crossbar.io worker. ## # if no config given, return a description of this WAMPlet .. # test drive the component during development .. # low-level WebSocket debugging # WAMP protocol-level debugging # app-level debugging
| 1.749801
| 2
|
fairseq/models/pronouns.py
|
liufly/refreader
| 19
|
6628869
|
from collections import defaultdict
# name list obtained from: https://www.ssa.gov/oact/babynames/decades/century.html
# accessed on Nov 6th, 2018
class PronounLexicon():
    """Lexicon of pronouns grouped by grammatical feature (e.g. 'gender=f')."""
    def __init__(self, lexfile='pronouns.tsv'):
        # Maps "feature=value" -> list of pronoun strings.
        self.lexicon = defaultdict(lambda : [])
        with open(lexfile) as fin:
            for line in fin:
                if len(line) > 2:
                    fields = line.split()
                    word = fields[0]
                    # Second column is comma-separated feature=value pairs.
                    feats = dict(pair.split('=') for pair in fields[1].split(','))
                    for feat, val in feats.items():
                        self.lexicon['='.join([feat, val])].append(word)
        print(f"Read lexicon from {lexfile}:\n{self.lexicon}")
    def make_lex(self, feature, dictionary):
        '''
        given a fairseq dictionary, export a list of word idxs that match a desired feature
        '''
        matches = self.lexicon[feature]
        return [idx for word, idx in dictionary.indices.items()
                if word.lower() in matches]
    def all_word_idxs(self, dictionary):
        """Word idxs of every pronoun in the lexicon, per the fairseq dictionary."""
        vocab = self.all_words()
        return [idx for word, idx in dictionary.indices.items()
                if word.lower() in vocab]
    def all_words(self):
        """Return the set of every pronoun appearing in the lexicon."""
        return {word for subset in self.lexicon.values() for word in subset}
    def get_feature_set(self, feature_set):
        """Union of the lexicon entries for every feature in *feature_set*."""
        result = set()
        for feature in feature_set:
            result.update(self.lexicon[feature])
        return result
    def annotate_feature_chunk_end(self, sentence, chunk_tags, feature_set):
        """For each token matching *feature_set*, mark the end of its chunk.

        Returns a 0/1 list aligned with *sentence*: single-token matches
        ('O' or 'U-' tags) are marked in place; multi-token chunks are
        marked at their closing 'L-' tag.
        """
        pronouns = self.get_feature_set(feature_set)
        assert len(sentence) == len(chunk_tags)
        output = [0] * len(sentence)
        for i, (token, tag) in enumerate(zip(sentence, chunk_tags)):
            if token.lower() not in pronouns:
                continue
            if tag == 'O' or tag.startswith('U-'):
                # Outside any chunk, or a unit-length chunk: mark here.
                output[i] = 1
            else:
                chunk_type = tag[2:]
                # Scan forward to the L- tag that closes this chunk.
                for j in range(i, len(sentence)):
                    end_tag = chunk_tags[j]
                    assert end_tag[2:] == chunk_type
                    if end_tag.startswith('L-'):
                        output[j] = 1
                        break
        return output
def find_gaps(sentence):
    """Return the index distances between consecutive marked (1) positions.

    *sentence* is a 0/1 list; for each pair of successive 1s, the gap
    appended is the difference of their indices.
    """
    # Fixes two bugs in the original: the loop bound read the global
    # 'marked_sentence' instead of the argument (NameError for any other
    # caller), and scanning started at index -1, reading the last element
    # before the first.
    gaps = []
    prev = -1  # index of the previous marked position; -1 = none seen yet
    for cur, flag in enumerate(sentence):
        if flag == 1:
            if prev != -1:
                gaps.append(cur - prev)
            prev = cur
    return gaps
if __name__ == '__main__':
    # Compute statistics over the CBT training corpus: sentence lengths and
    # the gaps between successive pronoun occurrences within a sentence.
    lex = PronounLexicon()
    all_words = lex.all_words()
    in_file_path = "data/CBTest/data/cbt_train.txt"
    all_lens = []  # tokens per sentence
    all_gaps = []  # distances between consecutive pronouns, pooled over sentences
    with open(in_file_path) as f:
        for line in f:
            line = line.strip()
            # 1 where the token is a known pronoun, 0 elsewhere.
            # NOTE(review): tokens are matched case-sensitively here, while the
            # lexicon methods lowercase — confirm the corpus is lowercased.
            marked_sentence = [1 if w in all_words else 0 for w in line.split(' ')]
            all_lens.append(len(marked_sentence))
            # print(marked_sentence)
            gaps = find_gaps(marked_sentence)
            # print(gaps)
            all_gaps.extend(gaps)
    import numpy as np
    print(np.mean(all_lens), np.std(all_lens))
    print(np.mean(all_gaps), np.std(all_gaps))
    # l = 32 covers 81.5% of the sentences
    # l = 64 covers 98.4% of the sentences
    l = 64
    # Fraction of sentences no longer than l tokens.
    print(len(list(filter(lambda x: x <= l, all_lens))) / float(len(all_lens)))
    # l = 10 covers 82.7% of the gaps
    # l = 20 covers 97.2% of the gaps
    # l = 30 covers 99.4% of the gaps
    l = 20
    # Fraction of pronoun gaps no larger than l.
    print(len(list(filter(lambda x: x <= l, all_gaps))) / float(len(all_gaps)))
|
from collections import defaultdict
# name list obtained from: https://www.ssa.gov/oact/babynames/decades/century.html
# accessed on Nov 6th, 2018
class PronounLexicon():
def __init__(self, lexfile='pronouns.tsv'):
self.lexicon = defaultdict(lambda : [])
with open(lexfile) as fin:
for line in fin:
if len(line) > 2:
word = line.split()[0]
feats = dict(x.split('=') for x in line.split()[1].split(','))
for feat,val in feats.items():
self.lexicon['='.join([feat,val])].append(word)
print(f"Read lexicon from {lexfile}:\n{self.lexicon}")
def make_lex(self,feature,dictionary):
'''
given a fairseq dictionary, export a list of word idxs that match a desired feature
'''
return [idx for word,idx in dictionary.indices.items() if word.lower() in self.lexicon[feature]]
def all_word_idxs(self,dictionary):
return [idx for word,idx in dictionary.indices.items() if word.lower() in self.all_words()]
def all_words(self):
output = set()
for subset in self.lexicon.values():
for word in subset:
output.add(word)
return output
def get_feature_set(self, feature_set):
output = set()
for t in feature_set:
output |= set(self.lexicon[t])
return output
def annotate_feature_chunk_end(self, sentence, chunk_tags, feature_set):
pronoun_lexicons = self.get_feature_set(feature_set)
assert len(sentence) == len(chunk_tags)
output = [0 for _ in range(len(sentence))]
for i, (token, chunk_tag) in enumerate(zip(sentence, chunk_tags)):
if token.lower() in pronoun_lexicons:
if chunk_tag == 'O' or chunk_tag[:2] == 'U-':
output[i] = 1
else:
chunk_type = chunk_tag[2:]
for j in range(i, len(sentence)):
end_chunk = chunk_tags[j]
assert end_chunk[2:] == chunk_type
if end_chunk[:2] == 'L-':
output[j] = 1
break
return output
def find_gaps(sentence):
gaps = []
prev, cur = -1, -1
while cur < len(marked_sentence):
if sentence[cur] == 1:
if prev != -1:
gaps.append(cur - prev)
prev = cur
cur += 1
return gaps
if __name__ == '__main__':
lex = PronounLexicon()
all_words = lex.all_words()
in_file_path = "data/CBTest/data/cbt_train.txt"
all_lens = []
all_gaps = []
with open(in_file_path) as f:
for line in f:
line = line.strip()
marked_sentence = [1 if w in all_words else 0 for w in line.split(' ')]
all_lens.append(len(marked_sentence))
# print(marked_sentence)
gaps = find_gaps(marked_sentence)
# print(gaps)
all_gaps.extend(gaps)
import numpy as np
print(np.mean(all_lens), np.std(all_lens))
print(np.mean(all_gaps), np.std(all_gaps))
# l = 32 covers 81.5% of the sentences
# l = 64 covers 98.4% of the sentences
l = 64
print(len(list(filter(lambda x: x <= l, all_lens))) / float(len(all_lens)))
# l = 10 covers 82.7% of the gaps
# l = 20 covers 97.2% of the gaps
# l = 30 covers 99.4% of the gaps
l = 20
print(len(list(filter(lambda x: x <= l, all_gaps))) / float(len(all_gaps)))
|
en
| 0.898597
|
# name list obtained from: https://www.ssa.gov/oact/babynames/decades/century.html # accessed on Nov 6th, 2018 given a fairseq dictionary, export a list of word idxs that match a desired feature # print(marked_sentence) # print(gaps) # l = 32 covers 81.5% of the sentences # l = 64 covers 98.4% of the sentences # l = 10 covers 82.7% of the gaps # l = 20 covers 97.2% of the gaps # l = 30 covers 99.4% of the gaps
| 3.227297
| 3
|
bot/exts/evergreen/battleship.py
|
AbooMinister25/sir-lancebot
| 0
|
6628870
|
<gh_stars>0
import asyncio
import logging
import random
import re
import typing
from dataclasses import dataclass
from functools import partial
import discord
from discord.ext import commands
from bot.constants import Colours
log = logging.getLogger(__name__)
@dataclass
class Square:
    """Each square on the battleship grid - if they contain a boat and if they've been aimed at."""
    # Name of the ship occupying this square (a key of SHIPS), or None if empty.
    boat: typing.Optional[str]
    # True once a player has fired at this square.
    aimed: bool
# A 10x10 battleship board of Squares.
Grid = typing.List[typing.List[Square]]
# Maps (is_ship, is_aimed) -> the emoji used to render that square.
EmojiSet = typing.Dict[typing.Tuple[bool, bool], str]
@dataclass
class Player:
    """Each player in the game - their messages for the boards and their current grid."""
    # The Discord member playing this side.
    user: discord.Member
    # DM message showing this player's own board (edited in place each turn).
    board: discord.Message
    # DM message showing this player's view of the opponent's board.
    opponent_board: discord.Message
    # This player's own grid of Squares.
    grid: Grid
# The name of the ship and its size
SHIPS = {
    "Carrier": 5,
    "Battleship": 4,
    "Cruiser": 3,
    "Submarine": 3,
    "Destroyer": 2,
}
# For these two variables, the first boolean is whether the square is a ship (True) or not (False).
# The second boolean is whether the player has aimed for that square (True) or not (False)
# This is for the player's own board which shows the location of their own ships.
SHIP_EMOJIS = {
    (True, True): ":fire:",  # own ship, hit
    (True, False): ":ship:",  # own ship, untouched
    (False, True): ":anger:",  # water, opponent missed here
    (False, False): ":ocean:",  # water, untouched
}
# This is for the opposing player's board which only shows aimed locations.
HIDDEN_EMOJIS = {
    (True, True): ":red_circle:",  # aimed and hit
    (True, False): ":black_circle:",  # unaimed (ship not revealed)
    (False, True): ":white_circle:",  # aimed and missed
    (False, False): ":black_circle:",  # unaimed
}
# For the top row of the board
LETTERS = (
    ":stop_button::regional_indicator_a::regional_indicator_b::regional_indicator_c::regional_indicator_d:"
    ":regional_indicator_e::regional_indicator_f::regional_indicator_g::regional_indicator_h:"
    ":regional_indicator_i::regional_indicator_j:"
)
# For the first column of the board
NUMBERS = [
    ":one:",
    ":two:",
    ":three:",
    ":four:",
    ":five:",
    ":six:",
    ":seven:",
    ":eight:",
    ":nine:",
    ":keycap_ten:",
]
# U+274E NEGATIVE SQUARED CROSS MARK - reaction added to invalid aim messages.
CROSS_EMOJI = "\u274e"
# U+1F64B PERSON RAISING HAND - presumably the reaction for joining a game;
# its use is in the Battleship cog (not fully visible here) — confirm.
HAND_RAISED_EMOJI = "\U0001f64b"
class Game:
"""A Battleship Game."""
def __init__(
self,
bot: commands.Bot,
channel: discord.TextChannel,
player1: discord.Member,
player2: discord.Member
) -> None:
self.bot = bot
self.public_channel = channel
self.p1 = Player(player1, None, None, self.generate_grid())
self.p2 = Player(player2, None, None, self.generate_grid())
self.gameover: bool = False
self.turn: typing.Optional[discord.Member] = None
self.next: typing.Optional[discord.Member] = None
self.match: typing.Optional[typing.Match] = None
self.surrender: bool = False
self.setup_grids()
@staticmethod
def generate_grid() -> Grid:
"""Generates a grid by instantiating the Squares."""
return [[Square(None, False) for _ in range(10)] for _ in range(10)]
@staticmethod
def format_grid(player: Player, emojiset: EmojiSet) -> str:
"""
Gets and formats the grid as a list into a string to be output to the DM.
Also adds the Letter and Number indexes.
"""
grid = [
[emojiset[bool(square.boat), square.aimed] for square in row]
for row in player.grid
]
rows = ["".join([number] + row) for number, row in zip(NUMBERS, grid)]
return "\n".join([LETTERS] + rows)
@staticmethod
def get_square(grid: Grid, square: str) -> Square:
"""Grabs a square from a grid with an inputted key."""
index = ord(square[0].upper()) - ord("A")
number = int(square[1:])
return grid[number-1][index] # -1 since lists are indexed from 0
async def game_over(
self,
*,
winner: discord.Member,
loser: discord.Member
) -> None:
"""Removes games from list of current games and announces to public chat."""
await self.public_channel.send(f"Game Over! {winner.mention} won against {loser.mention}")
for player in (self.p1, self.p2):
grid = self.format_grid(player, SHIP_EMOJIS)
await self.public_channel.send(f"{player.user}'s Board:\n{grid}")
@staticmethod
def check_sink(grid: Grid, boat: str) -> bool:
"""Checks if all squares containing a given boat have sunk."""
return all(square.aimed for row in grid for square in row if square.boat == boat)
@staticmethod
def check_gameover(grid: Grid) -> bool:
"""Checks if all boats have been sunk."""
return all(square.aimed for row in grid for square in row if square.boat)
def setup_grids(self) -> None:
"""Places the boats on the grids to initialise the game."""
for player in (self.p1, self.p2):
for name, size in SHIPS.items():
while True: # Repeats if about to overwrite another boat
ship_collision = False
coords = []
coord1 = random.randint(0, 9)
coord2 = random.randint(0, 10 - size)
if random.choice((True, False)): # Vertical or Horizontal
x, y = coord1, coord2
xincr, yincr = 0, 1
else:
x, y = coord2, coord1
xincr, yincr = 1, 0
for i in range(size):
new_x = x + (xincr * i)
new_y = y + (yincr * i)
if player.grid[new_x][new_y].boat: # Check if there's already a boat
ship_collision = True
break
coords.append((new_x, new_y))
if not ship_collision: # If not overwriting any other boat spaces, break loop
break
for x, y in coords:
player.grid[x][y].boat = name
async def print_grids(self) -> None:
"""Prints grids to the DM channels."""
# Convert squares into Emoji
boards = [
self.format_grid(player, emojiset)
for emojiset in (HIDDEN_EMOJIS, SHIP_EMOJIS)
for player in (self.p1, self.p2)
]
locations = (
(self.p2, "opponent_board"), (self.p1, "opponent_board"),
(self.p1, "board"), (self.p2, "board")
)
for board, location in zip(boards, locations):
player, attr = location
if getattr(player, attr):
await getattr(player, attr).edit(content=board)
else:
setattr(player, attr, await player.user.send(board))
def predicate(self, message: discord.Message) -> bool:
"""Predicate checking the message typed for each turn."""
if message.author == self.turn.user and message.channel == self.turn.user.dm_channel:
if message.content.lower() == "surrender":
self.surrender = True
return True
self.match = re.fullmatch("([A-J]|[a-j]) ?((10)|[1-9])", message.content.strip())
if not self.match:
self.bot.loop.create_task(message.add_reaction(CROSS_EMOJI))
return bool(self.match)
async def take_turn(self) -> typing.Optional[Square]:
"""Lets the player who's turn it is choose a square."""
square = None
turn_message = await self.turn.user.send(
"It's your turn! Type the square you want to fire at. Format it like this: A1\n"
"Type `surrender` to give up"
)
await self.next.user.send("Their turn", delete_after=3.0)
while True:
try:
await self.bot.wait_for("message", check=self.predicate, timeout=60.0)
except asyncio.TimeoutError:
await self.turn.user.send("You took too long. Game over!")
await self.next.user.send(f"{self.turn.user} took too long. Game over!")
await self.public_channel.send(
f"Game over! {self.turn.user.mention} timed out so {self.next.user.mention} wins!"
)
self.gameover = True
break
else:
if self.surrender:
await self.next.user.send(f"{self.turn.user} surrendered. Game over!")
await self.public_channel.send(
f"Game over! {self.turn.user.mention} surrendered to {self.next.user.mention}!"
)
self.gameover = True
break
square = self.get_square(self.next.grid, self.match.string)
if square.aimed:
await self.turn.user.send("You've already aimed at this square!", delete_after=3.0)
else:
break
await turn_message.delete()
return square
async def hit(self, square: Square, alert_messages: typing.List[discord.Message]) -> None:
    """
    Handle a successful shot on ``square``.

    Announces the hit to both players, then a sink if the whole boat is
    gone, then ends the game if no boats remain. Messages sent to the
    defender are collected in ``alert_messages`` for later cleanup.
    """
    await self.turn.user.send("Hit!", delete_after=3.0)
    alert_messages.append(await self.next.user.send("Hit!"))
    if self.check_sink(self.next.grid, square.boat):
        await self.turn.user.send(f"You've sunk their {square.boat} ship!", delete_after=3.0)
        alert_messages.append(await self.next.user.send(f"Oh no! Your {square.boat} ship sunk!"))
        # The game can only end on a sink, so this check lives inside it.
        if self.check_gameover(self.next.grid):
            await self.turn.user.send("You win!")
            await self.next.user.send("You lose!")
            self.gameover = True
            await self.game_over(winner=self.turn.user, loser=self.next.user)
async def start_game(self) -> None:
    """Begin the game and run the turn loop until it ends."""
    await self.p1.user.send(f"You're playing battleship with {self.p2.user}.")
    await self.p2.user.send(f"You're playing battleship with {self.p1.user}.")
    alert_messages = []
    self.turn = self.p1
    self.next = self.p2
    while True:
        await self.print_grids()
        if self.gameover:
            return
        square = await self.take_turn()
        if not square:
            # take_turn returned None: timeout or surrender ended the game.
            return
        square.aimed = True
        # Clear the previous turn's notifications before posting new ones.
        for message in alert_messages:
            await message.delete()
        alert_messages = []
        alert_messages.append(await self.next.user.send(f"{self.turn.user} aimed at {self.match.string}!"))
        if square.boat:
            await self.hit(square, alert_messages)
            if self.gameover:
                return
        else:
            await self.turn.user.send("Miss!", delete_after=3.0)
            alert_messages.append(await self.next.user.send("Miss!"))
        # Swap roles for the next turn.
        self.turn, self.next = self.next, self.turn
class Battleship(commands.Cog):
    """Play the classic game Battleship!"""

    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
        # Games currently in progress.
        self.games: typing.List[Game] = []
        # Members who have announced a game and are waiting for an opponent.
        self.waiting: typing.List[discord.Member] = []

    def predicate(
        self,
        ctx: commands.Context,
        announcement: discord.Message,
        reaction: discord.Reaction,
        user: discord.Member
    ) -> bool:
        """
        Predicate checking the criteria for the announcement message.

        True when a valid opponent raised a hand, or the author cancelled,
        or the author has meanwhile joined another game.
        """
        if self.already_playing(ctx.author):  # If they've joined a game since requesting a player 2
            return True  # Is dealt with later on
        if (
            user.id not in (ctx.me.id, ctx.author.id)
            and str(reaction.emoji) == HAND_RAISED_EMOJI
            and reaction.message.id == announcement.id
        ):
            if self.already_playing(user):
                self.bot.loop.create_task(ctx.send(f"{user.mention} You're already playing a game!"))
                self.bot.loop.create_task(announcement.remove_reaction(reaction, user))
                return False
            if user in self.waiting:
                self.bot.loop.create_task(ctx.send(
                    f"{user.mention} Please cancel your game first before joining another one."
                ))
                self.bot.loop.create_task(announcement.remove_reaction(reaction, user))
                return False
            return True
        # The author reacting with the cross cancels their own announcement.
        if (
            user.id == ctx.author.id
            and str(reaction.emoji) == CROSS_EMOJI
            and reaction.message.id == announcement.id
        ):
            return True
        return False

    def already_playing(self, player: discord.Member) -> bool:
        """Check if someone is already in a game."""
        return any(player in (game.p1.user, game.p2.user) for game in self.games)

    @commands.group(invoke_without_command=True)
    @commands.guild_only()
    async def battleship(self, ctx: commands.Context) -> None:
        """
        Play a game of Battleship with someone else!

        This will set up a message waiting for someone else to react and play along.
        The game takes place entirely in DMs.
        Make sure you have your DMs open so that the bot can message you.
        """
        if self.already_playing(ctx.author):
            return await ctx.send("You're already playing a game!")
        if ctx.author in self.waiting:
            return await ctx.send("You've already sent out a request for a player 2")
        announcement = await ctx.send(
            "**Battleship**: A new game is about to start!\n"
            f"Press {HAND_RAISED_EMOJI} to play against {ctx.author.mention}!\n"
            f"(Cancel the game with {CROSS_EMOJI}.)"
        )
        self.waiting.append(ctx.author)
        await announcement.add_reaction(HAND_RAISED_EMOJI)
        await announcement.add_reaction(CROSS_EMOJI)
        try:
            reaction, user = await self.bot.wait_for(
                "reaction_add",
                check=partial(self.predicate, ctx, announcement),
                timeout=60.0
            )
        except asyncio.TimeoutError:
            self.waiting.remove(ctx.author)
            await announcement.delete()
            return await ctx.send(f"{ctx.author.mention} Seems like there's no one here to play...")
        if str(reaction.emoji) == CROSS_EMOJI:
            self.waiting.remove(ctx.author)
            await announcement.delete()
            return await ctx.send(f"{ctx.author.mention} Game cancelled.")
        await announcement.delete()
        self.waiting.remove(ctx.author)
        # The predicate may have returned True because the author joined
        # another game in the meantime; bail out in that case.
        if self.already_playing(ctx.author):
            return
        try:
            game = Game(self.bot, ctx.channel, ctx.author, user)
            self.games.append(game)
            await game.start_game()
            self.games.remove(game)
        except discord.Forbidden:
            # DMs closed: discord raises Forbidden on user.send().
            await ctx.send(
                f"{ctx.author.mention} {user.mention} "
                "Game failed. This is likely due to you not having your DMs open. Check and try again."
            )
            self.games.remove(game)
        except Exception:
            # End the game in the event of an unforeseen error so the players aren't stuck in a game
            await ctx.send(f"{ctx.author.mention} {user.mention} An error occurred. Game failed")
            self.games.remove(game)
            raise

    @battleship.command(name="ships", aliases=["boats"])
    async def battleship_ships(self, ctx: commands.Context) -> None:
        """Lists the ships that are found on the battleship grid."""
        embed = discord.Embed(colour=Colours.blue)
        embed.add_field(name="Name", value="\n".join(SHIPS))
        embed.add_field(name="Size", value="\n".join(str(size) for size in SHIPS.values()))
        await ctx.send(embed=embed)
def setup(bot: commands.Bot) -> None:
    """Extension entry point: register the Battleship cog on the bot."""
    cog = Battleship(bot)
    bot.add_cog(cog)
|
import asyncio
import logging
import random
import re
import typing
from dataclasses import dataclass
from functools import partial
import discord
from discord.ext import commands
from bot.constants import Colours
log = logging.getLogger(__name__)
@dataclass
class Square:
    """Each square on the battleship grid - if they contain a boat and if they've been aimed at."""

    # Name of the ship occupying this square, or None for open water.
    boat: typing.Optional[str]
    # Whether a shot has been fired at this square yet.
    aimed: bool
# A 10x10 battleship board as rows of Squares.
Grid = typing.List[typing.List[Square]]
# Maps (is_ship, is_aimed) -> the emoji used to render that square.
EmojiSet = typing.Dict[typing.Tuple[bool, bool], str]
@dataclass
class Player:
    """Each player in the game - their messages for the boards and their current grid."""

    # The Discord member playing this side.
    user: discord.Member
    # DM message showing the player's own board (None until first sent).
    board: discord.Message
    # DM message showing the opponent's (hidden) board (None until first sent).
    opponent_board: discord.Message
    # The player's own 10x10 grid of Squares.
    grid: Grid
# The name of the ship and its size (number of squares it occupies).
SHIPS = {
    "Carrier": 5,
    "Battleship": 4,
    "Cruiser": 3,
    "Submarine": 3,
    "Destroyer": 2,
}

# For these two variables, the first boolean is whether the square is a ship (True) or not (False).
# The second boolean is whether the player has aimed for that square (True) or not (False)

# This is for the player's own board which shows the location of their own ships.
SHIP_EMOJIS = {
    (True, True): ":fire:",
    (True, False): ":ship:",
    (False, True): ":anger:",
    (False, False): ":ocean:",
}

# This is for the opposing player's board which only shows aimed locations.
# Unaimed squares render identically (black circle) so ships stay hidden.
HIDDEN_EMOJIS = {
    (True, True): ":red_circle:",
    (True, False): ":black_circle:",
    (False, True): ":white_circle:",
    (False, False): ":black_circle:",
}

# For the top row of the board (column headers A-J).
LETTERS = (
    ":stop_button::regional_indicator_a::regional_indicator_b::regional_indicator_c::regional_indicator_d:"
    ":regional_indicator_e::regional_indicator_f::regional_indicator_g::regional_indicator_h:"
    ":regional_indicator_i::regional_indicator_j:"
)

# For the first column of the board (row headers 1-10).
NUMBERS = [
    ":one:",
    ":two:",
    ":three:",
    ":four:",
    ":five:",
    ":six:",
    ":seven:",
    ":eight:",
    ":nine:",
    ":keycap_ten:",
]

# Reaction used to reject bad input / cancel an announced game.
CROSS_EMOJI = "\u274e"
# Reaction an opponent uses to accept a game.
HAND_RAISED_EMOJI = "\U0001f64b"
class Game:
    """A Battleship Game between two members, played entirely over DMs."""

    def __init__(
        self,
        bot: commands.Bot,
        channel: discord.TextChannel,
        player1: discord.Member,
        player2: discord.Member
    ) -> None:
        self.bot = bot
        # Guild channel where game-over announcements are posted.
        self.public_channel = channel

        self.p1 = Player(player1, None, None, self.generate_grid())
        self.p2 = Player(player2, None, None, self.generate_grid())

        self.gameover: bool = False

        # NOTE: these hold Player wrappers (assigned in start_game), not
        # bare discord.Member objects — annotation corrected accordingly.
        self.turn: typing.Optional[Player] = None
        self.next: typing.Optional[Player] = None

        # Regex match of the current player's latest valid move.
        self.match: typing.Optional[typing.Match] = None
        self.surrender: bool = False

        self.setup_grids()

    @staticmethod
    def generate_grid() -> Grid:
        """Generates a 10x10 grid by instantiating empty, unaimed Squares."""
        return [[Square(None, False) for _ in range(10)] for _ in range(10)]

    @staticmethod
    def format_grid(player: Player, emojiset: EmojiSet) -> str:
        """
        Gets and formats the grid as a list into a string to be output to the DM.

        Also adds the Letter and Number indexes.
        """
        grid = [
            [emojiset[bool(square.boat), square.aimed] for square in row]
            for row in player.grid
        ]

        rows = ["".join([number] + row) for number, row in zip(NUMBERS, grid)]
        return "\n".join([LETTERS] + rows)

    @staticmethod
    def get_square(grid: Grid, square: str) -> Square:
        """Grabs a square from a grid with an inputted key like "A1" or "b 10"."""
        index = ord(square[0].upper()) - ord("A")
        number = int(square[1:])  # int() tolerates the optional leading space

        return grid[number-1][index]  # -1 since lists are indexed from 0

    async def game_over(
        self,
        *,
        winner: discord.Member,
        loser: discord.Member
    ) -> None:
        """Announces the result and reveals both final boards in public chat."""
        await self.public_channel.send(f"Game Over! {winner.mention} won against {loser.mention}")

        for player in (self.p1, self.p2):
            grid = self.format_grid(player, SHIP_EMOJIS)
            await self.public_channel.send(f"{player.user}'s Board:\n{grid}")

    @staticmethod
    def check_sink(grid: Grid, boat: str) -> bool:
        """Checks if all squares containing a given boat have sunk."""
        return all(square.aimed for row in grid for square in row if square.boat == boat)

    @staticmethod
    def check_gameover(grid: Grid) -> bool:
        """Checks if all boats have been sunk."""
        return all(square.aimed for row in grid for square in row if square.boat)

    def setup_grids(self) -> None:
        """Places the boats on the grids to initialise the game."""
        for player in (self.p1, self.p2):
            for name, size in SHIPS.items():
                while True:  # Repeats if about to overwrite another boat
                    ship_collision = False
                    coords = []

                    # coord2 is bounded so the whole ship fits on the board.
                    coord1 = random.randint(0, 9)
                    coord2 = random.randint(0, 10 - size)

                    if random.choice((True, False)):  # Vertical or Horizontal
                        x, y = coord1, coord2
                        xincr, yincr = 0, 1
                    else:
                        x, y = coord2, coord1
                        xincr, yincr = 1, 0

                    for i in range(size):
                        new_x = x + (xincr * i)
                        new_y = y + (yincr * i)
                        if player.grid[new_x][new_y].boat:  # Check if there's already a boat
                            ship_collision = True
                            break
                        coords.append((new_x, new_y))
                    if not ship_collision:  # If not overwriting any other boat spaces, break loop
                        break

                for x, y in coords:
                    player.grid[x][y].boat = name

    async def print_grids(self) -> None:
        """Prints grids to the DM channels, editing the previous messages in place."""
        # Convert squares into Emoji

        boards = [
            self.format_grid(player, emojiset)
            for emojiset in (HIDDEN_EMOJIS, SHIP_EMOJIS)
            for player in (self.p1, self.p2)
        ]

        # Each player sees the opponent's board hidden and their own revealed.
        locations = (
            (self.p2, "opponent_board"), (self.p1, "opponent_board"),
            (self.p1, "board"), (self.p2, "board")
        )

        for board, location in zip(boards, locations):
            player, attr = location
            if getattr(player, attr):
                await getattr(player, attr).edit(content=board)
            else:
                # First render: send the message and remember it for edits.
                setattr(player, attr, await player.user.send(board))

    def predicate(self, message: discord.Message) -> bool:
        """
        Predicate checking the message typed for each turn.

        True for a surrender or a valid square; invalid input gets a
        cross reaction and False.
        """
        if message.author == self.turn.user and message.channel == self.turn.user.dm_channel:
            if message.content.lower() == "surrender":
                self.surrender = True
                return True
            # Letter A-J (either case), optional space, number 1-10.
            self.match = re.fullmatch("([A-J]|[a-j]) ?((10)|[1-9])", message.content.strip())
            if not self.match:
                self.bot.loop.create_task(message.add_reaction(CROSS_EMOJI))
            return bool(self.match)

    async def take_turn(self) -> typing.Optional[Square]:
        """
        Lets the player whose turn it is choose a square.

        Returns None when the game ended during the turn (timeout or
        surrender); ``self.gameover`` is set in that case.
        """
        square = None
        turn_message = await self.turn.user.send(
            "It's your turn! Type the square you want to fire at. Format it like this: A1\n"
            "Type `surrender` to give up"
        )
        await self.next.user.send("Their turn", delete_after=3.0)
        while True:
            try:
                await self.bot.wait_for("message", check=self.predicate, timeout=60.0)
            except asyncio.TimeoutError:
                # Current player idled out: the opponent wins.
                await self.turn.user.send("You took too long. Game over!")
                await self.next.user.send(f"{self.turn.user} took too long. Game over!")
                await self.public_channel.send(
                    f"Game over! {self.turn.user.mention} timed out so {self.next.user.mention} wins!"
                )
                self.gameover = True
                break
            else:
                if self.surrender:
                    await self.next.user.send(f"{self.turn.user} surrendered. Game over!")
                    await self.public_channel.send(
                        f"Game over! {self.turn.user.mention} surrendered to {self.next.user.mention}!"
                    )
                    self.gameover = True
                    break
                square = self.get_square(self.next.grid, self.match.string)
                if square.aimed:
                    # Re-prompt instead of wasting the shot.
                    await self.turn.user.send("You've already aimed at this square!", delete_after=3.0)
                else:
                    break
        await turn_message.delete()
        return square

    async def hit(self, square: Square, alert_messages: typing.List[discord.Message]) -> None:
        """Occurs when a player successfully aims for a ship: announces hit/sink/win."""
        await self.turn.user.send("Hit!", delete_after=3.0)
        alert_messages.append(await self.next.user.send("Hit!"))
        if self.check_sink(self.next.grid, square.boat):
            await self.turn.user.send(f"You've sunk their {square.boat} ship!", delete_after=3.0)
            alert_messages.append(await self.next.user.send(f"Oh no! Your {square.boat} ship sunk!"))
            # A game can only end on a sink.
            if self.check_gameover(self.next.grid):
                await self.turn.user.send("You win!")
                await self.next.user.send("You lose!")
                self.gameover = True
                await self.game_over(winner=self.turn.user, loser=self.next.user)

    async def start_game(self) -> None:
        """Begins the game and runs the turn loop until it ends."""
        await self.p1.user.send(f"You're playing battleship with {self.p2.user}.")
        await self.p2.user.send(f"You're playing battleship with {self.p1.user}.")

        alert_messages = []

        self.turn = self.p1
        self.next = self.p2

        while True:
            await self.print_grids()

            if self.gameover:
                return

            square = await self.take_turn()
            if not square:
                return
            square.aimed = True

            # Clear the previous turn's notifications before posting new ones.
            for message in alert_messages:
                await message.delete()

            alert_messages = []
            alert_messages.append(await self.next.user.send(f"{self.turn.user} aimed at {self.match.string}!"))

            if square.boat:
                await self.hit(square, alert_messages)
                if self.gameover:
                    return
            else:
                await self.turn.user.send("Miss!", delete_after=3.0)
                alert_messages.append(await self.next.user.send("Miss!"))

            # Swap roles for the next turn.
            self.turn, self.next = self.next, self.turn
class Battleship(commands.Cog):
    """Play the classic game Battleship!"""

    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
        # Games currently in progress.
        self.games: typing.List[Game] = []
        # Members who have announced a game and await an opponent.
        self.waiting: typing.List[discord.Member] = []

    def predicate(
        self,
        ctx: commands.Context,
        announcement: discord.Message,
        reaction: discord.Reaction,
        user: discord.Member
    ) -> bool:
        """
        Predicate checking the criteria for the announcement message.

        True when a valid opponent raised a hand, the author cancelled,
        or the author has meanwhile joined another game.
        """
        if self.already_playing(ctx.author):  # If they've joined a game since requesting a player 2
            return True  # Is dealt with later on
        if (
            user.id not in (ctx.me.id, ctx.author.id)
            and str(reaction.emoji) == HAND_RAISED_EMOJI
            and reaction.message.id == announcement.id
        ):
            if self.already_playing(user):
                self.bot.loop.create_task(ctx.send(f"{user.mention} You're already playing a game!"))
                self.bot.loop.create_task(announcement.remove_reaction(reaction, user))
                return False

            if user in self.waiting:
                self.bot.loop.create_task(ctx.send(
                    f"{user.mention} Please cancel your game first before joining another one."
                ))
                self.bot.loop.create_task(announcement.remove_reaction(reaction, user))
                return False

            return True

        # The author reacting with the cross cancels their own announcement.
        if (
            user.id == ctx.author.id
            and str(reaction.emoji) == CROSS_EMOJI
            and reaction.message.id == announcement.id
        ):
            return True
        return False

    def already_playing(self, player: discord.Member) -> bool:
        """Check if someone is already in a game."""
        return any(player in (game.p1.user, game.p2.user) for game in self.games)

    @commands.group(invoke_without_command=True)
    @commands.guild_only()
    async def battleship(self, ctx: commands.Context) -> None:
        """
        Play a game of Battleship with someone else!

        This will set up a message waiting for someone else to react and play along.
        The game takes place entirely in DMs.
        Make sure you have your DMs open so that the bot can message you.
        """
        if self.already_playing(ctx.author):
            return await ctx.send("You're already playing a game!")

        if ctx.author in self.waiting:
            return await ctx.send("You've already sent out a request for a player 2")

        announcement = await ctx.send(
            "**Battleship**: A new game is about to start!\n"
            f"Press {HAND_RAISED_EMOJI} to play against {ctx.author.mention}!\n"
            f"(Cancel the game with {CROSS_EMOJI}.)"
        )
        self.waiting.append(ctx.author)
        await announcement.add_reaction(HAND_RAISED_EMOJI)
        await announcement.add_reaction(CROSS_EMOJI)

        try:
            reaction, user = await self.bot.wait_for(
                "reaction_add",
                check=partial(self.predicate, ctx, announcement),
                timeout=60.0
            )
        except asyncio.TimeoutError:
            self.waiting.remove(ctx.author)
            await announcement.delete()
            return await ctx.send(f"{ctx.author.mention} Seems like there's no one here to play...")

        if str(reaction.emoji) == CROSS_EMOJI:
            self.waiting.remove(ctx.author)
            await announcement.delete()
            return await ctx.send(f"{ctx.author.mention} Game cancelled.")

        await announcement.delete()
        self.waiting.remove(ctx.author)
        # The predicate may have returned True because the author joined
        # another game in the meantime; bail out in that case.
        if self.already_playing(ctx.author):
            return

        # Bug fix: `game` used to be created inside the try block, so a failure
        # in the constructor left `game` unbound and the except handlers raised
        # NameError, masking the real error. Register it first and remove it in
        # `finally` so cleanup runs exactly once on every path.
        game = Game(self.bot, ctx.channel, ctx.author, user)
        self.games.append(game)
        try:
            await game.start_game()
        except discord.Forbidden:
            # discord raises Forbidden on user.send() when DMs are closed.
            await ctx.send(
                f"{ctx.author.mention} {user.mention} "
                "Game failed. This is likely due to you not having your DMs open. Check and try again."
            )
        except Exception:
            # End the game in the event of an unforeseen error so the players aren't stuck in a game
            await ctx.send(f"{ctx.author.mention} {user.mention} An error occurred. Game failed")
            raise
        finally:
            self.games.remove(game)

    @battleship.command(name="ships", aliases=["boats"])
    async def battleship_ships(self, ctx: commands.Context) -> None:
        """Lists the ships that are found on the battleship grid."""
        embed = discord.Embed(colour=Colours.blue)
        embed.add_field(name="Name", value="\n".join(SHIPS))
        embed.add_field(name="Size", value="\n".join(str(size) for size in SHIPS.values()))
        await ctx.send(embed=embed)
def setup(bot: commands.Bot) -> None:
    """Cog load: hook the Battleship cog into the given bot instance."""
    battleship_cog = Battleship(bot)
    bot.add_cog(battleship_cog)
|
en
| 0.93882
|
Each square on the battleship grid - if they contain a boat and if they've been aimed at. Each player in the game - their messages for the boards and their current grid. # The name of the ship and its size # For these two variables, the first boolean is whether the square is a ship (True) or not (False). # The second boolean is whether the player has aimed for that square (True) or not (False) # This is for the player's own board which shows the location of their own ships. # This is for the opposing player's board which only shows aimed locations. # For the top row of the board # For the first column of the board A Battleship Game. Generates a grid by instantiating the Squares. Gets and formats the grid as a list into a string to be output to the DM. Also adds the Letter and Number indexes. Grabs a square from a grid with an inputted key. # -1 since lists are indexed from 0 Removes games from list of current games and announces to public chat. Checks if all squares containing a given boat have sunk. Checks if all boats have been sunk. Places the boats on the grids to initialise the game. # Repeats if about to overwrite another boat # Vertical or Horizontal # Check if there's already a boat # If not overwriting any other boat spaces, break loop Prints grids to the DM channels. # Convert squares into Emoji Predicate checking the message typed for each turn. Lets the player who's turn it is choose a square. Occurs when a player successfully aims for a ship. Begins the game. Play the classic game Battleship! Predicate checking the criteria for the announcement message. # If they've joined a game since requesting a player 2 # Is dealt with later on Check if someone is already in a game. Play a game of Battleship with someone else! This will set up a message waiting for someone else to react and play along. The game takes place entirely in DMs. Make sure you have your DMs open so that the bot can message you. 
# End the game in the event of an unforeseen error so the players aren't stuck in a game Lists the ships that are found on the battleship grid. Cog load.
| 3.239107
| 3
|
tests/datasets/svmrank/test_mslr10k.py
|
rjagerman/pytorchltr
| 37
|
6628871
|
<filename>tests/datasets/svmrank/test_mslr10k.py
import os
import pytest
from pytorchltr.datasets.svmrank.mslr10k import MSLR10K
from tests.datasets.svmrank.test_svmrank import mock_svmrank_dataset
pkg = "pytorchltr.datasets.svmrank.mslr10k"
def test_wrong_split_raises_error():
    """An unknown split name must raise ValueError."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        with pytest.raises(ValueError):
            MSLR10K(tmpdir, split="nonexisting")
def test_wrong_fold_raises_error():
    """A fold outside the dataset's valid range must raise ValueError."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        with pytest.raises(ValueError):
            MSLR10K(tmpdir, split="train", fold=99)
def test_call_validate_download():
    """The constructor must validate/download expected files exactly once."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="train")
        # Bug fix: `mock_vali.called_once()` merely created a child mock and
        # never asserted anything; use the real Mock assertion method.
        mock_vali.assert_called_once()
        args, kwargs = mock_vali.call_args
        assert kwargs["location"] == tmpdir
        assert kwargs["validate_checksums"]
        assert isinstance(kwargs["expected_files"], list)
def test_call_super_train():
    """The train split must load Fold1/train.txt, normalized, queries kept."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="train", fold=1)
        # Bug fix: `called_once()` is a no-op attribute access on a Mock;
        # `assert_called_once()` actually verifies the call count.
        mock_super.assert_called_once()
        args, kwargs = mock_super.call_args
        assert kwargs["file"] == os.path.join(tmpdir, "Fold1", "train.txt")
        assert kwargs["normalize"]
        assert not kwargs["filter_queries"]
def test_call_super_vali():
    """The vali split must load Fold2/vali.txt, normalized, queries filtered."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="vali", fold=2)
        # Bug fix: `called_once()` is a no-op attribute access on a Mock;
        # `assert_called_once()` actually verifies the call count.
        mock_super.assert_called_once()
        args, kwargs = mock_super.call_args
        assert kwargs["file"] == os.path.join(tmpdir, "Fold2", "vali.txt")
        assert kwargs["normalize"]
        assert kwargs["filter_queries"]
def test_call_super_test():
    """The test split must load Fold5/test.txt, normalized, queries filtered."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="test", fold=5)
        # Bug fix: `called_once()` is a no-op attribute access on a Mock;
        # `assert_called_once()` actually verifies the call count.
        mock_super.assert_called_once()
        args, kwargs = mock_super.call_args
        assert kwargs["file"] == os.path.join(tmpdir, "Fold5", "test.txt")
        assert kwargs["normalize"]
        assert kwargs["filter_queries"]
|
<filename>tests/datasets/svmrank/test_mslr10k.py
import os
import pytest
from pytorchltr.datasets.svmrank.mslr10k import MSLR10K
from tests.datasets.svmrank.test_svmrank import mock_svmrank_dataset
pkg = "pytorchltr.datasets.svmrank.mslr10k"
def test_wrong_split_raises_error():
    """An unknown split name must raise ValueError."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        with pytest.raises(ValueError):
            MSLR10K(tmpdir, split="nonexisting")
def test_wrong_fold_raises_error():
    """A fold outside the dataset's valid range must raise ValueError."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        with pytest.raises(ValueError):
            MSLR10K(tmpdir, split="train", fold=99)
def test_call_validate_download():
    """The constructor must validate/download expected files exactly once."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="train")
        # Bug fix: `mock_vali.called_once()` merely created a child mock and
        # never asserted anything; use the real Mock assertion method.
        mock_vali.assert_called_once()
        args, kwargs = mock_vali.call_args
        assert kwargs["location"] == tmpdir
        assert kwargs["validate_checksums"]
        assert isinstance(kwargs["expected_files"], list)
def test_call_super_train():
    """The train split must load Fold1/train.txt, normalized, queries kept."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="train", fold=1)
        # Bug fix: `called_once()` is a no-op attribute access on a Mock;
        # `assert_called_once()` actually verifies the call count.
        mock_super.assert_called_once()
        args, kwargs = mock_super.call_args
        assert kwargs["file"] == os.path.join(tmpdir, "Fold1", "train.txt")
        assert kwargs["normalize"]
        assert not kwargs["filter_queries"]
def test_call_super_vali():
    """The vali split must load Fold2/vali.txt, normalized, queries filtered."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="vali", fold=2)
        # Bug fix: `called_once()` is a no-op attribute access on a Mock;
        # `assert_called_once()` actually verifies the call count.
        mock_super.assert_called_once()
        args, kwargs = mock_super.call_args
        assert kwargs["file"] == os.path.join(tmpdir, "Fold2", "vali.txt")
        assert kwargs["normalize"]
        assert kwargs["filter_queries"]
def test_call_super_test():
    """The test split must load Fold5/test.txt, normalized, queries filtered."""
    with mock_svmrank_dataset(pkg) as (tmpdir, mock_super, mock_vali):
        MSLR10K(tmpdir, split="test", fold=5)
        # Bug fix: `called_once()` is a no-op attribute access on a Mock;
        # `assert_called_once()` actually verifies the call count.
        mock_super.assert_called_once()
        args, kwargs = mock_super.call_args
        assert kwargs["file"] == os.path.join(tmpdir, "Fold5", "test.txt")
        assert kwargs["normalize"]
        assert kwargs["filter_queries"]
|
none
| 1
| 2.318128
| 2
|
|
run.py
|
Edudeiko/dj_helper_search_api
| 1
|
6628872
|
<gh_stars>1-10
from support import *
from dotenv import load_dotenv
import os
import requests
from flask import Flask, request, jsonify, Response
import pandas as pd
import json
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
load_dotenv() # load environment variables
app = Flask(__name__)
@app.route('/')
def hello_world():
    '''Health-check endpoint confirming the app is running.'''
    status = "it's live!"
    return status
@app.route('/prepare_search_track/<name>', methods=['GET', 'POST'])
def search_by_name(name):
    '''search for songs: query the Spotify search API for up to 10 tracks matching `name`'''
    myparams = {
        'type': 'track',
        'limit': 10}
    myparams['q'] = name
    # SEARCH_ENDPOINT and headers come from support.py (auth token included).
    resp = requests.get(SEARCH_ENDPOINT, headers=headers, params=myparams)
    return resp.json()
@app.route('/track_search_ready/<name>', methods=['GET', 'POST'])
def search(name):
    '''search for songs in desired format: flatten Spotify search hits into a JSON index'''
    data = search_by_name(name)
    users_response = []
    for i, track in enumerate(data['tracks']['items']):
        # Keep only the fields the client needs from each hit.
        user_dict = (i, track['artists'][0]['name'], track['name'], track['id'], track['external_urls']['spotify'], track['explicit'],
                     track['preview_url'], track['album']['images'][1]['url'])
        users_response.append(user_dict)
    _track_df = pd.DataFrame(users_response, columns=['ind', 'artist_name', 'song_name',
                                                      'id', 'external_urls', 'explicit', 'preview', 'image'])
    # 'ind' only kept the tuple positions aligned; drop it from the output.
    _track_df = _track_df.drop(['ind'], axis=1)
    # get_rid_of_nulls (support.py) presumably replaces null preview URLs — verify there.
    _track_df['preview'] = _track_df['preview'].apply(get_rid_of_nulls)
    # 1-based numbering in the response.
    _track_df.index += 1
    return (json.dumps(json.loads(_track_df.to_json(orient='index')), indent=2))  # orient='values', 'records', 'index', 'columns'
@app.route('/audio_features/<name>', methods=['GET', 'POST'])
def audio_feat(name):
    '''get songs with audio features: search results merged with Spotify audio features'''
    data = search_by_name(name)
    users_response = []
    for i, track in enumerate(data['tracks']['items']):
        # Keep only the fields the client needs from each hit.
        user_dict = (i, track['artists'][0]['name'], track['name'], track['id'], track['external_urls']['spotify'], track['explicit'],
                     track['preview_url'], track['album']['images'][1]['url'])
        users_response.append(user_dict)
    _track_df = pd.DataFrame(users_response, columns=['ind', 'artist_name', 'song_name',
                                                      'id', 'external_urls', 'explicit', 'preview', 'image'])
    _track_df = _track_df.drop(['ind'], axis=1)
    _track_df['preview'] = _track_df['preview'].apply(get_rid_of_nulls)
    '''start index count from 1 instead of 0'''
    _track_df.index += 1
    '''apply the get_audio_features helper (support.py) to every track id'''
    _audiofeat = get_audio_features(_track_df['id'])
    '''create column names for the dataframe'''
    _audiofeat = pd.DataFrame(_audiofeat, columns=['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness',
                                                   'instrumentalness', 'liveness', 'valence', 'tempo', 'type', 'id', 'uri', 'track_href', 'analysis_url', 'duration_ms', 'time_signature'])
    # Drop Spotify bookkeeping columns the client does not need.
    _audiofeat_df = _audiofeat.drop(['analysis_url', 'track_href', 'type', 'uri'], axis=1)
    tracks_plus_df = _track_df.merge(_audiofeat_df, how='left', left_on='id', right_on='id')
    tracks_plus_df.index += 1
    return (json.dumps(json.loads(tracks_plus_df.to_json(orient='index')), indent=2))
@app.route('/predict/<track_id>', methods=['GET', 'POST'])
def dj_rec(track_id):
    '''Recommend similar songs to `track_id` using KNN over weighted audio features.'''
    neighbors = 4
    max_distance = 5.0
    '''
    [:-10] will return only 10 closest songs to the original track_id
    by removing [:-10], code will return 20 songs.
    It will take double of time to make a prediction though'''
    rel_artists = sp.artist_related_artists(sp.track(track_id=track_id)['artists'][0]['id'])['artists'][:-10]
    artist_log = []
    for a in rel_artists:
        artist_log.append(a['id'])
    # Gather audio features for every related artist's top tracks.
    feat_log = []
    for artist in artist_log:
        for track in sp.artist_top_tracks(artist)['tracks']:
            feat_log.append(sp.audio_features(track['id'])[0])
    catalog = pd.DataFrame.from_dict(feat_log)
    # Row 0 is the seed track; the rest is the candidate catalog.
    root = pd.DataFrame.from_dict(sp.audio_features(tracks=[track_id]))
    merged_df = root.append(catalog, ignore_index=True)
    dropped_df = merged_df.drop(columns=['uri', 'track_href', 'id', 'duration_ms', 'time_signature', 'mode', 'loudness', 'type', 'analysis_url'])
    scaled_df = StandardScaler().fit_transform(dropped_df)
    trans_array = scaled_df.copy()
    # Hand-tuned feature weights applied after standardisation.
    trans_array[:, 0] = [u*2.4 for u in trans_array[:, 0]]  # acousticness
    trans_array[:, 1] = [((u*u)**0.5)*u for u in trans_array[:, 1]]  # danceability
    trans_array[:, 2] = [u*1.7 for u in trans_array[:, 2]]  # energy
    trans_array[:, 3] = [u*1.4 for u in trans_array[:, 3]]  # instrumentalness
    trans_array[:, 4] = [u*0.9 for u in trans_array[:, 4]]  # key
    trans_array[:, 5] = [u*1.0 for u in trans_array[:, 5]]  # liveness
    trans_array[:, 6] = [u*1.0 for u in trans_array[:, 6]]  # speechiness
    trans_array[:, 7] = [u*1.1 for u in trans_array[:, 7]]  # tempo
    trans_array[:, 8] = [u*2.5 for u in trans_array[:, 8]]  # valence
    knn = NearestNeighbors()
    knn.fit(trans_array)
    # +1 because the nearest neighbour of the seed is the seed itself (skipped below).
    rec = knn.kneighbors(trans_array[[0]], n_neighbors=neighbors+1)
    predict_response = []
    for n in range(1, neighbors+1):
        if rec[0][0][n] <= max_distance:
            pred_dict = (merged_df.loc[rec[1][0][n], 'id'], rec[0][0][n])
            predict_response.append(pred_dict)
    pred = pd.DataFrame(predict_response, columns=['recommendation', 'distance'])
    # Perf fix: the original called sp.track() SEVEN times per recommendation
    # (one network round-trip per output field). Fetch each track exactly once
    # and derive all fields from the cached response.
    tracks = [sp.track(ii) for ii in pred['recommendation']]
    df_predict_tracks = pd.DataFrame({
        'artist_name': [t['artists'][0]['name'] for t in tracks],
        'song_name': [t['name'] for t in tracks],
        'id': [t['id'] for t in tracks],
        'url': [t['external_urls']['spotify'] for t in tracks],
        'explicit': [t['explicit'] for t in tracks],
        'preview': [t['preview_url'] for t in tracks],
        'image': [t['album']['images'][1]['url'] for t in tracks],
    })
    df_predict_tracks['preview'] = df_predict_tracks['preview'].apply(get_rid_of_nulls)
    df_predict_tracks.index += 1
    return json.dumps(json.loads(df_predict_tracks.to_json(orient='index')), indent=2)
if __name__ == "__main__":
    # Run the Flask development server (debug mode: auto-reload + tracebacks).
    app.run(debug=True)
|
from support import *
from dotenv import load_dotenv
import os
import requests
from flask import Flask, request, jsonify, Response
import pandas as pd
import json
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
load_dotenv() # load environment variables
app = Flask(__name__)
@app.route('/')
def hello_world():
    '''check if app is live (health-check endpoint)'''
    return "it's live!"
@app.route('/prepare_search_track/<name>', methods=['GET', 'POST'])
def search_by_name(name):
    '''search for songs: query the Spotify search API for up to 10 tracks matching `name`'''
    myparams = {
        'type': 'track',
        'limit': 10}
    myparams['q'] = name
    # SEARCH_ENDPOINT and headers come from support.py (auth token included).
    resp = requests.get(SEARCH_ENDPOINT, headers=headers, params=myparams)
    return resp.json()
@app.route('/track_search_ready/<name>', methods=['GET', 'POST'])
def search(name):
    '''search for songs in desired format: flatten Spotify search hits into a JSON index'''
    data = search_by_name(name)
    users_response = []
    for i, track in enumerate(data['tracks']['items']):
        # Keep only the fields the client needs from each hit.
        user_dict = (i, track['artists'][0]['name'], track['name'], track['id'], track['external_urls']['spotify'], track['explicit'],
                     track['preview_url'], track['album']['images'][1]['url'])
        users_response.append(user_dict)
    _track_df = pd.DataFrame(users_response, columns=['ind', 'artist_name', 'song_name',
                                                      'id', 'external_urls', 'explicit', 'preview', 'image'])
    # 'ind' only kept the tuple positions aligned; drop it from the output.
    _track_df = _track_df.drop(['ind'], axis=1)
    # get_rid_of_nulls (support.py) presumably replaces null preview URLs — verify there.
    _track_df['preview'] = _track_df['preview'].apply(get_rid_of_nulls)
    # 1-based numbering in the response.
    _track_df.index += 1
    return (json.dumps(json.loads(_track_df.to_json(orient='index')), indent=2))  # orient='values', 'records', 'index', 'columns'
@app.route('/audio_features/<name>', methods=['GET', 'POST'])
def audio_feat(name):
    '''get songs with audio features: search results merged with Spotify audio features'''
    data = search_by_name(name)
    users_response = []
    for i, track in enumerate(data['tracks']['items']):
        # Keep only the fields the client needs from each hit.
        user_dict = (i, track['artists'][0]['name'], track['name'], track['id'], track['external_urls']['spotify'], track['explicit'],
                     track['preview_url'], track['album']['images'][1]['url'])
        users_response.append(user_dict)
    _track_df = pd.DataFrame(users_response, columns=['ind', 'artist_name', 'song_name',
                                                      'id', 'external_urls', 'explicit', 'preview', 'image'])
    _track_df = _track_df.drop(['ind'], axis=1)
    _track_df['preview'] = _track_df['preview'].apply(get_rid_of_nulls)
    '''start index count from 1 instead of 0'''
    _track_df.index += 1
    '''apply the get_audio_features helper (support.py) to every track id'''
    _audiofeat = get_audio_features(_track_df['id'])
    '''create column names for the dataframe'''
    _audiofeat = pd.DataFrame(_audiofeat, columns=['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness',
                                                   'instrumentalness', 'liveness', 'valence', 'tempo', 'type', 'id', 'uri', 'track_href', 'analysis_url', 'duration_ms', 'time_signature'])
    # Drop Spotify bookkeeping columns the client does not need.
    _audiofeat_df = _audiofeat.drop(['analysis_url', 'track_href', 'type', 'uri'], axis=1)
    tracks_plus_df = _track_df.merge(_audiofeat_df, how='left', left_on='id', right_on='id')
    tracks_plus_df.index += 1
    return (json.dumps(json.loads(tracks_plus_df.to_json(orient='index')), indent=2))
@app.route('/predict/<track_id>', methods=['GET', 'POST'])
def dj_rec(track_id):
    '''get similar songs depending on the audio features'''
    # Number of recommendations returned, and the maximum distance in
    # scaled feature space for a candidate to count as "similar".
    neighbors = 4
    max_distance = 5.0
    '''
    [:-10] will return only 10 closest songs to the original track_id
    by removing [:-10], code will return 20 songs.
    It will take double of time to make a prediction though'''
    rel_artists = sp.artist_related_artists(sp.track(track_id=track_id)['artists'][0]['id'])['artists'][:-10]
    artist_log = [a['id'] for a in rel_artists]
    # Candidate pool: audio features of every top track of every related artist.
    feat_log = []
    for artist in artist_log:
        for track in sp.artist_top_tracks(artist)['tracks']:
            feat_log.append(sp.audio_features(track['id'])[0])
    catalog = pd.DataFrame.from_dict(feat_log)
    # Row 0 is the seed track; the remaining rows are candidates.
    root = pd.DataFrame.from_dict(sp.audio_features(tracks=[track_id]))
    merged_df = root.append(catalog, ignore_index=True)
    dropped_df = merged_df.drop(columns=['uri', 'track_href', 'id', 'duration_ms', 'time_signature', 'mode', 'loudness', 'type', 'analysis_url'])
    scaled_df = StandardScaler().fit_transform(dropped_df)
    # Hand-tuned feature weights; danceability is cubed (|u|*u*u keeps
    # the sign) to exaggerate its extremes.
    trans_array = scaled_df.copy()
    trans_array[:, 0] = [u*2.4 for u in trans_array[:, 0]]  # acousticness
    trans_array[:, 1] = [((u*u)**0.5)*u for u in trans_array[:, 1]]  # danceability
    trans_array[:, 2] = [u*1.7 for u in trans_array[:, 2]]  # energy
    trans_array[:, 3] = [u*1.4 for u in trans_array[:, 3]]  # instrumentalness
    trans_array[:, 4] = [u*0.9 for u in trans_array[:, 4]]  # key
    trans_array[:, 5] = [u*1.0 for u in trans_array[:, 5]]  # liveness
    trans_array[:, 6] = [u*1.0 for u in trans_array[:, 6]]  # speechiness
    trans_array[:, 7] = [u*1.1 for u in trans_array[:, 7]]  # tempo
    trans_array[:, 8] = [u*2.5 for u in trans_array[:, 8]]  # valence
    knn = NearestNeighbors()
    knn.fit(trans_array)
    # Ask for one extra neighbour: the closest match is the seed itself.
    rec = knn.kneighbors(trans_array[[0]], n_neighbors=neighbors+1)
    predict_response = []
    for n in range(1, neighbors+1):
        if rec[0][0][n] <= max_distance:
            pred_dict = (merged_df.loc[rec[1][0][n], 'id'], rec[0][0][n])
            predict_response.append(pred_dict)
    pred = pd.DataFrame(predict_response, columns=['recommendation', 'distance'])
    # Fetch each recommended track ONCE. The original called sp.track()
    # seven times per id (one call per column) -- a 7x waste of API traffic.
    track_objs = [sp.track(ii) for ii in pred['recommendation']]
    df_predict_tracks = pd.DataFrame()
    df_predict_tracks['artist_name'] = [t['artists'][0]['name'] for t in track_objs]
    df_predict_tracks['song_name'] = [t['name'] for t in track_objs]
    df_predict_tracks['id'] = [t['id'] for t in track_objs]
    df_predict_tracks['url'] = [t['external_urls']['spotify'] for t in track_objs]
    df_predict_tracks['explicit'] = [t['explicit'] for t in track_objs]
    df_predict_tracks['preview'] = [t['preview_url'] for t in track_objs]
    df_predict_tracks['image'] = [t['album']['images'][1]['url'] for t in track_objs]
    df_predict_tracks['preview'] = df_predict_tracks['preview'].apply(get_rid_of_nulls)
    df_predict_tracks.index += 1
    return json.dumps(json.loads(df_predict_tracks.to_json(orient='index')), indent=2)
if __name__ == "__main__":
app.run(debug=True)
|
en
| 0.732099
|
# load environment variables check if app is live search for songs search for songs in desired format # orient='values', 'records', 'index', 'columns' get songs with audio features start index count from 1 instead of 0 apply the function creat columns names for the dataframe get similar songs depending on the audio features [:-10] will return only 10 closest songs to the original track_id by removing [:-10], code will return 20 songs. It will take double of time to make a prediction though # acousticness # danceability # energy # instrumentalness # key # liveness # speechiness # tempo # valence # create dataframe # Save the results
| 2.781782
| 3
|
nova/virt/xenapi/pool.py
|
bopopescu/zknova
| 0
|
6628873
|
<filename>nova/virt/xenapi/pool.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Citrix Systems, Inc.
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Pool-related functions (join, eject, etc).
"""
import urlparse
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger(__name__)
xenapi_pool_opts = [
cfg.BoolOpt('use_join_force',
default=True,
help='To use for hosts with different CPUs'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_pool_opts)
CONF.import_opt('host', 'nova.netconf')
class ResourcePool(object):
    """
    Implements resource pool operations.
    """
    def __init__(self, session, virtapi):
        # Cache identifying details of the local XenServer host so pool
        # operations can refer to them without re-querying the session.
        host_ref = session.get_xenapi_host()
        host_rec = session.call_xenapi('host.get_record', host_ref)
        self._host_name = host_rec['hostname']
        self._host_addr = host_rec['address']
        self._host_uuid = host_rec['uuid']
        self._session = session
        self._virtapi = virtapi
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error):
        """Undo aggregate operation when pool error raised."""
        try:
            if set_error:
                metadata = {pool_states.KEY: pool_states.ERROR}
                self._virtapi.aggregate_metadata_add(context, aggregate,
                                                     metadata)
            op(context, aggregate, host)
        except Exception:
            aggregate_id = aggregate['id']
            LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
                            'during operation on %(host)s') % locals())

    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(aggregate['metadetails']):
            return
        invalid = {pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted',
                   pool_states.ERROR: 'aggregate in error'}
        if (aggregate['metadetails'][pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateAction(
                action='add host',
                aggregate_id=aggregate['id'],
                reason=aggregate['metadetails'][pool_states.KEY])
        if (aggregate['metadetails'][pool_states.KEY] == pool_states.CREATED):
            self._virtapi.aggregate_metadata_add(context, aggregate,
                                                 {pool_states.KEY:
                                                  pool_states.CHANGING})
        if len(aggregate['hosts']) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate['id'], aggregate['name'])
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
                        host: self._host_uuid,
                        pool_states.KEY: pool_states.ACTIVE}
            self._virtapi.aggregate_metadata_add(context, aggregate,
                                                 metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate['metadetails']['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master -> do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate['id'], host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {host: slave_info.get('xenhost_uuid'), }
                self._virtapi.aggregate_metadata_add(context, aggregate,
                                                     metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()
                self.compute_rpcapi.add_aggregate_host(
                    context, aggregate, host, master_compute, slave_info)

    def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
        """Remove a compute host from an aggregate."""
        slave_info = slave_info or dict()
        if not pool_states.is_hv_pool(aggregate['metadetails']):
            return
        invalid = {pool_states.CREATED: 'no hosts to remove',
                   pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted', }
        if aggregate['metadetails'][pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateAction(
                action='remove host',
                aggregate_id=aggregate['id'],
                reason=invalid[aggregate['metadetails'][pool_states.KEY]])
        master_compute = aggregate['metadetails']['master_compute']
        if master_compute == CONF.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = aggregate['metadetails'][host]
            self._eject_slave(aggregate['id'],
                              slave_info.get('compute_uuid'), host_uuid)
            self._virtapi.aggregate_metadata_delete(context, aggregate,
                                                    host)
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate['hosts']) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                    aggregate_id=aggregate['id'],
                    action='remove_from_aggregate',
                    reason=_('Unable to eject %(host)s '
                             'from the pool; pool not empty')
                    % locals())
            self._clear_pool(aggregate['id'])
            for key in ['master_compute', host]:
                self._virtapi.aggregate_metadata_delete(context,
                                                        aggregate, key)
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            slave_info = self._create_slave_info()
            self.compute_rpcapi.remove_aggregate_host(
                context, aggregate['id'], host, master_compute, slave_info)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(aggregate_id=aggregate['id'],
                                           action='remove_from_aggregate',
                                           reason=_('Unable to eject %(host)s '
                                                    'from the pool; No master found')
                                           % locals())

    def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd):
        """Joins a slave into a XenServer resource pool."""
        try:
            # NOTE(review): the 'password' value had been replaced by an
            # anonymization placeholder, which is not valid Python; restore
            # the 'passwd' parameter that this method receives.
            args = {'compute_uuid': compute_uuid,
                    'url': url,
                    'user': user,
                    'password': passwd,
                    'force': jsonutils.dumps(CONF.use_join_force),
                    'master_addr': self._host_addr,
                    'master_user': CONF.xenapi_connection_username,
                    'master_pass': CONF.xenapi_connection_password, }
            self._session.call_plugin('xenhost', 'host_join', args)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-Join failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=_('Unable to join %(host)s '
                                                    'in the pool') % locals())

    def _eject_slave(self, aggregate_id, compute_uuid, host_uuid):
        """Eject a slave from a XenServer resource pool."""
        try:
            # shutdown nova-compute; if there are other VMs running, e.g.
            # guest instances, the eject will fail. That's a precaution
            # to deal with the fact that the admin should evacuate the host
            # first. The eject wipes out the host completely.
            vm_ref = self._session.call_xenapi('VM.get_by_uuid', compute_uuid)
            self._session.call_xenapi("VM.clean_shutdown", vm_ref)
            host_ref = self._session.call_xenapi('host.get_by_uuid', host_uuid)
            self._session.call_xenapi("pool.eject", host_ref)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-eject failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))

    def _init_pool(self, aggregate_id, aggregate_name):
        """Set the name label of a XenServer pool."""
        try:
            pool_ref = self._session.call_xenapi("pool.get_all")[0]
            self._session.call_xenapi("pool.set_name_label",
                                      pool_ref, aggregate_name)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Unable to set up pool: %(e)s.") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=str(e.details))

    def _clear_pool(self, aggregate_id):
        """Clear the name label of a XenServer pool."""
        try:
            pool_ref = self._session.call_xenapi('pool.get_all')[0]
            self._session.call_xenapi('pool.set_name_label', pool_ref, '')
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-set_name_label failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))

    def _create_slave_info(self):
        """XenServer specific info needed to join the hypervisor pool."""
        # replace the address from the xenapi connection url
        # because this might be 169.254.0.1, i.e. xenapi
        # NOTE: password in clear is not great, but it'll do for now
        sender_url = swap_xapi_host(
            CONF.xenapi_connection_url, self._host_addr)
        return {
            "url": sender_url,
            "user": CONF.xenapi_connection_username,
            "passwd": CONF.xenapi_connection_password,
            "compute_uuid": vm_utils.get_this_vm_uuid(),
            "xenhost_uuid": self._host_uuid,
        }
def swap_xapi_host(url, host_addr):
    """Replace the XenServer address present in 'url' with 'host_addr'."""
    parsed = urlparse.urlparse(url)
    old_netloc = parsed.netloc
    # Keep the port (if any) while swapping only the host part.
    _old_host, colon, port = old_netloc.partition(':')
    new_netloc = '%s%s%s' % (host_addr, colon, port)
    return url.replace(old_netloc, new_netloc)
|
<filename>nova/virt/xenapi/pool.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Citrix Systems, Inc.
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Pool-related functions (join, eject, etc).
"""
import urlparse
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger(__name__)
xenapi_pool_opts = [
cfg.BoolOpt('use_join_force',
default=True,
help='To use for hosts with different CPUs'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_pool_opts)
CONF.import_opt('host', 'nova.netconf')
class ResourcePool(object):
    """
    Implements resource pool operations.
    """
    def __init__(self, session, virtapi):
        # Cache identifying details of the local XenServer host so pool
        # operations can refer to them without re-querying the session.
        host_ref = session.get_xenapi_host()
        host_rec = session.call_xenapi('host.get_record', host_ref)
        self._host_name = host_rec['hostname']
        self._host_addr = host_rec['address']
        self._host_uuid = host_rec['uuid']
        self._session = session
        self._virtapi = virtapi
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error):
        """Undo aggregate operation when pool error raised."""
        try:
            if set_error:
                metadata = {pool_states.KEY: pool_states.ERROR}
                self._virtapi.aggregate_metadata_add(context, aggregate,
                                                     metadata)
            op(context, aggregate, host)
        except Exception:
            aggregate_id = aggregate['id']
            LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
                            'during operation on %(host)s') % locals())

    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(aggregate['metadetails']):
            return
        invalid = {pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted',
                   pool_states.ERROR: 'aggregate in error'}
        if (aggregate['metadetails'][pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateAction(
                action='add host',
                aggregate_id=aggregate['id'],
                reason=aggregate['metadetails'][pool_states.KEY])
        if (aggregate['metadetails'][pool_states.KEY] == pool_states.CREATED):
            self._virtapi.aggregate_metadata_add(context, aggregate,
                                                 {pool_states.KEY:
                                                  pool_states.CHANGING})
        if len(aggregate['hosts']) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate['id'], aggregate['name'])
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
                        host: self._host_uuid,
                        pool_states.KEY: pool_states.ACTIVE}
            self._virtapi.aggregate_metadata_add(context, aggregate,
                                                 metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate['metadetails']['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master -> do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate['id'], host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {host: slave_info.get('xenhost_uuid'), }
                self._virtapi.aggregate_metadata_add(context, aggregate,
                                                     metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()
                self.compute_rpcapi.add_aggregate_host(
                    context, aggregate, host, master_compute, slave_info)

    def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
        """Remove a compute host from an aggregate."""
        slave_info = slave_info or dict()
        if not pool_states.is_hv_pool(aggregate['metadetails']):
            return
        invalid = {pool_states.CREATED: 'no hosts to remove',
                   pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted', }
        if aggregate['metadetails'][pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateAction(
                action='remove host',
                aggregate_id=aggregate['id'],
                reason=invalid[aggregate['metadetails'][pool_states.KEY]])
        master_compute = aggregate['metadetails']['master_compute']
        if master_compute == CONF.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = aggregate['metadetails'][host]
            self._eject_slave(aggregate['id'],
                              slave_info.get('compute_uuid'), host_uuid)
            self._virtapi.aggregate_metadata_delete(context, aggregate,
                                                    host)
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate['hosts']) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                    aggregate_id=aggregate['id'],
                    action='remove_from_aggregate',
                    reason=_('Unable to eject %(host)s '
                             'from the pool; pool not empty')
                    % locals())
            self._clear_pool(aggregate['id'])
            for key in ['master_compute', host]:
                self._virtapi.aggregate_metadata_delete(context,
                                                        aggregate, key)
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            slave_info = self._create_slave_info()
            self.compute_rpcapi.remove_aggregate_host(
                context, aggregate['id'], host, master_compute, slave_info)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(aggregate_id=aggregate['id'],
                                           action='remove_from_aggregate',
                                           reason=_('Unable to eject %(host)s '
                                                    'from the pool; No master found')
                                           % locals())

    def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd):
        """Joins a slave into a XenServer resource pool."""
        try:
            # NOTE(review): the 'password' value had been replaced by an
            # anonymization placeholder, which is not valid Python; restore
            # the 'passwd' parameter that this method receives.
            args = {'compute_uuid': compute_uuid,
                    'url': url,
                    'user': user,
                    'password': passwd,
                    'force': jsonutils.dumps(CONF.use_join_force),
                    'master_addr': self._host_addr,
                    'master_user': CONF.xenapi_connection_username,
                    'master_pass': CONF.xenapi_connection_password, }
            self._session.call_plugin('xenhost', 'host_join', args)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-Join failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=_('Unable to join %(host)s '
                                                    'in the pool') % locals())

    def _eject_slave(self, aggregate_id, compute_uuid, host_uuid):
        """Eject a slave from a XenServer resource pool."""
        try:
            # shutdown nova-compute; if there are other VMs running, e.g.
            # guest instances, the eject will fail. That's a precaution
            # to deal with the fact that the admin should evacuate the host
            # first. The eject wipes out the host completely.
            vm_ref = self._session.call_xenapi('VM.get_by_uuid', compute_uuid)
            self._session.call_xenapi("VM.clean_shutdown", vm_ref)
            host_ref = self._session.call_xenapi('host.get_by_uuid', host_uuid)
            self._session.call_xenapi("pool.eject", host_ref)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-eject failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))

    def _init_pool(self, aggregate_id, aggregate_name):
        """Set the name label of a XenServer pool."""
        try:
            pool_ref = self._session.call_xenapi("pool.get_all")[0]
            self._session.call_xenapi("pool.set_name_label",
                                      pool_ref, aggregate_name)
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Unable to set up pool: %(e)s.") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=str(e.details))

    def _clear_pool(self, aggregate_id):
        """Clear the name label of a XenServer pool."""
        try:
            pool_ref = self._session.call_xenapi('pool.get_all')[0]
            self._session.call_xenapi('pool.set_name_label', pool_ref, '')
        except self._session.XenAPI.Failure as e:
            LOG.error(_("Pool-set_name_label failed: %(e)s") % locals())
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=str(e.details))

    def _create_slave_info(self):
        """XenServer specific info needed to join the hypervisor pool."""
        # replace the address from the xenapi connection url
        # because this might be 169.254.0.1, i.e. xenapi
        # NOTE: password in clear is not great, but it'll do for now
        sender_url = swap_xapi_host(
            CONF.xenapi_connection_url, self._host_addr)
        return {
            "url": sender_url,
            "user": CONF.xenapi_connection_username,
            "passwd": CONF.xenapi_connection_password,
            "compute_uuid": vm_utils.get_this_vm_uuid(),
            "xenhost_uuid": self._host_uuid,
        }
def swap_xapi_host(url, host_addr):
    """Replace the XenServer address present in 'url' with 'host_addr'."""
    parsed = urlparse.urlparse(url)
    old_netloc = parsed.netloc
    # Keep the port (if any) while swapping only the host part.
    _old_host, colon, port = old_netloc.partition(':')
    new_netloc = '%s%s%s' % (host_addr, colon, port)
    return url.replace(old_netloc, new_netloc)
|
en
| 0.895238
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 Citrix Systems, Inc. # Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Management class for Pool-related functions (join, eject, etc). Implements resource pool operations. Undo aggregate operation when pool error raised. Add a compute host to an aggregate. # this is the first host of the pool -> make it master # save metadata so that we can find the master again # the pool is already up and running, we need to figure out # whether we can serve the request from this host or not. # this is the master -> do a pool-join # To this aim, nova compute on the slave has to go down. # NOTE: it is assumed that ONLY nova compute is running now # send rpc cast to master, asking to add the following # host with specified credentials. Remove a compute host from an aggregate. # this is the master -> instruct it to eject a host from the pool # Remove master from its own pool -> destroy pool only if the # master is on its own, otherwise raise fault. Destroying a # pool made only by master is fictional # NOTE: this could be avoided by doing a master # re-election, but this is simpler for now. # A master exists -> forward pool-eject request to master # this shouldn't have happened Joins a slave into a XenServer resource pool. Eject a slave from a XenServer resource pool. # shutdown nova-compute; if there are other VMs running, e.g. # guest instances, the eject will fail. 
That's a precaution # to deal with the fact that the admin should evacuate the host # first. The eject wipes out the host completely. Set the name label of a XenServer pool. Clear the name label of a XenServer pool. XenServer specific info needed to join the hypervisor pool. # replace the address from the xenapi connection url # because this might be 169.254.0.1, i.e. xenapi # NOTE: password in clear is not great, but it'll do for now Replace the XenServer address present in 'url' with 'host_addr'.
| 1.948844
| 2
|
Code/DAY 2/GitHub_API_Java_REST.py
|
Viswalahiri/Internship_Agilitix
| 0
|
6628874
|
<filename>Code/DAY 2/GitHub_API_Java_REST.py
import requests
import json
from pprint import pprint

# Fetch GitHub repositories matching the search query and save the JSON
# response. The query is constant, so a single request suffices: the
# original looped ten times over the same URL and opened Java_Rest.json
# twice per pass (a leaked open(..., "w") handle that truncated the file,
# followed by an 'a' append), leaving only one copy of the last response.
url = "https://api.github.com/search/repositories?q=language:Java&topic=REST"
data = requests.get(url).json()
with open("Java_Rest.json", "w") as outfile:
    json.dump(data, outfile)
|
<filename>Code/DAY 2/GitHub_API_Java_REST.py
import requests
import json
from pprint import pprint

# Fetch GitHub repositories matching the search query and save the JSON
# response. The query is constant, so a single request suffices: the
# original looped ten times over the same URL and opened Java_Rest.json
# twice per pass (a leaked open(..., "w") handle that truncated the file,
# followed by an 'a' append), leaving only one copy of the last response.
url = "https://api.github.com/search/repositories?q=language:Java&topic=REST"
data = requests.get(url).json()
with open("Java_Rest.json", "w") as outfile:
    json.dump(data, outfile)
|
none
| 1
| 3.175736
| 3
|
|
bip_utils/substrate/conf/__init__.py
|
MIPPLTeam/bip_utils
| 149
|
6628875
|
from bip_utils.substrate.conf.substrate_coins import SubstrateCoins
from bip_utils.substrate.conf.substrate_coin_conf import SubstrateCoinConf
from bip_utils.substrate.conf.substrate_conf import SubstrateConf
from bip_utils.substrate.conf.substrate_conf_getter import SubstrateConfGetter
|
from bip_utils.substrate.conf.substrate_coins import SubstrateCoins
from bip_utils.substrate.conf.substrate_coin_conf import SubstrateCoinConf
from bip_utils.substrate.conf.substrate_conf import SubstrateConf
from bip_utils.substrate.conf.substrate_conf_getter import SubstrateConfGetter
|
none
| 1
| 1.051392
| 1
|
|
test/model/test_boats.py
|
vishalbelsare/sparsereg
| 49
|
6628876
|
<reponame>vishalbelsare/sparsereg
import numpy as np
import pytest
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.exceptions import FitFailedWarning
from sparsereg.model import BoATS, STRidge, fit_with_noise
np.random.seed(42)
@pytest.mark.parametrize("lmc", [LinearRegression, Ridge, STRidge])
@pytest.mark.parametrize("sigma", [0.01, 0.05, 0.1])
def test_fit_with_noise(data, lmc, sigma):
    """fit_with_noise returns one coefficient per feature; the first stays near zero."""
    features, target = data
    coef, intercept = fit_with_noise(features, target, sigma, 0.3, 200, lmc=lmc)
    assert len(coef) == features.shape[1]
    assert abs(coef[0]) <= 0.015
def test_boats(data):
    """A fitted BoATS model recovers a near-zero first coefficient and intercept ~3."""
    model = BoATS(sigma=0.05, alpha=0.2, n=100)
    model.fit(*data)
    assert model.coef_[0] <= 0.015
    assert abs(model.intercept_ - 3) <= 1e-4
def test_boats_raise(data):
    """An over-aggressive alpha must make the fit fail loudly."""
    bad_model = BoATS(sigma=0.05, alpha=2, n=100)
    with pytest.raises(FitFailedWarning):
        bad_model.fit(*data)
|
import numpy as np
import pytest
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.exceptions import FitFailedWarning
from sparsereg.model import BoATS, STRidge, fit_with_noise
np.random.seed(42)
@pytest.mark.parametrize("lmc", [LinearRegression, Ridge, STRidge])
@pytest.mark.parametrize("sigma", [0.01, 0.05, 0.1])
def test_fit_with_noise(data, lmc, sigma):
    """fit_with_noise returns one coefficient per feature; the first stays near zero."""
    features, target = data
    coef, intercept = fit_with_noise(features, target, sigma, 0.3, 200, lmc=lmc)
    assert len(coef) == features.shape[1]
    assert abs(coef[0]) <= 0.015
def test_boats(data):
    """A fitted BoATS model recovers a near-zero first coefficient and intercept ~3."""
    model = BoATS(sigma=0.05, alpha=0.2, n=100)
    model.fit(*data)
    assert model.coef_[0] <= 0.015
    assert abs(model.intercept_ - 3) <= 1e-4
def test_boats_raise(data):
    """An over-aggressive alpha must make the fit fail loudly."""
    bad_model = BoATS(sigma=0.05, alpha=2, n=100)
    with pytest.raises(FitFailedWarning):
        bad_model.fit(*data)
|
none
| 1
| 2.264782
| 2
|
|
whereamigeo/whereamigeo/helper.py
|
MLH-Fellowship/hackathon-geography
| 0
|
6628877
|
from functools import reduce
from whereamigeo.mappings import map, word_to_plus_code_mapping
sep = '.'  # separator between words in an encoded phrase
def get_word(code: str):
    """Map a plus-code chunk to its word via mappings.map."""
    return map[code]
def get_code(word: str):
    """Inverse of get_word: map a word back to its plus-code chunk."""
    return word_to_plus_code_mapping[word]
def get_olc_array(olc: str, inc: int):
    """Strip the '+' from an open location code and split it into *inc*-sized chunks."""
    stripped = olc.replace('+', '')
    chunk_starts = range(0, len(stripped), inc)
    return [stripped[start:start + inc] for start in chunk_starts]
def single_char_phrase(olc: str):
    """Encode *olc* as a word phrase, one word per single character."""
    words = [get_word(ch) for ch in get_olc_array(olc, 1)]
    return sep.join(words)
def double_char_phrase(olc: str):
    """Encode *olc* as a word phrase, one word per two-character chunk."""
    # Split into 2-char chunks, then join the mapped words with the separator.
    pairs = get_olc_array(olc, 2)
    return sep.join(get_word(pair) for pair in pairs)
def olc_to_phrase(olc: str, single: bool = False):
    """Convert an open location code to a phrase (single- or double-char words)."""
    encoder = single_char_phrase if single else double_char_phrase
    return encoder(olc)
def phrase_to_olc(phrase: str):
    """Convert a word phrase back into an open location code.

    The '+' is re-inserted before the last two characters for even-length
    codes and before the last three otherwise, mirroring olc_to_phrase.
    """
    words = phrase.split(sep)
    # Iterate the words directly and join once: the original indexed the
    # list by position and concatenated via reduce(), both needless.
    code = ''.join(get_code(word) for word in words)
    if len(code) % 2 == 0:
        return code[:-2] + '+' + code[-2:]
    return code[:-3] + '+' + code[-3:]
|
from functools import reduce
from whereamigeo.mappings import map, word_to_plus_code_mapping
sep = '.'  # separator between words in an encoded phrase
def get_word(code: str):
    """Map a plus-code chunk to its word via mappings.map."""
    return map[code]
def get_code(word: str):
    """Inverse of get_word: map a word back to its plus-code chunk."""
    return word_to_plus_code_mapping[word]
def get_olc_array(olc: str, inc: int):
    """Strip the '+' from an open location code and split it into *inc*-sized chunks."""
    stripped = olc.replace('+', '')
    chunk_starts = range(0, len(stripped), inc)
    return [stripped[start:start + inc] for start in chunk_starts]
def single_char_phrase(olc: str):
    """Encode *olc* as a word phrase, one word per single character."""
    words = [get_word(ch) for ch in get_olc_array(olc, 1)]
    return sep.join(words)
def double_char_phrase(olc: str):
    """Encode *olc* as a word phrase, one word per two-character chunk."""
    # Split into 2-char chunks, then join the mapped words with the separator.
    pairs = get_olc_array(olc, 2)
    return sep.join(get_word(pair) for pair in pairs)
def olc_to_phrase(olc: str, single: bool = False):
    """Convert an open location code to a phrase (single- or double-char words)."""
    encoder = single_char_phrase if single else double_char_phrase
    return encoder(olc)
def phrase_to_olc(phrase: str):
    """Convert a word phrase back into an open location code.

    The '+' is re-inserted before the last two characters for even-length
    codes and before the last three otherwise, mirroring olc_to_phrase.
    """
    words = phrase.split(sep)
    # Iterate the words directly and join once: the original indexed the
    # list by position and concatenated via reduce(), both needless.
    code = ''.join(get_code(word) for word in words)
    if len(code) % 2 == 0:
        return code[:-2] + '+' + code[-2:]
    return code[:-3] + '+' + code[-3:]
|
en
| 0.605455
|
# Split string into array of 2 char elements. # Convert coded array to word phrase.
| 3.504435
| 4
|
pyxnvme/examples/device_info.py
|
karlowich/xNVMe
| 83
|
6628878
|
<gh_stars>10-100
#!/usr/bin/env python3
from xnvme import CAPI as capi
def main():
"""Example entry point"""
dev = capi.xnvme_dev_open(b"/dev/nvme0n1")
capi.xnvme_dev_pr(dev, 0x0)
geo = capi.xnvme_dev_get_geo(dev)
if not geo:
return
capi.xnvme_geo_pr(geo, 0x0)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
from xnvme import CAPI as capi
def main():
"""Example entry point"""
dev = capi.xnvme_dev_open(b"/dev/nvme0n1")
capi.xnvme_dev_pr(dev, 0x0)
geo = capi.xnvme_dev_get_geo(dev)
if not geo:
return
capi.xnvme_geo_pr(geo, 0x0)
if __name__ == "__main__":
main()
|
en
| 0.194516
|
#!/usr/bin/env python3 Example entry point
| 2.361639
| 2
|
utils.py
|
ikuroNoriiwa/shellcode_transform
| 2
|
6628879
|
<reponame>ikuroNoriiwa/shellcode_transform<filename>utils.py
#!/usr/bin/python3
from sys import exit
def error(msg):
print("\033[31mError: {}\033[0m".format(msg))
exit(1)
|
#!/usr/bin/python3
from sys import exit
def error(msg):
print("\033[31mError: {}\033[0m".format(msg))
exit(1)
|
fr
| 0.386793
|
#!/usr/bin/python3
| 2.418152
| 2
|
src/tests/fft/fft-conv-poc.py
|
ppwwyyxx/haDNN
| 23
|
6628880
|
<reponame>ppwwyyxx/haDNN
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: fft-conv-poc.py
# Author: <NAME> <<EMAIL>>
import numpy as np
import scipy.signal
import numpy.fft
import cv2
image2d = np.random.rand(200, 200)
image2dp = np.pad(image2d, ((1,1),(1,1)), mode='constant') # pad then fft
kernel = np.random.rand(3,3)
img_f = np.fft.fft2(image2dp)
krn_f = np.fft.fft2(kernel, s=image2dp.shape)
conv = np.fft.ifft2(img_f*krn_f).real
conv = conv[2:,2:] # 2 == pad*2 = 3//2 * 2
conv2 = scipy.signal.convolve2d(image2d, kernel, mode='same', boundary='fill')
print conv
print conv2
diff = conv2 - conv
print np.abs(diff).max()
#from IPython import embed; embed()
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: fft-conv-poc.py
# Author: <NAME> <<EMAIL>>
import numpy as np
import scipy.signal
import numpy.fft
import cv2
image2d = np.random.rand(200, 200)
image2dp = np.pad(image2d, ((1,1),(1,1)), mode='constant') # pad then fft
kernel = np.random.rand(3,3)
img_f = np.fft.fft2(image2dp)
krn_f = np.fft.fft2(kernel, s=image2dp.shape)
conv = np.fft.ifft2(img_f*krn_f).real
conv = conv[2:,2:] # 2 == pad*2 = 3//2 * 2
conv2 = scipy.signal.convolve2d(image2d, kernel, mode='same', boundary='fill')
print conv
print conv2
diff = conv2 - conv
print np.abs(diff).max()
#from IPython import embed; embed()
|
en
| 0.318492
|
#!/usr/bin/env python2 # -*- coding: UTF-8 -*- # File: fft-conv-poc.py # Author: <NAME> <<EMAIL>> # pad then fft # 2 == pad*2 = 3//2 * 2 #from IPython import embed; embed()
| 2.628398
| 3
|
tests/models/test_sensorinstance.py
|
shashijangra/airflow-1
| 2
|
6628881
|
<filename>tests/models/test_sensorinstance.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.models import SensorInstance
from airflow.providers.apache.hive.sensors.named_hive_partition import NamedHivePartitionSensor
from airflow.sensors.python import PythonSensor
class SensorInstanceTest(unittest.TestCase):
def test_get_classpath(self):
# Test the classpath in/out airflow
obj1 = NamedHivePartitionSensor(
partition_names=['test_partition'],
task_id='meta_partition_test_1')
obj1_classpath = SensorInstance.get_classpath(obj1)
obj1_importpath = "airflow.providers.apache.hive." \
"sensors.named_hive_partition.NamedHivePartitionSensor"
self.assertEqual(obj1_classpath, obj1_importpath)
def test_callable():
return
obj3 = PythonSensor(python_callable=test_callable,
task_id='python_sensor_test')
obj3_classpath = SensorInstance.get_classpath(obj3)
obj3_importpath = "airflow.sensors.python.PythonSensor"
self.assertEqual(obj3_classpath, obj3_importpath)
|
<filename>tests/models/test_sensorinstance.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.models import SensorInstance
from airflow.providers.apache.hive.sensors.named_hive_partition import NamedHivePartitionSensor
from airflow.sensors.python import PythonSensor
class SensorInstanceTest(unittest.TestCase):
def test_get_classpath(self):
# Test the classpath in/out airflow
obj1 = NamedHivePartitionSensor(
partition_names=['test_partition'],
task_id='meta_partition_test_1')
obj1_classpath = SensorInstance.get_classpath(obj1)
obj1_importpath = "airflow.providers.apache.hive." \
"sensors.named_hive_partition.NamedHivePartitionSensor"
self.assertEqual(obj1_classpath, obj1_importpath)
def test_callable():
return
obj3 = PythonSensor(python_callable=test_callable,
task_id='python_sensor_test')
obj3_classpath = SensorInstance.get_classpath(obj3)
obj3_importpath = "airflow.sensors.python.PythonSensor"
self.assertEqual(obj3_classpath, obj3_importpath)
|
en
| 0.857657
|
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Test the classpath in/out airflow
| 1.949998
| 2
|
angr/angr/engines/vex/statements/base.py
|
Ruide/angr-dev
| 0
|
6628882
|
import logging
l = logging.getLogger("angr.engines.vex.statements.base")
class SimIRStmt(object):
"""A class for symbolically translating VEX IRStmts."""
def __init__(self, stmt, state):
self.stmt = stmt
self.state = state
# references by the statement
self.actions = []
self._constraints = [ ]
def process(self):
"""
Process the statement, applying its effects on the state.
"""
# this is where we would choose between different analysis modes
self._execute()
def _execute(self):
raise NotImplementedError()
def _translate_expr(self, expr):
"""Translates an IRExpr into a SimIRExpr."""
e = translate_expr(expr, self.state)
self._record_expr(e)
return e
def _translate_exprs(self, exprs):
"""Translates a sequence of IRExprs into SimIRExprs."""
return [ self._translate_expr(e) for e in exprs ]
def _record_expr(self, expr):
"""Records the references of an expression."""
self.actions.extend(expr.actions)
def _add_constraints(self, *constraints):
"""Adds constraints to the state."""
self._constraints.extend(constraints)
self.state.add_constraints(*constraints)
def _write_tmp(self, tmp, v, reg_deps, tmp_deps):
"""
Writes an expression to a tmp.
"""
self.state.scratch.store_tmp(tmp, v, reg_deps, tmp_deps)
from ..expressions import translate_expr
|
import logging
l = logging.getLogger("angr.engines.vex.statements.base")
class SimIRStmt(object):
"""A class for symbolically translating VEX IRStmts."""
def __init__(self, stmt, state):
self.stmt = stmt
self.state = state
# references by the statement
self.actions = []
self._constraints = [ ]
def process(self):
"""
Process the statement, applying its effects on the state.
"""
# this is where we would choose between different analysis modes
self._execute()
def _execute(self):
raise NotImplementedError()
def _translate_expr(self, expr):
"""Translates an IRExpr into a SimIRExpr."""
e = translate_expr(expr, self.state)
self._record_expr(e)
return e
def _translate_exprs(self, exprs):
"""Translates a sequence of IRExprs into SimIRExprs."""
return [ self._translate_expr(e) for e in exprs ]
def _record_expr(self, expr):
"""Records the references of an expression."""
self.actions.extend(expr.actions)
def _add_constraints(self, *constraints):
"""Adds constraints to the state."""
self._constraints.extend(constraints)
self.state.add_constraints(*constraints)
def _write_tmp(self, tmp, v, reg_deps, tmp_deps):
"""
Writes an expression to a tmp.
"""
self.state.scratch.store_tmp(tmp, v, reg_deps, tmp_deps)
from ..expressions import translate_expr
|
en
| 0.873851
|
A class for symbolically translating VEX IRStmts. # references by the statement Process the statement, applying its effects on the state. # this is where we would choose between different analysis modes Translates an IRExpr into a SimIRExpr. Translates a sequence of IRExprs into SimIRExprs. Records the references of an expression. Adds constraints to the state. Writes an expression to a tmp.
| 2.721287
| 3
|
tests/testCallbacksRegex.py
|
while-loop/Twitch-IRC
| 0
|
6628883
|
<gh_stars>0
import inspect
import time
import unittest
from types import FunctionType
from twitchirc.irc import IRC
class TestCallbacks(unittest.TestCase):
CHANNEL = "testchannel"
VIEWER = "testviewer"
USER = "testuser"
tests = None
def setUp(self):
if not TestCallbacks.tests:
TestCallbacks.tests = {}
for x, y in TestCallbacks.__dict__.items():
if type(y) == FunctionType and y.func_name[:4] == "test" and "zzzzz" not in y.func_name:
TestCallbacks.tests[y.func_name] = False
self.login = ":tmi.twitch.tv 001 " + TestCallbacks.USER + "\r\n :tmi.twitch.tv 376 " + TestCallbacks.USER
def tearDown(self):
time.sleep(.005) # allow the callbacks to be executed
def test_on_message(self):
MESSAGE = "testmessage"
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(message, MESSAGE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onMessage(self, channel, viewer, message):
callback(self, channel, viewer, message)
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv PRIVMSG #{channel} :{message}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, message=MESSAGE)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_command(self):
SHEBANG = "!"
COMMAND = "command"
VALUE = "value"
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, command, value):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(command, COMMAND)
self.assertEqual(value, VALUE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onCommand(self, channel, viewer, command, value):
callback(self, channel, viewer, command, value)
msg = ":{viewer}!{viewer}@{viewer}.<EMAIL> PRIVMSG #{channel} :{shebang}{command} {value}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, shebang=SHEBANG, command=COMMAND, value=VALUE)
chat = TESTIRC("noauth", TestCallbacks.USER, cmdShebang=SHEBANG)
chat.onResponse(msg)
def test_on_command_with_shebang(self):
SHEBANG = '#'
COMMAND = "shebangcommand"
VALUE = "shebangvalue"
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, command, value):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(command, COMMAND)
self.assertEqual(value, VALUE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onCommand(self, channel, viewer, command, value):
callback(self, channel, viewer, command, value)
msg = ":{viewer}!{viewer}@{viewer}.<EMAIL> PRIVMSG #{channel} :{shebang}{command} {value}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, shebang=SHEBANG, command=COMMAND, value=VALUE)
chat = TESTIRC("noauth", TestCallbacks.USER, cmdShebang=SHEBANG)
chat.onResponse(msg)
def test_on_join(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, state):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(state, IRC.JOIN)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onJoinPart(self, channel, viewer, state):
callback(self, channel, viewer, state)
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv JOIN #{channel}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_part(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, state):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(state, IRC.PART)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onJoinPart(self, channel, viewer, state):
callback(self, channel, viewer, state)
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv PART #{channel}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_mode_op(self):
OPCODE = IRC.OP
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, opcode):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(opcode, OPCODE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onMode(self, channel, viewer, opcode):
callback(self, channel, viewer, opcode)
msg = ":jtv MODE #{channel} {op}o {viewer}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, op=OPCODE)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_mode_deop(self):
OPCODE = IRC.DEOP
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, opcode):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(opcode, OPCODE)
TestCallbacks.tests[func] = True
msg = ":jtv MODE #{channel} {op}o {viewer}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, op=OPCODE)
class TESTIRC(IRC):
def onMode(self, channel, viewer, opcode):
callback(self, channel, viewer, opcode)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_notice_already_r9k_on(self):
ID = IRC.ID_ALREADY_R9K_ON
MSG = "This room is already in r9k mode."
func = inspect.stack()[0][3]
def callback(irc, channel, msgid, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(msgid, ID)
self.assertEqual(message, MSG)
TestCallbacks.tests[func] = True
msg = "@msg-id={id} :tmi.twitch.tv NOTICE #{channel} :{message}".format(
id=ID, channel=TestCallbacks.CHANNEL, message=MSG)
class TESTIRC(IRC):
def onNotice(self, channel, msgid, message):
callback(self, channel, msgid, message)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_notice_emote_only_on(self):
ID = IRC.ID_EMOTE_ONLY_ON
MSG = "This room is now in emote-only mode."
func = inspect.stack()[0][3]
def callback(irc, channel, msgid, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(msgid, ID)
self.assertEqual(message, MSG)
TestCallbacks.tests[func] = True
msg = "@msg-id={id} :tmi.twitch.tv NOTICE #{channel} :{message}".format(
id=ID, channel=TestCallbacks.CHANNEL, message=MSG)
class TESTIRC(IRC):
def onNotice(self, channel, msgid, message):
callback(self, channel, msgid, message)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_host_target(self):
HOSTING = "hostingchannel"
TARGET = self.CHANNEL
AMOUNT = 8675309
func = inspect.stack()[0][3]
def callback(irc, hosting, target, amount):
self.assertIsNotNone(irc)
self.assertEqual(hosting, HOSTING)
self.assertEqual(target, TARGET)
self.assertEqual(amount, AMOUNT)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv HOSTTARGET #{hosting} :{target} {amount}".format(
hosting=HOSTING, target=TARGET, amount=AMOUNT)
class TESTIRC(IRC):
def onHostTarget(self, hosting, target, amount):
callback(self, hosting, target, amount)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_stop_host_target(self):
HOSTING = "hostingchannel"
AMOUNT = 9001
TARGET = None
func = inspect.stack()[0][3]
def callback(irc, hosting, target, amount):
self.assertIsNotNone(irc)
self.assertEqual(hosting, HOSTING)
self.assertEqual(target, TARGET)
self.assertEqual(amount, AMOUNT)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv HOSTTARGET #{hosting} :- {amount}".format(
hosting=HOSTING, amount=AMOUNT)
class TESTIRC(IRC):
def onHostTarget(self, hosting, target, amount):
callback(self, hosting, target, amount)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_clear_chat_viewer(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer):
self.assertIsNotNone(irc)
self.assertEqual(channel, self.CHANNEL)
self.assertEqual(viewer, self.VIEWER)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv CLEARCHAT #{channel} :{viewer}".format(
channel=self.CHANNEL, viewer=self.VIEWER)
class TESTIRC(IRC):
def onClearChat(self, channel, viewer):
callback(self, channel, viewer)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_clear_chat_channel(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer):
self.assertIsNotNone(irc)
self.assertEqual(channel, self.CHANNEL)
self.assertEqual(viewer, None)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv CLEARCHAT #{channel}".format(
channel=self.CHANNEL)
class TESTIRC(IRC):
def onClearChat(self, channel, viewer):
callback(self, channel, viewer)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_usernotice(self):
MSG = "Yo, NiCe StReAm BrUh!"
func = inspect.stack()[0][3]
def callback(irc, channel, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, self.CHANNEL)
self.assertEqual(message, MSG)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv USERNOTICE #{channel} :{message}".format(
channel=self.CHANNEL, message=MSG)
class TESTIRC(IRC):
def onUserNotice(self, channel, message):
callback(self, channel, message)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_response_override(self):
func = inspect.stack()[0][3]
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv PRIVMSG #{channel} :{message}" \
.format(viewer=self.VIEWER, channel=self.CHANNEL, message="This is a message")
def callback(irc, line):
self.assertIsNotNone(irc)
self.assertEqual(line, msg.replace("\r\n", ""))
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onResponse(self, line):
callback(self, line)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_ping_override(self):
func = inspect.stack()[0][3]
def callback(irc):
self.assertIsNotNone(irc)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onPing(self):
callback(self)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onPing()
def test_on_reconnect_override(self):
func = inspect.stack()[0][3]
def callback(irc):
self.assertIsNotNone(irc)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onReconnect(self):
callback(self)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onReconnect()
def test_zzzzz_tests_passed(self):
for func, passed in TestCallbacks.tests.iteritems():
self.assertTrue(passed, "Failed Function: {}".format(func))
if __name__ == '__main__':
unittest.main()
|
import inspect
import time
import unittest
from types import FunctionType
from twitchirc.irc import IRC
class TestCallbacks(unittest.TestCase):
CHANNEL = "testchannel"
VIEWER = "testviewer"
USER = "testuser"
tests = None
def setUp(self):
if not TestCallbacks.tests:
TestCallbacks.tests = {}
for x, y in TestCallbacks.__dict__.items():
if type(y) == FunctionType and y.func_name[:4] == "test" and "zzzzz" not in y.func_name:
TestCallbacks.tests[y.func_name] = False
self.login = ":tmi.twitch.tv 001 " + TestCallbacks.USER + "\r\n :tmi.twitch.tv 376 " + TestCallbacks.USER
def tearDown(self):
time.sleep(.005) # allow the callbacks to be executed
def test_on_message(self):
MESSAGE = "testmessage"
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(message, MESSAGE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onMessage(self, channel, viewer, message):
callback(self, channel, viewer, message)
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv PRIVMSG #{channel} :{message}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, message=MESSAGE)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_command(self):
SHEBANG = "!"
COMMAND = "command"
VALUE = "value"
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, command, value):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(command, COMMAND)
self.assertEqual(value, VALUE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onCommand(self, channel, viewer, command, value):
callback(self, channel, viewer, command, value)
msg = ":{viewer}!{viewer}@{viewer}.<EMAIL> PRIVMSG #{channel} :{shebang}{command} {value}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, shebang=SHEBANG, command=COMMAND, value=VALUE)
chat = TESTIRC("noauth", TestCallbacks.USER, cmdShebang=SHEBANG)
chat.onResponse(msg)
def test_on_command_with_shebang(self):
SHEBANG = '#'
COMMAND = "shebangcommand"
VALUE = "shebangvalue"
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, command, value):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(command, COMMAND)
self.assertEqual(value, VALUE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onCommand(self, channel, viewer, command, value):
callback(self, channel, viewer, command, value)
msg = ":{viewer}!{viewer}@{viewer}.<EMAIL> PRIVMSG #{channel} :{shebang}{command} {value}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, shebang=SHEBANG, command=COMMAND, value=VALUE)
chat = TESTIRC("noauth", TestCallbacks.USER, cmdShebang=SHEBANG)
chat.onResponse(msg)
def test_on_join(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, state):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(state, IRC.JOIN)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onJoinPart(self, channel, viewer, state):
callback(self, channel, viewer, state)
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv JOIN #{channel}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_part(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, state):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(state, IRC.PART)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onJoinPart(self, channel, viewer, state):
callback(self, channel, viewer, state)
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv PART #{channel}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_mode_op(self):
OPCODE = IRC.OP
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, opcode):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(opcode, OPCODE)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onMode(self, channel, viewer, opcode):
callback(self, channel, viewer, opcode)
msg = ":jtv MODE #{channel} {op}o {viewer}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, op=OPCODE)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_mode_deop(self):
OPCODE = IRC.DEOP
func = inspect.stack()[0][3]
def callback(irc, channel, viewer, opcode):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(viewer, TestCallbacks.VIEWER)
self.assertEqual(opcode, OPCODE)
TestCallbacks.tests[func] = True
msg = ":jtv MODE #{channel} {op}o {viewer}".format(
viewer=TestCallbacks.VIEWER, channel=TestCallbacks.CHANNEL, op=OPCODE)
class TESTIRC(IRC):
def onMode(self, channel, viewer, opcode):
callback(self, channel, viewer, opcode)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_notice_already_r9k_on(self):
ID = IRC.ID_ALREADY_R9K_ON
MSG = "This room is already in r9k mode."
func = inspect.stack()[0][3]
def callback(irc, channel, msgid, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(msgid, ID)
self.assertEqual(message, MSG)
TestCallbacks.tests[func] = True
msg = "@msg-id={id} :tmi.twitch.tv NOTICE #{channel} :{message}".format(
id=ID, channel=TestCallbacks.CHANNEL, message=MSG)
class TESTIRC(IRC):
def onNotice(self, channel, msgid, message):
callback(self, channel, msgid, message)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_notice_emote_only_on(self):
ID = IRC.ID_EMOTE_ONLY_ON
MSG = "This room is now in emote-only mode."
func = inspect.stack()[0][3]
def callback(irc, channel, msgid, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, TestCallbacks.CHANNEL)
self.assertEqual(msgid, ID)
self.assertEqual(message, MSG)
TestCallbacks.tests[func] = True
msg = "@msg-id={id} :tmi.twitch.tv NOTICE #{channel} :{message}".format(
id=ID, channel=TestCallbacks.CHANNEL, message=MSG)
class TESTIRC(IRC):
def onNotice(self, channel, msgid, message):
callback(self, channel, msgid, message)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_host_target(self):
HOSTING = "hostingchannel"
TARGET = self.CHANNEL
AMOUNT = 8675309
func = inspect.stack()[0][3]
def callback(irc, hosting, target, amount):
self.assertIsNotNone(irc)
self.assertEqual(hosting, HOSTING)
self.assertEqual(target, TARGET)
self.assertEqual(amount, AMOUNT)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv HOSTTARGET #{hosting} :{target} {amount}".format(
hosting=HOSTING, target=TARGET, amount=AMOUNT)
class TESTIRC(IRC):
def onHostTarget(self, hosting, target, amount):
callback(self, hosting, target, amount)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_stop_host_target(self):
HOSTING = "hostingchannel"
AMOUNT = 9001
TARGET = None
func = inspect.stack()[0][3]
def callback(irc, hosting, target, amount):
self.assertIsNotNone(irc)
self.assertEqual(hosting, HOSTING)
self.assertEqual(target, TARGET)
self.assertEqual(amount, AMOUNT)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv HOSTTARGET #{hosting} :- {amount}".format(
hosting=HOSTING, amount=AMOUNT)
class TESTIRC(IRC):
def onHostTarget(self, hosting, target, amount):
callback(self, hosting, target, amount)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_clear_chat_viewer(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer):
self.assertIsNotNone(irc)
self.assertEqual(channel, self.CHANNEL)
self.assertEqual(viewer, self.VIEWER)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv CLEARCHAT #{channel} :{viewer}".format(
channel=self.CHANNEL, viewer=self.VIEWER)
class TESTIRC(IRC):
def onClearChat(self, channel, viewer):
callback(self, channel, viewer)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_clear_chat_channel(self):
func = inspect.stack()[0][3]
def callback(irc, channel, viewer):
self.assertIsNotNone(irc)
self.assertEqual(channel, self.CHANNEL)
self.assertEqual(viewer, None)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv CLEARCHAT #{channel}".format(
channel=self.CHANNEL)
class TESTIRC(IRC):
def onClearChat(self, channel, viewer):
callback(self, channel, viewer)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_usernotice(self):
MSG = "Yo, NiCe StReAm BrUh!"
func = inspect.stack()[0][3]
def callback(irc, channel, message):
self.assertIsNotNone(irc)
self.assertEqual(channel, self.CHANNEL)
self.assertEqual(message, MSG)
TestCallbacks.tests[func] = True
msg = ":tmi.twitch.tv USERNOTICE #{channel} :{message}".format(
channel=self.CHANNEL, message=MSG)
class TESTIRC(IRC):
def onUserNotice(self, channel, message):
callback(self, channel, message)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_response_override(self):
func = inspect.stack()[0][3]
msg = ":{viewer}!{viewer}@{viewer}.tmi.twitch.tv PRIVMSG #{channel} :{message}" \
.format(viewer=self.VIEWER, channel=self.CHANNEL, message="This is a message")
def callback(irc, line):
self.assertIsNotNone(irc)
self.assertEqual(line, msg.replace("\r\n", ""))
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onResponse(self, line):
callback(self, line)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onResponse(msg)
def test_on_ping_override(self):
func = inspect.stack()[0][3]
def callback(irc):
self.assertIsNotNone(irc)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onPing(self):
callback(self)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onPing()
def test_on_reconnect_override(self):
func = inspect.stack()[0][3]
def callback(irc):
self.assertIsNotNone(irc)
TestCallbacks.tests[func] = True
class TESTIRC(IRC):
def onReconnect(self):
callback(self)
chat = TESTIRC("noauth", TestCallbacks.USER)
chat.onReconnect()
def test_zzzzz_tests_passed(self):
for func, passed in TestCallbacks.tests.iteritems():
self.assertTrue(passed, "Failed Function: {}".format(func))
if __name__ == '__main__':
unittest.main()
|
en
| 0.289586
|
# allow the callbacks to be executed #{channel} :{message}".format( #{channel} :{shebang}{command} {value}".format( #{channel} :{shebang}{command} {value}".format( #{channel}".format( #{channel}".format( #{channel} {op}o {viewer}".format( #{channel} {op}o {viewer}".format( #{channel} :{message}".format( #{channel} :{message}".format( #{hosting} :{target} {amount}".format( #{hosting} :- {amount}".format( #{channel} :{viewer}".format( #{channel}".format( #{channel} :{message}".format( #{channel} :{message}" \
| 2.512167
| 3
|
topConfigs/crabConfigPPSample.py
|
mverwe/UserCode
| 0
|
6628884
|
<filename>topConfigs/crabConfigPPSample.py
# CRAB3 job configuration: run the forest producer over the 5 TeV pp
# HighPtJet80 PromptReco AOD dataset and store output on CERN EOS.
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()

config.section_('General')
config.General.requestName = 'HighPtJet80_v1'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False

config.section_('JobType')
config.JobType.pluginName = 'Analysis'
# CMSSW configuration executed by every job (75X pp data forest).
config.JobType.psetName = 'runForestAOD_pp_DATA_75X.py'

config.section_('Data')
config.Data.inputDataset = '/HighPtJet80/Run2015E-PromptReco-v1/AOD'
#config.Data.inputDBS = 'phys03'
# Split so that each job processes roughly 30k events.
config.Data.splitting = "EventAwareLumiBased"
config.Data.unitsPerJob = 30000
# Certification JSON restricting processing to validated 5 TeV lumi sections.
config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/5TeV/Cert_262081-262273_5TeV_PromptReco_Collisions15_25ns_JSON_v2.txt'
config.Data.outLFNDirBase = '/store/group/cmst3/group/hintt/mverweij/PP5TeV/data'
config.Data.publication = False #True
config.Data.outputDatasetTag = ''

config.section_('User')

config.section_('Site')
#config.Site.whitelist = ['T2_US_MIT']
#config.Site.blacklist = ['T2_US_Nebraska','T2_US_Florida','T2_US_Wisconsin','T2_US_Caltech']
config.Site.storageSite = 'T2_CH_CERN'
|
<filename>topConfigs/crabConfigPPSample.py
# CRAB3 submission config for the pp 5 TeV HighPtJet80 PromptReco sample.
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.section_('General')
config.General.requestName = 'HighPtJet80_v1'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False
config.section_('JobType')
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runForestAOD_pp_DATA_75X.py'
config.section_('Data')
config.Data.inputDataset = '/HighPtJet80/Run2015E-PromptReco-v1/AOD'
#config.Data.inputDBS = 'phys03'
# Each job handles about 30k events; lumi mask keeps only certified data.
config.Data.splitting = "EventAwareLumiBased"
config.Data.unitsPerJob = 30000
config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/5TeV/Cert_262081-262273_5TeV_PromptReco_Collisions15_25ns_JSON_v2.txt'
config.Data.outLFNDirBase = '/store/group/cmst3/group/hintt/mverweij/PP5TeV/data'
config.Data.publication = False #True
config.Data.outputDatasetTag = ''
config.section_('User')
config.section_('Site')
#config.Site.whitelist = ['T2_US_MIT']
#config.Site.blacklist = ['T2_US_Nebraska','T2_US_Florida','T2_US_Wisconsin','T2_US_Caltech']
config.Site.storageSite = 'T2_CH_CERN'
|
en
| 0.289717
|
#config.Data.inputDBS = 'phys03' #True #config.Site.whitelist = ['T2_US_MIT'] #config.Site.blacklist = ['T2_US_Nebraska','T2_US_Florida','T2_US_Wisconsin','T2_US_Caltech']
| 1.568125
| 2
|
services/traction/api/core/config.py
|
ianco/traction
| 0
|
6628885
|
import logging
import os
from enum import Enum
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings, PostgresDsn
logger = logging.getLogger(__name__)
class EnvironmentEnum(str, Enum):
    """Deployment environment names.

    str-valued so members compare equal to the raw ENVIRONMENT variable.
    """

    PRODUCTION = "production"
    LOCAL = "local"
class GlobalConfig(BaseSettings):
    """Base application settings shared by all environments.

    Annotated attributes are pydantic settings fields; un-annotated ones
    (API_V1_STR, JWT_*) remain plain class variables.
    """

    TITLE: str = "Traction"
    DESCRIPTION: str = "A digital wallet solution for organizations"
    ENVIRONMENT: EnvironmentEnum
    DEBUG: bool = False
    TESTING: bool = False
    TIMEZONE: str = "UTC"
    # the following defaults match up with default values in scripts/.env.example
    # these MUST be all set in non-local environments.
    PSQL_HOST: str = os.environ.get("POSTGRESQL_HOST", "localhost")
    PSQL_PORT: int = os.environ.get("POSTGRESQL_PORT", 5432)
    PSQL_DB: str = os.environ.get("POSTGRESQL_DB", "traction")
    PSQL_USER: str = os.environ.get("TRACTION_DB_USER", "tractionuser")
    PSQL_PASS: str = os.environ.get("TRACTION_DB_USER_PWD", "<PASSWORD>")
    PSQL_ADMIN_USER: str = os.environ.get("TRACTION_DB_ADMIN", "tractionadminuser")
    PSQL_ADMIN_PASS: str = os.environ.get("TRACTION_DB_ADMIN_PWD", "<PASSWORD>")
    # application connection is async
    # fmt: off
    SQLALCHEMY_DATABASE_URI: PostgresDsn = (
        f"postgresql+asyncpg://{PSQL_USER}:{PSQL_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
    )
    # migrations connection uses owner role and is synchronous
    SQLALCHEMY_DATABASE_ADMIN_URI: PostgresDsn = (
        f"postgresql://{PSQL_ADMIN_USER}:{PSQL_ADMIN_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
    )
    # fmt: on
    ACAPY_ADMIN_URL: str = os.environ.get("ACAPY_ADMIN_URL", "http://localhost:8031")
    ACAPY_ADMIN_URL_API_KEY: str = os.environ.get(
        "ACAPY_ADMIN_URL_API_KEY", "change-me"
    )
    TRACTION_API_ADMIN_KEY: str = os.environ.get("TRACTION_API_ADMIN_KEY", "change-me")
    DB_ECHO_LOG: bool = False
    # Api V1 prefix
    API_V1_STR = "/v1"
    # openssl rand -hex 32
    # NOTE(review): JWT secret is hard-coded here — verify it is replaced in
    # every non-local deployment.
    JWT_SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
    JWT_ALGORITHM = "HS256"
    JWT_ACCESS_TOKEN_EXPIRE_MINUTES = 300

    class Config:
        # Environment variable names must match field names exactly.
        case_sensitive = True
class LocalConfig(GlobalConfig):
    """Local configurations."""

    # Local development: debug on, explicitly tagged LOCAL.
    DEBUG: bool = True
    ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.LOCAL
class ProdConfig(GlobalConfig):
    """Production configurations."""

    # Production: debug off, explicitly tagged PRODUCTION.
    DEBUG: bool = False
    ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.PRODUCTION
class FactoryConfig:
    """Callable that picks the concrete settings class for an environment."""

    def __init__(self, environment: Optional[str]):
        # Raw value of the ENVIRONMENT variable (may be None).
        self.environment = environment

    def __call__(self) -> GlobalConfig:
        # Anything other than "local" (including None) is treated as prod.
        is_local = self.environment == EnvironmentEnum.LOCAL.value
        return LocalConfig() if is_local else ProdConfig()
@lru_cache()
def get_configuration() -> GlobalConfig:
    """Build (and memoize) the process-wide settings object."""
    factory = FactoryConfig(os.environ.get("ENVIRONMENT"))
    return factory()


# Module-level singleton imported by the rest of the application.
settings = get_configuration()
|
import logging
import os
from enum import Enum
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings, PostgresDsn
logger = logging.getLogger(__name__)
class EnvironmentEnum(str, Enum):
    # str-valued so members compare directly against the ENVIRONMENT variable.
    PRODUCTION = "production"
    LOCAL = "local"
class GlobalConfig(BaseSettings):
    """Base settings; annotated attributes are pydantic settings fields."""

    TITLE: str = "Traction"
    DESCRIPTION: str = "A digital wallet solution for organizations"
    ENVIRONMENT: EnvironmentEnum
    DEBUG: bool = False
    TESTING: bool = False
    TIMEZONE: str = "UTC"
    # the following defaults match up with default values in scripts/.env.example
    # these MUST be all set in non-local environments.
    PSQL_HOST: str = os.environ.get("POSTGRESQL_HOST", "localhost")
    PSQL_PORT: int = os.environ.get("POSTGRESQL_PORT", 5432)
    PSQL_DB: str = os.environ.get("POSTGRESQL_DB", "traction")
    PSQL_USER: str = os.environ.get("TRACTION_DB_USER", "tractionuser")
    PSQL_PASS: str = os.environ.get("TRACTION_DB_USER_PWD", "<PASSWORD>")
    PSQL_ADMIN_USER: str = os.environ.get("TRACTION_DB_ADMIN", "tractionadminuser")
    PSQL_ADMIN_PASS: str = os.environ.get("TRACTION_DB_ADMIN_PWD", "<PASSWORD>")
    # application connection is async
    # fmt: off
    SQLALCHEMY_DATABASE_URI: PostgresDsn = (
        f"postgresql+asyncpg://{PSQL_USER}:{PSQL_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
    )
    # migrations connection uses owner role and is synchronous
    SQLALCHEMY_DATABASE_ADMIN_URI: PostgresDsn = (
        f"postgresql://{PSQL_ADMIN_USER}:{PSQL_ADMIN_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
    )
    # fmt: on
    ACAPY_ADMIN_URL: str = os.environ.get("ACAPY_ADMIN_URL", "http://localhost:8031")
    ACAPY_ADMIN_URL_API_KEY: str = os.environ.get(
        "ACAPY_ADMIN_URL_API_KEY", "change-me"
    )
    TRACTION_API_ADMIN_KEY: str = os.environ.get("TRACTION_API_ADMIN_KEY", "change-me")
    DB_ECHO_LOG: bool = False
    # Api V1 prefix
    API_V1_STR = "/v1"
    # openssl rand -hex 32
    # NOTE(review): hard-coded JWT secret — confirm it is overridden in prod.
    JWT_SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
    JWT_ALGORITHM = "HS256"
    JWT_ACCESS_TOKEN_EXPIRE_MINUTES = 300

    class Config:
        case_sensitive = True
class LocalConfig(GlobalConfig):
    """Local configurations."""

    # Debug enabled for local development.
    DEBUG: bool = True
    ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.LOCAL
class ProdConfig(GlobalConfig):
    """Production configurations."""

    # Debug disabled in production.
    DEBUG: bool = False
    ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.PRODUCTION
class FactoryConfig:
    """Callable selecting LocalConfig or ProdConfig from an environment name."""

    def __init__(self, environment: Optional[str]):
        self.environment = environment

    def __call__(self) -> GlobalConfig:
        if self.environment == EnvironmentEnum.LOCAL.value:
            return LocalConfig()
        # Anything other than "local" (including None) means production.
        return ProdConfig()
@lru_cache()
def get_configuration() -> GlobalConfig:
    # Cached: the ENVIRONMENT variable is consulted once per process.
    return FactoryConfig(os.environ.get("ENVIRONMENT"))()


# Module-level singleton imported by the rest of the application.
settings = get_configuration()
|
en
| 0.766426
|
# the following defaults match up with default values in scripts/.env.example # these MUST be all set in non-local environments. # application connection is async # fmt: off # noqa: E501 # migrations connection uses owner role and is synchronous # noqa: E501 # fmt: on # Api V1 prefix # openssl rand -hex 32 Local configurations. Production configurations.
| 2.367408
| 2
|
syft/codes.py
|
shaunak-gupta/PySyft
| 1
|
6628886
|
<gh_stars>1-10
class MSGTYPE(object):
    """Integer codes for the message kinds exchanged between workers.

    Names are mirrored in the code2MSGTYPE reverse map below.
    """

    CMD = 1
    OBJ = 2
    OBJ_REQ = 3
    OBJ_DEL = 4
    EXCEPTION = 5
    IS_NONE = 6
# Reverse lookup: numeric message-type code -> symbolic name.
# A single dict literal replaces six repetitive item assignments; values
# mirror the MSGTYPE constants above.
code2MSGTYPE = {
    1: "CMD",
    2: "OBJ",
    3: "OBJ_REQ",
    4: "OBJ_DEL",
    5: "EXCEPTION",
    6: "IS_NONE",
}
|
class MSGTYPE(object):
    """Integer codes identifying the message kinds used by workers."""

    CMD = 1
    OBJ = 2
    OBJ_REQ = 3
    OBJ_DEL = 4
    EXCEPTION = 5
    IS_NONE = 6
# Reverse map: numeric code -> message-type name (mirrors MSGTYPE above).
code2MSGTYPE = {}
code2MSGTYPE[1] = "CMD"
code2MSGTYPE[2] = "OBJ"
code2MSGTYPE[3] = "OBJ_REQ"
code2MSGTYPE[4] = "OBJ_DEL"
code2MSGTYPE[5] = "EXCEPTION"
code2MSGTYPE[6] = "IS_NONE"
|
none
| 1
| 2.289822
| 2
|
|
frappe-bench/apps/erpnext/erpnext/patches/v4_2/fix_gl_entries_for_stock_transactions.py
|
Semicheche/foa_frappe_docker
| 0
|
6628887
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.utils import flt
def execute():
    """Patch: rebuild GL entries for stock vouchers whose GL balance has
    drifted from the stock ledger.

    Steps:
      1. Repost actual stock balances.
      2. For every voucher touching a warehouse account, compare the summed
         stock value difference against the warehouse GL balance.
      3. Where they differ by more than 0.1, delete and regenerate the
         voucher's GL entries; vouchers that fail are collected and printed.
    """
    from erpnext.stock.stock_balance import repost
    repost(allow_zero_rate=True, only_actual=True)
    frappe.reload_doctype("Account")
    # NOTE(review): assumes name = account head, master_name = warehouse —
    # confirm against the Account doctype of this ERPNext version.
    warehouse_account = frappe.db.sql("""select name, master_name from tabAccount
where ifnull(account_type, '') = 'Warehouse'""")
    if warehouse_account:
        warehouses = [d[1] for d in warehouse_account]
        accounts = [d[0] for d in warehouse_account]
        # All vouchers that moved stock through any warehouse account.
        stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s)
order by sle.posting_date""" %
            ', '.join(['%s']*len(warehouses)), tuple(warehouses))
        rejected = []
        for voucher_type, voucher_no in stock_vouchers:
            # Net stock value change recorded for this voucher.
            stock_bal = frappe.db.sql("""select sum(stock_value_difference) from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no =%s and warehouse in (%s)""" %
                ('%s', '%s', ', '.join(['%s']*len(warehouses))), tuple([voucher_type, voucher_no] + warehouses))
            # Net debit-minus-credit posted to the warehouse accounts.
            account_bal = frappe.db.sql("""select ifnull(sum(ifnull(debit, 0) - ifnull(credit, 0)), 0)
from `tabGL Entry`
where voucher_type=%s and voucher_no =%s and account in (%s)
group by voucher_type, voucher_no""" %
                ('%s', '%s', ', '.join(['%s']*len(accounts))), tuple([voucher_type, voucher_no] + accounts))
            # 0.1 tolerance absorbs rounding differences.
            if stock_bal and account_bal and abs(flt(stock_bal[0][0]) - flt(account_bal[0][0])) > 0.1:
                try:
                    print(voucher_type, voucher_no, stock_bal[0][0], account_bal[0][0])
                    frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
                    voucher = frappe.get_doc(voucher_type, voucher_no)
                    voucher.make_gl_entries(repost_future_gle=False)
                    # Commit per voucher so one failure doesn't undo the rest.
                    frappe.db.commit()
                except Exception as e:
                    print(frappe.get_traceback())
                    rejected.append([voucher_type, voucher_no])
                    frappe.db.rollback()
        print("Failed to repost: ")
        print(rejected)
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.utils import flt
def execute():
    """Repair GL entries for stock vouchers whose GL total disagrees with
    the stock ledger by more than 0.1; failed vouchers are printed."""
    from erpnext.stock.stock_balance import repost
    repost(allow_zero_rate=True, only_actual=True)
    frappe.reload_doctype("Account")
    warehouse_account = frappe.db.sql("""select name, master_name from tabAccount
where ifnull(account_type, '') = 'Warehouse'""")
    if warehouse_account:
        warehouses = [d[1] for d in warehouse_account]
        accounts = [d[0] for d in warehouse_account]
        stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s)
order by sle.posting_date""" %
            ', '.join(['%s']*len(warehouses)), tuple(warehouses))
        rejected = []
        for voucher_type, voucher_no in stock_vouchers:
            stock_bal = frappe.db.sql("""select sum(stock_value_difference) from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no =%s and warehouse in (%s)""" %
                ('%s', '%s', ', '.join(['%s']*len(warehouses))), tuple([voucher_type, voucher_no] + warehouses))
            account_bal = frappe.db.sql("""select ifnull(sum(ifnull(debit, 0) - ifnull(credit, 0)), 0)
from `tabGL Entry`
where voucher_type=%s and voucher_no =%s and account in (%s)
group by voucher_type, voucher_no""" %
                ('%s', '%s', ', '.join(['%s']*len(accounts))), tuple([voucher_type, voucher_no] + accounts))
            if stock_bal and account_bal and abs(flt(stock_bal[0][0]) - flt(account_bal[0][0])) > 0.1:
                try:
                    print(voucher_type, voucher_no, stock_bal[0][0], account_bal[0][0])
                    frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
                    voucher = frappe.get_doc(voucher_type, voucher_no)
                    voucher.make_gl_entries(repost_future_gle=False)
                    # Per-voucher commit; failures roll back only their own work.
                    frappe.db.commit()
                except Exception as e:
                    print(frappe.get_traceback())
                    rejected.append([voucher_type, voucher_no])
                    frappe.db.rollback()
        print("Failed to repost: ")
        print(rejected)
|
en
| 0.494912
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt select name, master_name from tabAccount where ifnull(account_type, '') = 'Warehouse' select distinct sle.voucher_type, sle.voucher_no from `tabStock Ledger Entry` sle where sle.warehouse in (%s) order by sle.posting_date select sum(stock_value_difference) from `tabStock Ledger Entry` where voucher_type=%s and voucher_no =%s and warehouse in (%s) select ifnull(sum(ifnull(debit, 0) - ifnull(credit, 0)), 0) from `tabGL Entry` where voucher_type=%s and voucher_no =%s and account in (%s) group by voucher_type, voucher_no delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s
| 1.960664
| 2
|
test_service.py
|
GovWizely/lambda-endpoint-freshen
| 0
|
6628888
|
import pytest
import vcr
from service import handler
@vcr.use_cassette()
def test_handler(monkeypatch):
    """Replays the `test_handler` cassette; a valid key yields True."""
    monkeypatch.setenv("API_KEY", "mykey")
    request = {"freshen_url": "https://api.trade.gov/v1/some_endpoint/freshen.json?api_key="}
    result = handler(request, None)
    assert result is True
@vcr.use_cassette()
def test_handler_fails(monkeypatch):
    """Replays the `test_handler_fails` cassette; a bad key yields False."""
    monkeypatch.setenv("API_KEY", "badkey")
    request = {"freshen_url": "https://api.trade.gov/v1/some_endpoint/freshen.json?api_key="}
    result = handler(request, None)
    assert result is False
@vcr.use_cassette()
def test_handler_raises(monkeypatch):
    """Replays the `test_handler_raises` cassette; an unreachable host
    makes the handler report False rather than raise."""
    monkeypatch.setenv("API_KEY", "mykey")
    request = {"freshen_url": "https://hostdoesnotexist.gov/v1/some_endpoint/freshen.json?api_key="}
    result = handler(request, None)
    assert result is False
def test_api_key_set(monkeypatch):
    """A missing API_KEY environment variable must raise KeyError."""
    monkeypatch.delenv("API_KEY", raising=False)
    request = {"freshen_url": "doesn't matter"}
    with pytest.raises(KeyError):
        handler(request, None)
|
import pytest
import vcr
from service import handler
@vcr.use_cassette()
def test_handler(monkeypatch):
    """Reads from the `test_handler` cassette and processes the request
    """
    monkeypatch.setenv("API_KEY", "mykey")
    # NOTE(review): URL ends at `api_key=` — presumably the handler appends
    # the env key; confirm in service.handler.
    event = dict(freshen_url="https://api.trade.gov/v1/some_endpoint/freshen.json?api_key=")
    resp = handler(event, None)
    assert resp is True
@vcr.use_cassette()
def test_handler_fails(monkeypatch):
    """Reads from the `test_handler_fails` cassette and processes the invalid request
    """
    # Bad key: the recorded response drives the handler to return False.
    monkeypatch.setenv("API_KEY", "badkey")
    event = dict(freshen_url="https://api.trade.gov/v1/some_endpoint/freshen.json?api_key=")
    resp = handler(event, None)
    assert resp is False
@vcr.use_cassette()
def test_handler_raises(monkeypatch):
    """Reads from the `test_handler_raises` cassette and processes the request that raises
    """
    monkeypatch.setenv("API_KEY", "mykey")
    # Nonexistent host: the handler is expected to swallow the error and
    # report failure instead of raising.
    event = dict(freshen_url="https://hostdoesnotexist.gov/v1/some_endpoint/freshen.json?api_key=")
    resp = handler(event, None)
    assert resp is False
def test_api_key_set(monkeypatch):
    """ Ensures exception raised if api key is not set
    """
    monkeypatch.delenv("API_KEY", raising=False)
    event = dict(freshen_url="doesn't matter")
    # Missing API_KEY must surface as a KeyError from the handler.
    with pytest.raises(KeyError):
        handler(event, None)
|
en
| 0.772495
|
Reads from the `test_handler` cassette and processes the request Reads from the `test_handler_fails` cassette and processes the invalid request Reads from the `test_handler_raises` cassette and processes the request that raises Ensures exception raised if api key is not set
| 2.425919
| 2
|
quantum/plugins/nec/drivers/pfc.py
|
hyunsun/quantum
| 1
|
6628889
|
<gh_stars>1-10
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: <NAME>
# @author: <NAME>
import re
import uuid
from quantum.plugins.nec.common import ofc_client
from quantum.plugins.nec.db import api as ndb
from quantum.plugins.nec import ofc_driver_base
class PFCDriverBase(ofc_driver_base.OFCDriverBase):
    """Base Class for PDC Drivers

    PFCDriverBase provides methods to handle PFC resources through REST API.
    This uses ofc resource path instead of ofc resource ID.
    The class implements the API for PFC V4.0 or later.
    """

    def __init__(self, conf_ofc):
        # REST client bound to the PFC controller endpoint.
        self.client = ofc_client.OFCClient(host=conf_ofc.host,
                                           port=conf_ofc.port,
                                           use_ssl=conf_ofc.use_ssl,
                                           key_file=conf_ofc.key_file,
                                           cert_file=conf_ofc.cert_file)

    @classmethod
    def filter_supported(cls):
        # Packet filters are not supported by this driver.
        return False

    def _generate_pfc_str(self, raw_str):
        """Generate PFC acceptable String"""
        return re.sub(r'[^0-9a-zA-Z]', '_', raw_str)

    def _generate_pfc_id(self, id_str):
        """Generate ID on PFC

        Currently, PFC ID must be less than 32.
        Shorten UUID string length from 36 to 31 by follows:
          * delete UUID Version and hyphen (see RFC4122)
          * ensure str length
        """
        try:
            # openstack.common.uuidutils.is_uuid_like() returns
            # False for KeyStone tenant_id, so uuid.UUID is used
            # directly here to accept tenant_id as UUID string
            uuid_str = str(uuid.UUID(id_str)).replace('-', '')
            uuid_no_version = uuid_str[:12] + uuid_str[13:]
            return uuid_no_version[:31]
        except (ValueError, AttributeError, TypeError):
            # Not UUID-like: fall back to a sanitized, truncated string.
            # Fix: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; uuid.UUID raises only these types
            # for malformed / non-string input.
            return self._generate_pfc_str(id_str)[:31]

    def _generate_pfc_description(self, desc):
        """Generate Description on PFC

        Currently, PFC Description must be less than 128.
        """
        return self._generate_pfc_str(desc)[:127]

    def create_tenant(self, description, tenant_id=None):
        """Create a tenant on PFC and return its resource path."""
        ofc_tenant_id = self._generate_pfc_id(tenant_id)
        body = {'id': ofc_tenant_id}
        res = self.client.post('/tenants', body=body)
        return '/tenants/' + ofc_tenant_id

    def delete_tenant(self, ofc_tenant_id):
        return self.client.delete(ofc_tenant_id)

    def create_network(self, ofc_tenant_id, description, network_id=None):
        """Create a network under the given tenant; return its resource path."""
        path = "%s/networks" % ofc_tenant_id
        pfc_desc = self._generate_pfc_description(description)
        body = {'description': pfc_desc}
        res = self.client.post(path, body=body)
        ofc_network_id = res['id']
        return path + '/' + ofc_network_id

    def delete_network(self, ofc_network_id):
        return self.client.delete(ofc_network_id)

    def create_port(self, ofc_network_id, portinfo,
                    port_id=None):
        """Attach a switch port (datapath/port/vlan) to a network."""
        path = "%s/ports" % ofc_network_id
        body = {'datapath_id': portinfo.datapath_id,
                'port': str(portinfo.port_no),
                'vid': str(portinfo.vlan_id)}
        res = self.client.post(path, body=body)
        ofc_port_id = res['id']
        return path + '/' + ofc_port_id

    def delete_port(self, ofc_port_id):
        return self.client.delete(ofc_port_id)

    def convert_ofc_tenant_id(self, context, ofc_tenant_id):
        """Convert an old-style tenant ID to its resource-path form."""
        # If ofc_tenant_id starts with '/', it is already new-style
        if ofc_tenant_id[0] == '/':
            return ofc_tenant_id
        return '/tenants/%s' % ofc_tenant_id

    def convert_ofc_network_id(self, context, ofc_network_id, tenant_id):
        """Convert an old-style network ID to its resource-path form."""
        # If ofc_network_id starts with '/', it is already new-style
        if ofc_network_id[0] == '/':
            return ofc_network_id
        ofc_tenant_id = ndb.get_ofc_id_lookup_both(
            context.session, 'ofc_tenant', tenant_id)
        ofc_tenant_id = self.convert_ofc_tenant_id(context, ofc_tenant_id)
        params = dict(tenant=ofc_tenant_id, network=ofc_network_id)
        return '%(tenant)s/networks/%(network)s' % params

    def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
        """Convert an old-style port ID to its resource-path form."""
        # If ofc_port_id starts with '/', it is already new-style
        if ofc_port_id[0] == '/':
            return ofc_port_id
        ofc_network_id = ndb.get_ofc_id_lookup_both(
            context.session, 'ofc_network', network_id)
        ofc_network_id = self.convert_ofc_network_id(
            context, ofc_network_id, tenant_id)
        params = dict(network=ofc_network_id, port=ofc_port_id)
        return '%(network)s/ports/%(port)s' % params
class PFCV3Driver(PFCDriverBase):
    """Driver variant for PFC V3: create_tenant only fabricates the
    resource path (no REST call) and delete_tenant is a no-op."""

    def create_tenant(self, description, tenant_id):
        ofc_tenant_id = self._generate_pfc_id(tenant_id)
        return "/tenants/" + ofc_tenant_id

    def delete_tenant(self, ofc_tenant_id):
        pass
class PFCV4Driver(PFCDriverBase):
    # PFC V4.0+ uses the base-class behavior unchanged.
    pass
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: <NAME>
# @author: <NAME>
import re
import uuid
from quantum.plugins.nec.common import ofc_client
from quantum.plugins.nec.db import api as ndb
from quantum.plugins.nec import ofc_driver_base
class PFCDriverBase(ofc_driver_base.OFCDriverBase):
    """Base Class for PDC Drivers

    PFCDriverBase provides methods to handle PFC resources through REST API.
    This uses ofc resource path instead of ofc resource ID.
    The class implements the API for PFC V4.0 or later.
    """

    def __init__(self, conf_ofc):
        # REST client bound to the PFC controller endpoint.
        self.client = ofc_client.OFCClient(host=conf_ofc.host,
                                           port=conf_ofc.port,
                                           use_ssl=conf_ofc.use_ssl,
                                           key_file=conf_ofc.key_file,
                                           cert_file=conf_ofc.cert_file)

    @classmethod
    def filter_supported(cls):
        # Packet filters are not supported by this driver.
        return False

    def _generate_pfc_str(self, raw_str):
        """Generate PFC acceptable String"""
        return re.sub(r'[^0-9a-zA-Z]', '_', raw_str)

    def _generate_pfc_id(self, id_str):
        """Generate ID on PFC

        Currently, PFC ID must be less than 32.
        Shorten UUID string length from 36 to 31 by follows:
          * delete UUID Version and hyphen (see RFC4122)
          * ensure str length
        """
        try:
            # openstack.common.uuidutils.is_uuid_like() returns
            # False for KeyStone tenant_id, so uuid.UUID is used
            # directly here to accept tenant_id as UUID string
            uuid_str = str(uuid.UUID(id_str)).replace('-', '')
            uuid_no_version = uuid_str[:12] + uuid_str[13:]
            return uuid_no_version[:31]
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt — should be narrowed to the errors uuid.UUID
        # raises (ValueError/TypeError/AttributeError).
        except:
            return self._generate_pfc_str(id_str)[:31]

    def _generate_pfc_description(self, desc):
        """Generate Description on PFC

        Currently, PFC Description must be less than 128.
        """
        return self._generate_pfc_str(desc)[:127]

    def create_tenant(self, description, tenant_id=None):
        # Create a tenant on PFC; returns the new resource path.
        ofc_tenant_id = self._generate_pfc_id(tenant_id)
        body = {'id': ofc_tenant_id}
        res = self.client.post('/tenants', body=body)
        return '/tenants/' + ofc_tenant_id

    def delete_tenant(self, ofc_tenant_id):
        return self.client.delete(ofc_tenant_id)

    def create_network(self, ofc_tenant_id, description, network_id=None):
        # Create a network under the tenant; returns its resource path.
        path = "%s/networks" % ofc_tenant_id
        pfc_desc = self._generate_pfc_description(description)
        body = {'description': pfc_desc}
        res = self.client.post(path, body=body)
        ofc_network_id = res['id']
        return path + '/' + ofc_network_id

    def delete_network(self, ofc_network_id):
        return self.client.delete(ofc_network_id)

    def create_port(self, ofc_network_id, portinfo,
                    port_id=None):
        # Attach a switch port (datapath/port/vlan) to the network.
        path = "%s/ports" % ofc_network_id
        body = {'datapath_id': portinfo.datapath_id,
                'port': str(portinfo.port_no),
                'vid': str(portinfo.vlan_id)}
        res = self.client.post(path, body=body)
        ofc_port_id = res['id']
        return path + '/' + ofc_port_id

    def delete_port(self, ofc_port_id):
        return self.client.delete(ofc_port_id)

    def convert_ofc_tenant_id(self, context, ofc_tenant_id):
        # If ofc_tenant_id starts with '/', it is already new-style
        if ofc_tenant_id[0] == '/':
            return ofc_tenant_id
        return '/tenants/%s' % ofc_tenant_id

    def convert_ofc_network_id(self, context, ofc_network_id, tenant_id):
        # If ofc_network_id starts with '/', it is already new-style
        if ofc_network_id[0] == '/':
            return ofc_network_id
        ofc_tenant_id = ndb.get_ofc_id_lookup_both(
            context.session, 'ofc_tenant', tenant_id)
        ofc_tenant_id = self.convert_ofc_tenant_id(context, ofc_tenant_id)
        params = dict(tenant=ofc_tenant_id, network=ofc_network_id)
        return '%(tenant)s/networks/%(network)s' % params

    def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
        # If ofc_port_id starts with '/', it is already new-style
        if ofc_port_id[0] == '/':
            return ofc_port_id
        ofc_network_id = ndb.get_ofc_id_lookup_both(
            context.session, 'ofc_network', network_id)
        ofc_network_id = self.convert_ofc_network_id(
            context, ofc_network_id, tenant_id)
        params = dict(network=ofc_network_id, port=ofc_port_id)
        return '%(network)s/ports/%(port)s' % params
class PFCV3Driver(PFCDriverBase):
    """V3 variant: tenant creation makes no REST call; deletion is a no-op."""

    def create_tenant(self, description, tenant_id):
        ofc_tenant_id = self._generate_pfc_id(tenant_id)
        return "/tenants/" + ofc_tenant_id

    def delete_tenant(self, ofc_tenant_id):
        pass
class PFCV4Driver(PFCDriverBase):
    # Inherits all PFCDriverBase behavior unchanged.
    pass
|
en
| 0.774938
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # @author: <NAME> # @author: <NAME> Base Class for PDC Drivers PFCDriverBase provides methods to handle PFC resources through REST API. This uses ofc resource path instead of ofc resource ID. The class implements the API for PFC V4.0 or later. Generate PFC acceptable String Generate ID on PFC Currently, PFC ID must be less than 32. Shorten UUID string length from 36 to 31 by follows: * delete UUID Version and hyphen (see RFC4122) * ensure str length # openstack.common.uuidutils.is_uuid_like() returns # False for KeyStone tenant_id, so uuid.UUID is used # directly here to accept tenant_id as UUID string Generate Description on PFC Currently, PFC Description must be less than 128. # If ofc_tenant_id starts with '/', it is already new-style # If ofc_network_id starts with '/', it is already new-style # If ofc_port_id starts with '/', it is already new-style
| 2.009288
| 2
|
gui/v1.1/track_gui2.py
|
vt-gs/tracking
| 0
|
6628890
|
<reponame>vt-gs/tracking
#!/usr/bin/env python
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
import numpy as np
from datetime import datetime as date
import sys
from az_QwtDial import *
from el_QwtDial import *
import time
from gpredict import *
class main_widget(QtGui.QWidget):
    """Central widget of the tracking GUI: a bare QWidget holding an
    empty grid layout (populated elsewhere, presumably by MainWindow —
    confirm against the tab-setup methods)."""

    def __init__(self):
        super(main_widget, self).__init__()
        self.initUI()

    def initUI(self):
        # Grid layout kept as an attribute so other code can add widgets.
        self.grid = QtGui.QGridLayout()
        self.setLayout(self.grid)
class MainWindow(QtGui.QMainWindow):
def __init__(self, ip, port):
#QtGui.QMainWindow.__init__(self)
super(MainWindow, self).__init__()
self.resize(1000, 525)
self.setMinimumWidth(800)
#self.setMaximumWidth(900)
self.setMinimumHeight(450)
#self.setMaximumHeight(700)
self.setWindowTitle('VTGS Tracking GUI v1.1')
self.setContentsMargins(0,0,0,0)
self.main_window = main_widget()
self.setCentralWidget(self.main_window)
self.ip = ip
self.port = port
self.cur_az = 0
self.tar_az = 0
self.cur_el = 0
self.tar_el = 0
self.pred_az = 0.0
self.pred_el = 0.0
self.home_az = 0.0
self.home_el = 0.0
self.callback = None #Callback accessor for tracking control
self.update_rate = 250 #Feedback Query Auto Update Interval in milliseconds
self.gpredict = None #Callback accessor for gpredict thread control
self.pred_conn_stat = 0 #Gpredict Connection Status, 0=Disconnected, 1=Listening, 2=Connected
self.autoTrack = False #auto track mode, True = Auto, False = Manual
self.statusBar().showMessage("| Disconnected | Manual | Current Az: 000.0 | Current El: 000.0 |")
self.initUI()
self.darken()
self.setFocus()
def initUI(self):
self.initTabControl()
self.initMainTab()
self.initCalTab()
self.initTimers()
self.connectSignals()
self.show()
def initTimers(self):
self.updateTimer = QtCore.QTimer(self)
#self.updateTimer.setInterval(self.update_rate)
self.updateTimer.start(self.update_rate)
#Timer used to Poll the GPredict Server thread for updates
self.predictTimer = QtCore.QTimer(self)
self.predictTimer.setInterval(self.update_rate)
def connectSignals(self):
self.azPlusPtOneButton.clicked.connect(self.azPlusPtOneButtonClicked)
self.azPlusOneButton.clicked.connect(self.azPlusOneButtonClicked)
self.azPlusTenButton.clicked.connect(self.azPlusTenButtonClicked)
self.azMinusPtOneButton.clicked.connect(self.azMinusPtOneButtonClicked)
self.azMinusOneButton.clicked.connect(self.azMinusOneButtonClicked)
self.azMinusTenButton.clicked.connect(self.azMinusTenButtonClicked)
self.azTextBox.returnPressed.connect(self.azTextBoxReturnPressed)
self.elPlusPtOneButton.clicked.connect(self.elPlusPtOneButtonClicked)
self.elPlusOneButton.clicked.connect(self.elPlusOneButtonClicked)
self.elPlusTenButton.clicked.connect(self.elPlusTenButtonClicked)
self.elMinusPtOneButton.clicked.connect(self.elMinusPtOneButtonClicked)
self.elMinusOneButton.clicked.connect(self.elMinusOneButtonClicked)
self.elMinusTenButton.clicked.connect(self.elMinusTenButtonClicked)
self.elTextBox.returnPressed.connect(self.elTextBoxReturnPressed)
self.predictButton.clicked.connect(self.predictButtonEvent)
self.queryButton.clicked.connect(self.queryButtonEvent)
self.StopButton.clicked.connect(self.stopButtonEvent)
self.homeButton.clicked.connect(self.homeButtonEvent)
self.updateButton.clicked.connect(self.updateButtonEvent)
self.autoQuery_cb.stateChanged.connect(self.catchAutoQueryEvent)
QtCore.QObject.connect(self.updateTimer, QtCore.SIGNAL('timeout()'), self.queryButtonEvent)
QtCore.QObject.connect(self.predictTimer, QtCore.SIGNAL('timeout()'), self.predictTimerEvent)
QtCore.QObject.connect(self.fb_query_rate_le, QtCore.SIGNAL('editingFinished()'), self.updateRate)
QtCore.QObject.connect(self.ipAddrTextBox, QtCore.SIGNAL('editingFinished()'), self.updateIPAddress)
QtCore.QObject.connect(self.portTextBox, QtCore.SIGNAL('editingFinished()'), self.updatePort)
self.ssidCombo.activated[int].connect(self.updateSSIDEvent)
def setCallback(self, callback):
self.callback = callback
def setGpredictCallback(self, callback):
self.gpredict = callback
def predictTimerEvent(self):
self.pred_conn_stat = self.gpredict.getConnectionStatus()
if self.pred_conn_stat == 2:
self.pred_az, self.pred_el = self.gpredict.getTargetAngles()
self.gpredict.updateCurrentAngles(self.cur_az, self.cur_el)
self.pred_az_lbl.setText(str(round(self.pred_az,1)))
self.pred_el_lbl.setText(str(round(self.pred_el,1)))
if self.autoTrack_cb.isChecked() == True:
self.tar_az = round(self.pred_az, 1)
self.tar_el = round(self.pred_el,1)
self.updateAzimuth()
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
self.updatePredictStatus()
def predictButtonEvent(self):
if self.pred_conn_stat == 0: #Disconnected, Start Connection Thread
ip = self.ipAddrTextBox.text()
port = int(self.portTextBox.text())
self.gpredict = Gpredict_Thread(self, ip, port,1)
self.gpredict.daemon = True
self.gpredict.start()
self.pred_conn_stat = 1 #listening
self.updatePredictStatus()
self.predictTimer.start()
elif ((self.pred_conn_stat == 1) or (self.pred_conn_stat == 2)):
self.gpredict.stop()
self.gpredict.join()
self.predictTimer.stop()
self.pred_conn_stat = 0
self.updatePredictStatus()
def updatePredictStatus(self):
if self.pred_conn_stat == 0: #Disconnected
self.predictButton.setText('Connect')
self.pred_status_lbl.setText("Disconnected")
self.pred_status_lbl.setStyleSheet("QLabel { font-weight:bold; color:rgb(255,0,0) ; }")
self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
self.ipAddrTextBox.setEnabled(True)
self.portTextBox.setEnabled(True)
elif self.pred_conn_stat == 1: #Listening
self.predictButton.setText('Disconnect')
self.pred_status_lbl.setText("Listening...")
self.pred_status_lbl.setStyleSheet("QLabel { font-weight:bold; color:rgb(255,255,0) ; }")
self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.ipAddrTextBox.setEnabled(False)
self.portTextBox.setEnabled(False)
elif self.pred_conn_stat == 2: #Connected
self.predictButton.setText('Disconnect')
self.pred_status_lbl.setText("Connected")
self.pred_status_lbl.setStyleSheet("QLabel { font-weight:bold; color:rgb(0,255,0) ; }")
self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.ipAddrTextBox.setEnabled(False)
self.portTextBox.setEnabled(False)
def updateSSIDEvent(self, idx):
if idx == 0: #VUL
self.ssid = 'VUL'
elif idx == 1: #3M0
self.ssid = '3M0'
elif idx == 2: #4M5
self.ssid = '4M5'
elif idx == 3: #WX
self.ssid = 'WX'
print self.utc_ts() + "GUI | Updated Subsystem ID: " + self.ssid
self.callback.set_ssid(self.ssid)
def updateButtonEvent(self):
self.tar_az = float(self.azTextBox.text())
self.tar_el = float(self.elTextBox.text())
self.updateAzimuth()
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def homeButtonEvent(self):
self.tar_az = self.home_az
self.tar_el = self.home_el
self.updateAzimuth()
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def stopButtonEvent(self):
status, self.cur_az, self.cur_el = self.callback.set_stop()
if status != -1:
self.az_compass.set_cur_az(self.cur_az)
self.el_compass.set_cur_el(self.cur_el)
def queryButtonEvent(self):
status, self.cur_az, self.cur_el = self.callback.get_status()
if status != -1:
self.az_compass.set_cur_az(self.cur_az)
self.el_compass.set_cur_el(self.cur_el)
else:
self.autoQuery_cb.setCheckState(QtCore.Qt.Unchecked)
def catchAutoQueryEvent(self, state):
CheckState = (state == QtCore.Qt.Checked)
if CheckState == True:
self.updateTimer.start()
print self.utc_ts() + "GUI | Started Auto Update, Interval: " + str(self.update_rate) + " [ms]"
else:
self.updateTimer.stop()
print self.utc_ts() + "GUI | Stopped Auto Update"
def updateRate(self):
self.update_rate = float(self.fb_query_rate_le.text()) * 1000.0
self.updateTimer.setInterval(self.update_rate)
self.predictTimer.setInterval(self.update_rate)
print self.utc_ts() + "GUI | Updated Rate Interval to " + str(self.update_rate) + " [ms]"
def updateIPAddress(self):
self.ip = self.ipAddrTextBox.text()
def updatePort(self):
self.port = self.portTextBox.text()
def azTextBoxReturnPressed(self):
self.tar_az = float(self.azTextBox.text())
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azPlusPtOneButtonClicked(self):
self.tar_az = self.tar_az + 0.1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azPlusOneButtonClicked(self):
self.tar_az = self.tar_az + 1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azPlusTenButtonClicked(self):
self.tar_az = self.tar_az + 10
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azMinusPtOneButtonClicked(self):
self.tar_az = self.tar_az - 0.1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azMinusOneButtonClicked(self):
self.tar_az = self.tar_az - 1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azMinusTenButtonClicked(self):
self.tar_az = self.tar_az - 10
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def updateAzimuth(self):
if self.tar_az < -180.0:
self.tar_az = -180.0
self.azTextBox.setText(str(self.tar_az))
if self.tar_az > 540.0:
self.tar_az = 540.0
self.azTextBox.setText(str(self.tar_az))
self.az_compass.set_tar_az(self.tar_az)
def elTextBoxReturnPressed(self):
self.tar_el = float(self.elTextBox.text())
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elPlusPtOneButtonClicked(self):
self.tar_el = self.tar_el + 0.1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elPlusOneButtonClicked(self):
self.tar_el = self.tar_el + 1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elPlusTenButtonClicked(self):
self.tar_el = self.tar_el + 10
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elMinusPtOneButtonClicked(self):
self.tar_el = self.tar_el - 0.1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elMinusOneButtonClicked(self):
self.tar_el = self.tar_el - 1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elMinusTenButtonClicked(self):
self.tar_el = self.tar_el - 10
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def updateElevation(self):
if self.tar_el < 0:
self.tar_el = 0
self.elTextBox.setText(str(self.tar_el))
if self.tar_el > 180:
self.tar_el = 180
self.elTextBox.setText(str(self.tar_el))
self.el_compass.set_tar_el(self.tar_el)
    def initTabControl(self):
        """Create the south-positioned tab bar with Main/Cal/Config tabs,
        paint each tab background black, and add the tab widget to the
        central widget's grid."""
        self.tabs = QtGui.QTabWidget()
        self.tabs.setTabPosition(QtGui.QTabWidget.South)
        # Main tab
        self.main_tab = QtGui.QWidget()
        self.main_tab.grid = QtGui.QGridLayout()
        self.tabs.addTab(self.main_tab,"Main")
        self.main_tab.setAutoFillBackground(True)
        p = self.main_tab.palette()
        p.setColor(self.main_tab.backgroundRole(), QtCore.Qt.black)
        self.main_tab.setPalette(p)
        # Calibration tab
        self.cal_tab = QtGui.QWidget()
        self.cal_tab.grid = QtGui.QGridLayout()
        self.tabs.addTab(self.cal_tab,"Cal")
        self.cal_tab.setAutoFillBackground(True)
        p = self.cal_tab.palette()
        p.setColor(self.cal_tab.backgroundRole(), QtCore.Qt.black)
        self.cal_tab.setPalette(p)
        # Configuration tab (currently empty; grid is created but unused here)
        self.config_tab = QtGui.QWidget()
        self.config_tab_grid = QtGui.QGridLayout()
        self.tabs.addTab(self.config_tab,"Config")
        self.config_tab.setAutoFillBackground(True)
        p = self.config_tab.palette()
        p.setColor(self.config_tab.backgroundRole(), QtCore.Qt.black)
        self.config_tab.setPalette(p)
        self.main_window.grid.addWidget(self.tabs)
    def initMainTab(self):
        """Build the Main tab: control frame on the left, azimuth and
        elevation dials with their nudge-button rows on the right."""
        self.initMainTabFrames()
        self.initDials()
        #Init Az Increment Control
        self.initAzControls()
        #Init El Increment Control
        self.initElControls()
        #Init Control Frame
        self.initControlFrame()
        self.main_tab_grid = QtGui.QGridLayout()
        # Layout: control frame spans both rows; each dial sits above its
        # button row.
        self.main_tab_grid.addWidget(self.control_fr ,0,0,2,2)
        self.main_tab_grid.addWidget(self.az_dial_fr ,0,2,1,3)
        self.main_tab_grid.addWidget(self.az_ctrl_fr ,1,2,1,3)
        self.main_tab_grid.addWidget(self.el_dial_fr ,0,5,1,3)
        self.main_tab_grid.addWidget(self.el_ctrl_fr ,1,5,1,3)
        self.main_tab_grid.setRowStretch(0,1)
        self.main_tab_grid.setColumnStretch(2,1)
        self.main_tab_grid.setColumnStretch(5,1)
        self.main_tab.setLayout(self.main_tab_grid)
    def initMainTabFrames(self):
        """Create the styled frames for the dials, the nudge-button rows
        and the left-hand control column.  Dial frames get their grid
        layouts here; the ctrl frames are laid out by init*Controls()."""
        self.az_dial_fr = QtGui.QFrame(self)
        self.az_dial_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.az_dial_fr_grid = QtGui.QGridLayout()
        self.az_dial_fr.setLayout(self.az_dial_fr_grid)
        self.az_ctrl_fr = QtGui.QFrame()
        self.az_ctrl_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.el_dial_fr = QtGui.QFrame(self)
        self.el_dial_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.el_dial_fr_grid = QtGui.QGridLayout()
        self.el_dial_fr.setLayout(self.el_dial_fr_grid)
        self.el_ctrl_fr = QtGui.QFrame()
        self.el_ctrl_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.control_fr = QtGui.QFrame(self)
        self.control_fr.setFrameShape(QtGui.QFrame.StyledPanel)
def initDials(self):
self.el_compass = el_QwtDial(self.el_dial_fr_grid)
self.az_compass = az_QwtDial(self.az_dial_fr_grid)
def initAzControls(self):
self.azMinusTenButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azMinusTenButton.setText("-10.0")
self.azMinusTenButton.setMinimumWidth(45)
self.azMinusOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azMinusOneButton.setText("-1.0")
self.azMinusOneButton.setMinimumWidth(45)
self.azMinusPtOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azMinusPtOneButton.setText("-0.1")
self.azMinusPtOneButton.setMinimumWidth(45)
self.azPlusPtOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azPlusPtOneButton.setText("+0.1")
self.azPlusPtOneButton.setMinimumWidth(45)
self.azPlusOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azPlusOneButton.setText("+1.0")
self.azPlusOneButton.setMinimumWidth(45)
self.azPlusTenButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azPlusTenButton.setText("+10.0")
self.azPlusTenButton.setMinimumWidth(45)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.azMinusTenButton)
hbox1.addWidget(self.azMinusOneButton)
hbox1.addWidget(self.azMinusPtOneButton)
hbox1.addWidget(self.azPlusPtOneButton)
hbox1.addWidget(self.azPlusOneButton)
hbox1.addWidget(self.azPlusTenButton)
self.az_ctrl_fr.setLayout(hbox1)
def initElControls(self):
self.elMinusTenButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elMinusTenButton.setText("-10.0")
self.elMinusTenButton.setMinimumWidth(45)
self.elMinusOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elMinusOneButton.setText("-1.0")
self.elMinusOneButton.setMinimumWidth(45)
self.elMinusPtOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elMinusPtOneButton.setText("-0.1")
self.elMinusPtOneButton.setMinimumWidth(45)
self.elPlusPtOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elPlusPtOneButton.setText("+0.1")
self.elPlusPtOneButton.setMinimumWidth(45)
self.elPlusOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elPlusOneButton.setText("+1.0")
self.elPlusOneButton.setMinimumWidth(45)
self.elPlusTenButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elPlusTenButton.setText("+10.0")
self.elPlusTenButton.setMinimumWidth(45)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.elMinusTenButton)
hbox1.addWidget(self.elMinusOneButton)
hbox1.addWidget(self.elMinusPtOneButton)
hbox1.addWidget(self.elPlusPtOneButton)
hbox1.addWidget(self.elPlusOneButton)
hbox1.addWidget(self.elPlusTenButton)
self.el_ctrl_fr.setLayout(hbox1)
    def initControlFrame(self):
        """Create the left-hand control column: entry boxes, direct motor
        drive pad and Gpredict panel stacked vertically."""
        self.entry_fr = QtGui.QFrame(self)
        self.entry_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.predict_fr = QtGui.QFrame(self)
        self.predict_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.dir_fr = QtGui.QFrame(self)
        self.dir_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        #self.dir_fr.setEnabled(False)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.entry_fr)
        vbox.addWidget(self.dir_fr)
        vbox.addWidget(self.predict_fr)
        # Populate the frames only after they exist in the layout.
        self.initEntryBoxControls()
        self.initMotorCtrl()
        self.initGpredict()
        self.control_fr.setLayout(vbox)
    def initEntryBoxControls(self):
        """Build the manual-entry panel: Az/El text boxes, Home/Update and
        Query buttons, subsystem combo and Auto Query rate control."""
        self.updateButton = QtGui.QPushButton("Update")
        self.homeButton = QtGui.QPushButton("Home")
        self.queryButton = QtGui.QPushButton("Query")
        # Azimuth entry (mask allows a sign and one decimal place)
        self.azLabel = QtGui.QLabel("Az:")
        self.azLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.azLabel.setStyleSheet("QLabel {color:rgb(0,0,255);}")
        self.azTextBox = QtGui.QLineEdit()
        self.azTextBox.setText("000.0")
        self.azTextBox.setInputMask("#000.0;")
        self.azTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.azTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.azTextBox.setMaxLength(5)
        self.azTextBox.setFixedWidth(60)
        self.azTextBox.setFixedHeight(20)
        # Elevation entry (unsigned mask)
        self.elLabel = QtGui.QLabel("El:")
        self.elLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.elLabel.setStyleSheet("QLabel {color:rgb(0,0,255);}")
        self.elTextBox = QtGui.QLineEdit(self.el_dial_fr)
        self.elTextBox.setText("000.0")
        self.elTextBox.setInputMask("000.0;")
        self.elTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.elTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.elTextBox.setMaxLength(5)
        self.elTextBox.setFixedWidth(60)
        self.elTextBox.setFixedHeight(20)
        # Subsystem selector; indices map to IDs in updateSSIDEvent().
        self.ssidLabel = QtGui.QLabel("Subsystem:")
        self.ssidLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.ssidLabel.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        self.ssidCombo = QtGui.QComboBox(self)
        self.ssidCombo.addItem("VHF/UHF")
        self.ssidCombo.addItem("3.0m Dish")
        self.ssidCombo.addItem("4.5m Dish")
        self.ssidCombo.addItem("NOAA WX")
        # Feedback query rate entry, in seconds (converted in updateRate()).
        self.fb_query_rate_le = QtGui.QLineEdit()
        self.fb_query_rate_le.setText("0.25")
        self.query_val = QtGui.QDoubleValidator()
        self.fb_query_rate_le.setValidator(self.query_val)
        self.fb_query_rate_le.setEchoMode(QtGui.QLineEdit.Normal)
        self.fb_query_rate_le.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.fb_query_rate_le.setMaxLength(4)
        self.fb_query_rate_le.setFixedWidth(50)
        self.autoQuery_cb = QtGui.QCheckBox("Auto Query [s]")
        self.autoQuery_cb.setStyleSheet("QCheckBox { background-color:rgb(0,0,0); color:rgb(255,0,0); }")
        self.autoQuery_cb.setChecked(True)
        # Assemble: az/el row, Home/Update row, Query/subsystem row,
        # auto-query row.
        az_hbox = QtGui.QHBoxLayout()
        az_hbox.addWidget(self.azLabel)
        az_hbox.addWidget(self.azTextBox)
        el_hbox = QtGui.QHBoxLayout()
        el_hbox.addWidget(self.elLabel)
        el_hbox.addWidget(self.elTextBox)
        btn_hbox = QtGui.QHBoxLayout()
        btn_hbox.addWidget(self.homeButton)
        btn_hbox.addWidget(self.updateButton)
        ssid_hbox = QtGui.QHBoxLayout()
        ssid_hbox.addWidget(self.queryButton)
        ssid_hbox.addWidget(self.ssidCombo)
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.autoQuery_cb)
        hbox1.addWidget(self.fb_query_rate_le)
        hbox2 = QtGui.QHBoxLayout()
        hbox2.addLayout(az_hbox)
        hbox2.addLayout(el_hbox)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox2)
        vbox.addLayout(btn_hbox)
        vbox.addLayout(ssid_hbox)
        vbox.addLayout(hbox1)
        self.entry_fr.setLayout(vbox)
def initMotorCtrl(self):
self.motor_lbl = QtGui.QLabel(" Direct Motor Drive ")
self.motor_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.motor_lbl.setFixedHeight(20)
self.motor_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.UpLeftButton = QtGui.QPushButton("U+L")
self.UpLeftButton.setFixedWidth(50)
self.UpLeftButton.setFixedHeight(20)
self.UpButton = QtGui.QPushButton("Up")
self.UpButton.setFixedWidth(50)
self.UpButton.setFixedHeight(20)
self.UpRightButton = QtGui.QPushButton("U+R")
self.UpRightButton.setFixedWidth(50)
self.UpRightButton.setFixedHeight(20)
self.LeftButton = QtGui.QPushButton("Left")
self.LeftButton.setFixedWidth(50)
self.LeftButton.setFixedHeight(20)
self.StopButton = QtGui.QPushButton("STOP!")
self.StopButton.setFixedWidth(50)
self.StopButton.setFixedHeight(20)
self.RightButton = QtGui.QPushButton("Right")
self.RightButton.setFixedWidth(50)
self.RightButton.setFixedHeight(20)
self.DnLeftButton = QtGui.QPushButton("D+L")
self.DnLeftButton.setFixedWidth(50)
self.DnLeftButton.setFixedHeight(20)
self.DownButton = QtGui.QPushButton("Down")
self.DownButton.setFixedWidth(50)
self.DownButton.setFixedHeight(20)
self.DnRightButton = QtGui.QPushButton("D+R")
self.DnRightButton.setFixedWidth(50)
self.DnRightButton.setFixedHeight(20)
vbox = QtGui.QVBoxLayout()
hbox1 = QtGui.QHBoxLayout()
hbox2 = QtGui.QHBoxLayout()
hbox3 = QtGui.QHBoxLayout()
hbox1.setContentsMargins(0,0,0,0)
hbox1.addWidget(self.UpLeftButton)
hbox1.addWidget(self.UpButton)
hbox1.addWidget(self.UpRightButton)
hbox2.setContentsMargins(0,0,0,0)
hbox2.addWidget(self.LeftButton)
hbox2.addWidget(self.StopButton)
hbox2.addWidget(self.RightButton)
hbox3.setContentsMargins(0,0,0,0)
hbox3.addWidget(self.DnLeftButton)
hbox3.addWidget(self.DownButton)
hbox3.addWidget(self.DnRightButton)
vbox.setContentsMargins(0,0,0,0)
vbox.addWidget(self.motor_lbl)
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
self.dir_fr.setLayout(vbox)
    def initGpredict(self):
        """Build the Gpredict panel: server IP/port entries, connect
        button, status label, predicted Az/El readouts and the Auto
        Track checkbox."""
        # Listen address entry (dotted-quad input mask).
        self.ipAddrTextBox = QtGui.QLineEdit()
        self.ipAddrTextBox.setText('127.000.000.001')
        self.ipAddrTextBox.setInputMask("000.000.000.000;")
        self.ipAddrTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.ipAddrTextBox.setMaxLength(15)
        # Listen port entry, validated to the 16-bit port range.
        self.portTextBox = QtGui.QLineEdit()
        self.portTextBox.setText('4533')
        self.port_validator = QtGui.QIntValidator()
        self.port_validator.setRange(0,65535)
        self.portTextBox.setValidator(self.port_validator)
        self.portTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.portTextBox.setMaxLength(5)
        self.portTextBox.setFixedWidth(50)
        # Connection status readout; recolored by updatePredictStatus().
        label = QtGui.QLabel('Status:')
        label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        label.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        label.setFixedHeight(10)
        self.pred_status_lbl = QtGui.QLabel('Disconnected')
        self.pred_status_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.pred_status_lbl.setStyleSheet("QLabel {font-weight:bold; color:rgb(255,0,0);}")
        self.pred_status_lbl.setFixedWidth(125)
        self.pred_status_lbl.setFixedHeight(10)
        self.predictButton = QtGui.QPushButton("Start Server")
        # Predicted angle readouts ('XXX.X' placeholders until connected).
        lbl1 = QtGui.QLabel('Az:')
        lbl1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        lbl1.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        lbl1.setFixedWidth(25)
        lbl1.setFixedHeight(10)
        lbl2 = QtGui.QLabel('El:')
        lbl2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        lbl2.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        lbl2.setFixedWidth(25)
        lbl2.setFixedHeight(10)
        self.pred_az_lbl = QtGui.QLabel('XXX.X')
        self.pred_az_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.pred_az_lbl.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        self.pred_az_lbl.setFixedWidth(50)
        self.pred_az_lbl.setFixedHeight(10)
        self.pred_el_lbl = QtGui.QLabel('XXX.X')
        self.pred_el_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.pred_el_lbl.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        self.pred_el_lbl.setFixedWidth(50)
        self.pred_el_lbl.setFixedHeight(10)
        self.autoTrack_cb = QtGui.QCheckBox("Auto Track")
        self.autoTrack_cb.setStyleSheet("QCheckBox { background-color:rgb(0,0,0); color:rgb(255,255,255); }")
        self.autoTrack_cb.setFixedHeight(20)
        # Assemble rows, then stack them in the predict frame.
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.ipAddrTextBox)
        hbox1.addWidget(self.portTextBox)
        hbox2 = QtGui.QHBoxLayout()
        hbox2.addWidget(label)
        hbox2.addWidget(self.pred_status_lbl)
        hbox3 = QtGui.QHBoxLayout()
        hbox3.addWidget(lbl1)
        hbox3.addWidget(self.pred_az_lbl)
        hbox4 = QtGui.QHBoxLayout()
        hbox4.addWidget(lbl2)
        hbox4.addWidget(self.pred_el_lbl)
        vbox1 = QtGui.QVBoxLayout()
        vbox1.addLayout(hbox3)
        vbox1.addLayout(hbox4)
        hbox5 = QtGui.QHBoxLayout()
        hbox5.addWidget(self.autoTrack_cb)
        hbox5.addLayout(vbox1)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addWidget(self.predictButton)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox5)
        self.predict_fr.setLayout(vbox)
    def initCalTab(self):
        """Build the Cal tab: angle-calibration and motor-power frames
        side by side, with stretch pushing them to the top-left."""
        #Init Set Angles
        self.initCalAnglesControls()
        #Init Motor Power Control
        self.initMotorPower()
        self.cal_tab_grid = QtGui.QGridLayout()
        self.cal_tab_grid.addWidget(self.cal_angle_fr,0,0,1,1)
        self.cal_tab_grid.addWidget(self.mot_power_fr,0,1,1,1)
        self.cal_tab_grid.setColumnStretch(2,1)
        self.cal_tab_grid.setRowStretch(2,1)
        self.cal_tab.setLayout(self.cal_tab_grid)
    def initMotorPower(self):
        """Build the motor power frame: two-digit Az/El power entries
        plus a Set button."""
        self.mot_power_fr = QtGui.QFrame(self)
        self.mot_power_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        fr_lbl = QtGui.QLabel(" Motor Power ")
        fr_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
        fr_lbl.setStyleSheet("QLabel {text-decoration:underline; color:rgb(255,255,255);}")
        self.setMotPowerButton = QtGui.QPushButton("Set")
        # Azimuth power entry (two digits, default 64).
        self.azPowLabel = QtGui.QLabel("Azimuth:")
        self.azPowLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.azPowLabel.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        self.azPowTextBox = QtGui.QLineEdit()
        self.azPowTextBox.setText("64")
        self.azPowTextBox.setInputMask("00;")
        self.azPowTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.azPowTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.azPowTextBox.setMaxLength(2)
        self.azPowTextBox.setFixedWidth(60)
        # Elevation power entry (two digits, default 64).
        self.elPowLabel = QtGui.QLabel("Elevation:")
        self.elPowLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.elPowLabel.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        self.elPowTextBox = QtGui.QLineEdit(self.el_dial_fr)
        self.elPowTextBox.setText("64")
        self.elPowTextBox.setInputMask("00;")
        self.elPowTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.elPowTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.elPowTextBox.setMaxLength(2)
        self.elPowTextBox.setFixedWidth(60)
        az_hbox = QtGui.QHBoxLayout()
        az_hbox.addWidget(self.azPowLabel)
        az_hbox.addWidget(self.azPowTextBox)
        el_hbox = QtGui.QHBoxLayout()
        el_hbox.addWidget(self.elPowLabel)
        el_hbox.addWidget(self.elPowTextBox)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(fr_lbl)
        vbox.addLayout(az_hbox)
        vbox.addLayout(el_hbox)
        vbox.addWidget(self.setMotPowerButton)
        self.mot_power_fr.setLayout(vbox)
    def initCalAnglesControls(self):
        """Build the angle-calibration frame: Az/El calibration entries
        plus Zero and Set buttons."""
        self.cal_angle_fr = QtGui.QFrame(self)
        self.cal_angle_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.setCalButton = QtGui.QPushButton("Set")
        self.setCalButton.setFixedWidth(60)
        self.zeroButton = QtGui.QPushButton("Zero")
        self.zeroButton.setFixedWidth(60)
        fr_lbl = QtGui.QLabel(" Calibrate Angles ")
        fr_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
        fr_lbl.setStyleSheet("QLabel {text-decoration:underline; color:rgb(255,255,255);}")
        # Azimuth calibration entry (signed, one decimal place).
        self.azCalLabel = QtGui.QLabel("Azimuth:")
        self.azCalLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.azCalLabel.setStyleSheet("QLabel {color:rgb(255,0,0);}")
        self.azCalTextBox = QtGui.QLineEdit()
        self.azCalTextBox.setText("000.0")
        self.azCalTextBox.setInputMask("#000.0;")
        self.azCalTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.azCalTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.azCalTextBox.setMaxLength(6)
        self.azCalTextBox.setFixedWidth(60)
        # Elevation calibration entry (unsigned, one decimal place).
        self.elCalLabel = QtGui.QLabel("Elevation:")
        self.elCalLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.elCalLabel.setStyleSheet("QLabel {color:rgb(255,0,0);}")
        self.elCalTextBox = QtGui.QLineEdit(self.el_dial_fr)
        self.elCalTextBox.setText("000.0")
        self.elCalTextBox.setInputMask("000.0;")
        self.elCalTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.elCalTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.elCalTextBox.setMaxLength(6)
        self.elCalTextBox.setFixedWidth(60)
        az_hbox = QtGui.QHBoxLayout()
        az_hbox.addWidget(self.azCalLabel)
        az_hbox.addWidget(self.azCalTextBox)
        el_hbox = QtGui.QHBoxLayout()
        el_hbox.addWidget(self.elCalLabel)
        el_hbox.addWidget(self.elCalTextBox)
        btn_hbox= QtGui.QHBoxLayout()
        btn_hbox.addWidget(self.zeroButton)
        btn_hbox.addWidget(self.setCalButton)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(fr_lbl)
        vbox.addLayout(az_hbox)
        vbox.addLayout(el_hbox)
        vbox.addLayout(btn_hbox)
        self.cal_angle_fr.setLayout(vbox)
def darken(self):
palette = QtGui.QPalette()
palette.setColor(QtGui.QPalette.Background,QtCore.Qt.black)
palette.setColor(QtGui.QPalette.WindowText,QtCore.Qt.black)
palette.setColor(QtGui.QPalette.Text,QtCore.Qt.white)
self.setPalette(palette)
def utc_ts(self):
return str(date.utcnow()) + " UTC | "
|
#!/usr/bin/env python
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
import numpy as np
from datetime import datetime as date
import sys
from az_QwtDial import *
from el_QwtDial import *
import time
from gpredict import *
class main_widget(QtGui.QWidget):
    """Central widget exposing a grid layout for the main window."""

    def __init__(self):
        super(main_widget, self).__init__()
        self.initUI()

    def initUI(self):
        """Create and install the top-level grid layout."""
        self.grid = QtGui.QGridLayout()
        self.setLayout(self.grid)
class MainWindow(QtGui.QMainWindow):
    def __init__(self, ip, port):
        """Create the tracking main window.

        ip, port -- default Gpredict server listen address/port shown in
        the config entries.
        """
        #QtGui.QMainWindow.__init__(self)
        super(MainWindow, self).__init__()
        self.resize(1000, 525)
        self.setMinimumWidth(800)
        #self.setMaximumWidth(900)
        self.setMinimumHeight(450)
        #self.setMaximumHeight(700)
        self.setWindowTitle('VTGS Tracking GUI v1.1')
        self.setContentsMargins(0,0,0,0)
        self.main_window = main_widget()
        self.setCentralWidget(self.main_window)
        self.ip = ip
        self.port = port
        # Current (reported) and target (commanded) angles, degrees.
        self.cur_az = 0
        self.tar_az = 0
        self.cur_el = 0
        self.tar_el = 0
        # Last angles received from Gpredict.
        self.pred_az = 0.0
        self.pred_el = 0.0
        # Home position commanded by the Home button.
        self.home_az = 0.0
        self.home_el = 0.0
        self.callback = None #Callback accessor for tracking control
        self.update_rate = 250 #Feedback Query Auto Update Interval in milliseconds
        self.gpredict = None #Callback accessor for gpredict thread control
        self.pred_conn_stat = 0 #Gpredict Connection Status, 0=Disconnected, 1=Listening, 2=Connected
        self.autoTrack = False #auto track mode, True = Auto, False = Manual
        self.statusBar().showMessage("| Disconnected | Manual | Current Az: 000.0 | Current El: 000.0 |")
        self.initUI()
        self.darken()
        self.setFocus()
def initUI(self):
self.initTabControl()
self.initMainTab()
self.initCalTab()
self.initTimers()
self.connectSignals()
self.show()
def initTimers(self):
self.updateTimer = QtCore.QTimer(self)
#self.updateTimer.setInterval(self.update_rate)
self.updateTimer.start(self.update_rate)
#Timer used to Poll the GPredict Server thread for updates
self.predictTimer = QtCore.QTimer(self)
self.predictTimer.setInterval(self.update_rate)
def connectSignals(self):
self.azPlusPtOneButton.clicked.connect(self.azPlusPtOneButtonClicked)
self.azPlusOneButton.clicked.connect(self.azPlusOneButtonClicked)
self.azPlusTenButton.clicked.connect(self.azPlusTenButtonClicked)
self.azMinusPtOneButton.clicked.connect(self.azMinusPtOneButtonClicked)
self.azMinusOneButton.clicked.connect(self.azMinusOneButtonClicked)
self.azMinusTenButton.clicked.connect(self.azMinusTenButtonClicked)
self.azTextBox.returnPressed.connect(self.azTextBoxReturnPressed)
self.elPlusPtOneButton.clicked.connect(self.elPlusPtOneButtonClicked)
self.elPlusOneButton.clicked.connect(self.elPlusOneButtonClicked)
self.elPlusTenButton.clicked.connect(self.elPlusTenButtonClicked)
self.elMinusPtOneButton.clicked.connect(self.elMinusPtOneButtonClicked)
self.elMinusOneButton.clicked.connect(self.elMinusOneButtonClicked)
self.elMinusTenButton.clicked.connect(self.elMinusTenButtonClicked)
self.elTextBox.returnPressed.connect(self.elTextBoxReturnPressed)
self.predictButton.clicked.connect(self.predictButtonEvent)
self.queryButton.clicked.connect(self.queryButtonEvent)
self.StopButton.clicked.connect(self.stopButtonEvent)
self.homeButton.clicked.connect(self.homeButtonEvent)
self.updateButton.clicked.connect(self.updateButtonEvent)
self.autoQuery_cb.stateChanged.connect(self.catchAutoQueryEvent)
QtCore.QObject.connect(self.updateTimer, QtCore.SIGNAL('timeout()'), self.queryButtonEvent)
QtCore.QObject.connect(self.predictTimer, QtCore.SIGNAL('timeout()'), self.predictTimerEvent)
QtCore.QObject.connect(self.fb_query_rate_le, QtCore.SIGNAL('editingFinished()'), self.updateRate)
QtCore.QObject.connect(self.ipAddrTextBox, QtCore.SIGNAL('editingFinished()'), self.updateIPAddress)
QtCore.QObject.connect(self.portTextBox, QtCore.SIGNAL('editingFinished()'), self.updatePort)
self.ssidCombo.activated[int].connect(self.updateSSIDEvent)
def setCallback(self, callback):
self.callback = callback
def setGpredictCallback(self, callback):
self.gpredict = callback
    def predictTimerEvent(self):
        """Poll the Gpredict thread: refresh connection status and target
        angles, and (when Auto Track is checked) command the rotator to
        the predicted position.  Driven by self.predictTimer."""
        self.pred_conn_stat = self.gpredict.getConnectionStatus()
        if self.pred_conn_stat == 2:
            # Connected: pull requested angles and report our angles back.
            self.pred_az, self.pred_el = self.gpredict.getTargetAngles()
            self.gpredict.updateCurrentAngles(self.cur_az, self.cur_el)
            self.pred_az_lbl.setText(str(round(self.pred_az,1)))
            self.pred_el_lbl.setText(str(round(self.pred_el,1)))
            if self.autoTrack_cb.isChecked() == True:
                # Auto Track: predicted angles become the commanded target.
                self.tar_az = round(self.pred_az, 1)
                self.tar_el = round(self.pred_el,1)
                self.updateAzimuth()
                self.updateElevation()
                self.callback.set_position(self.tar_az, self.tar_el)
        self.updatePredictStatus()
    def predictButtonEvent(self):
        """Toggle the Gpredict TCP server thread on/off.

        pred_conn_stat: 0 = disconnected, 1 = listening, 2 = connected.
        """
        if self.pred_conn_stat == 0: #Disconnected, Start Connection Thread
            ip = self.ipAddrTextBox.text()
            port = int(self.portTextBox.text())
            # NOTE(review): this rebinds self.gpredict, replacing any object
            # installed earlier via setGpredictCallback().
            self.gpredict = Gpredict_Thread(self, ip, port,1)
            self.gpredict.daemon = True
            self.gpredict.start()
            self.pred_conn_stat = 1 #listening
            self.updatePredictStatus()
            self.predictTimer.start()
        elif ((self.pred_conn_stat == 1) or (self.pred_conn_stat == 2)):
            # Listening or connected: shut the thread down and reset state.
            self.gpredict.stop()
            self.gpredict.join()
            self.predictTimer.stop()
            self.pred_conn_stat = 0
            self.updatePredictStatus()
def updatePredictStatus(self):
if self.pred_conn_stat == 0: #Disconnected
self.predictButton.setText('Connect')
self.pred_status_lbl.setText("Disconnected")
self.pred_status_lbl.setStyleSheet("QLabel { font-weight:bold; color:rgb(255,0,0) ; }")
self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
self.ipAddrTextBox.setEnabled(True)
self.portTextBox.setEnabled(True)
elif self.pred_conn_stat == 1: #Listening
self.predictButton.setText('Disconnect')
self.pred_status_lbl.setText("Listening...")
self.pred_status_lbl.setStyleSheet("QLabel { font-weight:bold; color:rgb(255,255,0) ; }")
self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.ipAddrTextBox.setEnabled(False)
self.portTextBox.setEnabled(False)
elif self.pred_conn_stat == 2: #Connected
self.predictButton.setText('Disconnect')
self.pred_status_lbl.setText("Connected")
self.pred_status_lbl.setStyleSheet("QLabel { font-weight:bold; color:rgb(0,255,0) ; }")
self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
self.ipAddrTextBox.setEnabled(False)
self.portTextBox.setEnabled(False)
def updateSSIDEvent(self, idx):
if idx == 0: #VUL
self.ssid = 'VUL'
elif idx == 1: #3M0
self.ssid = '3M0'
elif idx == 2: #4M5
self.ssid = '4M5'
elif idx == 3: #WX
self.ssid = 'WX'
print self.utc_ts() + "GUI | Updated Subsystem ID: " + self.ssid
self.callback.set_ssid(self.ssid)
def updateButtonEvent(self):
self.tar_az = float(self.azTextBox.text())
self.tar_el = float(self.elTextBox.text())
self.updateAzimuth()
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def homeButtonEvent(self):
self.tar_az = self.home_az
self.tar_el = self.home_el
self.updateAzimuth()
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def stopButtonEvent(self):
status, self.cur_az, self.cur_el = self.callback.set_stop()
if status != -1:
self.az_compass.set_cur_az(self.cur_az)
self.el_compass.set_cur_el(self.cur_el)
def queryButtonEvent(self):
status, self.cur_az, self.cur_el = self.callback.get_status()
if status != -1:
self.az_compass.set_cur_az(self.cur_az)
self.el_compass.set_cur_el(self.cur_el)
else:
self.autoQuery_cb.setCheckState(QtCore.Qt.Unchecked)
def catchAutoQueryEvent(self, state):
CheckState = (state == QtCore.Qt.Checked)
if CheckState == True:
self.updateTimer.start()
print self.utc_ts() + "GUI | Started Auto Update, Interval: " + str(self.update_rate) + " [ms]"
else:
self.updateTimer.stop()
print self.utc_ts() + "GUI | Stopped Auto Update"
def updateRate(self):
self.update_rate = float(self.fb_query_rate_le.text()) * 1000.0
self.updateTimer.setInterval(self.update_rate)
self.predictTimer.setInterval(self.update_rate)
print self.utc_ts() + "GUI | Updated Rate Interval to " + str(self.update_rate) + " [ms]"
def updateIPAddress(self):
self.ip = self.ipAddrTextBox.text()
def updatePort(self):
self.port = self.portTextBox.text()
def azTextBoxReturnPressed(self):
self.tar_az = float(self.azTextBox.text())
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azPlusPtOneButtonClicked(self):
self.tar_az = self.tar_az + 0.1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azPlusOneButtonClicked(self):
self.tar_az = self.tar_az + 1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azPlusTenButtonClicked(self):
self.tar_az = self.tar_az + 10
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azMinusPtOneButtonClicked(self):
self.tar_az = self.tar_az - 0.1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azMinusOneButtonClicked(self):
self.tar_az = self.tar_az - 1
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def azMinusTenButtonClicked(self):
self.tar_az = self.tar_az - 10
self.updateAzimuth()
self.callback.set_position(self.tar_az, self.tar_el)
def updateAzimuth(self):
if self.tar_az < -180.0:
self.tar_az = -180.0
self.azTextBox.setText(str(self.tar_az))
if self.tar_az > 540.0:
self.tar_az = 540.0
self.azTextBox.setText(str(self.tar_az))
self.az_compass.set_tar_az(self.tar_az)
def elTextBoxReturnPressed(self):
self.tar_el = float(self.elTextBox.text())
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elPlusPtOneButtonClicked(self):
self.tar_el = self.tar_el + 0.1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elPlusOneButtonClicked(self):
self.tar_el = self.tar_el + 1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elPlusTenButtonClicked(self):
self.tar_el = self.tar_el + 10
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elMinusPtOneButtonClicked(self):
self.tar_el = self.tar_el - 0.1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elMinusOneButtonClicked(self):
self.tar_el = self.tar_el - 1
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def elMinusTenButtonClicked(self):
self.tar_el = self.tar_el - 10
self.updateElevation()
self.callback.set_position(self.tar_az, self.tar_el)
def updateElevation(self):
if self.tar_el < 0:
self.tar_el = 0
self.elTextBox.setText(str(self.tar_el))
if self.tar_el > 180:
self.tar_el = 180
self.elTextBox.setText(str(self.tar_el))
self.el_compass.set_tar_el(self.tar_el)
    def initTabControl(self):
        # Build the bottom tab bar with Main / Cal / Config pages, each
        # painted black to match the dark theme.
        self.tabs = QtGui.QTabWidget()
        self.tabs.setTabPosition(QtGui.QTabWidget.South)
        self.main_tab = QtGui.QWidget()
        # NOTE(review): this layout is attached as an attribute but never set
        # on the tab; initMainTab installs main_tab_grid instead -- confirm
        # whether .grid is referenced anywhere else before removing.
        self.main_tab.grid = QtGui.QGridLayout()
        self.tabs.addTab(self.main_tab,"Main")
        self.main_tab.setAutoFillBackground(True)
        p = self.main_tab.palette()
        p.setColor(self.main_tab.backgroundRole(), QtCore.Qt.black)
        self.main_tab.setPalette(p)
        self.cal_tab = QtGui.QWidget()
        self.cal_tab.grid = QtGui.QGridLayout()
        self.tabs.addTab(self.cal_tab,"Cal")
        self.cal_tab.setAutoFillBackground(True)
        p = self.cal_tab.palette()
        p.setColor(self.cal_tab.backgroundRole(), QtCore.Qt.black)
        self.cal_tab.setPalette(p)
        self.config_tab = QtGui.QWidget()
        self.config_tab_grid = QtGui.QGridLayout()
        self.tabs.addTab(self.config_tab,"Config")
        self.config_tab.setAutoFillBackground(True)
        p = self.config_tab.palette()
        p.setColor(self.config_tab.backgroundRole(), QtCore.Qt.black)
        self.config_tab.setPalette(p)
        self.main_window.grid.addWidget(self.tabs)
    def initMainTab(self):
        # Assemble the Main tab: control stack on the left (2 rows wide),
        # azimuth dial + jog row in the middle, elevation dial + jog row
        # on the right.
        self.initMainTabFrames()
        self.initDials()
        #Init Az Increment Control
        self.initAzControls()
        #Init El Increment Control
        self.initElControls()
        #Init Control Frame
        self.initControlFrame()
        self.main_tab_grid = QtGui.QGridLayout()
        self.main_tab_grid.addWidget(self.control_fr ,0,0,2,2)
        self.main_tab_grid.addWidget(self.az_dial_fr ,0,2,1,3)
        self.main_tab_grid.addWidget(self.az_ctrl_fr ,1,2,1,3)
        self.main_tab_grid.addWidget(self.el_dial_fr ,0,5,1,3)
        self.main_tab_grid.addWidget(self.el_ctrl_fr ,1,5,1,3)
        # Let the dial row/columns absorb extra space on resize.
        self.main_tab_grid.setRowStretch(0,1)
        self.main_tab_grid.setColumnStretch(2,1)
        self.main_tab_grid.setColumnStretch(5,1)
        self.main_tab.setLayout(self.main_tab_grid)
    def initMainTabFrames(self):
        # Create the styled panels that host the two dials, the two jog
        # rows, and the left-hand control stack. The dial frames get grid
        # layouts now; the others are laid out by their own init methods.
        self.az_dial_fr = QtGui.QFrame(self)
        self.az_dial_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.az_dial_fr_grid = QtGui.QGridLayout()
        self.az_dial_fr.setLayout(self.az_dial_fr_grid)
        self.az_ctrl_fr = QtGui.QFrame()
        self.az_ctrl_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.el_dial_fr = QtGui.QFrame(self)
        self.el_dial_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.el_dial_fr_grid = QtGui.QGridLayout()
        self.el_dial_fr.setLayout(self.el_dial_fr_grid)
        self.el_ctrl_fr = QtGui.QFrame()
        self.el_ctrl_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.control_fr = QtGui.QFrame(self)
        self.control_fr.setFrameShape(QtGui.QFrame.StyledPanel)
    def initDials(self):
        # Instantiate the custom Qwt compass widgets inside the dial-frame
        # grid layouts created by initMainTabFrames.
        self.el_compass = el_QwtDial(self.el_dial_fr_grid)
        self.az_compass = az_QwtDial(self.az_dial_fr_grid)
def initAzControls(self):
self.azMinusTenButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azMinusTenButton.setText("-10.0")
self.azMinusTenButton.setMinimumWidth(45)
self.azMinusOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azMinusOneButton.setText("-1.0")
self.azMinusOneButton.setMinimumWidth(45)
self.azMinusPtOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azMinusPtOneButton.setText("-0.1")
self.azMinusPtOneButton.setMinimumWidth(45)
self.azPlusPtOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azPlusPtOneButton.setText("+0.1")
self.azPlusPtOneButton.setMinimumWidth(45)
self.azPlusOneButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azPlusOneButton.setText("+1.0")
self.azPlusOneButton.setMinimumWidth(45)
self.azPlusTenButton = QtGui.QPushButton(self.az_ctrl_fr)
self.azPlusTenButton.setText("+10.0")
self.azPlusTenButton.setMinimumWidth(45)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.azMinusTenButton)
hbox1.addWidget(self.azMinusOneButton)
hbox1.addWidget(self.azMinusPtOneButton)
hbox1.addWidget(self.azPlusPtOneButton)
hbox1.addWidget(self.azPlusOneButton)
hbox1.addWidget(self.azPlusTenButton)
self.az_ctrl_fr.setLayout(hbox1)
def initElControls(self):
self.elMinusTenButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elMinusTenButton.setText("-10.0")
self.elMinusTenButton.setMinimumWidth(45)
self.elMinusOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elMinusOneButton.setText("-1.0")
self.elMinusOneButton.setMinimumWidth(45)
self.elMinusPtOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elMinusPtOneButton.setText("-0.1")
self.elMinusPtOneButton.setMinimumWidth(45)
self.elPlusPtOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elPlusPtOneButton.setText("+0.1")
self.elPlusPtOneButton.setMinimumWidth(45)
self.elPlusOneButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elPlusOneButton.setText("+1.0")
self.elPlusOneButton.setMinimumWidth(45)
self.elPlusTenButton = QtGui.QPushButton(self.el_ctrl_fr)
self.elPlusTenButton.setText("+10.0")
self.elPlusTenButton.setMinimumWidth(45)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.elMinusTenButton)
hbox1.addWidget(self.elMinusOneButton)
hbox1.addWidget(self.elMinusPtOneButton)
hbox1.addWidget(self.elPlusPtOneButton)
hbox1.addWidget(self.elPlusOneButton)
hbox1.addWidget(self.elPlusTenButton)
self.el_ctrl_fr.setLayout(hbox1)
    def initControlFrame(self):
        # Build the left-hand control column: position entry on top,
        # direct motor drive in the middle, gpredict interface below.
        self.entry_fr = QtGui.QFrame(self)
        self.entry_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.predict_fr = QtGui.QFrame(self)
        self.predict_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.dir_fr = QtGui.QFrame(self)
        self.dir_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        #self.dir_fr.setEnabled(False)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(self.entry_fr)
        vbox.addWidget(self.dir_fr)
        vbox.addWidget(self.predict_fr)
        # Populate the three sub-frames after they are in the layout.
        self.initEntryBoxControls()
        self.initMotorCtrl()
        self.initGpredict()
        self.control_fr.setLayout(vbox)
    def initEntryBoxControls(self):
        # Build the position-entry panel: Az/El text boxes, Home/Update and
        # Query buttons, the subsystem selector, and the auto-query controls.
        self.updateButton = QtGui.QPushButton("Update")
        self.homeButton = QtGui.QPushButton("Home")
        self.queryButton = QtGui.QPushButton("Query")
        self.azLabel = QtGui.QLabel("Az:")
        self.azLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.azLabel.setStyleSheet("QLabel {color:rgb(0,0,255);}")
        # '#' in the mask allows a leading sign; azimuth may be negative.
        self.azTextBox = QtGui.QLineEdit()
        self.azTextBox.setText("000.0")
        self.azTextBox.setInputMask("#000.0;")
        self.azTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.azTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.azTextBox.setMaxLength(5)
        self.azTextBox.setFixedWidth(60)
        self.azTextBox.setFixedHeight(20)
        self.elLabel = QtGui.QLabel("El:")
        self.elLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.elLabel.setStyleSheet("QLabel {color:rgb(0,0,255);}")
        self.elTextBox = QtGui.QLineEdit(self.el_dial_fr)
        self.elTextBox.setText("000.0")
        self.elTextBox.setInputMask("000.0;")
        self.elTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.elTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.elTextBox.setMaxLength(5)
        self.elTextBox.setFixedWidth(60)
        self.elTextBox.setFixedHeight(20)
        self.ssidLabel = QtGui.QLabel("Subsystem:")
        self.ssidLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.ssidLabel.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        # Item order must match the index mapping in updateSSIDEvent.
        self.ssidCombo = QtGui.QComboBox(self)
        self.ssidCombo.addItem("VHF/UHF")
        self.ssidCombo.addItem("3.0m Dish")
        self.ssidCombo.addItem("4.5m Dish")
        self.ssidCombo.addItem("NOAA WX")
        # Auto-query interval entry, in seconds (converted by updateRate).
        self.fb_query_rate_le = QtGui.QLineEdit()
        self.fb_query_rate_le.setText("0.25")
        self.query_val = QtGui.QDoubleValidator()
        self.fb_query_rate_le.setValidator(self.query_val)
        self.fb_query_rate_le.setEchoMode(QtGui.QLineEdit.Normal)
        self.fb_query_rate_le.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.fb_query_rate_le.setMaxLength(4)
        self.fb_query_rate_le.setFixedWidth(50)
        self.autoQuery_cb = QtGui.QCheckBox("Auto Query [s]")
        self.autoQuery_cb.setStyleSheet("QCheckBox { background-color:rgb(0,0,0); color:rgb(255,0,0); }")
        self.autoQuery_cb.setChecked(True)
        # Stack the rows: entry boxes, Home/Update, Query+subsystem, auto-query.
        az_hbox = QtGui.QHBoxLayout()
        az_hbox.addWidget(self.azLabel)
        az_hbox.addWidget(self.azTextBox)
        el_hbox = QtGui.QHBoxLayout()
        el_hbox.addWidget(self.elLabel)
        el_hbox.addWidget(self.elTextBox)
        btn_hbox = QtGui.QHBoxLayout()
        btn_hbox.addWidget(self.homeButton)
        btn_hbox.addWidget(self.updateButton)
        ssid_hbox = QtGui.QHBoxLayout()
        ssid_hbox.addWidget(self.queryButton)
        ssid_hbox.addWidget(self.ssidCombo)
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.autoQuery_cb)
        hbox1.addWidget(self.fb_query_rate_le)
        hbox2 = QtGui.QHBoxLayout()
        hbox2.addLayout(az_hbox)
        hbox2.addLayout(el_hbox)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox2)
        vbox.addLayout(btn_hbox)
        vbox.addLayout(ssid_hbox)
        vbox.addLayout(hbox1)
        self.entry_fr.setLayout(vbox)
    def initMotorCtrl(self):
        # Build the 3x3 direct-motor-drive keypad (up/down/left/right,
        # diagonals, and a center STOP button) inside dir_fr.
        self.motor_lbl = QtGui.QLabel(" Direct Motor Drive ")
        self.motor_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
        self.motor_lbl.setFixedHeight(20)
        self.motor_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        self.UpLeftButton = QtGui.QPushButton("U+L")
        self.UpLeftButton.setFixedWidth(50)
        self.UpLeftButton.setFixedHeight(20)
        self.UpButton = QtGui.QPushButton("Up")
        self.UpButton.setFixedWidth(50)
        self.UpButton.setFixedHeight(20)
        self.UpRightButton = QtGui.QPushButton("U+R")
        self.UpRightButton.setFixedWidth(50)
        self.UpRightButton.setFixedHeight(20)
        self.LeftButton = QtGui.QPushButton("Left")
        self.LeftButton.setFixedWidth(50)
        self.LeftButton.setFixedHeight(20)
        self.StopButton = QtGui.QPushButton("STOP!")
        self.StopButton.setFixedWidth(50)
        self.StopButton.setFixedHeight(20)
        self.RightButton = QtGui.QPushButton("Right")
        self.RightButton.setFixedWidth(50)
        self.RightButton.setFixedHeight(20)
        self.DnLeftButton = QtGui.QPushButton("D+L")
        self.DnLeftButton.setFixedWidth(50)
        self.DnLeftButton.setFixedHeight(20)
        self.DownButton = QtGui.QPushButton("Down")
        self.DownButton.setFixedWidth(50)
        self.DownButton.setFixedHeight(20)
        self.DnRightButton = QtGui.QPushButton("D+R")
        self.DnRightButton.setFixedWidth(50)
        self.DnRightButton.setFixedHeight(20)
        # Three rows of three buttons, stacked under the panel label.
        vbox = QtGui.QVBoxLayout()
        hbox1 = QtGui.QHBoxLayout()
        hbox2 = QtGui.QHBoxLayout()
        hbox3 = QtGui.QHBoxLayout()
        hbox1.setContentsMargins(0,0,0,0)
        hbox1.addWidget(self.UpLeftButton)
        hbox1.addWidget(self.UpButton)
        hbox1.addWidget(self.UpRightButton)
        hbox2.setContentsMargins(0,0,0,0)
        hbox2.addWidget(self.LeftButton)
        hbox2.addWidget(self.StopButton)
        hbox2.addWidget(self.RightButton)
        hbox3.setContentsMargins(0,0,0,0)
        hbox3.addWidget(self.DnLeftButton)
        hbox3.addWidget(self.DownButton)
        hbox3.addWidget(self.DnRightButton)
        vbox.setContentsMargins(0,0,0,0)
        vbox.addWidget(self.motor_lbl)
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox3)
        self.dir_fr.setLayout(vbox)
    def initGpredict(self):
        # Build the gpredict interface panel: server IP/port entry, a
        # Start Server button, connection status, commanded Az/El readout,
        # and the Auto Track checkbox.
        self.ipAddrTextBox = QtGui.QLineEdit()
        self.ipAddrTextBox.setText('127.000.000.001')
        self.ipAddrTextBox.setInputMask("000.000.000.000;")
        self.ipAddrTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.ipAddrTextBox.setMaxLength(15)
        # Port limited to the valid TCP range via an int validator.
        self.portTextBox = QtGui.QLineEdit()
        self.portTextBox.setText('4533')
        self.port_validator = QtGui.QIntValidator()
        self.port_validator.setRange(0,65535)
        self.portTextBox.setValidator(self.port_validator)
        self.portTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.portTextBox.setMaxLength(5)
        self.portTextBox.setFixedWidth(50)
        label = QtGui.QLabel('Status:')
        label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        label.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        label.setFixedHeight(10)
        # Status label starts red/"Disconnected"; updated elsewhere as the
        # server connection state changes.
        self.pred_status_lbl = QtGui.QLabel('Disconnected')
        self.pred_status_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.pred_status_lbl.setStyleSheet("QLabel {font-weight:bold; color:rgb(255,0,0);}")
        self.pred_status_lbl.setFixedWidth(125)
        self.pred_status_lbl.setFixedHeight(10)
        self.predictButton = QtGui.QPushButton("Start Server")
        lbl1 = QtGui.QLabel('Az:')
        lbl1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        lbl1.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        lbl1.setFixedWidth(25)
        lbl1.setFixedHeight(10)
        lbl2 = QtGui.QLabel('El:')
        lbl2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        lbl2.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        lbl2.setFixedWidth(25)
        lbl2.setFixedHeight(10)
        # 'XXX.X' placeholders until real gpredict angles arrive.
        self.pred_az_lbl = QtGui.QLabel('XXX.X')
        self.pred_az_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.pred_az_lbl.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        self.pred_az_lbl.setFixedWidth(50)
        self.pred_az_lbl.setFixedHeight(10)
        self.pred_el_lbl = QtGui.QLabel('XXX.X')
        self.pred_el_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.pred_el_lbl.setStyleSheet("QLabel {color:rgb(255,255,255)}")
        self.pred_el_lbl.setFixedWidth(50)
        self.pred_el_lbl.setFixedHeight(10)
        self.autoTrack_cb = QtGui.QCheckBox("Auto Track")
        self.autoTrack_cb.setStyleSheet("QCheckBox { background-color:rgb(0,0,0); color:rgb(255,255,255); }")
        self.autoTrack_cb.setFixedHeight(20)
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.ipAddrTextBox)
        hbox1.addWidget(self.portTextBox)
        hbox2 = QtGui.QHBoxLayout()
        hbox2.addWidget(label)
        hbox2.addWidget(self.pred_status_lbl)
        hbox3 = QtGui.QHBoxLayout()
        hbox3.addWidget(lbl1)
        hbox3.addWidget(self.pred_az_lbl)
        hbox4 = QtGui.QHBoxLayout()
        hbox4.addWidget(lbl2)
        hbox4.addWidget(self.pred_el_lbl)
        vbox1 = QtGui.QVBoxLayout()
        vbox1.addLayout(hbox3)
        vbox1.addLayout(hbox4)
        hbox5 = QtGui.QHBoxLayout()
        hbox5.addWidget(self.autoTrack_cb)
        hbox5.addLayout(vbox1)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addWidget(self.predictButton)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox5)
        self.predict_fr.setLayout(vbox)
    def initCalTab(self):
        # Assemble the Cal tab: angle calibration panel on the left,
        # motor power panel beside it; stretch pushes them to the corner.
        #Init Set Angles
        self.initCalAnglesControls()
        #Init Motor Power Control
        self.initMotorPower()
        self.cal_tab_grid = QtGui.QGridLayout()
        self.cal_tab_grid.addWidget(self.cal_angle_fr,0,0,1,1)
        self.cal_tab_grid.addWidget(self.mot_power_fr,0,1,1,1)
        self.cal_tab_grid.setColumnStretch(2,1)
        self.cal_tab_grid.setRowStretch(2,1)
        self.cal_tab.setLayout(self.cal_tab_grid)
    def initMotorPower(self):
        # Build the motor-power panel: two-digit Az/El power entries plus
        # a Set button (wired up elsewhere).
        self.mot_power_fr = QtGui.QFrame(self)
        self.mot_power_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        fr_lbl = QtGui.QLabel(" Motor Power ")
        fr_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
        fr_lbl.setStyleSheet("QLabel {text-decoration:underline; color:rgb(255,255,255);}")
        self.setMotPowerButton = QtGui.QPushButton("Set")
        self.azPowLabel = QtGui.QLabel("Azimuth:")
        self.azPowLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.azPowLabel.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        self.azPowTextBox = QtGui.QLineEdit()
        self.azPowTextBox.setText("64")
        self.azPowTextBox.setInputMask("00;")
        self.azPowTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.azPowTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.azPowTextBox.setMaxLength(2)
        self.azPowTextBox.setFixedWidth(60)
        self.elPowLabel = QtGui.QLabel("Elevation:")
        self.elPowLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.elPowLabel.setStyleSheet("QLabel {color:rgb(255,255,255);}")
        self.elPowTextBox = QtGui.QLineEdit(self.el_dial_fr)
        self.elPowTextBox.setText("64")
        self.elPowTextBox.setInputMask("00;")
        self.elPowTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.elPowTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.elPowTextBox.setMaxLength(2)
        self.elPowTextBox.setFixedWidth(60)
        az_hbox = QtGui.QHBoxLayout()
        az_hbox.addWidget(self.azPowLabel)
        az_hbox.addWidget(self.azPowTextBox)
        el_hbox = QtGui.QHBoxLayout()
        el_hbox.addWidget(self.elPowLabel)
        el_hbox.addWidget(self.elPowTextBox)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(fr_lbl)
        vbox.addLayout(az_hbox)
        vbox.addLayout(el_hbox)
        vbox.addWidget(self.setMotPowerButton)
        self.mot_power_fr.setLayout(vbox)
    def initCalAnglesControls(self):
        # Build the angle-calibration panel: signed Az / unsigned El entry
        # boxes plus Zero and Set buttons (wired up elsewhere).
        self.cal_angle_fr = QtGui.QFrame(self)
        self.cal_angle_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.setCalButton = QtGui.QPushButton("Set")
        self.setCalButton.setFixedWidth(60)
        self.zeroButton = QtGui.QPushButton("Zero")
        self.zeroButton.setFixedWidth(60)
        fr_lbl = QtGui.QLabel(" Calibrate Angles ")
        fr_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
        fr_lbl.setStyleSheet("QLabel {text-decoration:underline; color:rgb(255,255,255);}")
        self.azCalLabel = QtGui.QLabel("Azimuth:")
        self.azCalLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.azCalLabel.setStyleSheet("QLabel {color:rgb(255,0,0);}")
        # '#' in the mask allows a leading sign for negative azimuth.
        self.azCalTextBox = QtGui.QLineEdit()
        self.azCalTextBox.setText("000.0")
        self.azCalTextBox.setInputMask("#000.0;")
        self.azCalTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.azCalTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.azCalTextBox.setMaxLength(6)
        self.azCalTextBox.setFixedWidth(60)
        self.elCalLabel = QtGui.QLabel("Elevation:")
        self.elCalLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.elCalLabel.setStyleSheet("QLabel {color:rgb(255,0,0);}")
        self.elCalTextBox = QtGui.QLineEdit(self.el_dial_fr)
        self.elCalTextBox.setText("000.0")
        self.elCalTextBox.setInputMask("000.0;")
        self.elCalTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.elCalTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.elCalTextBox.setMaxLength(6)
        self.elCalTextBox.setFixedWidth(60)
        az_hbox = QtGui.QHBoxLayout()
        az_hbox.addWidget(self.azCalLabel)
        az_hbox.addWidget(self.azCalTextBox)
        el_hbox = QtGui.QHBoxLayout()
        el_hbox.addWidget(self.elCalLabel)
        el_hbox.addWidget(self.elCalTextBox)
        btn_hbox= QtGui.QHBoxLayout()
        btn_hbox.addWidget(self.zeroButton)
        btn_hbox.addWidget(self.setCalButton)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(fr_lbl)
        vbox.addLayout(az_hbox)
        vbox.addLayout(el_hbox)
        vbox.addLayout(btn_hbox)
        self.cal_angle_fr.setLayout(vbox)
def darken(self):
palette = QtGui.QPalette()
palette.setColor(QtGui.QPalette.Background,QtCore.Qt.black)
palette.setColor(QtGui.QPalette.WindowText,QtCore.Qt.black)
palette.setColor(QtGui.QPalette.Text,QtCore.Qt.white)
self.setPalette(palette)
    def utc_ts(self):
        # Timestamp prefix used for console log lines.
        # NOTE(review): `date.utcnow()` implies `date` is bound to
        # datetime.datetime at import time (datetime.date has no utcnow) --
        # confirm against this file's import block.
        return str(date.utcnow()) + " UTC | "
|
en
| 0.390684
|
#!/usr/bin/env python #QtGui.QMainWindow.__init__(self) #self.setMaximumWidth(900) #self.setMaximumHeight(700) #Callback accessor for tracking control #Feedback Query Auto Update Interval in milliseconds #Callback accessor for gpredict thread control #Gpredict Connection Status, 0=Disconnected, 1=Listening, 2=Connected #auto track mode, True = Auto, False = Manual #self.updateTimer.setInterval(self.update_rate) #Timer used to Poll the GPredict Server thread for updates #Disconnected, Start Connection Thread #listening #Disconnected #Listening #Connected #VUL #3M0 #4M5 #WX #Init Az Increment Control #Init El Increment Control #Init Control Frame #self.dir_fr.setEnabled(False) #Init Set Angles #Init Motor Power Control
| 2.131649
| 2
|
sigal/gallery.py
|
riton/sigal
| 0
|
6628891
|
<filename>sigal/gallery.py
# Copyright (c) 2009-2018 - <NAME>
# Copyright (c) 2013 - <NAME>
# Copyright (c) 2014 - <NAME>
# Copyright (c) 2015 - <NAME>.
# Copyright (c) 2017 - <NAME>
# Copyright (c) 2018 - <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import fnmatch
import locale
import logging
import multiprocessing
import os
import pickle
import random
import sys
import zipfile
from click import progressbar, get_terminal_size
from collections import defaultdict
from datetime import datetime
from itertools import cycle
from os.path import isfile, join, splitext
from urllib.parse import quote as url_quote
from . import image, video, signals
from .image import (process_image, get_exif_tags, get_exif_data, get_size,
get_iptc_data)
from .settings import get_thumb
from .utils import (Devnull, copy, check_or_create_dir, url_from_path,
read_markdown, cached_property, is_valid_html5_video,
get_mime)
from .video import process_video
from .writer import AlbumPageWriter, AlbumListPageWriter
class Media:
    """Base Class for media files.
    Attributes:
    - ``type``: ``"image"`` or ``"video"``.
    - ``filename``: Filename of the resized image.
    - ``thumbnail``: Location of the corresponding thumbnail image.
    - ``big``: If not None, location of the unmodified image.
    - ``big_url``: If not None, url of the unmodified image.
    - ``exif``: If not None contains a dict with the most common tags. For more
      information, see :ref:`simple-exif-data`.
    - ``raw_exif``: If not ``None``, it contains the raw EXIF tags.
    """
    # Overridden by subclasses ('image' / 'video').
    type = ''
    def __init__(self, filename, path, settings):
        self.src_filename = self.filename = filename
        self.path = path
        self.settings = settings
        self.ext = os.path.splitext(filename)[1].lower()
        # Source and destination paths mirror each other under the
        # configured 'source' and 'destination' roots.
        self.src_path = join(settings['source'], path, filename)
        self.dst_path = join(settings['destination'], path, filename)
        self.thumb_name = get_thumb(self.settings, self.filename)
        self.thumb_path = join(settings['destination'], path, self.thumb_name)
        self.logger = logging.getLogger(__name__)
        self._get_metadata()
        # default: title is the filename
        if not self.title:
            self.title = self.filename
        signals.media_initialized.send(self)
    def __repr__(self):
        return "<%s>(%r)" % (self.__class__.__name__, str(self))
    def __str__(self):
        return join(self.path, self.filename)
    @property
    def url(self):
        """URL of the media."""
        return url_from_path(self.filename)
    @property
    def big(self):
        """Path to the original image, if ``keep_orig`` is set (relative to the
        album directory). Copy the file if needed.
        """
        if self.settings['keep_orig']:
            s = self.settings
            if s['use_orig']:
                # The image *is* the original, just use it
                return self.filename
            orig_path = join(s['destination'], self.path, s['orig_dir'])
            check_or_create_dir(orig_path)
            big_path = join(orig_path, self.src_filename)
            # Copy (or link) the original lazily, only on first access.
            if not isfile(big_path):
                copy(self.src_path, big_path, symlink=s['orig_link'],
                     rellink=self.settings['rel_link'])
            return join(s['orig_dir'], self.src_filename)
    @property
    def big_url(self):
        """URL of the original media."""
        # Implicitly returns None when keep_orig is disabled (big is None).
        if self.big is not None:
            return url_from_path(self.big)
    @property
    def thumbnail(self):
        """Path to the thumbnail image (relative to the album directory)."""
        if not isfile(self.thumb_path):
            self.logger.debug('Generating thumbnail for %r', self)
            # Prefer the processed output if it exists, else the source file.
            path = (self.dst_path if os.path.exists(self.dst_path)
                    else self.src_path)
            try:
                # if thumbnail is missing (if settings['make_thumbs'] is False)
                s = self.settings
                if self.type == 'image':
                    image.generate_thumbnail(
                        path, self.thumb_path, s['thumb_size'],
                        fit=s['thumb_fit'])
                elif self.type == 'video':
                    video.generate_thumbnail(
                        path, self.thumb_path, s['thumb_size'],
                        s['thumb_video_delay'], fit=s['thumb_fit'],
                        converter=s['video_converter'])
            except Exception as e:
                # Thumbnail failure is non-fatal: log and return None.
                self.logger.error('Failed to generate thumbnail: %s', e)
                return
        return url_from_path(self.thumb_name)
    def _get_metadata(self):
        """ Get image metadata from filename.md: title, description, meta."""
        self.description = ''
        self.meta = {}
        self.title = ''
        descfile = splitext(self.src_path)[0] + '.md'
        if isfile(descfile):
            meta = read_markdown(descfile)
            # read_markdown keys become attributes (title, description, ...).
            for key, val in meta.items():
                setattr(self, key, val)
    def _get_file_date(self):
        # Fallback date: the source file's modification time (local tz).
        stat = os.stat(self.src_path)
        return datetime.fromtimestamp(stat.st_mtime)
class Image(Media):
    """Gather all informations on an image file."""
    type = 'image'
    @cached_property
    def date(self):
        # Prefer the EXIF capture date; fall back to the file mtime.
        return (self.exif and self.exif.get('dateobj', None) or
                self._get_file_date())
    @cached_property
    def exif(self):
        # Simplified EXIF dict; only JPEG files carry EXIF here.
        datetime_format = self.settings['datetime_format']
        return (get_exif_tags(self.raw_exif, datetime_format=datetime_format)
                if self.raw_exif and self.ext in ('.jpg', '.jpeg') else None)
    def _get_metadata(self):
        super(Image, self)._get_metadata()
        # If a title or description hasn't been obtained by other means, look
        # for the information in IPTC fields
        if self.title and self.description:
            # Nothing to do - we already have title and description
            return
        try:
            iptc_data = get_iptc_data(self.src_path)
        except Exception as e:
            self.logger.warning('Could not read IPTC data from %s: %s',
                                self.src_path, e)
        else:
            # IPTC fills only the fields that are still empty.
            if not self.title and iptc_data.get('title'):
                self.title = iptc_data['title']
            if not self.description and iptc_data.get('description'):
                self.description = iptc_data['description']
    @cached_property
    def raw_exif(self):
        # Raw EXIF tags, or None for non-JPEG files / read failures.
        try:
            return (get_exif_data(self.src_path)
                    if self.ext in ('.jpg', '.jpeg') else None)
        except Exception as e:
            self.logger.warning('Could not read EXIF data from %s: %s',
                                self.src_path, e)
    @cached_property
    def size(self):
        # Size of the processed (destination) image.
        return get_size(self.dst_path)
    @cached_property
    def thumb_size(self):
        return get_size(self.thumb_path)
    def has_location(self):
        # True when the simplified EXIF dict carries GPS coordinates.
        return self.exif is not None and 'gps' in self.exif
class Video(Media):
    """Gather all informations on a video file."""
    type = 'video'
    def __init__(self, filename, path, settings):
        super(Video, self).__init__(filename, path, settings)
        base, ext = splitext(filename)
        self.src_filename = filename
        self.date = self._get_file_date()
        # Unless the original is kept and is already HTML5-playable,
        # point filename/dst_path at the transcoded output format.
        if not settings['use_orig'] or not is_valid_html5_video(ext):
            video_format = settings['video_format']
            ext = '.' + video_format
            self.filename = base + ext
            self.mime = get_mime(ext)
            self.dst_path = join(settings['destination'], path, base + ext)
        else:
            self.mime = get_mime(ext)
class Album:
"""Gather all informations on an album.
Attributes:
:var description_file: Name of the Markdown file which gives information
on an album
:ivar index_url: URL to the index page.
:ivar output_file: Name of the output HTML file
:ivar meta: Meta data from the Markdown file.
:ivar description: description from the Markdown file.
For details how to annotate your albums with meta data, see
:doc:`album_information`.
"""
description_file = "index.md"
    def __init__(self, path, settings, dirnames, filenames, gallery):
        # `path` is relative to the gallery source root ('.' for the root
        # album); dirnames/filenames come from the gallery directory walk.
        self.path = path
        self.name = path.split(os.path.sep)[-1]
        self.gallery = gallery
        self.settings = settings
        self.subdirs = dirnames
        self.output_file = settings['output_filename']
        # Cached by the `thumbnail` property on first computation.
        self._thumbnail = None
        if path == '.':
            self.src_path = settings['source']
            self.dst_path = settings['destination']
        else:
            self.src_path = join(settings['source'], path)
            self.dst_path = join(settings['destination'], path)
        self.logger = logging.getLogger(__name__)
        self._get_metadata()
        # optionally add index.html to the URLs
        self.url_ext = self.output_file if settings['index_in_url'] else ''
        self.index_url = url_from_path(os.path.relpath(
            settings['destination'], self.dst_path)) + '/' + self.url_ext
        #: List of all medias in the album (:class:`~sigal.gallery.Image` and
        #: :class:`~sigal.gallery.Video`).
        self.medias = medias = []
        self.medias_count = defaultdict(int)
        # Classify each file by extension; anything else is ignored.
        for f in filenames:
            ext = splitext(f)[1]
            if ext.lower() in settings['img_extensions']:
                media = Image(f, self.path, settings)
            elif ext.lower() in settings['video_extensions']:
                media = Video(f, self.path, settings)
            else:
                continue
            self.medias_count[media.type] += 1
            medias.append(media)
        signals.album_initialized.send(self)
def __repr__(self):
return "<%s>(path=%r, title=%r)" % (self.__class__.__name__, self.path,
self.title)
def __str__(self):
return ('{} : '.format(self.path) +
', '.join('{} {}s'.format(count, _type)
for _type, count in self.medias_count.items()))
    def __len__(self):
        # An album's length is its number of media files (images + videos).
        return len(self.medias)
    def __iter__(self):
        # Iterating an album yields its media objects.
        return iter(self.medias)
    def _get_metadata(self):
        """Get album metadata from `description_file` (`index.md`):
        -> title, thumbnail image, description
        """
        descfile = join(self.src_path, self.description_file)
        self.description = ''
        self.meta = {}
        # default: get title from directory name
        self.title = os.path.basename(self.path if self.path != '.'
                                      else self.src_path)
        if isfile(descfile):
            meta = read_markdown(descfile)
            # Markdown metadata keys become attributes (may override title).
            for key, val in meta.items():
                setattr(self, key, val)
        # Author: first 'author' metadata entry, else the global setting.
        try:
            self.author = self.meta['author'][0]
        except KeyError:
            self.author = self.settings.get('author')
def create_output_directories(self):
"""Create output directories for thumbnails and original images."""
check_or_create_dir(self.dst_path)
if self.medias:
check_or_create_dir(join(self.dst_path,
self.settings['thumb_dir']))
if self.medias and self.settings['keep_orig']:
self.orig_path = join(self.dst_path, self.settings['orig_dir'])
check_or_create_dir(self.orig_path)
    def sort_subdirs(self, albums_sort_attr):
        # Sort the sub-album directory names in place, either by a
        # "meta.<key>" entry, by an Album attribute, or alphabetically
        # (locale-aware via strxfrm). Always emits the albums_sorted signal.
        if self.subdirs:
            if albums_sort_attr:
                root_path = self.path if self.path != '.' else ''
                if albums_sort_attr.startswith("meta."):
                    meta_key = albums_sort_attr.split(".", 1)[1]
                    # Missing meta keys sort as the empty string.
                    key = lambda s: locale.strxfrm(
                        self.gallery.albums[join(root_path, s)].meta.get(meta_key, [''])[0])
                else:
                    key = lambda s: locale.strxfrm(
                        getattr(self.gallery.albums[join(root_path, s)],
                                albums_sort_attr))
            else:
                key = locale.strxfrm
            self.subdirs.sort(key=key,
                              reverse=self.settings['albums_sort_reverse'])
        signals.albums_sorted.send(self)
    def sort_medias(self, medias_sort_attr):
        # Sort the album's medias in place by date, by a "meta.<key>" entry,
        # or by a Media attribute. Always emits the medias_sorted signal.
        if self.medias:
            if medias_sort_attr == 'date':
                # Medias without a date sort as "now" (i.e. last).
                key = lambda s: s.date or datetime.now()
            elif medias_sort_attr.startswith('meta.'):
                meta_key = medias_sort_attr.split(".", 1)[1]
                key = lambda s: locale.strxfrm(s.meta.get(meta_key, [''])[0])
            else:
                key = lambda s: locale.strxfrm(getattr(s, medias_sort_attr))
            self.medias.sort(key=key,
                             reverse=self.settings['medias_sort_reverse'])
        signals.medias_sorted.send(self)
@property
def images(self):
"""List of images (:class:`~sigal.gallery.Image`)."""
for media in self.medias:
if media.type == 'image':
yield media
@property
def videos(self):
"""List of videos (:class:`~sigal.gallery.Video`)."""
for media in self.medias:
if media.type == 'video':
yield media
    @property
    def albums(self):
        """List of :class:`~sigal.gallery.Album` objects for each
        sub-directory.
        """
        # '.' is the gallery root; its children are keyed by bare names
        root_path = self.path if self.path != '.' else ''
        return [self.gallery.albums[join(root_path, path)]
                for path in self.subdirs]
    @property
    def url(self):
        """URL of the album, relative to its parent."""
        # quote the UTF-8 bytes so non-ASCII directory names become valid
        # percent-encoded URLs
        url = self.name.encode('utf-8')
        return url_quote(url) + '/' + self.url_ext
    @property
    def thumbnail(self):
        """Path to the thumbnail of the album.

        Resolution order: cached value, ``thumbnail`` meta key from the
        Markdown file, first landscape image, first media with a thumbnail,
        then the thumbnail of a sub-album. Returns ``None`` if nothing is
        found.
        """
        if self._thumbnail:
            # stop if it is already set
            return self._thumbnail
        # Test the thumbnail from the Markdown file.
        thumbnail = self.meta.get('thumbnail', [''])[0]
        if thumbnail and isfile(join(self.src_path, thumbnail)):
            self._thumbnail = url_from_path(join(
                self.name, get_thumb(self.settings, thumbnail)))
            self.logger.debug("Thumbnail for %r : %s", self, self._thumbnail)
            return self._thumbnail
        else:
            # find and return the first landscape image
            for f in self.medias:
                ext = splitext(f.filename)[1]
                if ext.lower() in self.settings['img_extensions']:
                    # Use f.size if available as it is quicker (in cache), but
                    # fallback to the size of src_path if dst_path is missing
                    size = f.size
                    if size is None:
                        size = get_size(f.src_path)
                    if size['width'] > size['height']:
                        self._thumbnail = (url_quote(self.name) + '/' +
                                           f.thumbnail)
                        self.logger.debug(
                            "Use 1st landscape image as thumbnail for %r : %s",
                            self, self._thumbnail)
                        return self._thumbnail
            # else simply return the 1st media file
            if not self._thumbnail and self.medias:
                for media in self.medias:
                    if media.thumbnail is not None:
                        self._thumbnail = (url_quote(self.name) + '/' +
                                           media.thumbnail)
                        break
                else:
                    # for/else: no media produced a usable thumbnail
                    self.logger.warning("No thumbnail found for %r", self)
                    return None
                self.logger.debug("Use the 1st image as thumbnail for %r : %s",
                                  self, self._thumbnail)
                return self._thumbnail
            # use the thumbnail of their sub-directories
            if not self._thumbnail:
                for path, album in self.gallery.get_albums(self.path):
                    if album.thumbnail:
                        self._thumbnail = (url_quote(self.name) + '/' +
                                           album.thumbnail)
                        self.logger.debug(
                            "Using thumbnail from sub-directory for %r : %s",
                            self, self._thumbnail)
                        return self._thumbnail
        self.logger.error('Thumbnail not found for %r', self)
        return None
@property
def random_thumbnail(self):
try:
return url_from_path(join(self.name,
random.choice(self.medias).thumbnail))
except IndexError:
return self.thumbnail
    @property
    def breadcrumb(self):
        """List of ``(url, title)`` tuples defining the current breadcrumb
        path.
        """
        if self.path == '.':
            # the gallery root has no breadcrumb
            return []
        path = self.path
        # start from this album and walk up to (but excluding) the root
        breadcrumb = [((self.url_ext or '.'), self.title)]
        while True:
            path = os.path.normpath(os.path.join(path, '..'))
            if path == '.':
                break
            url = (url_from_path(os.path.relpath(path, self.path)) + '/' +
                   self.url_ext)
            breadcrumb.append((url, self.gallery.albums[path].title))
        # collected bottom-up; present top-down
        breadcrumb.reverse()
        return breadcrumb
@property
def show_map(self):
"""Check if we have at least one photo with GPS location in the album
"""
return any(image.has_location() for image in self.images)
@cached_property
def zip(self):
"""Make a ZIP archive with all media files and return its path.
If the ``zip_gallery`` setting is set,it contains the location of a zip
archive with all original images of the corresponding directory.
"""
zip_gallery = self.settings['zip_gallery']
if zip_gallery and len(self) > 0:
zip_gallery = zip_gallery.format(album=self)
archive_path = join(self.dst_path, zip_gallery)
if (self.settings.get('zip_skip_if_exists', False) and
isfile(archive_path)):
self.logger.debug("Archive %s already created, passing",
archive_path)
return zip_gallery
archive = zipfile.ZipFile(archive_path, 'w', allowZip64=True)
attr = ('src_path' if self.settings['zip_media_format'] == 'orig'
else 'dst_path')
for p in self:
path = getattr(p, attr)
try:
archive.write(path, os.path.split(path)[1])
except OSError as e:
self.logger.warn('Failed to add %s to the ZIP: %s', p, e)
archive.close()
self.logger.debug('Created ZIP archive %s', archive_path)
return zip_gallery
class Gallery(object):
    """Scan the source tree, build the :class:`Album` index and drive the
    processing/writing of the whole gallery.
    """

    def __init__(self, settings, ncpu=None):
        self.settings = settings
        self.logger = logging.getLogger(__name__)
        self.stats = defaultdict(int)
        self.init_pool(ncpu)
        check_or_create_dir(settings['destination'])

        # Build the list of directories with images
        albums = self.albums = {}
        src_path = self.settings['source']

        ignore_dirs = settings['ignore_directories']
        ignore_files = settings['ignore_files']

        progressChars = cycle(["/", "-", "\\", "|"])
        show_progress = (self.logger.getEffectiveLevel() >= logging.WARNING and
                         os.isatty(sys.stdout.fileno()))
        self.progressbar_target = None if show_progress else Devnull()

        for path, dirs, files in os.walk(src_path, followlinks=True,
                                         topdown=False):
            if show_progress:
                print("\rCollecting albums " + next(progressChars), end="")
            relpath = os.path.relpath(path, src_path)

            # Test if the directory match the ignore_dirs settings
            if ignore_dirs and any(fnmatch.fnmatch(relpath, ignore)
                                   for ignore in ignore_dirs):
                self.logger.info('Ignoring %s', relpath)
                continue

            # Remove files that match the ignore_files settings
            if ignore_files:
                files_path = {join(relpath, f) for f in files}
                for ignore in ignore_files:
                    files_path -= set(fnmatch.filter(files_path, ignore))

                self.logger.debug('Files before filtering: %r', files)
                files = [os.path.split(f)[1] for f in files_path]
                self.logger.debug('Files after filtering: %r', files)

            # Remove sub-directories that have been ignored in a previous
            # iteration (as topdown=False, sub-directories are processed before
            # their parent
            for d in dirs[:]:
                path = join(relpath, d) if relpath != '.' else d
                if path not in albums.keys():
                    dirs.remove(d)

            album = Album(relpath, settings, dirs, files, self)

            if not album.medias and not album.albums:
                self.logger.info('Skip empty album: %r', album)
            else:
                album.create_output_directories()
                albums[relpath] = album

        print("\rCollecting albums, done.")

        with progressbar(albums.values(), label="%16s" % "Sorting albums",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_subdirs(settings['albums_sort_attr'])

        with progressbar(albums.values(), label="%16s" % "Sorting media",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_medias(settings['medias_sort_attr'])

        self.logger.debug('Albums:\n%r', albums.values())
        signals.gallery_initialized.send(self)

    @property
    def title(self):
        """Title of the gallery."""
        return self.settings['title'] or self.albums['.'].title

    def init_pool(self, ncpu):
        """Create the multiprocessing pool (or None for single-process)."""
        try:
            cpu_count = multiprocessing.cpu_count()
        except NotImplementedError:
            cpu_count = 1

        if ncpu is None:
            ncpu = cpu_count
        else:
            try:
                ncpu = int(ncpu)
            except ValueError:
                self.logger.error('ncpu should be an integer value')
                ncpu = cpu_count

        self.logger.info("Using %s cores", ncpu)
        if ncpu > 1:
            self.pool = multiprocessing.Pool(processes=ncpu)
        else:
            self.pool = None

    def get_albums(self, path):
        """Recursively yield ``(path, album)`` for all sub-directories of
        ``path``."""
        for name in self.albums[path].subdirs:
            subdir = os.path.normpath(join(path, name))
            yield subdir, self.albums[subdir]
            for subname, album in self.get_albums(subdir):
                # bugfix: pair each nested path with its own album instead of
                # re-yielding the direct child's album for every descendant
                yield subname, album

    def build(self, force=False):
        "Create the image gallery"
        if not self.albums:
            self.logger.warning("No albums found.")
            return

        def log_func(x):
            # 63 is the total length of progressbar, label, percentage, etc
            available_length = get_terminal_size()[0] - 64
            if x and available_length > 10:
                return x.name[:available_length]
            else:
                return ""

        try:
            with progressbar(self.albums.values(), label="Collecting files",
                             item_show_func=log_func, show_eta=False,
                             file=self.progressbar_target) as albums:
                media_list = [f for album in albums
                              for f in self.process_dir(album, force=force)]
        except KeyboardInterrupt:
            sys.exit('Interrupted')

        bar_opt = {'label': "Processing files",
                   'show_pos': True,
                   'file': self.progressbar_target}
        failed_files = []

        if self.pool:
            try:
                with progressbar(length=len(media_list), **bar_opt) as bar:
                    for res in self.pool.imap_unordered(worker, media_list):
                        if res:
                            failed_files.append(res)
                        bar.update(1)
                self.pool.close()
                self.pool.join()
            except KeyboardInterrupt:
                self.pool.terminate()
                sys.exit('Interrupted')
            except pickle.PicklingError:
                self.logger.critical(
                    "Failed to process files with the multiprocessing feature."
                    " This can be caused by some module import or object "
                    "defined in the settings file, which can't be serialized.",
                    exc_info=True)
                sys.exit('Abort')
        else:
            with progressbar(media_list, **bar_opt) as medias:
                for media_item in medias:
                    res = process_file(media_item)
                    if res:
                        failed_files.append(res)

        if failed_files:
            self.remove_files(failed_files)

        if self.settings['write_html']:
            album_writer = AlbumPageWriter(self.settings,
                                           index_title=self.title)
            album_list_writer = AlbumListPageWriter(self.settings,
                                                    index_title=self.title)
            with progressbar(self.albums.values(),
                             label="%16s" % "Writing files",
                             item_show_func=log_func, show_eta=False,
                             file=self.progressbar_target) as albums:
                for album in albums:
                    if album.albums:
                        if album.medias:
                            self.logger.warning(
                                "Album %s contains sub-albums and images. "
                                "Please move images to their own sub-album. "
                                "Images in album %s will not be visible.",
                                album.title, album.title
                            )
                        album_list_writer.write(album)
                    else:
                        album_writer.write(album)
        print('')
        signals.gallery_build.send(self)

    def remove_files(self, files):
        """Drop failed media from their albums and report them."""
        self.logger.error('Some files have failed to be processed:')
        for path, filename in files:
            self.logger.error(' - %s/%s', path, filename)
            album = self.albums[path]
            for f in album.medias:
                if f.filename == filename:
                    self.stats[f.type + '_failed'] += 1
                    album.medias.remove(f)
                    break
        self.logger.error('You can run "sigal build" in verbose (--verbose) or'
                          ' debug (--debug) mode to get more details.')

    def process_dir(self, album, force=False):
        """Process a list of images in a directory."""
        for f in album:
            if isfile(f.dst_path) and not force:
                self.logger.info("%s exists - skipping", f.filename)
                self.stats[f.type + '_skipped'] += 1
            else:
                self.stats[f.type] += 1
                yield (f.type, f.path, f.filename, f.src_path, album.dst_path,
                       self.settings)
def process_file(args):
    """Process one media file; return ``(path, filename)`` on failure,
    ``None`` on success.

    ``args`` is the tuple ``(ftype, path, filename, src_path, dst_path,
    settings)`` produced by :meth:`Gallery.process_dir`.
    """
    ftype = args[0]
    run = process_image if ftype == 'image' else process_video
    status = run(*args[3:])
    if status:
        # non-zero status: hand the failing file back to the parent process
        return args[1:3]
    return None
def worker(args):
    """Pool worker wrapper around :func:`process_file`.

    KeyboardInterrupt is swallowed so that Ctrl-C in the parent process can
    shut the pool down cleanly instead of every child printing a traceback.
    """
    try:
        return process_file(args)
    except KeyboardInterrupt:
        pass
|
<filename>sigal/gallery.py
# Copyright (c) 2009-2018 - <NAME>
# Copyright (c) 2013 - <NAME>
# Copyright (c) 2014 - <NAME>
# Copyright (c) 2015 - <NAME>.
# Copyright (c) 2017 - <NAME>
# Copyright (c) 2018 - <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import fnmatch
import locale
import logging
import multiprocessing
import os
import pickle
import random
import sys
import zipfile
from click import progressbar, get_terminal_size
from collections import defaultdict
from datetime import datetime
from itertools import cycle
from os.path import isfile, join, splitext
from urllib.parse import quote as url_quote
from . import image, video, signals
from .image import (process_image, get_exif_tags, get_exif_data, get_size,
get_iptc_data)
from .settings import get_thumb
from .utils import (Devnull, copy, check_or_create_dir, url_from_path,
read_markdown, cached_property, is_valid_html5_video,
get_mime)
from .video import process_video
from .writer import AlbumPageWriter, AlbumListPageWriter
class Media:
    """Base Class for media files.
    Attributes:
    - ``type``: ``"image"`` or ``"video"``.
    - ``filename``: Filename of the resized image.
    - ``thumbnail``: Location of the corresponding thumbnail image.
    - ``big``: If not None, location of the unmodified image.
    - ``big_url``: If not None, url of the unmodified image.
    - ``exif``: If not None contains a dict with the most common tags. For more
    information, see :ref:`simple-exif-data`.
    - ``raw_exif``: If not ``None``, it contains the raw EXIF tags.
    """
    # overridden by subclasses ('image' / 'video')
    type = ''
    def __init__(self, filename, path, settings):
        self.src_filename = self.filename = filename
        self.path = path
        self.settings = settings
        self.ext = os.path.splitext(filename)[1].lower()
        self.src_path = join(settings['source'], path, filename)
        self.dst_path = join(settings['destination'], path, filename)
        self.thumb_name = get_thumb(self.settings, self.filename)
        self.thumb_path = join(settings['destination'], path, self.thumb_name)
        self.logger = logging.getLogger(__name__)
        self._get_metadata()
        # default: title is the filename
        if not self.title:
            self.title = self.filename
        signals.media_initialized.send(self)
    def __repr__(self):
        return "<%s>(%r)" % (self.__class__.__name__, str(self))
    def __str__(self):
        return join(self.path, self.filename)
    @property
    def url(self):
        """URL of the media."""
        return url_from_path(self.filename)
    @property
    def big(self):
        """Path to the original image, if ``keep_orig`` is set (relative to the
        album directory). Copy the file if needed.
        """
        if self.settings['keep_orig']:
            s = self.settings
            if s['use_orig']:
                # The image *is* the original, just use it
                return self.filename
            orig_path = join(s['destination'], self.path, s['orig_dir'])
            check_or_create_dir(orig_path)
            big_path = join(orig_path, self.src_filename)
            if not isfile(big_path):
                copy(self.src_path, big_path, symlink=s['orig_link'],
                     rellink=self.settings['rel_link'])
            return join(s['orig_dir'], self.src_filename)
    @property
    def big_url(self):
        """URL of the original media."""
        if self.big is not None:
            return url_from_path(self.big)
    @property
    def thumbnail(self):
        """Path to the thumbnail image (relative to the album directory).

        Generates the thumbnail on demand if missing; returns ``None`` when
        generation fails.
        """
        if not isfile(self.thumb_path):
            self.logger.debug('Generating thumbnail for %r', self)
            # prefer the processed file, fall back to the source file
            path = (self.dst_path if os.path.exists(self.dst_path)
                    else self.src_path)
            try:
                # if thumbnail is missing (if settings['make_thumbs'] is False)
                s = self.settings
                if self.type == 'image':
                    image.generate_thumbnail(
                        path, self.thumb_path, s['thumb_size'],
                        fit=s['thumb_fit'])
                elif self.type == 'video':
                    video.generate_thumbnail(
                        path, self.thumb_path, s['thumb_size'],
                        s['thumb_video_delay'], fit=s['thumb_fit'],
                        converter=s['video_converter'])
            except Exception as e:
                self.logger.error('Failed to generate thumbnail: %s', e)
                return
        return url_from_path(self.thumb_name)
    def _get_metadata(self):
        """ Get image metadata from filename.md: title, description, meta."""
        self.description = ''
        self.meta = {}
        self.title = ''
        descfile = splitext(self.src_path)[0] + '.md'
        if isfile(descfile):
            meta = read_markdown(descfile)
            # each markdown meta key becomes an attribute
            for key, val in meta.items():
                setattr(self, key, val)
    def _get_file_date(self):
        """Return the file's modification time as a datetime."""
        stat = os.stat(self.src_path)
        return datetime.fromtimestamp(stat.st_mtime)
class Image(Media):
    """Gather all informations on an image file."""
    type = 'image'
    @cached_property
    def date(self):
        """Date of the image: EXIF date if present, else file mtime."""
        return (self.exif and self.exif.get('dateobj', None) or
                self._get_file_date())
    @cached_property
    def exif(self):
        """Simplified EXIF dict for JPEG files, ``None`` otherwise."""
        datetime_format = self.settings['datetime_format']
        return (get_exif_tags(self.raw_exif, datetime_format=datetime_format)
                if self.raw_exif and self.ext in ('.jpg', '.jpeg') else None)
    def _get_metadata(self):
        super(Image, self)._get_metadata()
        # If a title or description hasn't been obtained by other means, look
        # for the information in IPTC fields
        if self.title and self.description:
            # Nothing to do - we already have title and description
            return
        try:
            iptc_data = get_iptc_data(self.src_path)
        except Exception as e:
            self.logger.warning('Could not read IPTC data from %s: %s',
                                self.src_path, e)
        else:
            if not self.title and iptc_data.get('title'):
                self.title = iptc_data['title']
            if not self.description and iptc_data.get('description'):
                self.description = iptc_data['description']
    @cached_property
    def raw_exif(self):
        """Raw EXIF tags for JPEG files; ``None`` for other types or on
        read errors."""
        try:
            return (get_exif_data(self.src_path)
                    if self.ext in ('.jpg', '.jpeg') else None)
        except Exception as e:
            self.logger.warning('Could not read EXIF data from %s: %s',
                                self.src_path, e)
    @cached_property
    def size(self):
        """Size dict of the processed image (from the destination file)."""
        return get_size(self.dst_path)
    @cached_property
    def thumb_size(self):
        """Size dict of the thumbnail image."""
        return get_size(self.thumb_path)
    def has_location(self):
        """True if the image's EXIF data contains GPS coordinates."""
        return self.exif is not None and 'gps' in self.exif
class Video(Media):
    """Gather all information on a video file."""
    type = 'video'

    def __init__(self, filename, path, settings):
        super().__init__(filename, path, settings)
        base, src_ext = splitext(filename)
        self.src_filename = filename
        self.date = self._get_file_date()
        if settings['use_orig'] and is_valid_html5_video(src_ext):
            # the source file can be served directly
            self.mime = get_mime(src_ext)
        else:
            # the video will be converted to the configured output format
            dst_ext = '.' + settings['video_format']
            self.filename = base + dst_ext
            self.mime = get_mime(dst_ext)
            self.dst_path = join(settings['destination'], path, base + dst_ext)
class Album:
    """Gather all informations on an album.
    Attributes:
    :var description_file: Name of the Markdown file which gives information
        on an album
    :ivar index_url: URL to the index page.
    :ivar output_file: Name of the output HTML file
    :ivar meta: Meta data from the Markdown file.
    :ivar description: description from the Markdown file.
    For details how to annotate your albums with meta data, see
    :doc:`album_information`.
    """
    description_file = "index.md"

    def __init__(self, path, settings, dirnames, filenames, gallery):
        self.path = path
        self.name = path.split(os.path.sep)[-1]
        self.gallery = gallery
        self.settings = settings
        self.subdirs = dirnames
        self.output_file = settings['output_filename']
        self._thumbnail = None

        if path == '.':
            self.src_path = settings['source']
            self.dst_path = settings['destination']
        else:
            self.src_path = join(settings['source'], path)
            self.dst_path = join(settings['destination'], path)

        self.logger = logging.getLogger(__name__)
        self._get_metadata()

        # optionally add index.html to the URLs
        self.url_ext = self.output_file if settings['index_in_url'] else ''

        self.index_url = url_from_path(os.path.relpath(
            settings['destination'], self.dst_path)) + '/' + self.url_ext

        #: List of all medias in the album (:class:`~sigal.gallery.Image` and
        #: :class:`~sigal.gallery.Video`).
        self.medias = medias = []
        self.medias_count = defaultdict(int)

        for f in filenames:
            ext = splitext(f)[1]
            if ext.lower() in settings['img_extensions']:
                media = Image(f, self.path, settings)
            elif ext.lower() in settings['video_extensions']:
                media = Video(f, self.path, settings)
            else:
                continue

            self.medias_count[media.type] += 1
            medias.append(media)

        signals.album_initialized.send(self)

    def __repr__(self):
        return "<%s>(path=%r, title=%r)" % (self.__class__.__name__, self.path,
                                            self.title)

    def __str__(self):
        return ('{} : '.format(self.path) +
                ', '.join('{} {}s'.format(count, _type)
                          for _type, count in self.medias_count.items()))

    def __len__(self):
        return len(self.medias)

    def __iter__(self):
        return iter(self.medias)

    def _get_metadata(self):
        """Get album metadata from `description_file` (`index.md`):
        -> title, thumbnail image, description
        """
        descfile = join(self.src_path, self.description_file)
        self.description = ''
        self.meta = {}
        # default: get title from directory name
        self.title = os.path.basename(self.path if self.path != '.'
                                      else self.src_path)

        if isfile(descfile):
            meta = read_markdown(descfile)
            for key, val in meta.items():
                setattr(self, key, val)

        try:
            self.author = self.meta['author'][0]
        except KeyError:
            # no per-album author: fall back to the gallery-wide setting
            self.author = self.settings.get('author')

    def create_output_directories(self):
        """Create output directories for thumbnails and original images."""
        check_or_create_dir(self.dst_path)

        if self.medias:
            check_or_create_dir(join(self.dst_path,
                                     self.settings['thumb_dir']))

        if self.medias and self.settings['keep_orig']:
            self.orig_path = join(self.dst_path, self.settings['orig_dir'])
            check_or_create_dir(self.orig_path)

    def sort_subdirs(self, albums_sort_attr):
        """Sort sub-directories, either by locale-aware name, an album
        attribute, or a ``meta.<key>`` entry."""
        if self.subdirs:
            if albums_sort_attr:
                root_path = self.path if self.path != '.' else ''
                if albums_sort_attr.startswith("meta."):
                    meta_key = albums_sort_attr.split(".", 1)[1]
                    key = lambda s: locale.strxfrm(
                        self.gallery.albums[join(root_path, s)].meta.get(meta_key, [''])[0])
                else:
                    key = lambda s: locale.strxfrm(
                        getattr(self.gallery.albums[join(root_path, s)],
                                albums_sort_attr))
            else:
                key = locale.strxfrm

            self.subdirs.sort(key=key,
                              reverse=self.settings['albums_sort_reverse'])

        signals.albums_sorted.send(self)

    def sort_medias(self, medias_sort_attr):
        """Sort the media list by date, ``meta.<key>``, or attribute."""
        if self.medias:
            if medias_sort_attr == 'date':
                # undated media sort as "now" (last in ascending order)
                key = lambda s: s.date or datetime.now()
            elif medias_sort_attr.startswith('meta.'):
                meta_key = medias_sort_attr.split(".", 1)[1]
                key = lambda s: locale.strxfrm(s.meta.get(meta_key, [''])[0])
            else:
                key = lambda s: locale.strxfrm(getattr(s, medias_sort_attr))

            self.medias.sort(key=key,
                             reverse=self.settings['medias_sort_reverse'])

        signals.medias_sorted.send(self)

    @property
    def images(self):
        """List of images (:class:`~sigal.gallery.Image`)."""
        for media in self.medias:
            if media.type == 'image':
                yield media

    @property
    def videos(self):
        """List of videos (:class:`~sigal.gallery.Video`)."""
        for media in self.medias:
            if media.type == 'video':
                yield media

    @property
    def albums(self):
        """List of :class:`~sigal.gallery.Album` objects for each
        sub-directory.
        """
        root_path = self.path if self.path != '.' else ''
        return [self.gallery.albums[join(root_path, path)]
                for path in self.subdirs]

    @property
    def url(self):
        """URL of the album, relative to its parent."""
        # quote UTF-8 bytes so non-ASCII names yield valid URLs
        url = self.name.encode('utf-8')
        return url_quote(url) + '/' + self.url_ext

    @property
    def thumbnail(self):
        """Path to the thumbnail of the album.

        Resolution order: cached value, ``thumbnail`` meta key, first
        landscape image, first media with a thumbnail, then a sub-album's
        thumbnail; ``None`` if nothing is found.
        """
        if self._thumbnail:
            # stop if it is already set
            return self._thumbnail

        # Test the thumbnail from the Markdown file.
        thumbnail = self.meta.get('thumbnail', [''])[0]

        if thumbnail and isfile(join(self.src_path, thumbnail)):
            self._thumbnail = url_from_path(join(
                self.name, get_thumb(self.settings, thumbnail)))
            self.logger.debug("Thumbnail for %r : %s", self, self._thumbnail)
            return self._thumbnail
        else:
            # find and return the first landscape image
            for f in self.medias:
                ext = splitext(f.filename)[1]
                if ext.lower() in self.settings['img_extensions']:
                    # Use f.size if available as it is quicker (in cache), but
                    # fallback to the size of src_path if dst_path is missing
                    size = f.size
                    if size is None:
                        size = get_size(f.src_path)

                    if size['width'] > size['height']:
                        self._thumbnail = (url_quote(self.name) + '/' +
                                           f.thumbnail)
                        self.logger.debug(
                            "Use 1st landscape image as thumbnail for %r : %s",
                            self, self._thumbnail)
                        return self._thumbnail

            # else simply return the 1st media file
            if not self._thumbnail and self.medias:
                for media in self.medias:
                    if media.thumbnail is not None:
                        self._thumbnail = (url_quote(self.name) + '/' +
                                           media.thumbnail)
                        break
                else:
                    # for/else: no media produced a usable thumbnail
                    self.logger.warning("No thumbnail found for %r", self)
                    return None

                self.logger.debug("Use the 1st image as thumbnail for %r : %s",
                                  self, self._thumbnail)
                return self._thumbnail

            # use the thumbnail of their sub-directories
            if not self._thumbnail:
                for path, album in self.gallery.get_albums(self.path):
                    if album.thumbnail:
                        self._thumbnail = (url_quote(self.name) + '/' +
                                           album.thumbnail)
                        self.logger.debug(
                            "Using thumbnail from sub-directory for %r : %s",
                            self, self._thumbnail)
                        return self._thumbnail

        self.logger.error('Thumbnail not found for %r', self)
        return None

    @property
    def random_thumbnail(self):
        """Thumbnail of a random media, or the album thumbnail if empty."""
        try:
            return url_from_path(join(self.name,
                                      random.choice(self.medias).thumbnail))
        except IndexError:
            return self.thumbnail

    @property
    def breadcrumb(self):
        """List of ``(url, title)`` tuples defining the current breadcrumb
        path.
        """
        if self.path == '.':
            return []

        path = self.path
        breadcrumb = [((self.url_ext or '.'), self.title)]

        while True:
            path = os.path.normpath(os.path.join(path, '..'))
            if path == '.':
                break

            url = (url_from_path(os.path.relpath(path, self.path)) + '/' +
                   self.url_ext)
            breadcrumb.append((url, self.gallery.albums[path].title))

        breadcrumb.reverse()
        return breadcrumb

    @property
    def show_map(self):
        """Check if we have at least one photo with GPS location in the album
        """
        return any(image.has_location() for image in self.images)

    @cached_property
    def zip(self):
        """Make a ZIP archive with all media files and return its path.

        If the ``zip_gallery`` setting is set, it contains the location of a
        zip archive with all original images of the corresponding directory.
        """
        zip_gallery = self.settings['zip_gallery']

        if zip_gallery and len(self) > 0:
            zip_gallery = zip_gallery.format(album=self)
            archive_path = join(self.dst_path, zip_gallery)
            if (self.settings.get('zip_skip_if_exists', False) and
                    isfile(archive_path)):
                self.logger.debug("Archive %s already created, passing",
                                  archive_path)
                return zip_gallery

            archive = zipfile.ZipFile(archive_path, 'w', allowZip64=True)
            attr = ('src_path' if self.settings['zip_media_format'] == 'orig'
                    else 'dst_path')

            for p in self:
                path = getattr(p, attr)
                try:
                    # store files flat (basename only) inside the archive
                    archive.write(path, os.path.split(path)[1])
                except OSError as e:
                    # logger.warn() is deprecated since Python 3.3
                    self.logger.warning('Failed to add %s to the ZIP: %s',
                                        p, e)

            archive.close()
            self.logger.debug('Created ZIP archive %s', archive_path)
            return zip_gallery
class Gallery(object):
    """Scan the source tree, build the :class:`Album` index and drive the
    processing/writing of the whole gallery.
    """

    def __init__(self, settings, ncpu=None):
        self.settings = settings
        self.logger = logging.getLogger(__name__)
        self.stats = defaultdict(int)
        self.init_pool(ncpu)
        check_or_create_dir(settings['destination'])

        # Build the list of directories with images
        albums = self.albums = {}
        src_path = self.settings['source']

        ignore_dirs = settings['ignore_directories']
        ignore_files = settings['ignore_files']

        progressChars = cycle(["/", "-", "\\", "|"])
        show_progress = (self.logger.getEffectiveLevel() >= logging.WARNING and
                         os.isatty(sys.stdout.fileno()))
        self.progressbar_target = None if show_progress else Devnull()

        for path, dirs, files in os.walk(src_path, followlinks=True,
                                         topdown=False):
            if show_progress:
                print("\rCollecting albums " + next(progressChars), end="")
            relpath = os.path.relpath(path, src_path)

            # Test if the directory match the ignore_dirs settings
            if ignore_dirs and any(fnmatch.fnmatch(relpath, ignore)
                                   for ignore in ignore_dirs):
                self.logger.info('Ignoring %s', relpath)
                continue

            # Remove files that match the ignore_files settings
            if ignore_files:
                files_path = {join(relpath, f) for f in files}
                for ignore in ignore_files:
                    files_path -= set(fnmatch.filter(files_path, ignore))

                self.logger.debug('Files before filtering: %r', files)
                files = [os.path.split(f)[1] for f in files_path]
                self.logger.debug('Files after filtering: %r', files)

            # Remove sub-directories that have been ignored in a previous
            # iteration (as topdown=False, sub-directories are processed before
            # their parent
            for d in dirs[:]:
                path = join(relpath, d) if relpath != '.' else d
                if path not in albums.keys():
                    dirs.remove(d)

            album = Album(relpath, settings, dirs, files, self)

            if not album.medias and not album.albums:
                self.logger.info('Skip empty album: %r', album)
            else:
                album.create_output_directories()
                albums[relpath] = album

        print("\rCollecting albums, done.")

        with progressbar(albums.values(), label="%16s" % "Sorting albums",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_subdirs(settings['albums_sort_attr'])

        with progressbar(albums.values(), label="%16s" % "Sorting media",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_medias(settings['medias_sort_attr'])

        self.logger.debug('Albums:\n%r', albums.values())
        signals.gallery_initialized.send(self)

    @property
    def title(self):
        """Title of the gallery."""
        return self.settings['title'] or self.albums['.'].title

    def init_pool(self, ncpu):
        """Create the multiprocessing pool (or None for single-process)."""
        try:
            cpu_count = multiprocessing.cpu_count()
        except NotImplementedError:
            cpu_count = 1

        if ncpu is None:
            ncpu = cpu_count
        else:
            try:
                ncpu = int(ncpu)
            except ValueError:
                self.logger.error('ncpu should be an integer value')
                ncpu = cpu_count

        self.logger.info("Using %s cores", ncpu)
        if ncpu > 1:
            self.pool = multiprocessing.Pool(processes=ncpu)
        else:
            self.pool = None

    def get_albums(self, path):
        """Recursively yield ``(path, album)`` for all sub-directories of
        ``path``."""
        for name in self.albums[path].subdirs:
            subdir = os.path.normpath(join(path, name))
            yield subdir, self.albums[subdir]
            for subname, album in self.get_albums(subdir):
                # bugfix: pair each nested path with its own album instead of
                # re-yielding the direct child's album for every descendant
                yield subname, album

    def build(self, force=False):
        "Create the image gallery"
        if not self.albums:
            self.logger.warning("No albums found.")
            return

        def log_func(x):
            # 63 is the total length of progressbar, label, percentage, etc
            available_length = get_terminal_size()[0] - 64
            if x and available_length > 10:
                return x.name[:available_length]
            else:
                return ""

        try:
            with progressbar(self.albums.values(), label="Collecting files",
                             item_show_func=log_func, show_eta=False,
                             file=self.progressbar_target) as albums:
                media_list = [f for album in albums
                              for f in self.process_dir(album, force=force)]
        except KeyboardInterrupt:
            sys.exit('Interrupted')

        bar_opt = {'label': "Processing files",
                   'show_pos': True,
                   'file': self.progressbar_target}
        failed_files = []

        if self.pool:
            try:
                with progressbar(length=len(media_list), **bar_opt) as bar:
                    for res in self.pool.imap_unordered(worker, media_list):
                        if res:
                            failed_files.append(res)
                        bar.update(1)
                self.pool.close()
                self.pool.join()
            except KeyboardInterrupt:
                self.pool.terminate()
                sys.exit('Interrupted')
            except pickle.PicklingError:
                self.logger.critical(
                    "Failed to process files with the multiprocessing feature."
                    " This can be caused by some module import or object "
                    "defined in the settings file, which can't be serialized.",
                    exc_info=True)
                sys.exit('Abort')
        else:
            with progressbar(media_list, **bar_opt) as medias:
                for media_item in medias:
                    res = process_file(media_item)
                    if res:
                        failed_files.append(res)

        if failed_files:
            self.remove_files(failed_files)

        if self.settings['write_html']:
            album_writer = AlbumPageWriter(self.settings,
                                           index_title=self.title)
            album_list_writer = AlbumListPageWriter(self.settings,
                                                    index_title=self.title)
            with progressbar(self.albums.values(),
                             label="%16s" % "Writing files",
                             item_show_func=log_func, show_eta=False,
                             file=self.progressbar_target) as albums:
                for album in albums:
                    if album.albums:
                        if album.medias:
                            self.logger.warning(
                                "Album %s contains sub-albums and images. "
                                "Please move images to their own sub-album. "
                                "Images in album %s will not be visible.",
                                album.title, album.title
                            )
                        album_list_writer.write(album)
                    else:
                        album_writer.write(album)
        print('')
        signals.gallery_build.send(self)

    def remove_files(self, files):
        """Drop failed media from their albums and report them."""
        self.logger.error('Some files have failed to be processed:')
        for path, filename in files:
            self.logger.error(' - %s/%s', path, filename)
            album = self.albums[path]
            for f in album.medias:
                if f.filename == filename:
                    self.stats[f.type + '_failed'] += 1
                    album.medias.remove(f)
                    break
        self.logger.error('You can run "sigal build" in verbose (--verbose) or'
                          ' debug (--debug) mode to get more details.')

    def process_dir(self, album, force=False):
        """Process a list of images in a directory."""
        for f in album:
            if isfile(f.dst_path) and not force:
                self.logger.info("%s exists - skipping", f.filename)
                self.stats[f.type + '_skipped'] += 1
            else:
                self.stats[f.type] += 1
                yield (f.type, f.path, f.filename, f.src_path, album.dst_path,
                       self.settings)
def process_file(args):
# args => ftype, path, filename, src_path, dst_path, settings
processor = process_image if args[0] == 'image' else process_video
ret = processor(*args[3:])
# If the processor return an error (ret != 0), then we return the path and
# filename of the failed file to the parent process.
return args[1:3] if ret else None
def worker(args):
try:
return process_file(args)
except KeyboardInterrupt:
pass
|
en
| 0.762411
|
# Copyright (c) 2009-2018 - <NAME> # Copyright (c) 2013 - <NAME> # Copyright (c) 2014 - <NAME> # Copyright (c) 2015 - <NAME>. # Copyright (c) 2017 - <NAME> # Copyright (c) 2018 - <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. Base Class for media files. Attributes: - ``type``: ``"image"`` or ``"video"``. - ``filename``: Filename of the resized image. - ``thumbnail``: Location of the corresponding thumbnail image. - ``big``: If not None, location of the unmodified image. - ``big_url``: If not None, url of the unmodified image. - ``exif``: If not None contains a dict with the most common tags. For more information, see :ref:`simple-exif-data`. - ``raw_exif``: If not ``None``, it contains the raw EXIF tags. # default: title is the filename URL of the media. Path to the original image, if ``keep_orig`` is set (relative to the album directory). Copy the file if needed. # The image *is* the original, just use it URL of the original media. 
Path to the thumbnail image (relative to the album directory). # if thumbnail is missing (if settings['make_thumbs'] is False) Get image metadata from filename.md: title, description, meta. Gather all informations on an image file. # If a title or description hasn't been obtained by other means, look # for the information in IPTC fields # Nothing to do - we already have title and description Gather all informations on a video file. Gather all informations on an album. Attributes: :var description_file: Name of the Markdown file which gives information on an album :ivar index_url: URL to the index page. :ivar output_file: Name of the output HTML file :ivar meta: Meta data from the Markdown file. :ivar description: description from the Markdown file. For details how to annotate your albums with meta data, see :doc:`album_information`. # optionally add index.html to the URLs #: List of all medias in the album (:class:`~sigal.gallery.Image` and #: :class:`~sigal.gallery.Video`). Get album metadata from `description_file` (`index.md`): -> title, thumbnail image, description # default: get title from directory name Create output directories for thumbnails and original images. List of images (:class:`~sigal.gallery.Image`). List of videos (:class:`~sigal.gallery.Video`). List of :class:`~sigal.gallery.Album` objects for each sub-directory. URL of the album, relative to its parent. Path to the thumbnail of the album. # stop if it is already set # Test the thumbnail from the Markdown file. # find and return the first landscape image # Use f.size if available as it is quicker (in cache), but # fallback to the size of src_path if dst_path is missing # else simply return the 1st media file # use the thumbnail of their sub-directories List of ``(url, title)`` tuples defining the current breadcrumb path. Check if we have at least one photo with GPS location in the album Make a ZIP archive with all media files and return its path. 
If the ``zip_gallery`` setting is set,it contains the location of a zip archive with all original images of the corresponding directory. # Build the list of directories with images # Test if the directory match the ignore_dirs settings # Remove files that match the ignore_files settings # Remove sub-directories that have been ignored in a previous # iteration (as topdown=False, sub-directories are processed before # their parent Title of the gallery. Return the list of all sub-directories of path. # 63 is the total length of progressbar, label, percentage, etc Process a list of images in a directory. # args => ftype, path, filename, src_path, dst_path, settings # If the processor return an error (ret != 0), then we return the path and # filename of the failed file to the parent process.
| 1.534525
| 2
|
definitions.py
|
sandroboehme/cryptotrader
| 2
|
6628892
|
<filename>definitions.py
import os
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(ROOT_PATH, 'config.json')
|
<filename>definitions.py
import os
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(ROOT_PATH, 'config.json')
|
none
| 1
| 1.822795
| 2
|
|
rxsci/math/formal/stddev.py
|
maki-nage/rxsci
| 3
|
6628893
|
import math
import rx.operators as ops
import rxsci as rs
def stddev(key_mapper=lambda i: i, reduce=False):
'''Computes standard deviation
The implementation is based on the formal definition of the standard
deviation. This implies that all items are cached in memory to do the
computation. Use the rxsci.math.stddev operator to compute standard
deviation on a large observable.
The source can be an Observable or a MuxObservable.
Args:
key_mapper: [Optional] a function called on each item before computing
the standard deviation.
reduce: [Optional] Emit an item for each source item when reduce is
False, otherwise emits a single item on completion.
Returns:
An observable emitting standard deviation of source items.
'''
def _stddev(source):
return source.pipe(
rs.math.formal.variance(key_mapper, reduce=reduce),
rs.ops.map(lambda i: math.sqrt(i) if i is not None else None),
)
return _stddev
|
import math
import rx.operators as ops
import rxsci as rs
def stddev(key_mapper=lambda i: i, reduce=False):
'''Computes standard deviation
The implementation is based on the formal definition of the standard
deviation. This implies that all items are cached in memory to do the
computation. Use the rxsci.math.stddev operator to compute standard
deviation on a large observable.
The source can be an Observable or a MuxObservable.
Args:
key_mapper: [Optional] a function called on each item before computing
the standard deviation.
reduce: [Optional] Emit an item for each source item when reduce is
False, otherwise emits a single item on completion.
Returns:
An observable emitting standard deviation of source items.
'''
def _stddev(source):
return source.pipe(
rs.math.formal.variance(key_mapper, reduce=reduce),
rs.ops.map(lambda i: math.sqrt(i) if i is not None else None),
)
return _stddev
|
en
| 0.76935
|
Computes standard deviation The implementation is based on the formal definition of the standard deviation. This implies that all items are cached in memory to do the computation. Use the rxsci.math.stddev operator to compute standard deviation on a large observable. The source can be an Observable or a MuxObservable. Args: key_mapper: [Optional] a function called on each item before computing the standard deviation. reduce: [Optional] Emit an item for each source item when reduce is False, otherwise emits a single item on completion. Returns: An observable emitting standard deviation of source items.
| 3.413259
| 3
|
pylark/api_service_bot_add.py
|
chyroc/pylark
| 7
|
6628894
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class AddBotToChatReq(object):
chat_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "chat_id"}
) # 群的id
@attr.s
class AddBotToChatResp(object):
pass
def _gen_add_bot_to_chat_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=AddBotToChatResp,
scope="Bot",
api="AddBotToChat",
method="POST",
url="https://open.feishu.cn/open-apis/bot/v4/add",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
)
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class AddBotToChatReq(object):
chat_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "chat_id"}
) # 群的id
@attr.s
class AddBotToChatResp(object):
pass
def _gen_add_bot_to_chat_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=AddBotToChatResp,
scope="Bot",
api="AddBotToChat",
method="POST",
url="https://open.feishu.cn/open-apis/bot/v4/add",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
)
|
en
| 0.424663
|
# Code generated by lark_sdk_gen. DO NOT EDIT. # 群的id
| 1.953871
| 2
|
python/xpath_reader.py
|
jasonjimnz/data-science-master
| 0
|
6628895
|
<filename>python/xpath_reader.py
# lxml library is needed
from lxml import etree
# Location of XML file
XML_FILE = "YOUR_XML_DIR"
# Load the XML File in read mode
xfile = open(XML_FILE, 'r')
# Create a etree instance with the XML content
xml_tree = etree.XML(xfile.read())
# Find all Name nodes with xpath
name_nodes = xml_tree.xpath("//Name")
for x, name_node in enumerate(name_nodes):
print("Node %s: %s" % (str(x), name_node.text))
# Find elements in xml_file based in the xpath query
def find_nodes(xml_tree_node, xpath_query):
for x, node in enumerate(xml_tree_node.xpath(xpath_query)):
print("Node %s: %s" % (str(x), node.text))
|
<filename>python/xpath_reader.py
# lxml library is needed
from lxml import etree
# Location of XML file
XML_FILE = "YOUR_XML_DIR"
# Load the XML File in read mode
xfile = open(XML_FILE, 'r')
# Create a etree instance with the XML content
xml_tree = etree.XML(xfile.read())
# Find all Name nodes with xpath
name_nodes = xml_tree.xpath("//Name")
for x, name_node in enumerate(name_nodes):
print("Node %s: %s" % (str(x), name_node.text))
# Find elements in xml_file based in the xpath query
def find_nodes(xml_tree_node, xpath_query):
for x, node in enumerate(xml_tree_node.xpath(xpath_query)):
print("Node %s: %s" % (str(x), node.text))
|
en
| 0.897567
|
# lxml library is needed # Location of XML file # Load the XML File in read mode # Create a etree instance with the XML content # Find all Name nodes with xpath # Find elements in xml_file based in the xpath query
| 4.017463
| 4
|
data/dataset.py
|
TencentYoutuResearch/ActiveLearning-SDM
| 4
|
6628896
|
<gh_stars>1-10
from torchvision.datasets import VisionDataset
import torch
from PIL import Image
import os
import sys
import numpy as np
# load different dataset
def get_data(name,path,transform=None,tr_or_te='train',n_views=1):
assert name in ['OfficeHome', 'DomainNet', 'Office31']
if name == 'OfficeHome':
return ImageList(path+'.txt',transform=transform,n_views=n_views)
elif name == 'DomainNet':
return ImageList(path+'_'+tr_or_te+'.txt',transform=transform,n_views=n_views)
elif name == 'Office31':
return ImageList(path+'.txt',transform=transform,n_views=n_views)
# PIL loader
def pil_loader(path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
# active learning should implement add_item function and remove_item function
class ImageList(VisionDataset):
def __init__(self, root, transform=None, contrastive_transform=None, n_views=1):
super(ImageList, self).__init__(root, transform=transform)
# self.samples = np.loadtxt(root, dtype=np.unicode_, delimiter=' ')
self.samples = np.loadtxt(root, dtype=np.dtype((np.unicode_, 1000)), delimiter=' ')
self.loader = pil_loader
self.contrastive_transform = contrastive_transform
self.n_views = n_views
def __getitem__(self, index):
path, target = self.samples[index]
target = int(target)
sample = self.loader(path)
if self.transform is not None:
if self.n_views == 1:
sample = self.transform(sample)
else:
sample = [self.transform(sample) for i in range(self.n_views)]
sample = torch.stack(sample, dim=0)
# sample = torch.cat(sample, dim=1)
# sample = self.transform(sample)
return sample, target, path, index
def __len__(self):
return len(self.samples)
def add_item(self, addition):
self.samples = np.concatenate((self.samples, addition), axis=0)
return self.samples
def remove_item(self, reduced):
reduced = reduced.astype('int64')
self.samples = np.delete(self.samples, reduced, axis=0)
return self.samples
|
from torchvision.datasets import VisionDataset
import torch
from PIL import Image
import os
import sys
import numpy as np
# load different dataset
def get_data(name,path,transform=None,tr_or_te='train',n_views=1):
assert name in ['OfficeHome', 'DomainNet', 'Office31']
if name == 'OfficeHome':
return ImageList(path+'.txt',transform=transform,n_views=n_views)
elif name == 'DomainNet':
return ImageList(path+'_'+tr_or_te+'.txt',transform=transform,n_views=n_views)
elif name == 'Office31':
return ImageList(path+'.txt',transform=transform,n_views=n_views)
# PIL loader
def pil_loader(path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
# active learning should implement add_item function and remove_item function
class ImageList(VisionDataset):
def __init__(self, root, transform=None, contrastive_transform=None, n_views=1):
super(ImageList, self).__init__(root, transform=transform)
# self.samples = np.loadtxt(root, dtype=np.unicode_, delimiter=' ')
self.samples = np.loadtxt(root, dtype=np.dtype((np.unicode_, 1000)), delimiter=' ')
self.loader = pil_loader
self.contrastive_transform = contrastive_transform
self.n_views = n_views
def __getitem__(self, index):
path, target = self.samples[index]
target = int(target)
sample = self.loader(path)
if self.transform is not None:
if self.n_views == 1:
sample = self.transform(sample)
else:
sample = [self.transform(sample) for i in range(self.n_views)]
sample = torch.stack(sample, dim=0)
# sample = torch.cat(sample, dim=1)
# sample = self.transform(sample)
return sample, target, path, index
def __len__(self):
return len(self.samples)
def add_item(self, addition):
self.samples = np.concatenate((self.samples, addition), axis=0)
return self.samples
def remove_item(self, reduced):
reduced = reduced.astype('int64')
self.samples = np.delete(self.samples, reduced, axis=0)
return self.samples
|
en
| 0.404559
|
# load different dataset # PIL loader # active learning should implement add_item function and remove_item function # self.samples = np.loadtxt(root, dtype=np.unicode_, delimiter=' ') # sample = torch.cat(sample, dim=1) # sample = self.transform(sample)
| 2.615225
| 3
|
dcgm_pbs.py
|
fengxizhou/dcgm-pbs
| 0
|
6628897
|
#!/usr/bin/python2
import sys
import argparse
import subprocess
import glob
import re
import os
gpuDeviceRE = re.compile(r'/dev/nvidia\d+')
whilespaceSepRE = re.compile(r'[,\s+]')
dcgmCreateGroupRE = re.compile(r'Successfully.+\s+(\d+)$')
def get_all_supported_gpus():
devices = {}
nvidia_devices = glob.glob("/dev/nvidia*")
for dev in nvidia_devices:
if gpuDeviceRE.match(dev):
proc = subprocess.Popen(['ls', '-al', dev], stdout=subprocess.PIPE)
proc.wait()
output = proc.communicate()[0].decode().rstrip()
fields = whilespaceSepRE.split(output)
devices[fields[4]+':'+fields[6]] = int(fields[6])
return devices
def get_attached_gpus(jobid, gpu_devices=None):
if gpu_devices is None:
gpu_devices = get_all_supported_gpus()
attached_devices = []
cgroup_devices = '/sys/fs/cgroup/devices/pbspro.service/jobid/{}/devices.list'.format(jobid)
try:
with open(cgroup_devices) as f:
for line in f.readlines():
fields = whilespaceSepRE.split(line)
if len(fields) >= 3 and fields[1] in gpu_devices:
attached_devices.append(gpu_devices[fields[1]])
except IOError:
print("can not open {}".format(cgroup_devices))
pass
return attached_devices
def start_collection(jobid):
attached_devices = get_attached_gpus(jobid)
attached_devices_s = ','.join([str(d) for d in attached_devices])
groupId = -1
if len(attached_devices) >= 1:
proc = subprocess.Popen(['dcgmi', 'group', '-c', jobid, '-a', attached_devices_s], stdout=subprocess.PIPE)
proc.wait()
output = proc.communicate()[0].decode().split('\n')
if len(output) > 0:
m = dcgmCreateGroupRE.match(output[0])
if m:
groupId = int(m.group(1))
with open("/tmp/dcgm-group-{}".format(jobid), "w+") as f:
f.write(str(groupId))
if groupId != -1:
subprocess.call(['dcgmi', 'stats', '-g', str(groupId), '-e'])
subprocess.call(['dcgmi', 'stats', '-g', str(groupId), '-s', jobid])
def stop_collection(jobid):
subprocess.call(['dcgmi', 'stats', '-x', jobid])
subprocess.call(['dcgmi', 'stats', '-v', '-j', jobid])
with open("/tmp/dcgm-group-{}".format(jobid), "r") as f:
groupId = f.readline()
subprocess.call(['dcgmi', 'group', '-d', groupId])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="script to collect a job's gpu statistics")
parser.add_argument("jobid")
parser.add_argument("user")
parser.add_argument("group")
args = parser.parse_args()
#start_collection(args.jobid, args.user, args.group)
attached_gpus = get_attached_gpus(args.jobid)
print(attached_gpus)
start_collection(args.jobid)
import time
time.sleep(10)
stop_collection(args.jobid)
|
#!/usr/bin/python2
import sys
import argparse
import subprocess
import glob
import re
import os
gpuDeviceRE = re.compile(r'/dev/nvidia\d+')
whilespaceSepRE = re.compile(r'[,\s+]')
dcgmCreateGroupRE = re.compile(r'Successfully.+\s+(\d+)$')
def get_all_supported_gpus():
devices = {}
nvidia_devices = glob.glob("/dev/nvidia*")
for dev in nvidia_devices:
if gpuDeviceRE.match(dev):
proc = subprocess.Popen(['ls', '-al', dev], stdout=subprocess.PIPE)
proc.wait()
output = proc.communicate()[0].decode().rstrip()
fields = whilespaceSepRE.split(output)
devices[fields[4]+':'+fields[6]] = int(fields[6])
return devices
def get_attached_gpus(jobid, gpu_devices=None):
if gpu_devices is None:
gpu_devices = get_all_supported_gpus()
attached_devices = []
cgroup_devices = '/sys/fs/cgroup/devices/pbspro.service/jobid/{}/devices.list'.format(jobid)
try:
with open(cgroup_devices) as f:
for line in f.readlines():
fields = whilespaceSepRE.split(line)
if len(fields) >= 3 and fields[1] in gpu_devices:
attached_devices.append(gpu_devices[fields[1]])
except IOError:
print("can not open {}".format(cgroup_devices))
pass
return attached_devices
def start_collection(jobid):
attached_devices = get_attached_gpus(jobid)
attached_devices_s = ','.join([str(d) for d in attached_devices])
groupId = -1
if len(attached_devices) >= 1:
proc = subprocess.Popen(['dcgmi', 'group', '-c', jobid, '-a', attached_devices_s], stdout=subprocess.PIPE)
proc.wait()
output = proc.communicate()[0].decode().split('\n')
if len(output) > 0:
m = dcgmCreateGroupRE.match(output[0])
if m:
groupId = int(m.group(1))
with open("/tmp/dcgm-group-{}".format(jobid), "w+") as f:
f.write(str(groupId))
if groupId != -1:
subprocess.call(['dcgmi', 'stats', '-g', str(groupId), '-e'])
subprocess.call(['dcgmi', 'stats', '-g', str(groupId), '-s', jobid])
def stop_collection(jobid):
subprocess.call(['dcgmi', 'stats', '-x', jobid])
subprocess.call(['dcgmi', 'stats', '-v', '-j', jobid])
with open("/tmp/dcgm-group-{}".format(jobid), "r") as f:
groupId = f.readline()
subprocess.call(['dcgmi', 'group', '-d', groupId])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="script to collect a job's gpu statistics")
parser.add_argument("jobid")
parser.add_argument("user")
parser.add_argument("group")
args = parser.parse_args()
#start_collection(args.jobid, args.user, args.group)
attached_gpus = get_attached_gpus(args.jobid)
print(attached_gpus)
start_collection(args.jobid)
import time
time.sleep(10)
stop_collection(args.jobid)
|
en
| 0.23361
|
#!/usr/bin/python2 #start_collection(args.jobid, args.user, args.group)
| 2.513458
| 3
|
kayla/log_helpers/log_http_handler.py
|
ducminhgd/kayla-project
| 0
|
6628898
|
"""
HTTP Handlers
"""
from logging.handlers import HTTPHandler
import requests
class LogHttpHandler(HTTPHandler):
"""
Simple HTTP Handler
"""
def __init__(self, logPath, host, url, method, protocol='http'):
"""
Constructor
:param logPath: log path on HTTP server
:param host: Host name or IP
:param url: URL of web services
:param method: HTTP method
:param protocol: HTTP or HTTPS
"""
HTTPHandler.__init__(self, host, url, method)
self.logPath = logPath
self.session = requests.Session()
self.protocol = protocol
def mapLogRecord(self, record):
"""
Map log record as required format of HTTP/HTTPS server
:param record:
:return:
"""
record_modified = HTTPHandler.mapLogRecord(self, record)
record_modified['logPath'] = self.logPath
record_modified['msg'] = record_modified['msg'].encode('utf-8')
return record_modified
def emit(self, record):
"""
Emit log
:param record:
:return:
"""
try:
host = self.host
url = self.url
url = self.protocol + '://' + host + '/' + url
data = self.mapLogRecord(record)
self.session.post(url, data=data, timeout=10)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
|
"""
HTTP Handlers
"""
from logging.handlers import HTTPHandler
import requests
class LogHttpHandler(HTTPHandler):
"""
Simple HTTP Handler
"""
def __init__(self, logPath, host, url, method, protocol='http'):
"""
Constructor
:param logPath: log path on HTTP server
:param host: Host name or IP
:param url: URL of web services
:param method: HTTP method
:param protocol: HTTP or HTTPS
"""
HTTPHandler.__init__(self, host, url, method)
self.logPath = logPath
self.session = requests.Session()
self.protocol = protocol
def mapLogRecord(self, record):
"""
Map log record as required format of HTTP/HTTPS server
:param record:
:return:
"""
record_modified = HTTPHandler.mapLogRecord(self, record)
record_modified['logPath'] = self.logPath
record_modified['msg'] = record_modified['msg'].encode('utf-8')
return record_modified
def emit(self, record):
"""
Emit log
:param record:
:return:
"""
try:
host = self.host
url = self.url
url = self.protocol + '://' + host + '/' + url
data = self.mapLogRecord(record)
self.session.post(url, data=data, timeout=10)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
|
en
| 0.519939
|
HTTP Handlers Simple HTTP Handler Constructor :param logPath: log path on HTTP server :param host: Host name or IP :param url: URL of web services :param method: HTTP method :param protocol: HTTP or HTTPS Map log record as required format of HTTP/HTTPS server :param record: :return: Emit log :param record: :return:
| 3.330537
| 3
|
server/server.py
|
aless80/Tantrix
| 3
|
6628899
|
<reponame>aless80/Tantrix
import sys
#sys.path.insert(0, '/home/kinkyboy/tantrix/PodSixNet')
sys.path.append('../PodSixNet')
#sys.path.append('./PodSixNet')
#import PodSixNet.Server and PodSixNet.Channel
from PodSixNet.Channel import Channel
from PodSixNet.Server import Server
from time import time, sleep #Ale 09/2017
import random
class ClientChannel(Channel, object):
"""Receive messages from client.
NB: self._server refers to tantrixServer ie the instance of TantrixServer"""
def Network_serverListener(self, data):
command = data.pop('command')
data.pop('action')
print("\nReceiving for " + command + ":\n " + str(data))
method = getattr(self, command)
method(data)
def pingserver(self, data): # ALE
"""Print the remaining connections"""
#print("\n" + str(self._server.allConnections))
#self._server.sendUpdateTreeview()
# Ale - also added stuff in last class
self._server.retrieveLastContact(data['sender'])
self._server.updateLastContact(data['sender'])
# print("\n" + str(alive))
def chat(self, data):
msgList = data['msgList']
sendername = self._server.allConnections.getNameFromAddr(data['sender'])
msgList[0] = sendername + ": " + msgList[0]
self._server.sendChatToWRoom(msgList)
def solitaire(self, data):
"""Mark players who are going solitaire in allConnections"""
for ind in range(self._server.allConnections.count()):
if not self._server.allConnections.addr[ind] == data['sender']:
continue
else:
self._server.allConnections.ready[ind] = -2
self._server.allConnections.ready[ind] = -2
"""Send the updated connection"""
self._server.sendUpdateTreeview()
"""Send message to wroom that players have started a game so that they update the logbox"""
player1 = self._server.allConnections.name[ind]
self._server.sendGameStarted(player1, 'Solitaire')
def toggleReady(self, data):
addr = data["sender"]
#print("\nReceiving in server.ClientChannel.Network_toggleReady() from player {}:\n {}".format(str(addr), str(data)))
ready = self._server.allConnections.toggleReadyFromAddr(addr)
ind_game = self._server.checkConnections()
"""Send the updated connection"""
self._server.sendUpdateTreeview()
#"""Print the remaining connections"""
#print("\n" + str(self._server.allConnections))
"""Send message to wroom that player has toggled ready so that they update the logbox"""
player = self._server.allConnections.getNameFromAddr(addr)
self._server.sendPlayerToggledReady(player, ready)
"""Send to wroom that players have started a game"""
if ind_game:
player1 = self._server.allConnections.name[ind_game[0]]
player2 = self._server.allConnections.name[ind_game[0]]
self._server.sendGameStarted(player1, 'Game', player2 = player2)
def confirm(self, data):
#deconsolidate all of the data from the dictionary
#rowcolnum = data["rowcolnum"] not used
#player number (1 or 0)
sender = data["sender"]
#tells server to place line
data['action'] = "clientListener"
data['command'] = "playConfirmedMove"
self._server.placeMove(data, sender)
def name(self, data):
"""Name changed"""
sender = data["sender"]
newname = data["newname"]
self._server.updateName(sender, newname)
def color(self, data):
"""Color changed"""
sender = data["sender"]
newcolor = data["newcolor"]
self._server.updateColor(sender, newcolor)
def quit(self, data):
"""One player has quit"""
quitter = data['sender']
self._server.processQuitter(quitter) #ALE sept 2017
'''#ind = self._server.allConnections.getIndexFromAddr(quitter)
#gametype = self._server.allConnections.ready[ind]
"""Tell other players that one has quit. Must do it inside TantrixServer"""
self._server.tellAllAboutQuitter(quitter)
"""Delete the quitter from allConnections"""
self._server.allConnections.removeConnection(quitter)
self._server.sendUpdateTreeview()
#"""Send message to wroom that one player has quit a game so that they update the logbox"""
#self._server.sendGameQuit(quitter, gametype)
'''
class TantrixServer(Server, object):
"""Send message to clients"""
channelClass = ClientChannel #needed!
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
self.gameIndex = 0
self.allConnections = WaitingConnections()
def sendChatToWRoom(self, msgList):
data = {"action": "clientListener", "command": "receiveChat", "msgList": msgList}
self.sendToAllWRoom(data)
def sendGameStarted(self, player1, gametype, player2 = None):
data = {"action": "clientListener", "command": "hasstartedgame", "player1": player1, "gametype": gametype, 'player2': player2 }
self.sendToAllWRoom(data)
"""def sendGameQuit(self, quitter, gametype):
data = {"action": "clientListener", "command": "hasstartedgame", "player1": player1, "gametype": gametype, 'player2': player2 }
self.sendToAllWRoom(data)\
(quitter, gametype)
"""
def sendPlayerToggledReady(self, player, ready):
data = {"action": "clientListener", "command": "hastoggledready", "player": player, "ready": ready}
self.sendToAllWRoom(data)
def sendUpdateTreeview(self):
listVal = self.allConnections.getAsList()
data = {"action": "clientListener", "command": "updateTreeview", "listVal": listVal}
self.sendToAllWRoom(data)
def checkConnections(self):
"""Check if there are 2 connection ready. In that case start the games"""
#print("\n" + str(self.allConnections))
"""Check if at least two players are ready"""
players_ready = 0
ind_game = []
for ind in range(self.allConnections.count()):
if self.allConnections.ready[ind] == 1:
players_ready += 1
#tempind = ind
ind_game.append(ind)
#TODO: currently the first two players who are ready will start the game. add a confirmation popup?
if players_ready < 2:
return False
else:
self.sendStartgame(ind_game)
return ind_game
def sendStartgame(self, ind_game):
"""Initialize a game with two players"""
self.gameIndex += 1
game = Game(self.allConnections.players[ind_game[0]], self.gameIndex)
"""Add all players to game"""
game.addPlayer(self.allConnections.players[ind_game[1]])
"""Start the game. Add game to both connections (self.allConnections.game), set ready = -1"""
for ind in ind_game:
self.allConnections.addGame(game, self.allConnections.addr[ind])
self.allConnections.ready[ind] = -1
self.doSendStartingGame(ind_game)
def Connected(self, player, addr):
"""self.game contains the array .players"""
print("\nReceiving a new connection: \nchannel = {},address = {}".format(player, addr))
"""Create or edit a game""" #TODO move this once players in wroom confirm each other
if not self.allConnections.game:
self.gameIndex += 1
#name = "Player" + str(addr[1])
name = "Player" + str(self.allConnections.count() + 1)
colors = ["red", "blue", "yellow", "green"]
color = colors.pop(self.allConnections.count() % 4)
self.allConnections.addConnection(player, addr, 0, name = name, color = color)
"""Send confirmation that client has connected. send back the client's address"""
data = {"action": "clientListener", "command": "clientIsConnected", "addr": addr, "color": color, "yourname": name}
self.sendToPlayer(player, data)
"""Send an update to Treeview"""
self.sendUpdateTreeview()
def sendToAllWRoom(self, data):
"""Send to all players that are in wroom, ie are not playing"""
[self.sendToPlayer(self.allConnections.players[ind], data) for ind in range(self.allConnections.count()) if self.allConnections.ready[ind] >= 0]
def sendToPlayer(self, player, data):
player.Send(data)
"""Print to terminal"""
datacp = data.copy() #so that I can edit it
name = self.allConnections.getNameFromPlayer(player)
datacp.pop('action')
command = datacp.pop('command')
print("\nSent to " + name + " for " + command + ": " + str(datacp))
#TODO merge with clientIsConnected?
def doSendStartingGame(self, ind_game):
playernames = [self.allConnections.name[j] for j in ind_game]
playercolors = [self.allConnections.color[j] for j in ind_game]
rndgen = random.Random()
seed = rndgen.randint(0,1000)
for i, ind in enumerate(ind_game):
"""Get the opponent's name"""
playernamescp = list(playernames)
playercolorscp = list(playercolors)
playernamescp.pop(i) #because there are two players
playercolor = playercolorscp.pop(i) #because there are two players
opponentname = playernamescp[0]
opponentcolor = playercolorscp[0]
"""Send stargame"""
data = {"action": "clientListener", "command": "startgame", "player_num": i+1,
"gameid": self.allConnections.game[ind].gameid, "changecolor": False,
"opponentname": opponentname, "opponentcolor": opponentcolor, "playerIsTabUp": i==0, "seed": seed}
if playercolor == opponentcolor:
data['changecolor'] = True
self.allConnections.game[ind] = None
self.sendToPlayer(self.allConnections.players[ind], data)
def placeMove(self, data, sender):
"""A move has been confirmed. Store it"""
game = self.allConnections.getGameFromAddr(sender)
#data['turnUpDown'] = data['turnUpDown'] + 1
game.placeLine(data, sender)
    def processQuitter(self, quitter):
        """Handle a client (*quitter* is its addr tuple) that quit or timed out.

        Notifies every player sharing the quitter's game, removes the
        connection bookkeeping, then refreshes the waiting-room Treeview.
        """
        """Tell other players that one has quit. Must do it inside TantrixServer"""
        self.tellAllAboutQuitter(quitter)
        """Delete the quitter from allConnections"""
        self.allConnections.removeConnection(quitter)
        self.sendUpdateTreeview()
        # """Send message to wroom that one player has quit a game so that they update the logbox"""
        # self._server.sendGameQuit(quitter, gametype)
    def tellAllAboutQuitter(self, quitter):
        """Send a 'hasquit' message to every other player in the quitter's game."""
        ind = self.allConnections.addr.index(quitter)
        dataAll = {"action": "clientListener", "command": "hasquit", "quitter": quitter,
                   "quitterName": self.allConnections.name[ind]}
        for i in range(self.allConnections.count()):
            # a peer shares the quitter's Game object -> it must be informed
            if i != ind and self.allConnections.game[i] == self.allConnections.game[ind]:
                p = self.allConnections.players[i]
                a = self.allConnections.addr[i]  # NOTE(review): unused; kept for parity
                n = self.allConnections.name[i]
                print("\nSending to client {}:\n {}".format(n, str(dataAll)))
                p.Send(dataAll)
def updateName(self, sender, newname):
"""Edit name stored in allConnection"""
index = self.allConnections.getIndexFromAddr(sender)
"""Check that name is valid"""
def validName(newname):
"""Check that name has an allowed format"""
"""Check that name is not already taken"""
if newname in [conn for conn in self.allConnections.name]:
return False
"""Check that newname begins with non-numeric character"""
import re
if re.match('^[a-zA-Z]+', newname) is None:
return False
return True
"""Send to clients new name if valid or old name"""
if validName(newname):
data = {"action": "clientListener", "command": "newname", "name": newname}
self.sendToPlayer(self.allConnections.players[index], data)
self.allConnections.name[index] = newname
"""Send update to all in Waiting Room"""
self.sendUpdateTreeview()
else:
name = self.allConnections.getNameFromAddr(sender)
data = {"action": "clientListener", "command": "newname", "name": name}
self.sendToPlayer(self.allConnections.players[index], data)
def updateColor(self, sender, newcolor):
"""Edit color stored in allConnection"""
index = self.allConnections.getIndexFromAddr(sender)
self.allConnections.color[index] = newcolor
"""Send update to all in Waiting Room"""
self.sendUpdateTreeview()
    def updateLastContact(self, sender):  #ALE sept 2017
        """Refresh the last-contact timestamp for *sender*.

        Clients ping the server regularly; each ping lands here.
        """
        t = self.allConnections.setLastContact(sender)
        #print("\nTantrixServer.updateLastContact for %s at %f" % (str(sender),time))
    def retrieveLastContact(self, sender):  #ALE sept 2017
        """Print (for debugging) the time of last contact with *sender*."""
        lastContact = self.allConnections.getLastContact(sender)
        print("\nretrieveLastContact for %s: %f" % (str(sender), lastContact))
def ping(self, index): #ALE sept 2017
"""Ping a client. The client should respond by pinging back so that connection is established"""
data = {"action": "clientListener", "command": "pingclient"}
self.sendToPlayer(self.allConnections.players[index], data)
    def checkContacts(self):  #ALE sept 2017
        """Periodically verify that every client is still reachable.

        Silence thresholds: >25s -> treat as quit; >10s -> log only;
        >4s -> ping the client to prompt a reply.
        """
        #print("\nserver TantrixServer.checkContacts")
        t = time()
        last = self.allConnections.lastContact
        for ind, lc in enumerate(last):
            if t - lc > 25:
                """If a client is lost, delete it from the list of connections and notify all other clients"""
                print('TantrixServer.checkContacts: deleting client with index %i after %i seconds of no connection' % (ind, int(t - lc)))
                quitter = self.allConnections.addr[ind]
                # NOTE(review): processQuitter mutates allConnections while this
                # loop iterates its lastContact list -- confirm safe when more
                # than one client is lost in the same pass.
                self.processQuitter(quitter)
            elif t - lc > 10:
                print('Lost contact for %i seconds with index %i' % (int(t - lc), ind))
            elif t - lc > 4:
                print('Ping client with index %i' % ind)
                self.ping(ind)
class Game(object):
    """State for one two-player game: the players, whose turn it is, and an id."""

    def __init__(self, player, gameIndex):
        # whose turn it is (1 or 0)
        self.turn = 1
        # storage for confirmed moves (currently unused)
        self._confirmedgame = []
        # players in this game, including the one who started it
        self.players = []
        self.addPlayer(player)
        # unique id of this game
        self.gameid = gameIndex

    def __str__(self):
        return str(self.gameid)

    def addPlayer(self, player):
        """Add *player* unless it is None or already registered."""
        if player is not None and player not in self.players:
            self.players.append(player)
        else:
            print("Game.addPlayer failed: player is None or was already added")

    def placeLine(self, data, sender):
        """Record a confirmed move and forward it to the opponent(s).

        If the client's 'turnUpDown' disagrees with the server's turn counter,
        resynchronize; then flip the turn and relay *data* to every other
        player in the sender's game.
        """
        turnUpDown = data['turnUpDown']
        # BUG FIX: compare ints by value (!=), not identity (`is not`) --
        # identity comparison of ints is a CPython caching artifact.
        if self.turn != turnUpDown:
            print(" \n\n>>>>>>>>>>placeLine: self.turn is not data['turnUpDown']: {}~={}".format(self.turn, turnUpDown))
            self.turn += 1
            data['turnUpDown'] = self.turn
            print(" \n\n>>>>>>>>self.turnUpDown=" + str(self.turn) + "\n\n")
        if 1 or sender == self.turn + 1:  #TODO actually validate it is the sender's turn
            self.turn = 0 if self.turn else 1
        # relay the move to the other player(s)
        #TODO mv everythiong to TantrixServer -- relies on the module-global tantrixServer
        opponents = tantrixServer.allConnections.getOpponentsFromAddress(sender)
        for o in opponents:
            print("\nSending to other player:\n " + str(data))
            o.Send(data)
class WaitingConnections(object):
    """Parallel-list bookkeeping of every connected client.

    Index *i* across players/addr/game/ready/name/color/lastContact always
    refers to the same connection.  ready semantics: 0 = idle, 1 = ready,
    -1 = playing a two-player game, -2 = playing solitaire.
    """

    def __init__(self):
        """Initialize the (empty) parallel lists."""
        self.players = []      # PodSixNet channel objects
        self.addr = []         # (host, port) tuples, used as client keys
        self.game = []         # Game instance or None
        self.ready = []        # see class docstring
        self.name = []         # display names
        self.color = []        # display colors
        self.lastContact = []  # time() of last contact, ALE sept 2017

    def getAsList(self):
        """Return the connections as rows for the Treeview in wroom, e.g.
        [['Alessandro', 0, 43932, 'None', 'red'], ['Mararie', -1, 2, '1', 'yellow'], ...]"""
        return [[self.name[ind], self.ready[ind], self.addr[ind][1], str(self.game[ind]), self.color[ind]]
                for ind in range(self.count())]

    def addConnection(self, player, addr, ready=0, game=None, name="unknown", color="cyan"):
        """Register a new client; stamps its last-contact time with now."""
        self.players.append(player)
        self.addr.append(addr)
        self.ready.append(ready)
        self.game.append(game)
        self.name.append(name)
        self.color.append(color)
        self.lastContact.append(time())

    def addGame(self, game, addr):
        """Attach *game* to the connection identified by *addr*."""
        ind = self.addr.index(addr)
        self.game[ind] = game

    def removeConnection(self, addr):
        """Drop every record of the connection identified by *addr*."""
        ind = self.addr.index(addr)
        self.addr.pop(ind)
        self.players.pop(ind)
        self.game.pop(ind)
        self.ready.pop(ind)
        self.name.pop(ind)
        self.color.pop(ind)
        self.lastContact.pop(ind)  # ALE sept 2017

    def count(self):
        """Number of tracked connections."""
        return len(self.players)

    def getIndexFromAddr(self, addr):
        return self.addr.index(addr)

    def getNameFromAddr(self, addr):
        ind = self.getIndexFromAddr(addr)
        return self.name[ind]

    def getGameFromPlayer(self, player):
        ind = self.players.index(player)
        return self.game[ind]

    def getNameFromPlayer(self, player):
        ind = self.players.index(player)
        return self.name[ind]

    def getGameFromAddr(self, addr):
        ind = self.addr.index(addr)
        return self.game[ind]

    def getPlayerFromAddr(self, addr):
        ind = self.addr.index(addr)
        return self.players[ind]

    def getColorFromAddr(self, addr):
        ind = self.addr.index(addr)
        return self.color[ind]

    def getOpponentsFromAddress(self, addr):
        """Given a player's address, return the other players in its game."""
        game = self.getGameFromAddr(addr)
        ind_sender = self.getIndexFromAddr(addr)
        opponents = []
        for ind in range(self.count()):
            if ind != ind_sender and self.game[ind] == game:
                opponents.append(self.players[ind])
        return opponents

    def getLastContact(self, addr):  # ALE sept 2017
        ind = self.addr.index(addr)
        return self.lastContact[ind]

    def setLastContact(self, addr):  # ALE sept 2017
        """Stamp the connection's last-contact time with now; return that time."""
        ind = self.addr.index(addr)
        t = time()
        self.lastContact[ind] = t
        return t

    def toggleReadyFromAddr(self, addr):
        """Toggle the ready flag for *addr*; return the new 'ready' status."""
        try:
            ind = self.addr.index(addr)
        except ValueError:  # narrowed from a bare except: index() raises ValueError
            import inspect
            print("Unexpected error at :", inspect.stack()[0][3])
            print("addr=" + str(addr) + " is not contained in self.addr=" + str(self.addr))
            raise
        self.ready[ind] = (self.ready[ind] + 1) % 2
        return self.ready[ind]

    def __str__(self):
        string = "Connections:\n<======================"
        string += "\nname, ready, addr, players, game, color, last contact:\n"
        for ind in range(self.count()):
            # BUG FIX: the format string previously had only 5 placeholders for
            # 7 arguments, silently dropping color and lastContact.
            string += "{}, {}, {}, {}, {}, {}, {}\n".format(
                str(self.name[ind]),
                str(self.ready[ind]),
                str(self.addr[ind]),
                str(self.players[ind]),
                str(self.game[ind]),
                self.color[ind],
                self.lastContact[ind])
        string += "======================>\n"
        return string
def launch():
    """Parse host:port from argv (default localhost:31425) and run the server loop."""
    # BUG FIX: Game.placeLine reads the module-global `tantrixServer`; the
    # original assigned it as a local of launch(), so that lookup would fail.
    global tantrixServer
    if len(sys.argv) != 2:
        print("Usage:", sys.argv[0], "host:port")
        print(" e.g.", sys.argv[0], "localhost:31425")
        # launch anyway with defaults
        host = "localhost"
        port = 31425
        print("Launching with host, port = %s , %d" % (host, port))
    else:
        host, port = sys.argv[1].rsplit(":", 1)
    print("STARTING SERVER ON LOCALHOST")
    try:
        tantrixServer = TantrixServer(localaddr=(host, int(port)))  # e.g. 'localhost', 1337
    except Exception:  # narrowed from a bare except: don't swallow KeyboardInterrupt/SystemExit
        print("Cannot start server. Address '" + host + "' already in use on port " + str(port))
        return
    while True:
        t = time()
        # roughly every 4 seconds (t*100 divisible by 400) check client liveness
        if len(tantrixServer.allConnections.players) and not (int(t * 100) % 400):  #ALE sept 2017
            tantrixServer.checkContacts()
        tantrixServer.Pump()
        sleep(0.01)


if __name__ == "__main__":
    launch()
|
import sys
#sys.path.insert(0, '/home/kinkyboy/tantrix/PodSixNet')
sys.path.append('../PodSixNet')
#sys.path.append('./PodSixNet')
#import PodSixNet.Server and PodSixNet.Channel
from PodSixNet.Channel import Channel
from PodSixNet.Server import Server
from time import time, sleep #Ale 09/2017
import random
class ClientChannel(Channel, object):
    """Per-client channel: receives messages from one client.

    NB: self._server refers to tantrixServer, the TantrixServer instance.
    """

    def Network_serverListener(self, data):
        """Dispatch an incoming message to the method named by its 'command' key."""
        command = data.pop('command')
        data.pop('action')
        print("\nReceiving for " + command + ":\n " + str(data))
        method = getattr(self, command)
        method(data)

    def pingserver(self, data):  # ALE
        """Client ping: log and refresh the client's last-contact time."""
        self._server.retrieveLastContact(data['sender'])
        self._server.updateLastContact(data['sender'])

    def chat(self, data):
        """Prefix the chat message with the sender's name and broadcast it."""
        msgList = data['msgList']
        sendername = self._server.allConnections.getNameFromAddr(data['sender'])
        msgList[0] = sendername + ": " + msgList[0]
        self._server.sendChatToWRoom(msgList)

    def solitaire(self, data):
        """Mark the sender as playing solitaire (ready == -2) and notify the room."""
        for ind in range(self._server.allConnections.count()):
            if not self._server.allConnections.addr[ind] == data['sender']:
                continue
            # found the sender (the original assigned ready twice; once suffices)
            self._server.allConnections.ready[ind] = -2
            """Send the updated connection"""
            self._server.sendUpdateTreeview()
            """Send message to wroom that players have started a game so that they update the logbox"""
            player1 = self._server.allConnections.name[ind]
            self._server.sendGameStarted(player1, 'Solitaire')

    def toggleReady(self, data):
        """Flip the sender's ready flag; a game starts once two players are ready."""
        addr = data["sender"]
        ready = self._server.allConnections.toggleReadyFromAddr(addr)
        ind_game = self._server.checkConnections()
        """Send the updated connection"""
        self._server.sendUpdateTreeview()
        """Send message to wroom that player has toggled ready so that they update the logbox"""
        player = self._server.allConnections.getNameFromAddr(addr)
        self._server.sendPlayerToggledReady(player, ready)
        """Send to wroom that players have started a game"""
        if ind_game:
            player1 = self._server.allConnections.name[ind_game[0]]
            # BUG FIX: the second player is at ind_game[1], not ind_game[0]
            player2 = self._server.allConnections.name[ind_game[1]]
            self._server.sendGameStarted(player1, 'Game', player2=player2)

    def confirm(self, data):
        """A move was confirmed client-side: relabel it and relay to the opponent."""
        sender = data["sender"]
        data['action'] = "clientListener"
        data['command'] = "playConfirmedMove"
        self._server.placeMove(data, sender)

    def name(self, data):
        """Client requested a name change."""
        sender = data["sender"]
        newname = data["newname"]
        self._server.updateName(sender, newname)

    def color(self, data):
        """Client changed its color."""
        sender = data["sender"]
        newcolor = data["newcolor"]
        self._server.updateColor(sender, newcolor)

    def quit(self, data):
        """One player has quit: clean up its connection and tell the others."""
        quitter = data['sender']
        self._server.processQuitter(quitter)  # ALE sept 2017
class TantrixServer(Server, object):
    """PodSixNet server for Tantrix: accepts connections, pairs ready players
    into games, and relays chat, moves and state to the clients.

    Every outgoing message carries action="clientListener" plus a "command"
    key that the client dispatches on.
    """
    channelClass = ClientChannel  #needed! tells PodSixNet which Channel subclass to create per client

    def __init__(self, *args, **kwargs):
        Server.__init__(self, *args, **kwargs)
        self.gameIndex = 0                           # incrementing counter used as game id
        self.allConnections = WaitingConnections()   # per-client bookkeeping

    def sendChatToWRoom(self, msgList):
        """Broadcast a chat message (list of strings) to the waiting room."""
        data = {"action": "clientListener", "command": "receiveChat", "msgList": msgList}
        self.sendToAllWRoom(data)

    def sendGameStarted(self, player1, gametype, player2 = None):
        """Announce that *player1* started *gametype* (optionally vs *player2*)."""
        data = {"action": "clientListener", "command": "hasstartedgame", "player1": player1, "gametype": gametype, 'player2': player2 }
        self.sendToAllWRoom(data)

    """def sendGameQuit(self, quitter, gametype):
    data = {"action": "clientListener", "command": "hasstartedgame", "player1": player1, "gametype": gametype, 'player2': player2 }
    self.sendToAllWRoom(data)\
    (quitter, gametype)
    """

    def sendPlayerToggledReady(self, player, ready):
        """Announce to the waiting room that *player* toggled ready to *ready*."""
        data = {"action": "clientListener", "command": "hastoggledready", "player": player, "ready": ready}
        self.sendToAllWRoom(data)

    def sendUpdateTreeview(self):
        """Push the current connection table to every waiting-room client."""
        listVal = self.allConnections.getAsList()
        data = {"action": "clientListener", "command": "updateTreeview", "listVal": listVal}
        self.sendToAllWRoom(data)

    def checkConnections(self):
        """Check if there are 2 connections ready; in that case start the game.

        Returns the list of ready indices when a game was started, else False.
        """
        #print("\n" + str(self.allConnections))
        """Check if at least two players are ready"""
        players_ready = 0
        ind_game = []
        for ind in range(self.allConnections.count()):
            if self.allConnections.ready[ind] == 1:
                players_ready += 1
                #tempind = ind
                ind_game.append(ind)
        #TODO: currently the first two players who are ready will start the game. add a confirmation popup?
        if players_ready < 2:
            return False
        else:
            self.sendStartgame(ind_game)
            return ind_game

    def sendStartgame(self, ind_game):
        """Initialize a game with the two players at indices *ind_game*."""
        self.gameIndex += 1
        game = Game(self.allConnections.players[ind_game[0]], self.gameIndex)
        """Add all players to game"""
        game.addPlayer(self.allConnections.players[ind_game[1]])
        """Start the game. Add game to both connections (self.allConnections.game), set ready = -1"""
        for ind in ind_game:
            self.allConnections.addGame(game, self.allConnections.addr[ind])
            self.allConnections.ready[ind] = -1
        self.doSendStartingGame(ind_game)

    def Connected(self, player, addr):
        """PodSixNet callback: a new client channel *player* connected from *addr*.

        Assigns a default name and color, registers the connection, confirms
        to the client, and refreshes everyone's Treeview.
        """
        print("\nReceiving a new connection: \nchannel = {},address = {}".format(player, addr))
        """Create or edit a game"""  #TODO move this once players in wroom confirm each other
        if not self.allConnections.game:
            self.gameIndex += 1
        #name = "Player" + str(addr[1])
        name = "Player" + str(self.allConnections.count() + 1)
        colors = ["red", "blue", "yellow", "green"]
        color = colors.pop(self.allConnections.count() % 4)
        self.allConnections.addConnection(player, addr, 0, name = name, color = color)
        """Send confirmation that client has connected. send back the client's address"""
        data = {"action": "clientListener", "command": "clientIsConnected", "addr": addr, "color": color, "yourname": name}
        self.sendToPlayer(player, data)
        """Send an update to Treeview"""
        self.sendUpdateTreeview()

    def sendToAllWRoom(self, data):
        """Send to all players that are in wroom (ready >= 0), i.e. not playing."""
        [self.sendToPlayer(self.allConnections.players[ind], data) for ind in range(self.allConnections.count()) if self.allConnections.ready[ind] >= 0]

    def sendToPlayer(self, player, data):
        """Send *data* to one client channel and echo what was sent to the terminal."""
        player.Send(data)
        """Print to terminal"""
        datacp = data.copy()  # copy so the log can pop keys without mutating data
        name = self.allConnections.getNameFromPlayer(player)
        datacp.pop('action')
        command = datacp.pop('command')
        print("\nSent to " + name + " for " + command + ": " + str(datacp))

    #TODO merge with clientIsConnected?
    def doSendStartingGame(self, ind_game):
        """Send each player in *ind_game* a 'startgame' message with its player
        number, the opponent's name/color, a shared RNG seed, and a
        'changecolor' flag when both players chose the same color."""
        playernames = [self.allConnections.name[j] for j in ind_game]
        playercolors = [self.allConnections.color[j] for j in ind_game]
        rndgen = random.Random()
        seed = rndgen.randint(0, 1000)  # shared seed so both clients generate the same deal
        for i, ind in enumerate(ind_game):
            """Get the opponent's name"""
            playernamescp = list(playernames)
            playercolorscp = list(playercolors)
            playernamescp.pop(i)  # drop own entry; the remaining one is the opponent (two players)
            playercolor = playercolorscp.pop(i)  # own color; remaining entry is the opponent's
            opponentname = playernamescp[0]
            opponentcolor = playercolorscp[0]
            """Send stargame"""
            data = {"action": "clientListener", "command": "startgame", "player_num": i+1,
                    "gameid": self.allConnections.game[ind].gameid, "changecolor": False,
                    "opponentname": opponentname, "opponentcolor": opponentcolor, "playerIsTabUp": i==0, "seed": seed}
            if playercolor == opponentcolor:
                data['changecolor'] = True  # this client must pick a different color
            # NOTE(review): clearing the game reference looks suspect --
            # placeMove later relies on getGameFromAddr; confirm intent.
            self.allConnections.game[ind] = None
            self.sendToPlayer(self.allConnections.players[ind], data)

    def placeMove(self, data, sender):
        """A move has been confirmed: store it in the sender's game."""
        game = self.allConnections.getGameFromAddr(sender)
        #data['turnUpDown'] = data['turnUpDown'] + 1
        game.placeLine(data, sender)

    def processQuitter(self, quitter):
        """Handle a client (addr tuple) that quit or timed out."""
        """Tell other players that one has quit. Must do it inside TantrixServer"""
        self.tellAllAboutQuitter(quitter)
        """Delete the quitter from allConnections"""
        self.allConnections.removeConnection(quitter)
        self.sendUpdateTreeview()
        # """Send message to wroom that one player has quit a game so that they update the logbox"""
        # self._server.sendGameQuit(quitter, gametype)

    def tellAllAboutQuitter(self, quitter):
        """Send a 'hasquit' message to every other player in the quitter's game."""
        ind = self.allConnections.addr.index(quitter)
        dataAll = {"action": "clientListener", "command": "hasquit", "quitter": quitter,
                   "quitterName": self.allConnections.name[ind]}
        for i in range(self.allConnections.count()):
            # a peer shares the quitter's Game object -> it must be informed
            if i != ind and self.allConnections.game[i] == self.allConnections.game[ind]:
                p = self.allConnections.players[i]
                a = self.allConnections.addr[i]  # NOTE(review): unused; kept for parity
                n = self.allConnections.name[i]
                print("\nSending to client {}:\n {}".format(n, str(dataAll)))
                p.Send(dataAll)

    def updateName(self, sender, newname):
        """Validate and apply a name change requested by *sender*; on failure
        the client is sent its current name back so its UI reverts."""
        index = self.allConnections.getIndexFromAddr(sender)
        """Check that name is valid"""
        def validName(newname):
            """A name is valid if it is not already taken and starts with a letter."""
            """Check that name is not already taken"""
            if newname in [conn for conn in self.allConnections.name]:
                return False
            """Check that newname begins with non-numeric character"""
            import re
            if re.match('^[a-zA-Z]+', newname) is None:
                return False
            return True
        """Send to clients new name if valid or old name"""
        if validName(newname):
            data = {"action": "clientListener", "command": "newname", "name": newname}
            self.sendToPlayer(self.allConnections.players[index], data)
            self.allConnections.name[index] = newname
            """Send update to all in Waiting Room"""
            self.sendUpdateTreeview()
        else:
            name = self.allConnections.getNameFromAddr(sender)
            data = {"action": "clientListener", "command": "newname", "name": name}
            self.sendToPlayer(self.allConnections.players[index], data)

    def updateColor(self, sender, newcolor):
        """Record *sender*'s new color and refresh the waiting-room Treeview."""
        index = self.allConnections.getIndexFromAddr(sender)
        self.allConnections.color[index] = newcolor
        """Send update to all in Waiting Room"""
        self.sendUpdateTreeview()

    def updateLastContact(self, sender):  #ALE sept 2017
        """Refresh the last-contact timestamp for *sender* (clients ping regularly)."""
        t = self.allConnections.setLastContact(sender)
        #print("\nTantrixServer.updateLastContact for %s at %f" % (str(sender),time))

    def retrieveLastContact(self, sender):  #ALE sept 2017
        """Print (for debugging) the time of last contact with *sender*."""
        lastContact = self.allConnections.getLastContact(sender)
        print("\nretrieveLastContact for %s: %f" % (str(sender), lastContact))

    def ping(self, index):  #ALE sept 2017
        """Ask the client at *index* to ping back, proving the link is alive."""
        data = {"action": "clientListener", "command": "pingclient"}
        self.sendToPlayer(self.allConnections.players[index], data)

    def checkContacts(self):  #ALE sept 2017
        """Periodically verify clients are reachable: >25s silence -> treat as
        quit; >10s -> log; >4s -> ping to prompt a reply."""
        #print("\nserver TantrixServer.checkContacts")
        t = time()
        last = self.allConnections.lastContact
        for ind, lc in enumerate(last):
            if t - lc > 25:
                """If a client is lost, delete it from the list of connections and notify all other clients"""
                print('TantrixServer.checkContacts: deleting client with index %i after %i seconds of no connection' % (ind, int(t - lc)))
                quitter = self.allConnections.addr[ind]
                # NOTE(review): processQuitter mutates allConnections while this
                # loop iterates its lastContact list -- confirm safe for
                # multiple simultaneous losses.
                self.processQuitter(quitter)
            elif t - lc > 10:
                print('Lost contact for %i seconds with index %i' % (int(t - lc), ind))
            elif t - lc > 4:
                print('Ping client with index %i' % ind)
                self.ping(ind)
class Game(object):
    """State for one two-player game: the players, whose turn it is, and an id."""

    def __init__(self, player, gameIndex):
        # whose turn it is (1 or 0)
        self.turn = 1
        # storage for confirmed moves (currently unused)
        self._confirmedgame = []
        # players in this game, including the one who started it
        self.players = []
        self.addPlayer(player)
        # unique id of this game
        self.gameid = gameIndex

    def __str__(self):
        return str(self.gameid)

    def addPlayer(self, player):
        """Add *player* unless it is None or already registered."""
        if player is not None and player not in self.players:
            self.players.append(player)
        else:
            print("Game.addPlayer failed: player is None or was already added")

    def placeLine(self, data, sender):
        """Record a confirmed move and forward it to the opponent(s).

        If the client's 'turnUpDown' disagrees with the server's turn counter,
        resynchronize; then flip the turn and relay *data* to every other
        player in the sender's game.
        """
        turnUpDown = data['turnUpDown']
        # BUG FIX: compare ints by value (!=), not identity (`is not`) --
        # identity comparison of ints is a CPython caching artifact.
        if self.turn != turnUpDown:
            print(" \n\n>>>>>>>>>>placeLine: self.turn is not data['turnUpDown']: {}~={}".format(self.turn, turnUpDown))
            self.turn += 1
            data['turnUpDown'] = self.turn
            print(" \n\n>>>>>>>>self.turnUpDown=" + str(self.turn) + "\n\n")
        if 1 or sender == self.turn + 1:  #TODO actually validate it is the sender's turn
            self.turn = 0 if self.turn else 1
        # relay the move to the other player(s)
        #TODO mv everythiong to TantrixServer -- relies on the module-global tantrixServer
        opponents = tantrixServer.allConnections.getOpponentsFromAddress(sender)
        for o in opponents:
            print("\nSending to other player:\n " + str(data))
            o.Send(data)
class WaitingConnections(object):
    """Parallel-list bookkeeping of every connected client.

    Index *i* across players/addr/game/ready/name/color/lastContact always
    refers to the same connection.  ready semantics: 0 = idle, 1 = ready,
    -1 = playing a two-player game, -2 = playing solitaire.
    """

    def __init__(self):
        """Initialize the (empty) parallel lists."""
        self.players = []      # PodSixNet channel objects
        self.addr = []         # (host, port) tuples, used as client keys
        self.game = []         # Game instance or None
        self.ready = []        # see class docstring
        self.name = []         # display names
        self.color = []        # display colors
        self.lastContact = []  # time() of last contact, ALE sept 2017

    def getAsList(self):
        """Return the connections as rows for the Treeview in wroom, e.g.
        [['Alessandro', 0, 43932, 'None', 'red'], ['Mararie', -1, 2, '1', 'yellow'], ...]"""
        return [[self.name[ind], self.ready[ind], self.addr[ind][1], str(self.game[ind]), self.color[ind]]
                for ind in range(self.count())]

    def addConnection(self, player, addr, ready=0, game=None, name="unknown", color="cyan"):
        """Register a new client; stamps its last-contact time with now."""
        self.players.append(player)
        self.addr.append(addr)
        self.ready.append(ready)
        self.game.append(game)
        self.name.append(name)
        self.color.append(color)
        self.lastContact.append(time())

    def addGame(self, game, addr):
        """Attach *game* to the connection identified by *addr*."""
        ind = self.addr.index(addr)
        self.game[ind] = game

    def removeConnection(self, addr):
        """Drop every record of the connection identified by *addr*."""
        ind = self.addr.index(addr)
        self.addr.pop(ind)
        self.players.pop(ind)
        self.game.pop(ind)
        self.ready.pop(ind)
        self.name.pop(ind)
        self.color.pop(ind)
        self.lastContact.pop(ind)  # ALE sept 2017

    def count(self):
        """Number of tracked connections."""
        return len(self.players)

    def getIndexFromAddr(self, addr):
        return self.addr.index(addr)

    def getNameFromAddr(self, addr):
        ind = self.getIndexFromAddr(addr)
        return self.name[ind]

    def getGameFromPlayer(self, player):
        ind = self.players.index(player)
        return self.game[ind]

    def getNameFromPlayer(self, player):
        ind = self.players.index(player)
        return self.name[ind]

    def getGameFromAddr(self, addr):
        ind = self.addr.index(addr)
        return self.game[ind]

    def getPlayerFromAddr(self, addr):
        ind = self.addr.index(addr)
        return self.players[ind]

    def getColorFromAddr(self, addr):
        ind = self.addr.index(addr)
        return self.color[ind]

    def getOpponentsFromAddress(self, addr):
        """Given a player's address, return the other players in its game."""
        game = self.getGameFromAddr(addr)
        ind_sender = self.getIndexFromAddr(addr)
        opponents = []
        for ind in range(self.count()):
            if ind != ind_sender and self.game[ind] == game:
                opponents.append(self.players[ind])
        return opponents

    def getLastContact(self, addr):  # ALE sept 2017
        ind = self.addr.index(addr)
        return self.lastContact[ind]

    def setLastContact(self, addr):  # ALE sept 2017
        """Stamp the connection's last-contact time with now; return that time."""
        ind = self.addr.index(addr)
        t = time()
        self.lastContact[ind] = t
        return t

    def toggleReadyFromAddr(self, addr):
        """Toggle the ready flag for *addr*; return the new 'ready' status."""
        try:
            ind = self.addr.index(addr)
        except ValueError:  # narrowed from a bare except: index() raises ValueError
            import inspect
            print("Unexpected error at :", inspect.stack()[0][3])
            print("addr=" + str(addr) + " is not contained in self.addr=" + str(self.addr))
            raise
        self.ready[ind] = (self.ready[ind] + 1) % 2
        return self.ready[ind]

    def __str__(self):
        string = "Connections:\n<======================"
        string += "\nname, ready, addr, players, game, color, last contact:\n"
        for ind in range(self.count()):
            # BUG FIX: the format string previously had only 5 placeholders for
            # 7 arguments, silently dropping color and lastContact.
            string += "{}, {}, {}, {}, {}, {}, {}\n".format(
                str(self.name[ind]),
                str(self.ready[ind]),
                str(self.addr[ind]),
                str(self.players[ind]),
                str(self.game[ind]),
                self.color[ind],
                self.lastContact[ind])
        string += "======================>\n"
        return string
def launch():
    """Parse host:port from argv (default localhost:31425) and run the server loop."""
    # BUG FIX: Game.placeLine reads the module-global `tantrixServer`; the
    # original assigned it as a local of launch(), so that lookup would fail.
    global tantrixServer
    if len(sys.argv) != 2:
        print("Usage:", sys.argv[0], "host:port")
        print(" e.g.", sys.argv[0], "localhost:31425")
        # launch anyway with defaults
        host = "localhost"
        port = 31425
        print("Launching with host, port = %s , %d" % (host, port))
    else:
        host, port = sys.argv[1].rsplit(":", 1)
    print("STARTING SERVER ON LOCALHOST")
    try:
        tantrixServer = TantrixServer(localaddr=(host, int(port)))  # e.g. 'localhost', 1337
    except Exception:  # narrowed from a bare except: don't swallow KeyboardInterrupt/SystemExit
        print("Cannot start server. Address '" + host + "' already in use on port " + str(port))
        return
    while True:
        t = time()
        # roughly every 4 seconds (t*100 divisible by 400) check client liveness
        if len(tantrixServer.allConnections.players) and not (int(t * 100) % 400):  #ALE sept 2017
            tantrixServer.checkContacts()
        tantrixServer.Pump()
        sleep(0.01)


if __name__ == "__main__":
    launch()
|
en
| 0.810003
|
#sys.path.insert(0, '/home/kinkyboy/tantrix/PodSixNet') #sys.path.append('./PodSixNet') #import PodSixNet.Server and PodSixNet.Channel #Ale 09/2017 Receive messages from client. NB: self._server refers to tantrixServer ie the instance of TantrixServer # ALE Print the remaining connections #print("\n" + str(self._server.allConnections)) #self._server.sendUpdateTreeview() # Ale - also added stuff in last class # print("\n" + str(alive)) Mark players who are going solitaire in allConnections Send the updated connection Send message to wroom that players have started a game so that they update the logbox #print("\nReceiving in server.ClientChannel.Network_toggleReady() from player {}:\n {}".format(str(addr), str(data))) Send the updated connection #"""Print the remaining connections""" #print("\n" + str(self._server.allConnections)) Send message to wroom that player has toggled ready so that they update the logbox Send to wroom that players have started a game #deconsolidate all of the data from the dictionary #rowcolnum = data["rowcolnum"] not used #player number (1 or 0) #tells server to place line Name changed Color changed One player has quit #ALE sept 2017 #ind = self._server.allConnections.getIndexFromAddr(quitter) #gametype = self._server.allConnections.ready[ind] """Tell other players that one has quit. Must do it inside TantrixServer""" self._server.tellAllAboutQuitter(quitter) """Delete the quitter from allConnections""" self._server.allConnections.removeConnection(quitter) self._server.sendUpdateTreeview() #"""Send message to wroom that one player has quit a game so that they update the logbox""" #self._server.sendGameQuit(quitter, gametype) Send message to clients #needed! def sendGameQuit(self, quitter, gametype): data = {"action": "clientListener", "command": "hasstartedgame", "player1": player1, "gametype": gametype, 'player2': player2 } self.sendToAllWRoom(data)\ (quitter, gametype) Check if there are 2 connection ready. 
In that case start the games #print("\n" + str(self.allConnections)) Check if at least two players are ready #tempind = ind #TODO: currently the first two players who are ready will start the game. add a confirmation popup? Initialize a game with two players Add all players to game Start the game. Add game to both connections (self.allConnections.game), set ready = -1 self.game contains the array .players Create or edit a game #TODO move this once players in wroom confirm each other #name = "Player" + str(addr[1]) Send confirmation that client has connected. send back the client's address Send an update to Treeview Send to all players that are in wroom, ie are not playing Print to terminal #so that I can edit it #TODO merge with clientIsConnected? Get the opponent's name #because there are two players #because there are two players Send stargame A move has been confirmed. Store it #data['turnUpDown'] = data['turnUpDown'] + 1 Server has received signal of a quitting client Tell other players that one has quit. Must do it inside TantrixServer Delete the quitter from allConnections # """Send message to wroom that one player has quit a game so that they update the logbox""" # self._server.sendGameQuit(quitter, gametype) #quitter = data["sender"] Edit name stored in allConnection Check that name is valid Check that name has an allowed format Check that name is not already taken Check that newname begins with non-numeric character Send to clients new name if valid or old name Send update to all in Waiting Room Edit color stored in allConnection Send update to all in Waiting Room #ALE sept 2017 Client regularly pings server. When that happens the server updates the time of last contact with the client #print("\nTantrixServer.updateLastContact for %s at %f" % (str(sender),time)) #ALE sept 2017 Get the time of last contact with a client #ALE sept 2017 Ping a client. 
The client should respond by pinging back so that connection is established #ALE sept 2017 Server regulary checks that clients are connected. #print("\nserver TantrixServer.checkContacts") If a client is lost, delete it from the list of connections and notify all other clients # whose turnUpDown (1 or 0) #Storage #initialize the players including the one who started the game #gameid of game A move has been confirmed #### #TODO #place line in game #?? NEEDED? self._confirmedgame.append(rowcolnum) #send data and turnUpDown to the opponent #TODO mv everythiong to TantrixServer Initialize the players #ALE sept 2017 Return the connections as list for Treeview in wroom eg: [('Alessandro', 0, 43932, None, 'red'),('Mararie', -1, 2, 1, 'yellow'), ..] #ALE sept 2017 Given a player, return a list of players in the game #return [x for i, x in enumerate(self.players) if x == player and self.addr[i] is not addr] # ALE sept 2017 # ALE sept 2017 Toggle ready flag for a certain address. return the 'ready' status Get command line argument of server, port #Launch anyway #'localhost', 1337 #raise #ALE sept 2017
| 2.364068
| 2
|
funnel/views/section.py
|
jace/goafunnel
| 0
|
6628900
|
<reponame>jace/goafunnel<filename>funnel/views/section.py
# -*- coding: utf-8 -*-
from flask import render_template, redirect, request, flash
from coaster.views import load_models, load_model
from baseframe import _
from .. import app, lastuser
from ..models import db, ProposalSpace, ProposalSpaceSection
from ..forms import SectionForm, ConfirmDeleteForm
def section_data(section):
    """Return a JSON-serializable summary dict for *section*."""
    return dict(
        name=section.name,
        title=section.title,
        description=section.description,
        url=None,
        json_url=None,
    )
@app.route('/<space>/sections')
@lastuser.requires_login
@load_model(ProposalSpace, {'name': 'space'}, 'space',
    permission=('view-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_list(space):
    """List all sections of a proposal space.

    Requires login and the 'view-section' or 'siteadmin' permission
    (enforced by the decorators above).
    """
    sections = ProposalSpaceSection.query.filter_by(proposal_space=space).all()
    return render_template('sections.html', space=space, sections=sections,
        breadcrumbs=[
            (space.url_for(), space.title),
            (space.url_for('sections'), _("Sections"))])
@app.route('/<space>/sections/<section>')
@lastuser.requires_login
@load_models(
(ProposalSpace, {'name': 'space'}, 'space'),
(ProposalSpaceSection, {'name': 'section', 'proposal_space': 'space'}, 'section'),
permission=('view-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_view(space, section):
return render_template('section.html', space=space, section=section,
breadcrumbs=[
(space.url_for(), space.title),
(space.url_for('sections'), _("Sections")),
(section.url_for(), section.title)])
@app.route('/<space>/sections/new', methods=['GET', 'POST'])
@lastuser.requires_login
@load_model(ProposalSpace, {'name': 'space'}, 'space',
permission=('new-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_new(space):
form = SectionForm(model=ProposalSpaceSection, parent=space)
if form.validate_on_submit():
section = ProposalSpaceSection(proposal_space=space)
form.populate_obj(section)
db.session.add(section)
db.session.commit()
flash(_("Your new section has been added"), 'info')
return redirect(space.url_for(), code=303)
return render_template('baseframe/autoform.html', form=form, title=_("New section"), submit=_("Create section"),
breadcrumbs=[(space.url_for(), space.title), (space.url_for('sections'), _("Sections"))])
@app.route('/<space>/sections/<section>/edit', methods=['GET', 'POST'])
@lastuser.requires_login
@load_models(
(ProposalSpace, {'name': 'space'}, 'space'),
(ProposalSpaceSection, {'name': 'section', 'proposal_space': 'space'}, 'section'),
permission=('edit-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_edit(space, section):
form = SectionForm(obj=section, model=ProposalSpaceSection, parent=space)
if form.validate_on_submit():
form.populate_obj(section)
db.session.commit()
flash(_("Your section has been edited"), 'info')
return redirect(space.url_for(), code=303)
return render_template('baseframe/autoform.html', form=form, title=_("Edit section"), submit=_("Save changes"),
breadcrumbs=[
(space.url_for(), space.title),
(space.url_for('sections'), _("Sections")),
(section.url_for(), section.title)])
@app.route('/<space>/sections/<section>/delete', methods=['GET', 'POST'])
@lastuser.requires_login
@load_models(
(ProposalSpace, {'name': 'space'}, 'space'),
(ProposalSpaceSection, {'name': 'section', 'proposal_space': 'space'}, 'section'),
permission=('delete-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_delete(space, section):
form = ConfirmDeleteForm()
if form.validate_on_submit():
if 'delete' in request.form:
db.session.delete(section)
db.session.commit()
flash(_("Your section has been deleted"), 'info')
return redirect(space.url_for(), code=303)
return render_template('delete.html', form=form, title=_(u"Confirm delete"),
message=_(u"Do you really wish to delete section ‘{title}’?").format(title=section.title),
breadcrumbs=[
(space.url_for(), space.title),
(space.url_for('sections'), _("Sections")),
(section.url_for(), section.title)])
|
# -*- coding: utf-8 -*-
from flask import render_template, redirect, request, flash
from coaster.views import load_models, load_model
from baseframe import _
from .. import app, lastuser
from ..models import db, ProposalSpace, ProposalSpaceSection
from ..forms import SectionForm, ConfirmDeleteForm
def section_data(section):
    """Return a plain-dict summary of *section*; url fields are reserved slots."""
    return dict(
        name=section.name,
        title=section.title,
        description=section.description,
        url=None,
        json_url=None,
    )
@app.route('/<space>/sections')
@lastuser.requires_login
@load_model(ProposalSpace, {'name': 'space'}, 'space',
    permission=('view-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_list(space):
    """Render the list of sections belonging to a proposal space."""
    sections = ProposalSpaceSection.query.filter_by(proposal_space=space).all()
    return render_template('sections.html', space=space, sections=sections,
        breadcrumbs=[
            (space.url_for(), space.title),
            (space.url_for('sections'), _("Sections"))])
@app.route('/<space>/sections/<section>')
@lastuser.requires_login
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (ProposalSpaceSection, {'name': 'section', 'proposal_space': 'space'}, 'section'),
    permission=('view-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_view(space, section):
    """Render a single section's detail page."""
    return render_template('section.html', space=space, section=section,
        breadcrumbs=[
            (space.url_for(), space.title),
            (space.url_for('sections'), _("Sections")),
            (section.url_for(), section.title)])
@app.route('/<space>/sections/new', methods=['GET', 'POST'])
@lastuser.requires_login
@load_model(ProposalSpace, {'name': 'space'}, 'space',
    permission=('new-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_new(space):
    """Create a new section in a proposal space (form on GET, save on POST)."""
    form = SectionForm(model=ProposalSpaceSection, parent=space)
    if form.validate_on_submit():
        section = ProposalSpaceSection(proposal_space=space)
        form.populate_obj(section)
        db.session.add(section)
        db.session.commit()
        flash(_("Your new section has been added"), 'info')
        # 303 forces a GET after the POST so a refresh cannot resubmit.
        return redirect(space.url_for(), code=303)
    return render_template('baseframe/autoform.html', form=form, title=_("New section"), submit=_("Create section"),
        breadcrumbs=[(space.url_for(), space.title), (space.url_for('sections'), _("Sections"))])
@app.route('/<space>/sections/<section>/edit', methods=['GET', 'POST'])
@lastuser.requires_login
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (ProposalSpaceSection, {'name': 'section', 'proposal_space': 'space'}, 'section'),
    permission=('edit-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_edit(space, section):
    """Edit an existing section; the form is pre-filled from the model."""
    form = SectionForm(obj=section, model=ProposalSpaceSection, parent=space)
    if form.validate_on_submit():
        form.populate_obj(section)
        db.session.commit()
        flash(_("Your section has been edited"), 'info')
        return redirect(space.url_for(), code=303)
    return render_template('baseframe/autoform.html', form=form, title=_("Edit section"), submit=_("Save changes"),
        breadcrumbs=[
            (space.url_for(), space.title),
            (space.url_for('sections'), _("Sections")),
            (section.url_for(), section.title)])
@app.route('/<space>/sections/<section>/delete', methods=['GET', 'POST'])
@lastuser.requires_login
@load_models(
    (ProposalSpace, {'name': 'space'}, 'space'),
    (ProposalSpaceSection, {'name': 'section', 'proposal_space': 'space'}, 'section'),
    permission=('delete-section', 'siteadmin'), addlperms=lastuser.permissions)
def section_delete(space, section):
    """Delete a section after explicit confirmation.

    GET shows the confirmation page; deletion happens only on a validated
    POST whose form data contains 'delete'.
    """
    form = ConfirmDeleteForm()
    if form.validate_on_submit():
        if 'delete' in request.form:
            db.session.delete(section)
            db.session.commit()
            flash(_("Your section has been deleted"), 'info')
        return redirect(space.url_for(), code=303)
    return render_template('delete.html', form=form, title=_(u"Confirm delete"),
        message=_(u"Do you really wish to delete section ‘{title}’?").format(title=section.title),
        breadcrumbs=[
            (space.url_for(), space.title),
            (space.url_for('sections'), _("Sections")),
            (section.url_for(), section.title)])
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.088415
| 2
|
gpio_multifunction.py
|
meigrafd/Sample-Code
| 10
|
6628901
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# http://www.forum-raspberrypi.de/Thread-python-taster-mit-mehreren-funktionen?pid=134513#pid134513
#
# v0.1 by meigrafd
#
# https://github.com/SoCo/SoCo
#
import sys
from time import sleep, time
import RPi.GPIO as GPIO
import signal
from soco import SoCo
#------------------------------------------------------------------------
# IP address of the Sonos zone player controlled by the buttons.
HOST = '192.168.178.83'
#GPIO pins
Play = 11
Pause = 12
Lauter = 16  # volume up
Leiser = 18  # volume down
Next = 13
Prev = 15
#special function time (in sec) for Play pin to switch playlist
SpecialTime = 1
# only one of following:
PULL = GPIO.PUD_DOWN #GPIO -> GND
#PULL = GPIO.PUD_UP #GPIO -> 3V3
# to use RaspberryPi pin numbers
GPIO.setmode(GPIO.BOARD)
#------------------------------------------------------------------------
# set up GPIO input channels
GPIO.setup(Play, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Pause, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Lauter, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Leiser, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Next, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Prev, GPIO.IN, pull_up_down = PULL)
#------------------------------------------------------------------------
# BUG FIX: the original did `globalVar['timeTrigger'] = {}` without ever
# creating `globalVar`, which raises NameError on startup. Create the dict
# here; 'timeTrigger' holds the Play-button press timestamp/duration and is
# re-initialised to 0 in the main block below.
globalVar = {'timeTrigger': 0}
def Interrupt_event(pin):
    """GPIO edge-detect callback dispatching button presses to the Sonos client.

    Play is registered on BOTH edges: the press timestamp is stored on the
    rising edge, the press duration is computed on the falling edge, and a
    hold of at least SpecialTime seconds triggers the 'special' action.

    NOTE(review): every pin except Play is dispatched by re-reading the
    current input levels instead of comparing *pin*, so if two inputs read
    high simultaneously the first matching elif wins -- confirm intended.
    """
    # only for debug:
    if GPIO.input(pin) == GPIO.HIGH:
        print "rising edge on %s" % pin
    elif GPIO.input(pin) == GPIO.LOW:
        print "falling edge on %s" % pin
    if pin == Play:
        if GPIO.input(Play) == GPIO.HIGH:
            # rising edge: remember when the button went down
            globalVar['timeTrigger'] = time()
        elif GPIO.input(Play) == GPIO.LOW:
            # falling edge: timeTrigger now holds the press duration
            globalVar['timeTrigger'] = time() - globalVar['timeTrigger']
            if globalVar['timeTrigger'] >= SpecialTime:
                #special function: play another playlist
                print "special play"
            else: #normal play
                print "normal play"
                client.volume = 10
                client.play()
    elif GPIO.input(Pause) == GPIO.HIGH:
        client.pause()
    elif GPIO.input(Lauter) == GPIO.HIGH:
        client.volume += 2
    elif GPIO.input(Leiser) == GPIO.HIGH:
        client.volume -=2
    elif GPIO.input(Next) == GPIO.HIGH:
        client.next()
    elif GPIO.input(Prev) == GPIO.HIGH:
        client.previous()
    else:
        print "ERROR! Unknown GPIO pin triggered: %s" % pin
#------------------------------------------------------------------------
try:
    # Play watches BOTH edges so the press duration can be measured; the
    # other buttons only need the rising (press) edge. bouncetime is the
    # software debounce window in milliseconds.
    GPIO.add_event_detect(Play, GPIO.BOTH, callback=Interrupt_event, bouncetime=100)
    GPIO.add_event_detect(Pause, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Lauter, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Leiser, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Next, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Prev, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    globalVar['timeTrigger'] = 0  # reset press-timer state before events fire
    client = SoCo(HOST)  # Sonos controller used by the callbacks
    #keep script running
    signal.pause()
except (KeyboardInterrupt, SystemExit):
    GPIO.cleanup()  # release GPIO channels on Ctrl-C / exit
    print "\nQuit\n"
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# http://www.forum-raspberrypi.de/Thread-python-taster-mit-mehreren-funktionen?pid=134513#pid134513
#
# v0.1 by meigrafd
#
# https://github.com/SoCo/SoCo
#
import sys
from time import sleep, time
import RPi.GPIO as GPIO
import signal
from soco import SoCo
#------------------------------------------------------------------------
# IP address of the Sonos zone player controlled by the buttons.
HOST = '192.168.178.83'
#GPIO pins
Play = 11
Pause = 12
Lauter = 16  # volume up
Leiser = 18  # volume down
Next = 13
Prev = 15
#special function time (in sec) for Play pin to switch playlist
SpecialTime = 1
# only one of following:
PULL = GPIO.PUD_DOWN #GPIO -> GND
#PULL = GPIO.PUD_UP #GPIO -> 3V3
# to use RaspberryPi pin numbers
GPIO.setmode(GPIO.BOARD)
#------------------------------------------------------------------------
# set up GPIO input channels
GPIO.setup(Play, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Pause, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Lauter, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Leiser, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Next, GPIO.IN, pull_up_down = PULL)
GPIO.setup(Prev, GPIO.IN, pull_up_down = PULL)
#------------------------------------------------------------------------
# BUG FIX: the original did `globalVar['timeTrigger'] = {}` without ever
# creating `globalVar`, which raises NameError on startup. Create the dict
# here; 'timeTrigger' holds the Play-button press timestamp/duration and is
# re-initialised to 0 in the main block below.
globalVar = {'timeTrigger': 0}
def Interrupt_event(pin):
    """GPIO edge-detect callback dispatching button presses to the Sonos client.

    Play is registered on BOTH edges: the press timestamp is stored on the
    rising edge, the press duration is computed on the falling edge, and a
    hold of at least SpecialTime seconds triggers the 'special' action.

    NOTE(review): every pin except Play is dispatched by re-reading the
    current input levels instead of comparing *pin*, so if two inputs read
    high simultaneously the first matching elif wins -- confirm intended.
    """
    # only for debug:
    if GPIO.input(pin) == GPIO.HIGH:
        print "rising edge on %s" % pin
    elif GPIO.input(pin) == GPIO.LOW:
        print "falling edge on %s" % pin
    if pin == Play:
        if GPIO.input(Play) == GPIO.HIGH:
            # rising edge: remember when the button went down
            globalVar['timeTrigger'] = time()
        elif GPIO.input(Play) == GPIO.LOW:
            # falling edge: timeTrigger now holds the press duration
            globalVar['timeTrigger'] = time() - globalVar['timeTrigger']
            if globalVar['timeTrigger'] >= SpecialTime:
                #special function: play another playlist
                print "special play"
            else: #normal play
                print "normal play"
                client.volume = 10
                client.play()
    elif GPIO.input(Pause) == GPIO.HIGH:
        client.pause()
    elif GPIO.input(Lauter) == GPIO.HIGH:
        client.volume += 2
    elif GPIO.input(Leiser) == GPIO.HIGH:
        client.volume -=2
    elif GPIO.input(Next) == GPIO.HIGH:
        client.next()
    elif GPIO.input(Prev) == GPIO.HIGH:
        client.previous()
    else:
        print "ERROR! Unknown GPIO pin triggered: %s" % pin
#------------------------------------------------------------------------
try:
    # Play watches BOTH edges so the press duration can be measured; the
    # other buttons only need the rising (press) edge. bouncetime is the
    # software debounce window in milliseconds.
    GPIO.add_event_detect(Play, GPIO.BOTH, callback=Interrupt_event, bouncetime=100)
    GPIO.add_event_detect(Pause, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Lauter, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Leiser, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Next, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    GPIO.add_event_detect(Prev, GPIO.RISING, callback=Interrupt_event, bouncetime=150)
    globalVar['timeTrigger'] = 0  # reset press-timer state before events fire
    client = SoCo(HOST)  # Sonos controller used by the callbacks
    #keep script running
    signal.pause()
except (KeyboardInterrupt, SystemExit):
    GPIO.cleanup()  # release GPIO channels on Ctrl-C / exit
    print "\nQuit\n"
|
en
| 0.337975
|
#!/usr/bin/python # -*- coding: utf-8 -*- # # http://www.forum-raspberrypi.de/Thread-python-taster-mit-mehreren-funktionen?pid=134513#pid134513 # # v0.1 by meigrafd # # https://github.com/SoCo/SoCo # #------------------------------------------------------------------------ #GPIO pins #special function time (in sec) for Play pin to switch playlist # only one of following: #GPIO -> GND #PULL = GPIO.PUD_UP #GPIO -> 3V3 # to use RaspberryPi pin numbers #------------------------------------------------------------------------ # set up GPIO input channels #------------------------------------------------------------------------ # only for debug: #special function: play another playlist #normal play #------------------------------------------------------------------------ #keep script running
| 2.389145
| 2
|
python/photographic_mc_U_Net_like/photographic_continue_train.py
|
billy000400/Mu2e_MLTracking
| 0
|
6628902
|
<reponame>billy000400/Mu2e_MLTracking
import sys
from pathlib import Path
import csv
import random
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, initializers, regularizers
from tensorflow.keras.layers import(
Input,
Dense,
Conv2D,
BatchNormalization,
MaxPool2D,Dropout,
Flatten,
TimeDistributed,
Embedding,
Reshape,
Softmax
)
from tensorflow.keras.optimizers import Adam
import unet
# Make the project's util/ directory importable before the local imports below.
util_dir = Path.cwd().parent.joinpath('util')
sys.path.insert(1, str(util_dir))
from Config import extractor_config as Config
from mu2e_output import *
from loss import unmasked_cce
from metric import *
# NOTE(review): tf.keras has no `load_model` attribute -- the API is
# tf.keras.models.load_model(filepath), and it requires the path of a saved
# model. As written this raises AttributeError before any training resumes.
# Restore the checkpoint path (presumably held in Config -- confirm) and pass
# custom_objects={'unmasked_cce': unmasked_cce} so the custom loss resolves.
model = tf.keras.load_model()
|
import sys
from pathlib import Path
import csv
import random
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, initializers, regularizers
from tensorflow.keras.layers import(
Input,
Dense,
Conv2D,
BatchNormalization,
MaxPool2D,Dropout,
Flatten,
TimeDistributed,
Embedding,
Reshape,
Softmax
)
from tensorflow.keras.optimizers import Adam
import unet
# Make the project's util/ directory importable before the local imports below.
util_dir = Path.cwd().parent.joinpath('util')
sys.path.insert(1, str(util_dir))
from Config import extractor_config as Config
from mu2e_output import *
from loss import unmasked_cce
from metric import *
# NOTE(review): tf.keras has no `load_model` attribute -- the API is
# tf.keras.models.load_model(filepath), and it requires the path of a saved
# model. As written this raises AttributeError before any training resumes.
# Restore the checkpoint path (presumably held in Config -- confirm) and pass
# custom_objects={'unmasked_cce': unmasked_cce} so the custom loss resolves.
model = tf.keras.load_model()
|
none
| 1
| 1.600754
| 2
|
|
convert_to_threejs.py
|
mika-co/convert-to-threejs
| 7
|
6628903
|
# @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
# #####################################################
# Globals
# #####################################################
option_triangulate = True       # convert NURBS/patch geometry to triangle meshes
option_textures = True          # export texture definitions and material bindings
option_prefix = True            # prefix generated names ("Object_", "Texture_", ...)
option_geometry = False         # not referenced in this portion of the file
option_default_camera = False   # not referenced in this portion of the file
option_default_light = False    # not referenced in this portion of the file
converter = None                # FbxGeometryConverter -- set up by the caller, TODO confirm
# #####################################################
# Templates
# #####################################################
def Vector2String(v):
    """Render the first two components of *v* as '[ x, y ]'."""
    return '[ ' + ', '.join('%g' % v[i] for i in range(2)) + ' ]'
def Vector3String(v):
    """Render the first three components of *v* as '[ x, y, z ]'."""
    return '[ ' + ', '.join('%g' % v[i] for i in range(3)) + ' ]'
def ColorString(c):
    """Render an RGB triple as '[ r, g, b ]'."""
    return '[ ' + ', '.join('%g' % c[i] for i in range(3)) + ' ]'
def LabelString(s):
    """Wrap *s* in double quotes for JSON-style output."""
    return '"{0}"'.format(s)
def ArrayString(s):
    """Wrap an already-joined element string in '[ ... ]'."""
    return '[ {0} ]'.format(s)
def PaddingString(n):
    """Return *n* tab characters (empty string for n <= 0)."""
    return "\t" * n
def BoolString(value):
    """Render a truthy value as the JSON literal 'true'/'false'."""
    return "true" if value else "false"
# #####################################################
# Helpers
# #####################################################
def getObjectName(o):
    """Name for a scene node, optionally prefixed with 'Object_'."""
    if not o:
        return ""
    return ("Object_" if option_prefix else "") + o.GetName()
def getGeometryName(g):
    """Name for a geometry, optionally prefixed with 'Geometry_'."""
    return ("Geometry_" if option_prefix else "") + g.GetName()
def getEmbedName(e):
    """Name for an embedded resource, optionally prefixed with 'Embed_'."""
    return ("Embed_" if option_prefix else "") + e.GetName()
def getMaterialName(m):
    """Name for a material, optionally prefixed with 'Material_'."""
    return ("Material_" if option_prefix else "") + m.GetName()
def getTextureName(t):
    """Name for a texture, derived from its file name (sans extension)."""
    base = os.path.basename(t.GetFileName())
    texture_id = os.path.splitext(base)[0]
    return ("Texture_" if option_prefix else "") + texture_id
def getFogName(f):
    """Name for a fog object, optionally prefixed with 'Fog_'."""
    return ("Fog_" if option_prefix else "") + f.GetName()
def getObjectVisible(n):
    """Visibility flag for exported objects; currently always 'true'."""
    return BoolString(True)
def getRadians(v):
    """Convert the first three components of *v* from degrees to radians."""
    return tuple((v[i] * math.pi) / 180 for i in range(3))
def getHex(c):
    """Pack an RGB triple of 0..1 floats into a single 0xRRGGBB integer."""
    r = int(c[0] * 255)
    g = int(c[1] * 255)
    b = int(c[2] * 255)
    return (r << 16) + (g << 8) + b
def generateMultiLineString(lines, separator, padding):
    """Join *lines* with *separator*, left-padding each line with *padding* tabs."""
    padded = [PaddingString(padding) + entry for entry in lines]
    return separator.join(padded)
# #####################################################
# Generate - Triangles
# #####################################################
def triangulate_node_hierarchy(node):
    """Recursively triangulate every mesh/NURBS/patch attribute under *node*.

    Relies on FBX SDK names (FbxNodeAttribute) and the module-level
    ``converter`` being initialized by the caller -- neither appears in this
    file's visible imports; presumably provided by an FBX SDK star-import
    elsewhere in the file (TODO confirm).
    """
    node_attribute = node.GetNodeAttribute();
    if node_attribute:
        # Only surface-type attributes can be triangulated in place.
        if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
           node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
           node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
           node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
            converter.TriangulateInPlace(node);
    child_count = node.GetChildCount()
    for i in range(child_count):
        triangulate_node_hierarchy(node.GetChild(i))
def triangulate_scene(scene):
    """Triangulate every node hierarchy reachable from the scene root."""
    node = scene.GetRootNode()
    if node:
        for i in range(node.GetChildCount()):
            triangulate_node_hierarchy(node.GetChild(i))
# #####################################################
# Generate - Material String
# #####################################################
def generate_texture_bindings(material_property, texture_list):
    """Append three.js texture-binding lines for one FBX material property.

    Handles both layered textures and plain textures attached to the
    property; each binding maps the FBX channel name to the corresponding
    three.js material parameter.
    """
    # FBX property/channel name -> three.js material parameter name.
    binding_types = {
        "DiffuseColor": "map", "DiffuseFactor": "diffuseFactor", "EmissiveColor": "emissiveMap",
        "EmissiveFactor": "emissiveFactor", "AmbientColor": "ambientMap", "AmbientFactor": "ambientFactor",
        "SpecularColor": "specularMap", "SpecularFactor": "specularFactor", "ShininessExponent": "shininessExponent",
        "NormalMap": "normalMap", "Bump": "bumpMap", "TransparentColor": "transparentMap",
        "TransparencyFactor": "transparentFactor", "ReflectionColor": "reflectionMap",
        "ReflectionFactor": "reflectionFactor", "DisplacementColor": "displacementMap",
        "VectorDisplacementColor": "vectorDisplacementMap"
    }
    if material_property.IsValid():
        #Here we have to check if it's layeredtextures, or just textures:
        layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
        if layered_texture_count > 0:
            for j in range(layered_texture_count):
                layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
                texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
                for k in range(texture_count):
                    texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
                    if texture:
                        texture_id = getTextureName(texture)
                        texture_binding = '		"%s": "%s",' % (binding_types[str(material_property.GetName())], texture_id)
                        texture_list.append(texture_binding)
        else:
            # no layered texture simply get on the property
            texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
            for j in range(texture_count):
                texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
                if texture:
                    texture_id = getTextureName(texture)
                    texture_binding = '		"%s": "%s",' % (binding_types[str(material_property.GetName())], texture_id)
                    texture_list.append(texture_binding)
def generate_material_string(material):
    """Emit one three.js material definition block for an FBX surface material.

    Lambert and Phong surfaces are supported; hardware (HLSL/CGFX) shader
    materials and unknown classes are skipped with a console message and an
    empty string result.
    """
    #Get the implementation to see if it's a hardware shader.
    implementation = GetImplementation(material, "ImplementationHLSL")
    implementation_type = "HLSL"
    if not implementation:
        implementation = GetImplementation(material, "ImplementationCGFX")
        implementation_type = "CGFX"
    output = []
    if implementation:
        # This material is a hardware shader, skip it
        print("Shader materials are not supported")
        return ''
    elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
        ambient = str(getHex(material.Ambient.Get()))
        diffuse = str(getHex(material.Diffuse.Get()))
        emissive = str(getHex(material.Emissive.Get()))
        # Fully transparent (factor 1.0 -> opacity 0) is treated as opaque.
        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        opacity = str(opacity)
        transparent = BoolString(False)
        reflectivity = "1"
        output = [
        '\t' + LabelString( getMaterialName( material ) ) + ': {',
        '	"type" : "MeshLambertMaterial",',
        '	"parameters" : {',
        '	"color" : ' + diffuse + ',',
        '	"ambient" : ' + ambient + ',',
        '	"emissive" : ' + emissive + ',',
        '	"reflectivity" : ' + reflectivity + ',',
        '	"transparent" : ' + transparent + ',',
        '	"opacity" : ' + opacity + ',',
        ]
    elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
        ambient = str(getHex(material.Ambient.Get()))
        diffuse = str(getHex(material.Diffuse.Get()))
        emissive = str(getHex(material.Emissive.Get()))
        specular = str(getHex(material.Specular.Get()))
        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        opacity = str(opacity)
        shininess = str(material.Shininess.Get())
        transparent = BoolString(False)
        reflectivity = "1"
        bumpScale = "1"
        output = [
        '\t' + LabelString( getMaterialName( material ) ) + ': {',
        '	"type" : "MeshPhongMaterial",',
        '	"parameters" : {',
        '	"color" : ' + diffuse + ',',
        '	"ambient" : ' + ambient + ',',
        '	"emissive" : ' + emissive + ',',
        '	"specular" : ' + specular + ',',
        '	"shininess" : ' + shininess + ',',
        '	"bumpScale" : ' + bumpScale + ',',
        '	"reflectivity" : ' + reflectivity + ',',
        '	"transparent" : ' + transparent + ',',
        '	"opacity" : ' + opacity + ',',
        ]
    else:
        print("Unknown type of Material")
        return ''
    if option_textures:
        # Append per-channel texture bindings after the scalar parameters.
        texture_list = []
        texture_count = FbxLayerElement.sTypeTextureCount()
        for texture_index in range(texture_count):
            material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
            generate_texture_bindings(material_property, texture_list)
        output += texture_list
    wireframe = BoolString(False)
    wireframeLinewidth = "1"
    output.append('	"wireframe" : ' + wireframe + ',')
    output.append('	"wireframeLinewidth" : ' + wireframeLinewidth)
    output.append('	}')
    output.append('}')
    return generateMultiLineString( output, '\n\t\t', 0 )
def generate_proxy_material_string(node, material_names):
    """Emit a MeshFaceMaterial entry referencing *material_names* by label."""
    output = [
    '\t' + LabelString( getMaterialName( node ) ) + ': {',
    '	"type" : "MeshFaceMaterial",',
    '	"parameters" : {',
    '	"materials" : ' + ArrayString( ",".join(LabelString(m) for m in material_names) ),
    '	}',
    '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Parse - Materials
# #####################################################
def extract_materials_from_node(node, material_list):
    """Append material definition strings for every material on *node*'s mesh.

    When the mesh carries more than one material, an extra MeshFaceMaterial
    proxy entry is appended that references the individual materials by name.
    (Indentation reconstructed from a whitespace-mangled source -- verify the
    loop nesting against the original file.)
    """
    name = node.GetName()
    mesh = node.GetNodeAttribute()
    node = None
    if mesh:
        node = mesh.GetNode()
        if node:
            material_count = node.GetMaterialCount()
    material_names = []
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                #Materials are in an undefined external table
                continue
            for i in range(material_count):
                material = node.GetMaterial(i)
                material_names.append(getMaterialName(material))
                material_string = generate_material_string(material)
                material_list.append(material_string)
    if material_count > 1:
        proxy_material = generate_proxy_material_string(node, material_names)
        material_list.append(proxy_material)
def generate_materials_from_hierarchy(node, material_list):
    """Depth-first walk collecting materials from every mesh node."""
    if node.GetNodeAttribute() == None:
        pass
    else:
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            extract_materials_from_node(node, material_list)
    for i in range(node.GetChildCount()):
        generate_materials_from_hierarchy(node.GetChild(i), material_list)
def generate_material_list(scene):
    """Return the material definition strings for the whole scene."""
    material_list = []
    node = scene.GetRootNode()
    if node:
        for i in range(node.GetChildCount()):
            generate_materials_from_hierarchy(node.GetChild(i), material_list)
    return material_list
# #####################################################
# Generate - Texture String
# #####################################################
def generate_texture_string(texture):
    """Emit one three.js texture definition block for an FBX texture.

    Only the file name and UV offset come from the FBX texture; repeat is
    fixed at (1,1) and the filter/anisotropy settings are hard-coded
    defaults. The original also computed the wrap modes but never used
    them -- those dead locals are removed here, and the already-fetched
    offset is reused instead of calling GetUVTranslation() twice.
    """
    offset = texture.GetUVTranslation()
    output = [
    '\t' + LabelString( getTextureName( texture ) ) + ': {',
    '	"url" : "' + texture.GetFileName() + '",',
    '	"repeat" : ' + Vector2String( (1,1) ) + ',',
    '	"offset" : ' + Vector2String( offset ) + ',',
    '	"magFilter" : ' + LabelString( "LinearFilter" ) + ',',
    '	"minFilter" : ' + LabelString( "LinearMipMapLinearFilter" ) + ',',
    '	"anisotropy" : ' + BoolString( True ),
    '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Parse - Textures
# #####################################################
def extract_material_textures(material_property, texture_list):
    """Append texture definition strings for every texture on one property,
    covering both layered and plain textures."""
    if material_property.IsValid():
        #Here we have to check if it's layeredtextures, or just textures:
        layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
        if layered_texture_count > 0:
            for j in range(layered_texture_count):
                layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
                texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
                for k in range(texture_count):
                    texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
                    if texture:
                        texture_string = generate_texture_string(texture)
                        texture_list.append(texture_string)
        else:
            # no layered texture simply get on the property
            texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
            for j in range(texture_count):
                texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
                if texture:
                    texture_string = generate_texture_string(texture)
                    texture_list.append(texture_string)
def extract_textures_from_node(node, texture_list):
    """Collect texture strings from every material attached to *node*'s mesh."""
    name = node.GetName()
    mesh = node.GetNodeAttribute()
    #for all materials attached to this mesh
    material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
    for material_index in range(material_count):
        material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
        #go through all the possible textures types
        if material:
            texture_count = FbxLayerElement.sTypeTextureCount()
            for texture_index in range(texture_count):
                material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
                extract_material_textures(material_property, texture_list)
def generate_textures_from_hierarchy(node, texture_list):
    """Depth-first walk gathering textures from every mesh node."""
    if node.GetNodeAttribute() == None:
        pass
    else:
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            extract_textures_from_node(node, texture_list)
    for i in range(node.GetChildCount()):
        generate_textures_from_hierarchy(node.GetChild(i), texture_list)
def generate_texture_list(scene):
    """Return texture definition strings for the scene ([] when disabled)."""
    if not option_textures:
        return []
    texture_list = []
    node = scene.GetRootNode()
    if node:
        for i in range(node.GetChildCount()):
            generate_textures_from_hierarchy(node.GetChild(i), texture_list)
    return texture_list
# #####################################################
# Generate - Mesh String
# #####################################################
def setBit(value, position, on):
    """Return *value* with the bit at *position* set (on) or cleared (off)."""
    mask = 1 << position
    return value | mask if on else value & ~mask
def extract_color(color):
    """Pull an FBX color's RGB channels into a plain list."""
    return [getattr(color, channel) for channel in ('mRed', 'mGreen', 'mBlue')]
def extract_vec2(v):
    """First two components of an indexable vector as a list."""
    return [v[i] for i in range(2)]
def extract_vec3(v):
    """First three components of an indexable vector as a list."""
    return [v[i] for i in range(3)]
def join_vec2(v):
    """Comma-join the first two components with %g formatting."""
    return ",".join('%g' % v[i] for i in range(2))
def join_vec3(v):
    """Comma-join the first three components with %g formatting."""
    return ",".join('%g' % v[i] for i in range(3))
def generate_uv(uv):
    """Format one UV pair as 'u,v' using %g."""
    return ",".join('%g' % uv[i] for i in range(2))
def generate_uvs(uv_layers):
    """Format layered UVs as comma-separated '[u,v,...]' groups, one per layer."""
    layer_strings = [",".join(generate_uv(pair) for pair in uvs) for uvs in uv_layers]
    return ",".join("[%s]" % layer for layer in layer_strings)
def extract_mesh_bounding_box(mesh):
    """Return ([minx, miny, minz], [maxx, maxy, maxz]) over all control points.

    BUG FIX: the original initialized every extreme to 0, so a mesh lying
    entirely on one side of the origin got a box incorrectly stretched to
    (0,0,0). Extremes now start from the first control point. An empty mesh
    still yields ([0, 0, 0], [0, 0, 0]).
    """
    control_points_count = mesh.GetControlPointsCount()
    control_points = mesh.GetControlPoints()
    if control_points_count == 0:
        return [0, 0, 0], [0, 0, 0]
    first = control_points[0]
    minx, miny, minz = first[0], first[1], first[2]
    maxx, maxy, maxz = first[0], first[1], first[2]
    for i in range(1, control_points_count):
        vertex = control_points[i]
        minx = min(minx, vertex[0])
        miny = min(miny, vertex[1])
        minz = min(minz, vertex[2])
        maxx = max(maxx, vertex[0])
        maxy = max(maxy, vertex[1])
        maxz = max(maxz, vertex[2])
    return [minx, miny, minz], [maxx, maxy, maxz]
def extract_vertex_positions(mesh):
    """Return every control point as a plain [x, y, z] list."""
    control_points = mesh.GetControlPoints()
    count = mesh.GetControlPointsCount()
    return [extract_vec3(control_points[i]) for i in range(count)]
def extract_vertex_normals(mesh):
    """Collect per-layer normal values and per-polygon normal indices.

    Returns (layered_normal_values, layered_normal_indices): one entry per
    mesh layer that has a non-empty normal element. Values are [x, y, z]
    lists; indices are grouped per polygon, resolved according to the FBX
    mapping mode (by control point or by polygon vertex) and reference mode
    (direct or index-to-direct).
    """
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.
    layered_normal_indices = []
    layered_normal_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()
    for l in range(mesh.GetLayerCount()):
        mesh_normals = mesh.GetLayer(l).GetNormals()
        if not mesh_normals:
            continue
        normals_array = mesh_normals.GetDirectArray()
        normals_count = normals_array.GetCount()
        if normals_count == 0:
            continue
        normal_indices = []
        normal_values = []
        # values
        for i in range(normals_count):
            normal = extract_vec3(normals_array.GetAt(i))
            normal_values.append(normal)
        # indices
        # vertexId counts polygon-vertices across the whole mesh, which is
        # the index space used by eByPolygonVertex mapping.
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_normals = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(control_point_index)
                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(control_point_index)
                        poly_normals.append(index)
                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(vertexId)
                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(vertexId)
                        poly_normals.append(index)
                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported normal mapping mode for polygon vertex")
                vertexId += 1
            normal_indices.append(poly_normals)
        layered_normal_values.append(normal_values)
        layered_normal_indices.append(normal_indices)
    return layered_normal_values, layered_normal_indices
def extract_vertex_colors(mesh):
    """Extract vertex colors from every layer of an FBX mesh.

    Returns a pair (layered_color_values, layered_color_indices):
    one entry per layer that actually carries colors, where values is a
    list of color tuples and indices is a per-polygon list of index lists
    into those values.
    """
    # FBX mapping-mode reference (FbxLayerElement):
    # eNone The mapping is undetermined.
    # eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon There can be only one mapping coordinate for the whole polygon.
    # eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame There can be only one mapping coordinate for the whole surface.
    layered_color_indices = []
    layered_color_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()  # NOTE(review): unused in this function
    for l in range(mesh.GetLayerCount()):
        mesh_colors = mesh.GetLayer(l).GetVertexColors()
        if not mesh_colors:
            continue
        colors_array = mesh_colors.GetDirectArray()
        colors_count = colors_array.GetCount()
        if colors_count == 0:
            continue
        color_indices = []
        color_values = []
        # values
        for i in range(colors_count):
            color = extract_color(colors_array.GetAt(i))
            color_values.append(color)
        # indices: vertexId counts polygon-vertices across the whole mesh,
        # which is what eByPolygonVertex mapping indexes by.
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_colors = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(control_point_index)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(control_point_index)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(vertexId)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(vertexId)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
                    # unsupported modes are reported but produce no indices
                    print("unsupported color mapping mode for polygon vertex")
                vertexId += 1
            color_indices.append(poly_colors)
        layered_color_values.append(color_values)
        layered_color_indices.append(color_indices)
    return layered_color_values, layered_color_indices
def extract_vertex_uvs(mesh):
    """Extract UV coordinates from every layer of an FBX mesh.

    Returns a pair (layered_uv_values, layered_uv_indices):
    one entry per layer that carries UVs, where values is a list of
    (u, v) tuples and indices is a per-polygon list of index lists.
    """
    # FBX mapping-mode reference (FbxLayerElement):
    # eNone The mapping is undetermined.
    # eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon There can be only one mapping coordinate for the whole polygon.
    # eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame There can be only one mapping coordinate for the whole surface.
    layered_uv_indices = []
    layered_uv_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()  # NOTE(review): unused in this function
    for l in range(mesh.GetLayerCount()):
        mesh_uvs = mesh.GetLayer(l).GetUVs()
        if not mesh_uvs:
            continue
        uvs_array = mesh_uvs.GetDirectArray()
        uvs_count = uvs_array.GetCount()
        if uvs_count == 0:
            continue
        uv_indices = []
        uv_values = []
        # values
        for i in range(uvs_count):
            uv = extract_vec2(uvs_array.GetAt(i))
            uv_values.append(uv)
        # indices
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_uvs = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_uvs.append(control_point_index)
                    elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
                        poly_uvs.append(index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    # GetTextureUVIndex already resolves the indirection, so
                    # both reference modes use it directly.
                    uv_texture_index = mesh.GetTextureUVIndex(p, v)
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
                       mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        poly_uvs.append(uv_texture_index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported uv mapping mode for polygon vertex")
                vertexId += 1
            uv_indices.append(poly_uvs)
        layered_uv_values.append(uv_values)
        layered_uv_indices.append(uv_indices)
    return layered_uv_values, layered_uv_indices
def generate_mesh_face(mesh, vertex_indices, polygon_index, normals, colors, uv_layers, material_count, material_is_same):
    """Serialize one polygon into the comma-separated face record used by
    the three.js JSON (format 3) "faces" array.

    The leading faceType bitmask and the order of the appended index
    groups must match what JSONLoader expects — do not reorder.
    """
    isTriangle = ( len(vertex_indices) == 3 )
    nVertices = 3 if isTriangle else 4
    hasMaterial = material_count > 0
    hasFaceUvs = False
    hasFaceVertexUvs = len(uv_layers) > 0
    hasFaceNormals = False # don't export any face normals (as they are computed in engine)
    hasFaceVertexNormals = len(normals) > 0
    hasFaceColors = False
    hasFaceVertexColors = len(colors) > 0
    faceType = 0
    faceType = setBit(faceType, 0, not isTriangle)
    faceType = setBit(faceType, 1, hasMaterial)
    faceType = setBit(faceType, 2, hasFaceUvs)
    faceType = setBit(faceType, 3, hasFaceVertexUvs)
    faceType = setBit(faceType, 4, hasFaceNormals)
    faceType = setBit(faceType, 5, hasFaceVertexNormals)
    faceType = setBit(faceType, 6, hasFaceColors)
    faceType = setBit(faceType, 7, hasFaceVertexColors)
    faceData = []
    # order is important, must match order in JSONLoader
    # face type
    # vertex indices
    # material index
    # face uvs index
    # face vertex uvs indices
    # face color index
    # face vertex colors indices
    faceData.append(faceType)
    # must clamp in case on polygons bigger than quads
    for i in range(nVertices):
        index = vertex_indices[i]
        faceData.append(index)
    if hasMaterial:
        material_id = 0
        if not material_is_same:
            # per-polygon material: look up the index in the first layer
            # that has a material element
            for l in range(mesh.GetLayerCount()):
                materials = mesh.GetLayer(l).GetMaterials()
                if materials:
                    material_id = materials.GetIndexArray().GetAt(polygon_index)
                    break
        faceData.append( material_id )
    if hasFaceVertexUvs:
        # one index group per UV layer
        for layer_index, uvs in enumerate(uv_layers):
            polygon_uvs = uvs[polygon_index]
            for i in range(nVertices):
                index = polygon_uvs[i]
                faceData.append(index)
    if hasFaceVertexNormals:
        polygon_normals = normals[polygon_index]
        for i in range(nVertices):
            index = polygon_normals[i]
            faceData.append(index)
    if hasFaceVertexColors:
        polygon_colors = colors[polygon_index]
        for i in range(nVertices):
            index = polygon_colors[i]
            faceData.append(index)
    return ",".join( map(str, faceData) )
def generate_mesh_faces(mesh, normals, colors, uv_layers):
    """Serialize every polygon of *mesh* into a list of face-record strings.

    normals/colors are the (single-layer) per-polygon index lists,
    uv_layers the per-layer UV index lists, as produced by the
    extract_vertex_* helpers.
    """
    # A mesh shares one material for all polygons unless some layer maps
    # materials per polygon.
    has_same_material_for_all_polygons = True
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            if materials.GetMappingMode() == FbxLayerElement.eByPolygon:
                has_same_material_for_all_polygons = False
                break
    # Bug fix: material_count was previously assigned only when the mesh
    # had a node, leaving it unbound (NameError) for node-less meshes.
    material_count = 0
    node = mesh.GetNode()
    if node:
        material_count = node.GetMaterialCount()
    poly_count = mesh.GetPolygonCount()
    faces = []
    for p in range(poly_count):
        poly_size = mesh.GetPolygonSize(p)
        vertex_indices = []
        for v in range(poly_size):
            vertex_indices.append(mesh.GetPolygonVertex(p, v))
        face = generate_mesh_face(mesh, vertex_indices, p, normals, colors, uv_layers, material_count, has_same_material_for_all_polygons)
        faces.append(face)
    return faces
def generate_mesh_string(node):
    """Serialize the node's mesh into a three.js JSON (format 3) embed entry."""
    mesh = node.GetNodeAttribute()
    vertices = extract_vertex_positions(mesh)
    aabb_min, aabb_max = extract_mesh_bounding_box(mesh)
    normal_values, normal_indices = extract_vertex_normals(mesh)
    color_values, color_indices = extract_vertex_colors(mesh)
    uv_values, uv_indices = extract_vertex_uvs(mesh)
    # Three.js only supports one layer of normals
    if len(normal_values) > 0:
        normal_values = normal_values[0]
        normal_indices = normal_indices[0]
    # Three.js only supports one layer of colors
    if len(color_values) > 0:
        color_values = color_values[0]
        color_indices = color_indices[0]
    faces = generate_mesh_faces(mesh, normal_indices, color_indices, uv_indices)
    # per-layer UV counts for the metadata block (all UV layers are kept)
    nuvs = []
    for layer_index, uvs in enumerate(uv_values):
        nuvs.append(str(len(uvs)))
    nvertices = len(vertices)
    nnormals = len(normal_values)
    ncolors = len(color_values)
    nfaces = len(faces)
    # flatten everything into the comma-separated strings the format expects
    nuvs = ",".join(nuvs)
    vertices = ",".join(join_vec3(v) for v in vertices)
    normals = ",".join(join_vec3(v) for v in normal_values)
    colors = ",".join(join_vec3(v) for v in color_values)
    faces = ",".join(faces)
    uvs = generate_uvs(uv_values)
    aabb_min = ",".join(str(f) for f in aabb_min)
    aabb_max = ",".join(str(f) for f in aabb_max)
    output = [
        '\t' + LabelString( getEmbedName( node ) ) + ' : {',
        ' "metadata" : {',
        ' "vertices" : ' + str(nvertices) + ',',
        ' "normals" : ' + str(nnormals) + ',',
        ' "colors" : ' + str(ncolors) + ',',
        ' "faces" : ' + str(nfaces) + ',',
        ' "uvs" : ' + ArrayString(nuvs),
        ' },',
        ' "boundingBox" : {',
        ' "min" : ' + ArrayString(aabb_min) + ',',
        ' "max" : ' + ArrayString(aabb_max),
        ' },',
        ' "scale" : ' + str( 1 ) + ',',
        ' "materials" : ' + ArrayString("") + ',',
        ' "vertices" : ' + ArrayString(vertices) + ',',
        ' "normals" : ' + ArrayString(normals) + ',',
        ' "colors" : ' + ArrayString(colors) + ',',
        ' "uvs" : ' + ArrayString(uvs) + ',',
        ' "faces" : ' + ArrayString(faces),
        '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Generate - Embeds
# #####################################################
def generate_embed_list_from_hierarchy(node, embed_list):
    """Recursively collect embedded-mesh strings for every geometry node."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        attribute_type = attribute.GetAttributeType()
        if attribute_type in (FbxNodeAttribute.eMesh,
                              FbxNodeAttribute.eNurbs,
                              FbxNodeAttribute.eNurbsSurface,
                              FbxNodeAttribute.ePatch):
            # non-mesh geometry is triangulated into mesh form first
            if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
            embed_list.append(generate_mesh_string(node))
    for child_index in range(node.GetChildCount()):
        generate_embed_list_from_hierarchy(node.GetChild(child_index), embed_list)
def generate_embed_list(scene):
    """Gather embedded-geometry strings for the whole scene graph."""
    embeds = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_embed_list_from_hierarchy(root.GetChild(child_index), embeds)
    return embeds
# #####################################################
# Generate - Geometries
# #####################################################
def generate_geometry_string(node):
    """Emit a geometry entry that points at the node's embedded mesh data."""
    lines = [
        '\t' + LabelString( getGeometryName( node ) ) + ' : {',
        ' "type" : "embedded",',
        ' "id" : ' + LabelString( getEmbedName( node ) ),
        '}'
    ]
    return generateMultiLineString(lines, '\n\t\t', 0)
def generate_geometry_list_from_hierarchy(node, geometry_list):
    """Recursively collect geometry-reference strings for mesh nodes."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
        geometry_list.append(generate_geometry_string(node))
    for child_index in range(node.GetChildCount()):
        generate_geometry_list_from_hierarchy(node.GetChild(child_index), geometry_list)
def generate_geometry_list(scene):
    """Gather geometry-reference strings for the whole scene graph."""
    geometries = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_geometry_list_from_hierarchy(root.GetChild(child_index), geometries)
    return geometries
# #####################################################
# Generate - Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
    """Recursively collect the exported names of all camera nodes."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eCamera:
        camera_list.append(getObjectName(node))
    for child_index in range(node.GetChildCount()):
        generate_camera_name_list_from_hierarchy(node.GetChild(child_index), camera_list)
def generate_camera_name_list(scene):
    """Gather the names of every camera in the scene graph."""
    cameras = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_camera_name_list_from_hierarchy(root.GetChild(child_index), cameras)
    return cameras
# #####################################################
# Generate - Light Object
# #####################################################
def generate_default_light_string(padding):
    """Emit a hard-coded white directional light entry (fallback light)."""
    direction = (1, 1, 1)
    color = (1, 1, 1)
    intensity = 80.0  # FBX-style percentage; exported scaled to [0,1]
    lines = [
        '\t\t' + LabelString( 'default_light' ) + ' : {',
        ' "type" : "DirectionalLight",',
        ' "color" : ' + str(getHex(color)) + ',',
        ' "intensity" : ' + str(intensity/100.0) + ',',
        ' "direction" : ' + Vector3String( direction ) + ',',
        ' "target" : ' + LabelString( getObjectName( None ) ),
        ' }'
    ]
    return generateMultiLineString(lines, '\n\t\t', padding)
def generate_light_string(node, padding):
    """Serialize an FBX light node into a three.js light entry.

    Only directional, point and spot lights produce output; "area" and
    "volume" fall through and an empty string is returned.
    """
    light = node.GetNodeAttribute()
    # index order follows the FbxLight::EType enum
    light_types = ["point", "directional", "spot", "area", "volume"]
    light_type = light_types[light.LightType.Get()]
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    output = []
    if light_type == "directional":
        # NOTE(review): the node's translation is exported as the light
        # "direction"; FBX intensity (a percentage) is scaled to [0,1].
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "DirectionalLight",',
            ' "color" : ' + str(getHex(light.Color.Get())) + ',',
            ' "intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            ' "direction" : ' + Vector3String( position ) + ',',
            ' "target" : ' + LabelString( getObjectName( node.GetTarget() ) ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    elif light_type == "point":
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "PointLight",',
            ' "color" : ' + str(getHex(light.Color.Get())) + ',',
            ' "intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            ' "position" : ' + Vector3String( position ) + ',',
            ' "distance" : ' + str(light.FarAttenuationEnd.Get()) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    elif light_type == "spot":
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "SpotLight",',
            ' "color" : ' + str(getHex(light.Color.Get())) + ',',
            ' "intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            ' "position" : ' + Vector3String( position ) + ',',
            ' "distance" : ' + str(light.FarAttenuationEnd.Get()) + ',',
            ' "angle" : ' + str(light.OuterAngle.Get()) + ',',
            ' "exponent" : ' + str(light.DecayType.Get()) + ',',
            ' "target" : ' + LabelString( getObjectName( node.GetTarget() ) ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
def generate_ambient_light_string(scene):
    """Emit an AmbientLight entry from the scene's ambient color.

    Returns None when the ambient color is pure black (nothing to emit).
    """
    settings = scene.GetGlobalSettings()
    raw = settings.GetAmbientColor()
    ambient = (raw.mRed, raw.mGreen, raw.mBlue)
    if ambient[0] == 0 and ambient[1] == 0 and ambient[2] == 0:
        return None

    class AmbientLight:
        # stand-in node so getObjectName() can produce a stable name
        def GetName(self):
            return "AmbientLight"

    stand_in = AmbientLight()
    lines = [
        '\t\t' + LabelString( getObjectName( stand_in ) ) + ' : {',
        ' "type" : "AmbientLight",',
        ' "color" : ' + str(getHex(ambient)),
        '}'
    ]
    return generateMultiLineString(lines, '\n\t\t', 0)
# #####################################################
# Generate - Camera Object
# #####################################################
def generate_default_camera_string(padding):
    """Emit a hard-coded perspective camera entry (fallback camera)."""
    position = (100, 100, 100)
    near = 0.1
    far = 1000
    fov = 75
    lines = [
        '\t\t' + LabelString( 'default_camera' ) + ' : {',
        ' "type" : "PerspectiveCamera",',
        ' "fov" : ' + str(fov) + ',',
        ' "near" : ' + str(near) + ',',
        ' "far" : ' + str(far) + ',',
        ' "position" : ' + Vector3String( position ),
        ' }'
    ]
    return generateMultiLineString(lines, '\n\t\t', padding)
def generate_camera_string(node, padding):
    """Serialize an FBX camera node into a three.js camera entry.

    Perspective cameras get fixed fov/far overrides; orthographic cameras
    are emitted with empty frustum bounds (not yet extracted from FBX).
    """
    camera = node.GetNodeAttribute()
    target_node = node.GetTarget()
    if target_node:
        # Bug fix: this previously called EvaluateLocalTransform() on the
        # string placeholder `target` instead of `target_node`, raising
        # AttributeError for any camera with a target.
        transform = target_node.EvaluateLocalTransform()
        target = transform.GetT()
    else:
        target = camera.InterestPosition.Get()
    # NOTE(review): `target` is computed but not written into the output.
    position = camera.Position.Get()
    projection_types = [ "perspective", "orthogonal" ]
    projection = projection_types[camera.ProjectionType.Get()]
    near = camera.NearPlane.Get()
    far = camera.FarPlane.Get()
    output = []
    if projection == "perspective":
        aspect = camera.PixelAspectRatio.Get()
        fov = camera.FieldOfView.Get()
        # deliberate override: the FBX fov/far values read above are
        # discarded in favor of fixed defaults
        fov = 75
        far = 1000
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "PerspectiveCamera",',
            ' "fov" : ' + str(fov) + ',',
            ' "aspect" : ' + str(aspect) + ',',
            ' "near" : ' + str(near) + ',',
            ' "far" : ' + str(far) + ',',
            ' "position" : ' + Vector3String( position ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    elif projection == "orthogonal":
        # TODO: extract the orthographic frustum from the FBX camera
        left = ""
        right = ""
        top = ""
        bottom = ""
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "OrthographicCamera",',
            ' "left" : ' + left + ',',
            ' "right" : ' + right + ',',
            ' "top" : ' + top + ',',
            ' "bottom" : ' + bottom + ',',
            ' "near" : ' + str(near) + ',',
            ' "far" : ' + str(far) + ',',
            ' "position" : ' + Vector3String( position ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Generate - Mesh Object
# #####################################################
def generate_mesh_object_string(node, padding):
    """Serialize a mesh node into a scene "objects" entry referencing its
    geometry and material."""
    mesh = node.GetNodeAttribute()
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    scale = transform.GetS()
    rotation = getRadians(transform.GetR())
    material_count = node.GetMaterialCount()
    material_name = ""
    if material_count > 0:
        material_names = []
        for l in range(mesh.GetLayerCount()):
            materials = mesh.GetLayer(l).GetMaterials()
            if materials:
                if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                    #Materials are in an undefined external table
                    continue
                for i in range(material_count):
                    material = node.GetMaterial(i)
                    material_names.append( getMaterialName(material) )
        # NOTE(review): if every layer is skipped above, material_names is
        # empty and material_names[0] below raises IndexError — confirm
        # whether that case can occur with real assets.
        #If this mesh has more than one material, use a proxy material
        material_name = getMaterialName( node ) if material_count > 1 else material_names[0]
    output = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        ' "geometry" : ' + LabelString( getGeometryName( node ) ) + ',',
        ' "material" : ' + LabelString( material_name ) + ',',
        ' "position" : ' + Vector3String( position ) + ',',
        ' "rotation" : ' + Vector3String( rotation ) + ',',
        ' "scale" : ' + Vector3String( scale ) + ',',
        ' "visible" : ' + getObjectVisible( node ) + ( ',' if node.GetChildCount() > 0 else '' )
    ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Generate - Object
# #####################################################
def generate_object_string(node, padding):
    """Serialize a generic (non-mesh/light/camera) node into an object entry."""
    # index order follows the FbxNodeAttribute::EType enum
    node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
                  "CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
                  "TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
    transform = node.EvaluateLocalTransform()
    translation = transform.GetT()
    scaling = transform.GetS()
    rotation_radians = getRadians(transform.GetR())
    attribute = node.GetNodeAttribute()
    node_type = "Null" if attribute is None else node_types[attribute.GetAttributeType()]
    trailing = ',' if node.GetChildCount() > 0 else ''
    lines = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        ' "fbx_type" : ' + LabelString( node_type ) + ',',
        ' "position" : ' + Vector3String( translation ) + ',',
        ' "rotation" : ' + Vector3String( rotation_radians ) + ',',
        ' "scale" : ' + Vector3String( scaling ) + ',',
        ' "visible" : ' + getObjectVisible( node ) + trailing
    ]
    return generateMultiLineString(lines, '\n\t\t', padding)
# #####################################################
# Parse - Objects
# #####################################################
def generate_object_hierarchy(node, object_list, pad, siblings_left):
    """Recursively append object-entry strings (and nested "children"
    wrappers) for *node* and its subtree.

    pad controls indentation depth; siblings_left decides whether the
    closing brace gets a trailing comma. Returns the number of objects
    emitted in this subtree.
    """
    object_count = 0
    if node.GetNodeAttribute() == None:
        object_string = generate_object_string(node, pad)
        object_list.append(object_string)
        object_count += 1
    else:
        # dispatch on attribute type; everything unrecognized becomes a
        # generic object entry
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            object_string = generate_mesh_object_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        elif attribute_type == FbxNodeAttribute.eLight:
            object_string = generate_light_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        elif attribute_type == FbxNodeAttribute.eCamera:
            object_string = generate_camera_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        else:
            object_string = generate_object_string(node, pad)
            object_list.append(object_string)
            object_count += 1
    if node.GetChildCount() > 0:
        # children are nested two pad levels deeper inside a "children" map
        object_list.append( PaddingString( pad + 1 ) + '\t\t"children" : {\n' )
        for i in range(node.GetChildCount()):
            object_count += generate_object_hierarchy(node.GetChild(i), object_list, pad + 2, node.GetChildCount() - i - 1)
        object_list.append( PaddingString( pad + 1 ) + '\t\t}' )
    object_list.append( PaddingString( pad ) + '\t\t}' + (',\n' if siblings_left > 0 else ''))
    return object_count
def generate_scene_objects_string(scene):
    """Build the body of the "objects" section and count the entries.

    Optional ambient/default light and default camera entries come first,
    each followed by a comma when more entries will follow.
    """
    total = 0
    entries = []
    ambient = generate_ambient_light_string(scene)
    if ambient:
        if scene.GetNodeCount() > 0 or option_default_light or option_default_camera:
            ambient += ',\n'
        entries.append(ambient)
        total += 1
    if option_default_light:
        fallback_light = generate_default_light_string(0)
        if scene.GetNodeCount() > 0 or option_default_camera:
            fallback_light += ',\n'
        entries.append(fallback_light)
        total += 1
    if option_default_camera:
        fallback_camera = generate_default_camera_string(0)
        if scene.GetNodeCount() > 0:
            fallback_camera += ',\n'
        entries.append(fallback_camera)
        total += 1
    root = scene.GetRootNode()
    if root:
        child_count = root.GetChildCount()
        for i in range(child_count):
            total += generate_object_hierarchy(root.GetChild(i), entries, 0, child_count - i - 1)
    return "\n".join(entries), total
# #####################################################
# Parse - Scene
# #####################################################
def extract_scene(scene, filename):
    """Convert the whole FBX scene into a three.js scene-format (3.2)
    JSON string.

    filename is accepted for API symmetry but not used in the output.
    """
    objects, nobjects = generate_scene_objects_string(scene)
    textures = generate_texture_list(scene)
    materials = generate_material_list(scene)
    geometries = generate_geometry_list(scene)
    embeds = generate_embed_list(scene)
    fogs = []  # TODO: fog extraction is not implemented; section stays empty
    ntextures = len(textures)
    nmaterials = len(materials)
    ngeometries = len(geometries)
    # root transform is always identity
    position = Vector3String( (0,0,0) )
    rotation = Vector3String( (0,0,0) )
    scale = Vector3String( (1,1,1) )
    camera_names = generate_camera_name_list(scene)
    scene_settings = scene.GetGlobalSettings()
    # hard-coded viewer defaults
    bgcolor = Vector3String( (0.667,0.667,0.667) )
    bgalpha = 1
    # default camera: first camera found, or the generated fallback
    defcamera = LabelString(camera_names[0] if len(camera_names) > 0 else "")
    if option_default_camera:
        defcamera = LabelString('default_camera')
    #TODO: extract fog info from scene
    deffog = LabelString("")
    geometries = generateMultiLineString( geometries, ",\n\n\t", 0 )
    materials = generateMultiLineString( materials, ",\n\n\t", 0 )
    textures = generateMultiLineString( textures, ",\n\n\t", 0 )
    embeds = generateMultiLineString( embeds, ",\n\n\t", 0 )
    fogs = generateMultiLineString( fogs, ",\n\n\t", 0 )
    output = [
        '{',
        ' "metadata": {',
        ' "formatVersion" : 3.2,',
        ' "type" : "scene",',
        ' "generatedBy" : "convert-to-threejs.py",',
        ' "objects" : ' + str(nobjects) + ',',
        ' "geometries" : ' + str(ngeometries) + ',',
        ' "materials" : ' + str(nmaterials) + ',',
        ' "textures" : ' + str(ntextures),
        ' },',
        '',
        ' "urlBaseType": "relativeToScene",',
        '',
        ' "objects" :',
        ' {',
        objects,
        ' },',
        '',
        ' "geometries" :',
        ' {',
        '\t' + geometries,
        ' },',
        '',
        ' "materials" :',
        ' {',
        '\t' + materials,
        ' },',
        '',
        ' "textures" :',
        ' {',
        '\t' + textures,
        ' },',
        '',
        ' "embeds" :',
        ' {',
        '\t' + embeds,
        ' },',
        '',
        ' "fogs" :',
        ' {',
        '\t' + fogs,
        ' },',
        '',
        ' "transform" :',
        ' {',
        ' "position" : ' + position + ',',
        ' "rotation" : ' + rotation + ',',
        ' "scale" : ' + scale,
        ' },',
        '',
        ' "defaults" :',
        ' {',
        ' "bgcolor" : ' + str(bgcolor) + ',',
        ' "bgalpha" : ' + str(bgalpha) + ',',
        ' "camera" : ' + defcamera + ',',
        ' "fog" : ' + deffog,
        ' }',
        '}'
    ]
    return "\n".join(output)
# #####################################################
# file helpers
# #####################################################
def write_file(fname, content):
    """Write *content* to *fname*, replacing any existing file.

    Uses a context manager so the handle is closed even when write()
    raises (the original leaked the handle on error).
    """
    with open(fname, "w") as out:
        out.write(content)
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
    from optparse import OptionParser

    # The FBX Python SDK must be installed into site-packages by hand.
    try:
        from FbxCommon import *
    except ImportError:
        import platform
        msg = 'Could not locate the python FBX SDK!\n'
        msg += 'You need to copy the FBX SDK into your python install folder such as '
        if platform.system() == 'Windows' or platform.system() == 'Microsoft':
            msg += '"Python26/Lib/site-packages"'
        elif platform.system() == 'Linux':
            msg += '"/usr/local/lib/python2.6/site-packages"'
        elif platform.system() == 'Darwin':
            msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
        msg += ' folder.'
        print(msg)
        sys.exit(1)

    usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
    parser.add_option('-x', '--no-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
    parser.add_option('-p', '--no-prefix', action='store_true', dest='noprefix', help="don't prefix object names in output file", default=False)
    parser.add_option('-g', '--geometry-only', action='store_true', dest='geometry', help="output geometry only", default=False)
    parser.add_option('-c', '--default-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
    # Bug fix: the long option was misspelled '--defualt-light'. The correct
    # spelling is added; the misspelling is kept as an alias so existing
    # scripts keep working.
    parser.add_option('-l', '--default-light', '--defualt-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
    (options, args) = parser.parse_args()

    # publish parsed options as the module-level flags the generators read
    option_triangulate = options.triangulate
    option_textures = not options.notextures
    option_prefix = not options.noprefix
    option_geometry = options.geometry
    option_default_camera = options.defcamera
    option_default_light = options.deflight

    # Prepare the FBX SDK.
    sdk_manager, scene = InitializeSdkObjects()
    converter = FbxGeometryConverter(sdk_manager)

    # The converter takes an FBX file as an argument.
    if len(args) > 1:
        print("\nLoading file: %s" % args[0])
        result = LoadScene(sdk_manager, scene, args[0])
    else:
        result = False
        print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")

    if not result:
        print("\nAn error occurred while loading the file...")
    else:
        if option_triangulate:
            print("\nForcing geometry to triangles")
            triangulate_scene(scene)
        output_content = extract_scene(scene, os.path.basename(args[0]))
        output_path = os.path.join(os.getcwd(), args[1])
        write_file(output_path, output_content)
        print("\nExported Three.js file to:\n%s\n" % output_path)

    # SaveScene(sdk_manager, scene, args[2], 8)
    # Destroy all objects created by the FBX SDK.
    sdk_manager.Destroy()
    sys.exit(0)
|
# @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
# #####################################################
# Globals
# #####################################################
# Export options; overwritten from the parsed command line in __main__.
option_triangulate = True      # convert quads/ngons to triangles
option_textures = True         # include texture references in the output
option_prefix = True           # prefix generated names (Object_, Geometry_, ...)
option_geometry = False        # output geometry only
option_default_camera = False  # add a fallback camera to the scene
option_default_light = False   # add a fallback light to the scene
# FbxGeometryConverter instance, created in __main__.
converter = None
# #####################################################
# Templates
# #####################################################
def Vector2String(v):
    """Format the first two components of *v* as a JSON-style array."""
    return '[ {0:g}, {1:g} ]'.format(v[0], v[1])
def Vector3String(v):
    """Format the first three components of *v* as a JSON-style array."""
    return '[ {0:g}, {1:g}, {2:g} ]'.format(v[0], v[1], v[2])
def ColorString(c):
    """Format an RGB triple as a JSON-style array."""
    return '[ {0:g}, {1:g}, {2:g} ]'.format(c[0], c[1], c[2])
def LabelString(s):
    """Wrap *s* in double quotes for use as a JSON key/string."""
    return '"{0}"'.format(s)
def ArrayString(s):
    """Wrap pre-serialized, comma-joined contents *s* in square brackets."""
    return '[ {0} ]'.format(s)
def PaddingString(n):
    """Return a string of *n* tab characters (empty for n <= 0)."""
    return "\t" * n
def BoolString(value):
    """Return the JSON boolean literal for the truthiness of *value*."""
    return "true" if value else "false"
# #####################################################
# Helpers
# #####################################################
def getObjectName(o):
    """Exported name for a scene node: '' for falsy input, otherwise the
    node name with an optional 'Object_' prefix."""
    if not o:
        return ""
    prefix = "Object_" if option_prefix else ""
    return prefix + o.GetName()
def getGeometryName(g):
    """Exported geometry name with an optional 'Geometry_' prefix."""
    prefix = "Geometry_" if option_prefix else ""
    return prefix + g.GetName()
def getEmbedName(e):
    """Exported embed name with an optional 'Embed_' prefix."""
    prefix = "Embed_" if option_prefix else ""
    return prefix + e.GetName()
def getMaterialName(m):
    """Exported material name with an optional 'Material_' prefix."""
    prefix = "Material_" if option_prefix else ""
    return prefix + m.GetName()
def getTextureName(t):
    """Texture id derived from the file basename (extension dropped),
    with an optional 'Texture_' prefix."""
    texture_file = t.GetFileName()
    texture_id = os.path.splitext(os.path.basename(texture_file))[0]
    prefix = "Texture_" if option_prefix else ""
    return prefix + texture_id
def getFogName(f):
    """Exported fog name with an optional 'Fog_' prefix."""
    prefix = "Fog_" if option_prefix else ""
    return prefix + f.GetName()
def getObjectVisible(n):
    """Visibility flag for node *n*; the exporter always reports visible."""
    # BoolString(True) inlined — visibility is not read from the node yet.
    return "true"
def getRadians(v):
    """Convert the first three components of *v* from degrees to radians,
    returned as a 3-tuple."""
    return tuple((v[i] * math.pi) / 180 for i in range(3))
def getHex(c):
    """Pack an RGB triple of floats in [0, 1] into a 0xRRGGBB integer."""
    red = int(c[0] * 255)
    green = int(c[1] * 255)
    blue = int(c[2] * 255)
    return (red << 16) + (green << 8) + blue
def generateMultiLineString(lines, separator, padding):
    """Join *lines* with *separator*, prefixing each line with *padding*
    tabs."""
    padded = [PaddingString(padding) + line for line in lines]
    return separator.join(padded)
# #####################################################
# Generate - Triangles
# #####################################################
def triangulate_node_hierarchy(node):
    """Recursively triangulate mesh/nurbs/patch attributes under *node*."""
    attribute = node.GetNodeAttribute()
    if attribute:
        triangle_sources = (FbxNodeAttribute.eMesh,
                            FbxNodeAttribute.eNurbs,
                            FbxNodeAttribute.eNurbsSurface,
                            FbxNodeAttribute.ePatch)
        if attribute.GetAttributeType() in triangle_sources:
            converter.TriangulateInPlace(node)
    for child_index in range(node.GetChildCount()):
        triangulate_node_hierarchy(node.GetChild(child_index))
def triangulate_scene(scene):
    """Triangulate every geometry in the scene, starting at the root."""
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            triangulate_node_hierarchy(root.GetChild(child_index))
# #####################################################
# Generate - Material String
# #####################################################
def generate_texture_bindings(material_property, texture_list):
    """Append '"slot": "texture_id",' binding lines to *texture_list* for
    every texture connected to *material_property*.

    Handles both FbxLayeredTexture stacks and plain FbxTexture
    connections. Properties not in binding_types raise KeyError.
    """
    # mapping of FBX property names to three.js material parameter names
    binding_types = {
        "DiffuseColor": "map", "DiffuseFactor": "diffuseFactor", "EmissiveColor": "emissiveMap",
        "EmissiveFactor": "emissiveFactor", "AmbientColor": "ambientMap", "AmbientFactor": "ambientFactor",
        "SpecularColor": "specularMap", "SpecularFactor": "specularFactor", "ShininessExponent": "shininessExponent",
        "NormalMap": "normalMap", "Bump": "bumpMap", "TransparentColor": "transparentMap",
        "TransparencyFactor": "transparentFactor", "ReflectionColor": "reflectionMap",
        "ReflectionFactor": "reflectionFactor", "DisplacementColor": "displacementMap",
        "VectorDisplacementColor": "vectorDisplacementMap"
    }
    if material_property.IsValid():
        #Here we have to check if it's layeredtextures, or just textures:
        layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
        if layered_texture_count > 0:
            for j in range(layered_texture_count):
                layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
                texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
                for k in range(texture_count):
                    texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
                    if texture:
                        texture_id = getTextureName(texture)
                        texture_binding = ' "%s": "%s",' % (binding_types[str(material_property.GetName())], texture_id)
                        texture_list.append(texture_binding)
        else:
            # no layered texture simply get on the property
            texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
            for j in range(texture_count):
                texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
                if texture:
                    texture_id = getTextureName(texture)
                    texture_binding = ' "%s": "%s",' % (binding_types[str(material_property.GetName())], texture_id)
                    texture_list.append(texture_binding)
def generate_material_string(material):
    """Serialize one FBX surface material into a Three.js material JSON fragment.

    Lambert surfaces map to MeshLambertMaterial and Phong surfaces to
    MeshPhongMaterial. Hardware-shader (HLSL/CGFX) materials and unknown
    material classes are skipped and yield ''. Texture bindings are appended
    when the global option_textures flag is enabled.
    """
    #Get the implementation to see if it's a hardware shader.
    implementation = GetImplementation(material, "ImplementationHLSL")
    implementation_type = "HLSL"
    if not implementation:
        implementation = GetImplementation(material, "ImplementationCGFX")
        implementation_type = "CGFX"
    output = []
    if implementation:
        # This material is a hardware shader, skip it
        print("Shader materials are not supported")
        return ''
    elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
        ambient = str(getHex(material.Ambient.Get()))
        diffuse = str(getHex(material.Diffuse.Get()))
        emissive = str(getHex(material.Emissive.Get()))
        # FBX stores a transparency factor; Three.js wants opacity. A computed
        # opacity of 0 is remapped to 1.0 (fully opaque) — presumably a guard
        # against exporters that leave TransparencyFactor at 1; confirm.
        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        opacity = str(opacity)
        transparent = BoolString(False)
        reflectivity = "1"
        output = [
        '\t' + LabelString( getMaterialName( material ) ) + ': {',
        '	"type" : "MeshLambertMaterial",',
        '	"parameters" : {',
        '		"color" : ' + diffuse + ',',
        '		"ambient" : ' + ambient + ',',
        '		"emissive" : ' + emissive + ',',
        '		"reflectivity" : ' + reflectivity + ',',
        '		"transparent" : ' + transparent + ',',
        '		"opacity" : ' + opacity + ',',
        ]
    elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
        ambient = str(getHex(material.Ambient.Get()))
        diffuse = str(getHex(material.Diffuse.Get()))
        emissive = str(getHex(material.Emissive.Get()))
        specular = str(getHex(material.Specular.Get()))
        # Same opacity remapping as the Lambert branch above.
        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        opacity = str(opacity)
        shininess = str(material.Shininess.Get())
        transparent = BoolString(False)
        reflectivity = "1"
        bumpScale = "1"
        output = [
        '\t' + LabelString( getMaterialName( material ) ) + ': {',
        '	"type" : "MeshPhongMaterial",',
        '	"parameters" : {',
        '		"color" : ' + diffuse + ',',
        '		"ambient" : ' + ambient + ',',
        '		"emissive" : ' + emissive + ',',
        '		"specular" : ' + specular + ',',
        '		"shininess" : ' + shininess + ',',
        '		"bumpScale" : ' + bumpScale + ',',
        '		"reflectivity" : ' + reflectivity + ',',
        '		"transparent" : ' + transparent + ',',
        '		"opacity" : ' + opacity + ',',
        ]
    else:
        print("Unknown type of Material")
        return ''
    # Texture bindings come after the scalar parameters, before wireframe keys.
    if option_textures:
        texture_list = []
        texture_count = FbxLayerElement.sTypeTextureCount()
        for texture_index in range(texture_count):
            material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
            generate_texture_bindings(material_property, texture_list)
        output += texture_list
    wireframe = BoolString(False)
    wireframeLinewidth = "1"
    output.append('		"wireframe" : ' + wireframe + ',')
    output.append('		"wireframeLinewidth" : ' + wireframeLinewidth)
    output.append('	}')
    output.append('}')
    return generateMultiLineString( output, '\n\t\t', 0 )
def generate_proxy_material_string(node, material_names):
    """Serialize a MeshFaceMaterial that indexes this node's material list.

    Used when a mesh node carries more than one material; faces then pick
    their material by index into this proxy.
    """
    materials_json = ArrayString( ",".join(LabelString(m) for m in material_names) )
    lines = [
        '\t' + LabelString( getMaterialName( node ) ) + ': {',
        '	"type" : "MeshFaceMaterial",',
        '	"parameters" : {',
        '		"materials" : ' + materials_json,
        '	}',
        '}'
    ]
    return generateMultiLineString( lines, '\n\t\t', 0 )
# #####################################################
# Parse - Materials
# #####################################################
def extract_materials_from_node(node, material_list):
    """Serialize every material on a mesh node and append it to material_list.

    When the node has more than one material a MeshFaceMaterial proxy entry is
    appended as well, so faces can select materials by index.
    """
    name = node.GetName()
    mesh = node.GetNodeAttribute()
    # `node` is deliberately rebound to the mesh's owning node (None when the
    # mesh attribute is detached).
    node = None
    if mesh:
        node = mesh.GetNode()
        if node:
            material_count = node.GetMaterialCount()
    material_names = []
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                #Materials are in an undefined external table
                continue
            # NOTE(review): this inner loop runs once per material layer, so a
            # mesh with several material layers appends duplicate entries —
            # confirm whether multi-layer meshes occur in practice.
            for i in range(material_count):
                material = node.GetMaterial(i)
                material_names.append(getMaterialName(material))
                material_string = generate_material_string(material)
                material_list.append(material_string)
    if material_count > 1:
        proxy_material = generate_proxy_material_string(node, material_names)
        material_list.append(proxy_material)
def generate_materials_from_hierarchy(node, material_list):
    """Depth-first walk: collect serialized materials from every mesh node."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
        extract_materials_from_node(node, material_list)
    for child_index in range(node.GetChildCount()):
        generate_materials_from_hierarchy(node.GetChild(child_index), material_list)
def generate_material_list(scene):
    """Return the list of serialized materials for every mesh in the scene."""
    material_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_materials_from_hierarchy(root.GetChild(child_index), material_list)
    return material_list
# #####################################################
# Generate - Texture String
# #####################################################
def generate_texture_string(texture):
    """Serialize one FBX texture into a Three.js texture JSON fragment.

    Emits url, repeat/offset, filtering and anisotropy keys. Repeat, the
    filters and anisotropy are currently hard-coded; only the URL and the UV
    translation come from the FBX texture.
    """
    # Fix: the wrap modes were fetched but never used, and GetUVTranslation()
    # was called twice — fetch the offset once and drop the dead locals.
    # NOTE(review): GetWrapModeU/V should eventually drive a "wrap" key.
    offset = texture.GetUVTranslation()
    output = [
        '\t' + LabelString( getTextureName( texture ) ) + ': {',
        '	"url" : "' + texture.GetFileName() + '",',
        '	"repeat" : ' + Vector2String( (1,1) ) + ',',
        '	"offset" : ' + Vector2String( offset ) + ',',
        '	"magFilter" : ' + LabelString( "LinearFilter" ) + ',',
        '	"minFilter" : ' + LabelString( "LinearMipMapLinearFilter" ) + ',',
        '	"anisotropy" : ' + BoolString( True ),
        '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Parse - Textures
# #####################################################
def extract_material_textures(material_property, texture_list):
    """Append serialized texture entries for every texture on the property.

    Handles both layered-texture stacks and plain textures; texture_list is
    mutated in place.
    """
    if not material_property.IsValid():
        return
    layered_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
    if layered_count > 0:
        # Textures are stacked in layers; flatten every layer in order.
        for layer_index in range(layered_count):
            layered = material_property.GetSrcObject(FbxLayeredTexture.ClassId, layer_index)
            for texture_index in range(layered.GetSrcObjectCount(FbxTexture.ClassId)):
                texture = layered.GetSrcObject(FbxTexture.ClassId, texture_index)
                if texture:
                    texture_list.append(generate_texture_string(texture))
    else:
        # Textures are attached directly to the property.
        for texture_index in range(material_property.GetSrcObjectCount(FbxTexture.ClassId)):
            texture = material_property.GetSrcObject(FbxTexture.ClassId, texture_index)
            if texture:
                texture_list.append(generate_texture_string(texture))
def extract_textures_from_node(node, texture_list):
    """Collect serialized textures from every material attached to a mesh node."""
    mesh = node.GetNodeAttribute()
    owner = mesh.GetNode()
    #for all materials attached to this mesh
    material_count = owner.GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
    for material_index in range(material_count):
        material = owner.GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
        if not material:
            continue
        #go through all the possible textures types
        for channel_index in range(FbxLayerElement.sTypeTextureCount()):
            channel_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(channel_index))
            extract_material_textures(channel_property, texture_list)
def generate_textures_from_hierarchy(node, texture_list):
    """Depth-first walk: collect serialized textures from every mesh node."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
        extract_textures_from_node(node, texture_list)
    for child_index in range(node.GetChildCount()):
        generate_textures_from_hierarchy(node.GetChild(child_index), texture_list)
def generate_texture_list(scene):
    """Return the scene's serialized textures ([] when textures are disabled)."""
    texture_list = []
    if option_textures:
        root = scene.GetRootNode()
        if root:
            for child_index in range(root.GetChildCount()):
                generate_textures_from_hierarchy(root.GetChild(child_index), texture_list)
    return texture_list
# #####################################################
# Generate - Mesh String
# #####################################################
def setBit(value, position, on):
    """Return `value` with the bit at `position` set (on=True) or cleared."""
    mask = 1 << position
    if on:
        return value | mask
    return value & ~mask
def extract_color(color):
    """Convert an FbxColor-like object into an [r, g, b] list (alpha dropped)."""
    rgb = (color.mRed, color.mGreen, color.mBlue)
    return list(rgb)
def extract_vec2(v):
    """Return the first two components of a vector-like object as a list."""
    x, y = v[0], v[1]
    return [x, y]
def extract_vec3(v):
    """Return the first three components of a vector-like object as a list."""
    x, y, z = v[0], v[1], v[2]
    return [x, y, z]
def join_vec2(v):
    """Format the first two vector components as a compact '%g,%g' string."""
    return ",".join(["%g" % v[0], "%g" % v[1]])
def join_vec3(v):
    """Format the first three vector components as a compact '%g,%g,%g' string."""
    return ",".join("%g" % component for component in (v[0], v[1], v[2]))
def generate_uv(uv):
    """Format one UV pair as a compact 'u,v' string."""
    return "%g" % uv[0] + "," + "%g" % uv[1]
def generate_uvs(uv_layers):
    """Render layered UV values as comma-separated bracketed layer arrays.

    Each layer becomes "[u0,v0,u1,v1,...]"; layers are joined with commas.
    """
    rendered_layers = []
    for layer in uv_layers:
        rendered_layers.append("[%s]" % ",".join(generate_uv(uv) for uv in layer))
    return ",".join(rendered_layers)
def extract_mesh_bounding_box(mesh):
    """Compute the axis-aligned bounding box of a mesh's control points.

    Returns ([minx, miny, minz], [maxx, maxy, maxz]). An empty mesh yields
    ([0, 0, 0], [0, 0, 0]), matching the previous behaviour.
    """
    control_points_count = mesh.GetControlPointsCount()
    control_points = mesh.GetControlPoints()
    if control_points_count == 0:
        return [0, 0, 0], [0, 0, 0]
    # Fix: seed the extrema from the first vertex instead of 0. Seeding with 0
    # silently clamped the box to the origin for meshes lying entirely in
    # positive (or entirely in negative) space.
    first = control_points[0]
    minx = maxx = first[0]
    miny = maxy = first[1]
    minz = maxz = first[2]
    for i in range(1, control_points_count):
        vertex = control_points[i]
        minx = min(minx, vertex[0])
        miny = min(miny, vertex[1])
        minz = min(minz, vertex[2])
        maxx = max(maxx, vertex[0])
        maxy = max(maxy, vertex[1])
        maxz = max(maxz, vertex[2])
    return [minx, miny, minz], [maxx, maxy, maxz]
def extract_vertex_positions(mesh):
    """Return every control point of the mesh as an [x, y, z] list."""
    count = mesh.GetControlPointsCount()
    points = mesh.GetControlPoints()
    return [extract_vec3(points[i]) for i in range(count)]
def extract_vertex_normals(mesh):
    """Extract per-layer vertex normals from an FBX mesh.

    Returns (layered_normal_values, layered_normal_indices):
      - layered_normal_values[l]: list of [x, y, z] normals for layer l
      - layered_normal_indices[l]: per polygon, the list of indices into that
        layer's values (one index per polygon vertex)
    Layers without normals are skipped entirely.
    """
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.
    layered_normal_indices = []
    layered_normal_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()
    for l in range(mesh.GetLayerCount()):
        mesh_normals = mesh.GetLayer(l).GetNormals()
        if not mesh_normals:
            continue
        normals_array = mesh_normals.GetDirectArray()
        normals_count = normals_array.GetCount()
        if normals_count == 0:
            continue
        normal_indices = []
        normal_values = []
        # values
        for i in range(normals_count):
            normal = extract_vec3(normals_array.GetAt(i))
            normal_values.append(normal)
        # indices — translate the (mapping mode, reference mode) pair into an
        # index into normal_values for every polygon vertex.
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_normals = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(control_point_index)
                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(control_point_index)
                        poly_normals.append(index)
                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(vertexId)
                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(vertexId)
                        poly_normals.append(index)
                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
                    # Unsupported modes are reported but produce no index.
                    print("unsupported normal mapping mode for polygon vertex")
                vertexId += 1
            normal_indices.append(poly_normals)
        layered_normal_values.append(normal_values)
        layered_normal_indices.append(normal_indices)
    return layered_normal_values, layered_normal_indices
def extract_vertex_colors(mesh):
    """Extract per-layer vertex colors from an FBX mesh.

    Returns (layered_color_values, layered_color_indices):
      - layered_color_values[l]: list of [r, g, b] colors for layer l
      - layered_color_indices[l]: per polygon, the list of indices into that
        layer's values (one index per polygon vertex)
    Layers without vertex colors are skipped entirely.
    """
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.
    layered_color_indices = []
    layered_color_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()
    for l in range(mesh.GetLayerCount()):
        mesh_colors = mesh.GetLayer(l).GetVertexColors()
        if not mesh_colors:
            continue
        colors_array = mesh_colors.GetDirectArray()
        colors_count = colors_array.GetCount()
        if colors_count == 0:
            continue
        color_indices = []
        color_values = []
        # values
        for i in range(colors_count):
            color = extract_color(colors_array.GetAt(i))
            color_values.append(color)
        # indices — translate the (mapping mode, reference mode) pair into an
        # index into color_values for every polygon vertex.
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_colors = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(control_point_index)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(control_point_index)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(vertexId)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(vertexId)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
                    # Unsupported modes are reported but produce no index.
                    print("unsupported color mapping mode for polygon vertex")
                vertexId += 1
            color_indices.append(poly_colors)
        layered_color_values.append(color_values)
        layered_color_indices.append(color_indices)
    return layered_color_values, layered_color_indices
def extract_vertex_uvs(mesh):
    """Extract per-layer UV coordinates from an FBX mesh.

    Returns (layered_uv_values, layered_uv_indices):
      - layered_uv_values[l]: list of [u, v] pairs for layer l
      - layered_uv_indices[l]: per polygon, the list of indices into that
        layer's values (one index per polygon vertex)
    Layers without UVs are skipped entirely.
    """
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.
    layered_uv_indices = []
    layered_uv_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()
    for l in range(mesh.GetLayerCount()):
        mesh_uvs = mesh.GetLayer(l).GetUVs()
        if not mesh_uvs:
            continue
        uvs_array = mesh_uvs.GetDirectArray()
        uvs_count = uvs_array.GetCount()
        if uvs_count == 0:
            continue
        uv_indices = []
        uv_values = []
        # values
        for i in range(uvs_count):
            uv = extract_vec2(uvs_array.GetAt(i))
            uv_values.append(uv)
        # indices — for eByPolygonVertex, FBX exposes GetTextureUVIndex()
        # directly, so vertexId is only advanced for bookkeeping here.
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_uvs = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_uvs.append(control_point_index)
                    elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
                        poly_uvs.append(index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    uv_texture_index = mesh.GetTextureUVIndex(p, v)
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
                       mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        poly_uvs.append(uv_texture_index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
                     mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
                     mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
                    # Unsupported modes are reported but produce no index.
                    print("unsupported uv mapping mode for polygon vertex")
                vertexId += 1
            uv_indices.append(poly_uvs)
        layered_uv_values.append(uv_values)
        layered_uv_indices.append(uv_indices)
    return layered_uv_values, layered_uv_indices
def generate_mesh_face(mesh, vertex_indices, polygon_index, normals, colors, uv_layers, material_count, material_is_same):
    """Encode one polygon in the Three.js JSON "faces" array format.

    The face starts with a bitmask (quad flag, material, uvs, normals, colors)
    followed by the corresponding index data in the fixed order the loader
    expects. Only triangles and quads are supported; larger polygons are
    clamped to their first four vertices. Returns a comma-joined string.
    """
    isTriangle = ( len(vertex_indices) == 3 )
    nVertices = 3 if isTriangle else 4
    hasMaterial = material_count > 0
    hasFaceUvs = False
    hasFaceVertexUvs = len(uv_layers) > 0
    hasFaceNormals = False # don't export any face normals (as they are computed in engine)
    hasFaceVertexNormals = len(normals) > 0
    hasFaceColors = False
    hasFaceVertexColors = len(colors) > 0
    # Pack the feature flags into the leading face-type bitmask.
    faceType = 0
    faceType = setBit(faceType, 0, not isTriangle)
    faceType = setBit(faceType, 1, hasMaterial)
    faceType = setBit(faceType, 2, hasFaceUvs)
    faceType = setBit(faceType, 3, hasFaceVertexUvs)
    faceType = setBit(faceType, 4, hasFaceNormals)
    faceType = setBit(faceType, 5, hasFaceVertexNormals)
    faceType = setBit(faceType, 6, hasFaceColors)
    faceType = setBit(faceType, 7, hasFaceVertexColors)
    faceData = []
    # order is important, must match order in JSONLoader
    # face type
    # vertex indices
    # material index
    # face uvs index
    # face vertex uvs indices
    # face color index
    # face vertex colors indices
    faceData.append(faceType)
    # must clamp in case on polygons bigger than quads
    for i in range(nVertices):
        index = vertex_indices[i]
        faceData.append(index)
    if hasMaterial:
        # When materials vary per polygon, look the index up in the first
        # layer that carries materials; otherwise every face uses material 0.
        material_id = 0
        if not material_is_same:
            for l in range(mesh.GetLayerCount()):
                materials = mesh.GetLayer(l).GetMaterials()
                if materials:
                    material_id = materials.GetIndexArray().GetAt(polygon_index)
                    break
        faceData.append( material_id )
    if hasFaceVertexUvs:
        for layer_index, uvs in enumerate(uv_layers):
            polygon_uvs = uvs[polygon_index]
            for i in range(nVertices):
                index = polygon_uvs[i]
                faceData.append(index)
    if hasFaceVertexNormals:
        polygon_normals = normals[polygon_index]
        for i in range(nVertices):
            index = polygon_normals[i]
            faceData.append(index)
    if hasFaceVertexColors:
        polygon_colors = colors[polygon_index]
        for i in range(nVertices):
            index = polygon_colors[i]
            faceData.append(index)
    return ",".join( map(str, faceData) )
def generate_mesh_faces(mesh, normals, colors, uv_layers):
    """Serialize every polygon of `mesh` into Three.js face strings.

    normals / colors / uv_layers are the per-polygon index lists produced by
    the extract_vertex_* helpers. Returns a list of comma-joined face strings.
    """
    # When every polygon maps to the same material, all faces can use index 0;
    # eByPolygon mapping means each face must carry its own material index.
    has_same_material_for_all_polygons = True
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            if materials.GetMappingMode() == FbxLayerElement.eByPolygon:
                has_same_material_for_all_polygons = False
                break
    # Fix: default material_count to 0 so it is always bound — previously a
    # mesh without an owning node raised NameError at the call below.
    material_count = 0
    node = mesh.GetNode()
    if node:
        material_count = node.GetMaterialCount()
    poly_count = mesh.GetPolygonCount()
    faces = []
    for p in range(poly_count):
        poly_size = mesh.GetPolygonSize(p)
        vertex_indices = []
        for v in range(poly_size):
            vertex_indices.append(mesh.GetPolygonVertex(p, v))
        face = generate_mesh_face(mesh, vertex_indices, p, normals, colors, uv_layers, material_count, has_same_material_for_all_polygons)
        faces.append(face)
    return faces
def generate_mesh_string(node):
    """Serialize a mesh node's geometry payload (the "embeds" entry).

    Extracts positions, normals, colors and UVs, flattens them to
    comma-separated value strings and wraps them in the Three.js JSON
    geometry format. Only the first normal and color layers are kept.
    """
    mesh = node.GetNodeAttribute()
    vertices = extract_vertex_positions(mesh)
    aabb_min, aabb_max = extract_mesh_bounding_box(mesh)
    normal_values, normal_indices = extract_vertex_normals(mesh)
    color_values, color_indices = extract_vertex_colors(mesh)
    uv_values, uv_indices = extract_vertex_uvs(mesh)
    # Three.js only supports one layer of normals
    if len(normal_values) > 0:
        normal_values = normal_values[0]
        normal_indices = normal_indices[0]
    # Three.js only supports one layer of colors
    if len(color_values) > 0:
        color_values = color_values[0]
        color_indices = color_indices[0]
    faces = generate_mesh_faces(mesh, normal_indices, color_indices, uv_indices)
    nuvs = []
    for layer_index, uvs in enumerate(uv_values):
        nuvs.append(str(len(uvs)))
    nvertices = len(vertices)
    nnormals = len(normal_values)
    ncolors = len(color_values)
    nfaces = len(faces)
    # Flatten everything into the comma-separated strings the template expects.
    nuvs = ",".join(nuvs)
    vertices = ",".join(join_vec3(v) for v in vertices)
    normals = ",".join(join_vec3(v) for v in normal_values)
    colors = ",".join(join_vec3(v) for v in color_values)
    faces = ",".join(faces)
    uvs = generate_uvs(uv_values)
    aabb_min = ",".join(str(f) for f in aabb_min)
    aabb_max = ",".join(str(f) for f in aabb_max)
    output = [
        '\t' + LabelString( getEmbedName( node ) ) + ' : {',
        '	"metadata" : {',
        '		"vertices" : ' + str(nvertices) + ',',
        '		"normals" : ' + str(nnormals) + ',',
        '		"colors" : ' + str(ncolors) + ',',
        '		"faces" : ' + str(nfaces) + ',',
        '		"uvs" : ' + ArrayString(nuvs),
        '	},',
        '	"boundingBox" : {',
        '		"min" : ' + ArrayString(aabb_min) + ',',
        '		"max" : ' + ArrayString(aabb_max),
        '	},',
        '	"scale" : ' + str( 1 ) + ',',
        '	"materials" : ' + ArrayString("") + ',',
        '	"vertices" : ' + ArrayString(vertices) + ',',
        '	"normals" : ' + ArrayString(normals) + ',',
        '	"colors" : ' + ArrayString(colors) + ',',
        '	"uvs" : ' + ArrayString(uvs) + ',',
        '	"faces" : ' + ArrayString(faces),
        '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Generate - Embeds
# #####################################################
def generate_embed_list_from_hierarchy(node, embed_list):
    """Depth-first walk: serialize geometry payloads for mesh-like nodes.

    Nurbs, nurbs surfaces and patches are triangulated in place (via the
    module-level `converter`) before serialization.
    """
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        attribute_type = attribute.GetAttributeType()
        if attribute_type == FbxNodeAttribute.eMesh or \
           attribute_type == FbxNodeAttribute.eNurbs or \
           attribute_type == FbxNodeAttribute.eNurbsSurface or \
           attribute_type == FbxNodeAttribute.ePatch:
            if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
            embed_list.append(generate_mesh_string(node))
    for child_index in range(node.GetChildCount()):
        generate_embed_list_from_hierarchy(node.GetChild(child_index), embed_list)
def generate_embed_list(scene):
    """Return the serialized geometry payloads for every mesh-like node."""
    embed_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_embed_list_from_hierarchy(root.GetChild(child_index), embed_list)
    return embed_list
# #####################################################
# Generate - Geometries
# #####################################################
def generate_geometry_string(node):
    """Serialize a geometry entry that references the node's embedded payload."""
    output = [
        '\t' + LabelString( getGeometryName( node ) ) + ' : {',
        '	"type" : "embedded",',
        '	"id" : ' + LabelString( getEmbedName( node ) ),
        '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
def generate_geometry_list_from_hierarchy(node, geometry_list):
    """Depth-first walk: collect geometry reference strings for mesh nodes."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
        geometry_list.append(generate_geometry_string(node))
    for child_index in range(node.GetChildCount()):
        generate_geometry_list_from_hierarchy(node.GetChild(child_index), geometry_list)
def generate_geometry_list(scene):
    """Return the geometry reference strings for every mesh in the scene."""
    geometry_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_geometry_list_from_hierarchy(root.GetChild(child_index), geometry_list)
    return geometry_list
# #####################################################
# Generate - Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
    """Depth-first walk: collect the object name of every camera node."""
    attribute = node.GetNodeAttribute()
    if attribute is not None and attribute.GetAttributeType() == FbxNodeAttribute.eCamera:
        camera_list.append(getObjectName(node))
    for child_index in range(node.GetChildCount()):
        generate_camera_name_list_from_hierarchy(node.GetChild(child_index), camera_list)
def generate_camera_name_list(scene):
    """Return the object names of every camera node in the scene."""
    camera_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_camera_name_list_from_hierarchy(root.GetChild(child_index), camera_list)
    return camera_list
# #####################################################
# Generate - Light Object
# #####################################################
def generate_default_light_string(padding):
    """Serialize the fallback DirectionalLight added when the default-light
    option is enabled (see generate_scene_objects_string)."""
    direction = (1,1,1)
    color = (1,1,1)
    # FBX intensities are percentages; Three.js expects a 0..1 factor.
    intensity = 80.0
    output = [
        '\t\t' + LabelString( 'default_light' ) + ' : {',
        '	"type" : "DirectionalLight",',
        '	"color" : ' + str(getHex(color)) + ',',
        '	"intensity" : ' + str(intensity/100.0) + ',',
        '	"direction" : ' + Vector3String( direction ) + ',',
        '	"target" : ' + LabelString( getObjectName( None ) ),
        '	}'
    ]
    return generateMultiLineString( output, '\n\t\t', padding )
def generate_light_string(node, padding):
    """Serialize an FBX light node into a Three.js light JSON fragment.

    Supports directional, point and spot lights; "area" and "volume" lights
    fall through with an empty output list. The trailing comma is emitted only
    when the node has children (a "children" block follows).
    """
    light = node.GetNodeAttribute()
    # Index order matches the FbxLight.EType enumeration.
    light_types = ["point", "directional", "spot", "area", "volume"]
    light_type = light_types[light.LightType.Get()]
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    output = []
    if light_type == "directional":
        # NOTE(review): the node's local translation is emitted as the light
        # "direction" — confirm that is the intended convention downstream.
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            '	"type" : "DirectionalLight",',
            '	"color" : ' + str(getHex(light.Color.Get())) + ',',
            '	"intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            '	"direction" : ' + Vector3String( position ) + ',',
            '	"target" : ' + LabelString( getObjectName( node.GetTarget() ) ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    elif light_type == "point":
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            '	"type" : "PointLight",',
            '	"color" : ' + str(getHex(light.Color.Get())) + ',',
            '	"intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            '	"position" : ' + Vector3String( position ) + ',',
            '	"distance" : ' + str(light.FarAttenuationEnd.Get()) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    elif light_type == "spot":
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            '	"type" : "SpotLight",',
            '	"color" : ' + str(getHex(light.Color.Get())) + ',',
            '	"intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            '	"position" : ' + Vector3String( position ) + ',',
            '	"distance" : ' + str(light.FarAttenuationEnd.Get()) + ',',
            '	"angle" : ' + str(light.OuterAngle.Get()) + ',',
            '	"exponent" : ' + str(light.DecayType.Get()) + ',',
            '	"target" : ' + LabelString( getObjectName( node.GetTarget() ) ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
def generate_ambient_light_string(scene):
    """Serialize the scene's global ambient color as an AmbientLight entry.

    Returns None when the ambient color is pure black (nothing to emit);
    callers must check for None before appending.
    """
    scene_settings = scene.GetGlobalSettings()
    ambient_color = scene_settings.GetAmbientColor()
    ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
    if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
        return None
    # Minimal stand-in node so getObjectName() produces a stable label.
    class AmbientLight:
        def GetName(self):
            return "AmbientLight"
    node = AmbientLight()
    output = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        '	"type" : "AmbientLight",',
        '	"color" : ' + str(getHex(ambient_color)),
        '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Generate - Camera Object
# #####################################################
def generate_default_camera_string(padding):
    """Serialize the fallback PerspectiveCamera added when the default-camera
    option is enabled (see generate_scene_objects_string)."""
    position = (100, 100, 100)
    near = 0.1
    far = 1000
    fov = 75
    output = [
        '\t\t' + LabelString( 'default_camera' ) + ' : {',
        '	"type" : "PerspectiveCamera",',
        '	"fov" : ' + str(fov) + ',',
        '	"near" : ' + str(near) + ',',
        '	"far" : ' + str(far) + ',',
        '	"position" : ' + Vector3String( position ),
        '	}'
    ]
    return generateMultiLineString( output, '\n\t\t', padding )
def generate_camera_string(node, padding):
    """Serialize an FBX camera node into a Three.js camera JSON fragment.

    Perspective cameras become PerspectiveCamera entries and orthogonal
    cameras become OrthographicCamera entries. The trailing comma is emitted
    only when the node has children.
    """
    camera = node.GetNodeAttribute()
    # Resolve the look-at target: the target node's local translation when one
    # exists, otherwise the camera's own interest position.
    # Fix: this previously called EvaluateLocalTransform() on the empty-string
    # placeholder instead of target_node, raising AttributeError for every
    # camera that has a target node.
    target_node = node.GetTarget()
    if target_node:
        transform = target_node.EvaluateLocalTransform()
        target = transform.GetT()
    else:
        target = camera.InterestPosition.Get()
    # NOTE(review): `target` is currently unused by the emitted JSON.
    position = camera.Position.Get()
    projection_types = [ "perspective", "orthogonal" ]
    projection = projection_types[camera.ProjectionType.Get()]
    near = camera.NearPlane.Get()
    far = camera.FarPlane.Get()
    output = []
    if projection == "perspective":
        aspect = camera.PixelAspectRatio.Get()
        # NOTE(review): fov and far are hard-coded, overriding the FBX values
        # (pre-existing behaviour, kept to avoid changing output) — confirm.
        fov = 75
        far = 1000
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            '	"type" : "PerspectiveCamera",',
            '	"fov" : ' + str(fov) + ',',
            '	"aspect" : ' + str(aspect) + ',',
            '	"near" : ' + str(near) + ',',
            '	"far" : ' + str(far) + ',',
            '	"position" : ' + Vector3String( position ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    elif projection == "orthogonal":
        # NOTE(review): ortho frustum extents are not extracted from the FBX
        # camera; empty strings currently yield invalid JSON for these keys.
        left = ""
        right = ""
        top = ""
        bottom = ""
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            '	"type" : "OrthographicCamera",',
            '	"left" : ' + left + ',',
            '	"right" : ' + right + ',',
            '	"top" : ' + top + ',',
            '	"bottom" : ' + bottom + ',',
            '	"near" : ' + str(near) + ',',
            '	"far" : ' + str(far) + ',',
            '	"position" : ' + Vector3String( position ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Generate - Mesh Object
# #####################################################
def generate_mesh_object_string(node, padding):
    """Serialize a mesh node's scene-graph entry (geometry + material refs,
    local transform and visibility)."""
    mesh = node.GetNodeAttribute()
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    scale = transform.GetS()
    rotation = getRadians(transform.GetR())
    material_count = node.GetMaterialCount()
    material_name = ""
    if material_count > 0:
        material_names = []
        for l in range(mesh.GetLayerCount()):
            materials = mesh.GetLayer(l).GetMaterials()
            if materials:
                if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                    #Materials are in an undefined external table
                    continue
                for i in range(material_count):
                    material = node.GetMaterial(i)
                    material_names.append( getMaterialName(material) )
        #If this mesh has more than one material, use a proxy material
        # NOTE(review): if every layer uses eIndex mapping, material_names is
        # empty and material_names[0] raises IndexError — confirm whether
        # such files occur in practice.
        material_name = getMaterialName( node ) if material_count > 1 else material_names[0]
    output = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        '	"geometry" : ' + LabelString( getGeometryName( node ) ) + ',',
        '	"material" : ' + LabelString( material_name ) + ',',
        '	"position" : ' + Vector3String( position ) + ',',
        '	"rotation" : ' + Vector3String( rotation ) + ',',
        '	"scale" : ' + Vector3String( scale ) + ',',
        '	"visible" : ' + getObjectVisible( node ) + ( ',' if node.GetChildCount() > 0 else '' )
    ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Generate - Object
# #####################################################
def generate_object_string(node, padding):
    """Serialize a generic (non-mesh, non-light, non-camera) node entry,
    keeping its FBX attribute type, local transform and visibility."""
    # Index order matches the FbxNodeAttribute.EType enumeration.
    node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
                  "CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
                  "TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    scale = transform.GetS()
    rotation = getRadians(transform.GetR())
    node_type = ""
    if node.GetNodeAttribute() == None:
        node_type = "Null"
    else:
        node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
    output = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        '	"fbx_type" : ' + LabelString( node_type ) + ',',
        '	"position" : ' + Vector3String( position ) + ',',
        '	"rotation" : ' + Vector3String( rotation ) + ',',
        '	"scale" : ' + Vector3String( scale ) + ',',
        '	"visible" : ' + getObjectVisible( node ) + ( ',' if node.GetChildCount() > 0 else '' )
    ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Parse - Objects
# #####################################################
def generate_object_hierarchy(node, object_list, pad, siblings_left):
    """Recursively serialize a scene-graph node and its children.

    Appends formatted fragments to object_list (mutated in place), nesting
    children inside a "children" block. `pad` controls indentation depth and
    `siblings_left` decides whether a trailing comma follows the closing
    brace. Returns the number of objects serialized in this subtree.
    """
    object_count = 0
    if node.GetNodeAttribute() == None:
        object_string = generate_object_string(node, pad)
        object_list.append(object_string)
        object_count += 1
    else:
        # Dispatch on the attribute type; unknown types fall back to the
        # generic object serializer.
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            object_string = generate_mesh_object_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        elif attribute_type == FbxNodeAttribute.eLight:
            object_string = generate_light_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        elif attribute_type == FbxNodeAttribute.eCamera:
            object_string = generate_camera_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        else:
            object_string = generate_object_string(node, pad)
            object_list.append(object_string)
            object_count += 1
    if node.GetChildCount() > 0:
        object_list.append( PaddingString( pad + 1 ) + '\t\t"children" : {\n' )
        for i in range(node.GetChildCount()):
            object_count += generate_object_hierarchy(node.GetChild(i), object_list, pad + 2, node.GetChildCount() - i - 1)
        object_list.append( PaddingString( pad + 1 ) + '\t\t}' )
    object_list.append( PaddingString( pad ) + '\t\t}' + (',\n' if siblings_left > 0 else ''))
    return object_count
def generate_scene_objects_string(scene):
    """Build the JSON fragment for every object in *scene*.

    Returns a ``(text, count)`` tuple: *text* is the newline-joined list
    of serialized objects (ambient/default light, default camera, then
    the node hierarchy), *count* is how many objects were emitted.
    """
    entries = []
    total = 0
    has_nodes = scene.GetNodeCount() > 0

    ambient = generate_ambient_light_string(scene)
    if ambient:
        # Trailing comma is needed whenever more objects follow.
        if has_nodes or option_default_light or option_default_camera:
            ambient += (',\n')
        entries.append(ambient)
        total += 1

    if option_default_light:
        light = generate_default_light_string(0)
        if has_nodes or option_default_camera:
            light += (',\n')
        entries.append(light)
        total += 1

    if option_default_camera:
        camera = generate_default_camera_string(0)
        if has_nodes:
            camera += (',\n')
        entries.append(camera)
        total += 1

    root = scene.GetRootNode()
    if root:
        child_count = root.GetChildCount()
        for index in range(child_count):
            total += generate_object_hierarchy(root.GetChild(index), entries, 0, child_count - index - 1)

    return "\n".join(entries), total
# #####################################################
# Parse - Scene
# #####################################################
def extract_scene(scene, filename):
    """Serialize a loaded FBX *scene* into a Three.js scene-format (3.2) JSON string.

    Parameters
    ----------
    scene : FbxScene
        The scene loaded by the FBX SDK.
    filename : str
        Name of the source file (currently unused inside the body).

    Returns
    -------
    str
        The complete JSON document text.
    """
    objects, nobjects = generate_scene_objects_string(scene)
    textures = generate_texture_list(scene)
    materials = generate_material_list(scene)
    geometries = generate_geometry_list(scene)
    embeds = generate_embed_list(scene)
    # Fog extraction is not implemented yet (see TODO below).
    fogs = []
    ntextures = len(textures)
    nmaterials = len(materials)
    ngeometries = len(geometries)
    # The scene-level transform is the identity.
    position = Vector3String( (0,0,0) )
    rotation = Vector3String( (0,0,0) )
    scale = Vector3String( (1,1,1) )
    camera_names = generate_camera_name_list(scene)
    scene_settings = scene.GetGlobalSettings()
    # Hard-coded viewer defaults: mid-grey opaque background.
    bgcolor = Vector3String( (0.667,0.667,0.667) )
    bgalpha = 1
    # Default camera: first named camera in the scene, unless the
    # --default-camera option forces the synthetic 'default_camera'.
    defcamera = LabelString(camera_names[0] if len(camera_names) > 0 else "")
    if option_default_camera:
        defcamera = LabelString('default_camera')
    #TODO: extract fog info from scene
    deffog = LabelString("")
    # Flatten each section's entries into comma-separated multi-line text.
    geometries = generateMultiLineString( geometries, ",\n\n\t", 0 )
    materials = generateMultiLineString( materials, ",\n\n\t", 0 )
    textures = generateMultiLineString( textures, ",\n\n\t", 0 )
    embeds = generateMultiLineString( embeds, ",\n\n\t", 0 )
    fogs = generateMultiLineString( fogs, ",\n\n\t", 0 )
    # Assemble the final JSON document line by line.
    output = [
        '{',
        ' "metadata": {',
        ' "formatVersion" : 3.2,',
        ' "type" : "scene",',
        ' "generatedBy" : "convert-to-threejs.py",',
        ' "objects" : ' + str(nobjects) + ',',
        ' "geometries" : ' + str(ngeometries) + ',',
        ' "materials" : ' + str(nmaterials) + ',',
        ' "textures" : ' + str(ntextures),
        ' },',
        '',
        ' "urlBaseType": "relativeToScene",',
        '',
        ' "objects" :',
        ' {',
        objects,
        ' },',
        '',
        ' "geometries" :',
        ' {',
        '\t' + geometries,
        ' },',
        '',
        ' "materials" :',
        ' {',
        '\t' + materials,
        ' },',
        '',
        ' "textures" :',
        ' {',
        '\t' + textures,
        ' },',
        '',
        ' "embeds" :',
        ' {',
        '\t' + embeds,
        ' },',
        '',
        ' "fogs" :',
        ' {',
        '\t' + fogs,
        ' },',
        '',
        ' "transform" :',
        ' {',
        ' "position" : ' + position + ',',
        ' "rotation" : ' + rotation + ',',
        ' "scale" : ' + scale,
        ' },',
        '',
        ' "defaults" :',
        ' {',
        ' "bgcolor" : ' + str(bgcolor) + ',',
        ' "bgalpha" : ' + str(bgalpha) + ',',
        ' "camera" : ' + defcamera + ',',
        ' "fog" : ' + deffog,
        ' }',
        '}'
    ]
    return "\n".join(output)
# #####################################################
# file helpers
# #####################################################
def write_file(fname, content):
    """Write *content* to *fname* as UTF-8 text, replacing any existing file.

    Uses a context manager so the handle is closed even if the write
    raises (the original open/write/close sequence leaked the handle on
    error), and pins the encoding instead of relying on the locale default.
    """
    with open(fname, "w", encoding="utf-8") as out:
        out.write(content)
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
    # Command-line entry point: parse options, load the FBX file, and write
    # the converted Three.js scene JSON.
    from optparse import OptionParser
    try:
        from FbxCommon import *
    except ImportError:
        # Without the FBX SDK nothing works; tell the user where to put it.
        import platform
        msg = 'Could not locate the python FBX SDK!\n'
        msg += 'You need to copy the FBX SDK into your python install folder such as '
        if platform.system() == 'Windows' or platform.system() == 'Microsoft':
            msg += '"Python26/Lib/site-packages"'
        elif platform.system() == 'Linux':
            msg += '"/usr/local/lib/python2.6/site-packages"'
        elif platform.system() == 'Darwin':
            msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
        msg += ' folder.'
        print(msg)
        sys.exit(1)
    usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
    parser.add_option('-x', '--no-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
    parser.add_option('-p', '--no-prefix', action='store_true', dest='noprefix', help="don't prefix object names in output file", default=False)
    parser.add_option('-g', '--geometry-only', action='store_true', dest='geometry', help="output geometry only", default=False)
    parser.add_option('-c', '--default-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
    # NOTE(review): '--defualt-light' is a typo for '--default-light', but it
    # is the public CLI flag name, so renaming it would break existing scripts.
    parser.add_option('-l', '--defualt-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
    (options, args) = parser.parse_args()
    # Copy the parsed flags into the module-level option globals consumed by
    # the generator functions above.
    option_triangulate = options.triangulate
    option_textures = True if not options.notextures else False
    option_prefix = True if not options.noprefix else False
    option_geometry = options.geometry
    option_default_camera = options.defcamera
    option_default_light = options.deflight
    # Prepare the FBX SDK.
    sdk_manager, scene = InitializeSdkObjects()
    converter = FbxGeometryConverter(sdk_manager)
    # The converter takes an FBX file as an argument.
    if len(args) > 1:
        print("\nLoading file: %s" % args[0])
        result = LoadScene(sdk_manager, scene, args[0])
    else:
        result = False
        print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
    if not result:
        print("\nAn error occurred while loading the file...")
    else:
        if option_triangulate:
            print("\nForcing geometry to triangles")
            triangulate_scene(scene)
        # Convert and write next to the current working directory.
        output_content = extract_scene(scene, os.path.basename(args[0]))
        output_path = os.path.join(os.getcwd(), args[1])
        write_file(output_path, output_content)
        print("\nExported Three.js file to:\n%s\n" % output_path)
    # SaveScene(sdk_manager, scene, args[2], 8)
    # Destroy all objects created by the FBX SDK.
    sdk_manager.Destroy()
    sys.exit(0)
|
de
| 0.339533
|
# @author zfedoran / http://github.com/zfedoran # ##################################################### # Globals # ##################################################### # ##################################################### # Templates # ##################################################### # ##################################################### # Helpers # ##################################################### # ##################################################### # Generate - Triangles # ##################################################### # ##################################################### # Generate - Material String # ##################################################### #Here we have to check if it's layeredtextures, or just textures: # no layered texture simply get on the property #Get the implementation to see if it's a hardware shader. # This material is a hardware shader, skip it # ##################################################### # Parse - Materials # ##################################################### #Materials are in an undefined external table # ##################################################### # Generate - Texture String # ##################################################### # ##################################################### # Parse - Textures # ##################################################### #Here we have to check if it's layeredtextures, or just textures: # no layered texture simply get on the property #for all materials attached to this mesh #go through all the possible textures types # ##################################################### # Generate - Mesh String # ##################################################### # eNone The mapping is undetermined. # eByControlPoint There will be one mapping coordinate for each surface control point/vertex. # eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. 
This means that a vertex will have as many mapping coordinates as polygons of which it is a part. # eByPolygon There can be only one mapping coordinate for the whole polygon. # eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements. # eAllSame There can be only one mapping coordinate for the whole surface. # values # indices # eNone The mapping is undetermined. # eByControlPoint There will be one mapping coordinate for each surface control point/vertex. # eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part. # eByPolygon There can be only one mapping coordinate for the whole polygon. # eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements. # eAllSame There can be only one mapping coordinate for the whole surface. # values # indices # eNone The mapping is undetermined. # eByControlPoint There will be one mapping coordinate for each surface control point/vertex. # eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part. # eByPolygon There can be only one mapping coordinate for the whole polygon. # eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements. # eAllSame There can be only one mapping coordinate for the whole surface. 
# values # indices # don't export any face normals (as they are computed in engine) # order is important, must match order in JSONLoader # face type # vertex indices # material index # face uvs index # face vertex uvs indices # face color index # face vertex colors indices # must clamp in case on polygons bigger than quads # Three.js only supports one layer of normals # Three.js only supports one layer of colors # ##################################################### # Generate - Embeds # ##################################################### # ##################################################### # Generate - Geometries # ##################################################### # ##################################################### # Generate - Camera Names # ##################################################### # ##################################################### # Generate - Light Object # ##################################################### # ##################################################### # Generate - Camera Object # ##################################################### # ##################################################### # Generate - Mesh Object # ##################################################### #Materials are in an undefined external table #If this mesh has more than one material, use a proxy material # ##################################################### # Generate - Object # ##################################################### # ##################################################### # Parse - Objects # ##################################################### # ##################################################### # Parse - Scene # ##################################################### #TODO: extract fog info from scene # ##################################################### # file helpers # ##################################################### # ##################################################### # main # 
##################################################### # Prepare the FBX SDK. # The converter takes an FBX file as an argument. # SaveScene(sdk_manager, scene, args[2], 8) # Destroy all objects created by the FBX SDK.
| 2.500175
| 3
|
fj_scraper.py
|
diegulio/FirstJob-Scraper
| 0
|
6628904
|
<reponame>diegulio/FirstJob-Scraper
# -*- coding: utf-8 -*-
import re
# Selenium webdriver (originally set up for Google Colab)
from selenium import webdriver
chrome_options = webdriver.ChromeOptions()
# Silence DeprecationWarning noise from selenium
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore")
# NOTE(review): hard-coded local chromedriver path; a second driver session is
# created again further down — confirm whether this first one is needed.
wd = webdriver.Chrome(options= chrome_options, executable_path = '/Users/diegulio/Desktop/FJ-Scraper/chromedriver')
def get_company(job):
    """Extract the company name from a job posting element.

    The card's <h5> header reads "Company, <posted-ago text>"; the company
    is everything before the last comma (comma excluded).

    Parameters
    ----------
    job : html element
        The job card.

    Returns
    -------
    str
        Company the posting belongs to.
    """
    header_text = job.find_element_by_tag_name('h5').text
    match = re.search(r'(.*),', header_text)
    return match.group(1)
def get_type(job):
    """Return the posting-type label for a job card.

    Parameters
    ----------
    job : html element
        The job card.

    Returns
    -------
    str
        The text of the card's 'label-fj-type' badge (job vs internship).
    """
    return job.find_element_by_class_name('label-fj-type').text
def get_link(job):
    """Return the posting URL taken from the job card's anchor tag.

    Parameters
    ----------
    job : html element
        The job card.

    Returns
    -------
    str
        The href of the card's first <a> element.
    """
    anchor = job.find_element_by_tag_name('a')
    return anchor.get_attribute('href')
def fill_form(driver, position_element):
    '''
    Fill in the search form with the given keyword and submit it.

    Parameters
    ----------
    driver : selenium webdriver
        Browser driver positioned on the offers page.
    position_element : str
        Keyword to search for.

    Returns
    -------
    driver : selenium webdriver
        The same driver, now showing the filtered results.
    '''
    driver.find_element_by_id("title").send_keys(position_element) # fill the 'search by position name' text box
    driver.find_element_by_id("filter_type_work_1").click() # tick the 'internships' check-box
    driver.find_element_by_tag_name('button').click() # click the submit button
    return driver
def reset_form(driver, url):
    """Navigate the driver back to *url* (the landing page).

    Parameters
    ----------
    driver : selenium webdriver
        Browser driver to reset.
    url : str
        Page to navigate back to.

    Returns
    -------
    driver : selenium webdriver
        The same driver, now on *url*.
    """
    driver.get(url)
    return driver
def get_time(job):
    """Return how long ago a job was posted, as a (number, unit) pair.

    Parses the card's <h5> header, e.g. "..., hace 2 dias" yields
    ("2", "dias") — posted 2 days ago.

    Parameters
    ----------
    job : html element
        The job card.

    Returns
    -------
    tuple of (str, str)
        The number and its time unit (hours, days, months, ...).
    """
    header_text = job.find_element_by_tag_name('h5').text
    number = re.search(r'\s(\d*)\s', header_text).group(1)
    unit = re.search(r'\d\s(\w*)', header_text).group(1)
    return (number, unit)
def get_title(job):
    """Return the job title read from the card's <h4> header.

    Parameters
    ----------
    job : html element
        The job card.

    Returns
    -------
    str
        The posting's title.
    """
    return job.find_element_by_tag_name('h4').text
def get_jobs(driver):
    '''
    Print summary info for every job posting on the current page.

    Job cards are identified by the html class ``col-xs-12``.

    Parameters
    ----------
    driver : selenium webdriver
        Browser driver already positioned on the listings page.

    Returns
    -------
    None.
    '''
    # Jobs shown on the main page
    jobs = driver.find_elements_by_class_name('col-xs-12')
    for job in jobs:
        try:
            head = job.find_element_by_class_name('hgroup')
            print(head.find_element_by_tag_name('h4').text)
            print(head.find_element_by_tag_name('h5').text)
            print(get_company(job))
            print(get_type(job))
            print(get_link(job))
            print(get_time(job))
            print(get_title(job))
            print('-----')
        except:
            # Best-effort: not every col-xs-12 element is a job card, so
            # elements without the expected structure are skipped silently.
            pass
class internship():
    """Simple value object holding the data scraped for one internship.

    *time* is the (number, unit) pair produced by ``get_time``; every other
    field is stored verbatim as scraped.
    """
    def __init__(self, title, company, type_, link, time):
        self.title, self.company = title, company
        self.type_, self.link, self.time = type_, link, time
# Open a browser session and load the First Job offers page.
# NOTE(review): this runs at import time and duplicates the driver created at
# the top of the file — confirm whether both sessions are intended.
wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
fj_url = 'https://firstjob.me/ofertas'
wd.get(fj_url)
def recommendation(interest_areas):
    '''
    Build internship recommendations for every requested interest area.

    Args:
    ------
    * interest_areas: the user's areas of interest. List

    Outputs:
    ---------
    * recommends: dict mapping each area to a list of internship objects. Dict
    * jobs_count: number of recommended internships. Int
    '''
    # Connect to the driver
    wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
    # Point the browser at the First Job offers page
    fj_url = 'https://firstjob.me/ofertas'
    wd.get(fj_url)
    recommends = {} # final recommendations, keyed by area
    recommended_links = [] # links already recommended (used to skip
                           # duplicates across areas)
    jobs_count = 0 # running count of internships found
    for area in interest_areas:
        # Fill in the search form with this area
        form_driver = fill_form(wd, area)
        internships = [] # internships found for this area
        jobs = form_driver.find_elements_by_class_name('col-xs-12') # job cards carry this class
        for job in jobs:
            try: # not every matching element is a job card
                title = get_title(job) # title
                company = get_company(job) # company
                type_ = get_type(job) # job or internship
                link = get_link(job) # link
                num_time,time = get_time(job) # (number, unit) e.g. ('3', 'días')
                # Everything should already be an internship because the
                # form checkbox filtered for them.
                # Keep only recent postings: 'horas' (hours ago) or 'día'
                # (First Job uses the singular 'día' for exactly one day).
                if time in ['horas', 'día']:
                    if link not in recommended_links: # not recommended yet
                        # Build the internship record
                        inship = internship(title, company, type_, link, (num_time,time))
                        # Add it to this area's internships
                        internships.append(inship)
                        # Remember the link globally so duplicates
                        # are skipped
                        recommended_links.append(link)
                        jobs_count += 1
            except: # element was not a job card
                pass # skip it
        # Store this area's internships
        recommends[area] = internships
        # Reset the form (navigate back to the landing page)
        wd = reset_form(wd, fj_url)
    return recommends, jobs_count
# Quick manual smoke test
interest_areas = ['data', 'supply', '']
rec, count = recommendation(interest_areas)
|
# -*- coding: utf-8 -*-
import re
# Selenium desde google colab (Webdriver)
from selenium import webdriver
chrome_options = webdriver.ChromeOptions()
# Ignoro Deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore")
wd = webdriver.Chrome(options= chrome_options, executable_path = '/Users/diegulio/Desktop/FJ-Scraper/chromedriver')
def get_company(job):
'''
Función que extrae la compañia del trabajo
Parameters
----------
job : Elemento html
Trabajo.
Returns
-------
company : str
Compañia a la cual pertenece el trabajo.
'''
info = job.find_element_by_tag_name('h5').text
company = re.search(r'(.*),', info).group(1) # Extraigo todo lo que está antes de la coma excluyendola
return company
def get_type(job):
'''
Función que extrae el tipo de la publicación.
(trabajo o práctica)
Parameters
----------
job : Elemento html
Trabajo.
Returns
-------
type_ : str
tipo de trabajo a la cual pertenece el trabajo.
'''
type_ = job.find_element_by_class_name('label-fj-type').text
return type_
def get_link(job):
'''
Función que extrae el link del trabajo
Parameters
----------
job : Elemento html
Trabajo.
Returns
-------
link : str
link del trabajo
'''
a = job.find_element_by_tag_name('a')
link = a.get_attribute('href')
return link
def fill_form(driver, position_element):
'''
Función que rellena el formulario con la data
Parameters
----------
driver : TYPE
Driver Navegador.
position_element : str
Palabra clave a buscar.
Returns
-------
driver : TYPE
Driver de navegador con el formulario rellenado.
'''
driver.find_element_by_id("title").send_keys(position_element) # relleno el text box 'Buscar por nombre del cargo: {position_element}'
driver.find_element_by_id("filter_type_work_1").click() # Checkeo el Check-box de prácticas
driver.find_element_by_tag_name('button').click() # Clickeo botón submit
return driver
def reset_form(driver, url):
'''
Función que resetea el drive a la página principal
Parameters
----------
driver : TYPE
Driver del navegador.
url : TYPE
Url a la cual se reiniciará.
Returns
-------
driver : TYPE
Nuevo driver seteado en el url.
'''
fj_url = url
driver.get(fj_url)
return driver
def get_time(job):
'''
Función que extrae el el tiempo de la publicación
de trabajo
Parameters
----------
job : Elemento html
Trabajo.
Returns
-------
num : int
numero de el tiempo
time : str
tiempo (hora, dias, meses,etc)
'''
info = job.find_element_by_tag_name('h5').text
# retornare una lista con un numero y un tiempo ej: [2,dias] -> publicada hace 2 dias
num = re.search(r'\s(\d*)\s', info).group(1)
time = re.search(r'\d\s(\w*)', info).group(1)
return (num,time)
def get_title(job):
'''
Función que extrae el titulo del trabajo
Parameters
----------
job : Elemento html
Trabajo.
Returns
-------
title : str
titulo del trabajo
'''
title = job.find_element_by_tag_name('h4').text
return title
def get_jobs(driver):
'''
Función que obtiene todas las publicaciones de trabajo y muestra info.
Esta las identifico ya que se encuentran en elementos html
de clase col-xs-12
Parameters
----------
driver : TYPE
Driver del navegador.
Returns
-------
None.
'''
# Trabajos que aparecen en la página principal
jobs = driver.find_elements_by_class_name('col-xs-12')
for job in jobs:
try:
head = job.find_element_by_class_name('hgroup')
print(head.find_element_by_tag_name('h4').text)
print(head.find_element_by_tag_name('h5').text)
print(get_company(job))
print(get_type(job))
print(get_link(job))
print(get_time(job))
print(get_title(job))
print('-----')
except:
pass
class internship():
# Clase de práctica
def __init__(self, title, company, type_, link, time):
self.title = title
self.company = company
self.type_ = type_
self.link = link
self.time = time
# Realizar conexión
wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
fj_url = 'https://firstjob.me/ofertas'
wd.get(fj_url)
def recommendation(interest_areas):
'''
Función que entrega las recomendaciones de todas las áreas de interés deseadas
Args:
------
* interest_areas: Áreas de interés del usuario. List
Outputs:
---------
* recommends: Diccionario con áreas como keys y una lista de objetos de prácticas
como values. Dict
* jobs_count: Contador de trabajos recomendados. Int
'''
# Conexión al driver
wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
# Se fija Url First Job Ofertas
fj_url = 'https://firstjob.me/ofertas'
wd.get(fj_url)
recommends = {} # Diccionario de recomendaciones finales
recommended_links = [] # Todos los links de las prácticas escogidas (esto se usa
# para descartar duplicados)
jobs_count = 0 # contador de practicas encontradas
for area in interest_areas:
# Se rellena el formulario con el area
form_driver = fill_form(wd, area)
internships = [] # practicas del area
jobs = form_driver.find_elements_by_class_name('col-xs-12') # Los trabajos vienen con esta clase
for job in jobs:
try: # Probamos, porque no todos son trabajos
title = get_title(job) # titulo
company = get_company(job) # empresa
type_ = get_type(job) # trabajo o práctica
link = get_link(job) # link
num_time,time = get_time(job) # [numero, tiempo] e.g [3, días]
# Se supone que todas son prácticas ya que se
# indicó en el formulario
# Se analiza el tiempo, sólo me interesará aquellas publicadas en 'horas'
# o 'día' (en first job 'día' -> 1 día, cuando son más le ponen 'días')
if time in ['horas', 'día']:
if link not in recommended_links: # si aún no se ha recomendado el link
# Se crea objeto practica con sus datos
inship = internship(title, company, type_, link, (num_time,time))
# Se agrega a las practicas del área
internships.append(inship)
# Lo agrego a la lista global de links
# para descartar duplicados
recommended_links.append(link)
jobs_count += 1
except: # Si no es un trabajo
pass # no se hace nada
# Se agregan practicas a las recomendaciones
recommends[area] = internships
# Se resetea el form(driver)
wd = reset_form(wd, fj_url)
return recommends, jobs_count
# Prueba
interest_areas = ['data', 'supply', '']
rec, count = recommendation(interest_areas)
|
es
| 0.927458
|
# -*- coding: utf-8 -*- # Selenium desde google colab (Webdriver) # Ignoro Deprecation warnings Función que extrae la compañia del trabajo Parameters ---------- job : Elemento html Trabajo. Returns ------- company : str Compañia a la cual pertenece el trabajo. # Extraigo todo lo que está antes de la coma excluyendola Función que extrae el tipo de la publicación. (trabajo o práctica) Parameters ---------- job : Elemento html Trabajo. Returns ------- type_ : str tipo de trabajo a la cual pertenece el trabajo. Función que extrae el link del trabajo Parameters ---------- job : Elemento html Trabajo. Returns ------- link : str link del trabajo Función que rellena el formulario con la data Parameters ---------- driver : TYPE Driver Navegador. position_element : str Palabra clave a buscar. Returns ------- driver : TYPE Driver de navegador con el formulario rellenado. # relleno el text box 'Buscar por nombre del cargo: {position_element}' # Checkeo el Check-box de prácticas # Clickeo botón submit Función que resetea el drive a la página principal Parameters ---------- driver : TYPE Driver del navegador. url : TYPE Url a la cual se reiniciará. Returns ------- driver : TYPE Nuevo driver seteado en el url. Función que extrae el el tiempo de la publicación de trabajo Parameters ---------- job : Elemento html Trabajo. Returns ------- num : int numero de el tiempo time : str tiempo (hora, dias, meses,etc) # retornare una lista con un numero y un tiempo ej: [2,dias] -> publicada hace 2 dias Función que extrae el titulo del trabajo Parameters ---------- job : Elemento html Trabajo. Returns ------- title : str titulo del trabajo Función que obtiene todas las publicaciones de trabajo y muestra info. Esta las identifico ya que se encuentran en elementos html de clase col-xs-12 Parameters ---------- driver : TYPE Driver del navegador. Returns ------- None. 
# Trabajos que aparecen en la página principal # Clase de práctica # Realizar conexión Función que entrega las recomendaciones de todas las áreas de interés deseadas Args: ------ * interest_areas: Áreas de interés del usuario. List Outputs: --------- * recommends: Diccionario con áreas como keys y una lista de objetos de prácticas como values. Dict * jobs_count: Contador de trabajos recomendados. Int # Conexión al driver # Se fija Url First Job Ofertas # Diccionario de recomendaciones finales # Todos los links de las prácticas escogidas (esto se usa # para descartar duplicados) # contador de practicas encontradas # Se rellena el formulario con el area # practicas del area # Los trabajos vienen con esta clase # Probamos, porque no todos son trabajos # titulo # empresa # trabajo o práctica # link # [numero, tiempo] e.g [3, días] # Se supone que todas son prácticas ya que se # indicó en el formulario # Se analiza el tiempo, sólo me interesará aquellas publicadas en 'horas' # o 'día' (en first job 'día' -> 1 día, cuando son más le ponen 'días') # si aún no se ha recomendado el link # Se crea objeto practica con sus datos # Se agrega a las practicas del área # Lo agrego a la lista global de links # para descartar duplicados # Si no es un trabajo # no se hace nada # Se agregan practicas a las recomendaciones # Se resetea el form(driver) # Prueba
| 2.704255
| 3
|
chatify/settings/production.py
|
prashg008/chatify
| 1
|
6628905
|
from .base import * # noqa
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
# DJANGO_ALLOWED_HOSTS is a comma-separated host list.  os.getenv returns a
# plain string, and a bare string here would make Django match hosts
# character by character, so it must be split into a real list.
_allowed_hosts = os.getenv("DJANGO_ALLOWED_HOSTS")
ALLOWED_HOSTS = _allowed_hosts.split(",") if _allowed_hosts else ["prashanthg.com"]
# DATABASES
# ------------------------------------------------------------------------------
DATABASES = {
    'default': {
        # For postgres set DATABASE_ENGINE to 'django.db.backends.postgresql'.
        'ENGINE': os.getenv('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
        'NAME': os.getenv('DATABASE_NAME', 'chatify'),
        # Default to '' rather than None: the previous
        # '{0}'.format(os.getenv(...)) wrapper produced the literal string
        # "None" whenever a variable was unset.
        'USER': os.getenv('DATABASE_USER', ''),
        'PASSWORD': os.getenv('DATABASE_PASSWORD', ''),
        'HOST': os.getenv('DATABASE_HOST', ''),
        'PORT': os.getenv('DATABASE_PORT', ''),
    }
} # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
# CONN_MAX_AGE must be an integer number of seconds; env values are strings.
DATABASES["default"]["CONN_MAX_AGE"] = int(os.getenv("CONN_MAX_AGE", 60)) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        # Redis connection string, e.g. redis://host:6379/0.
        "LOCATION": os.getenv("REDIS_URL"),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior: cache errors are swallowed, so a
            # Redis outage degrades performance instead of taking the site down.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
            "IGNORE_EXCEPTIONS": True,
        },
    }
}
# SECURITY
# ------------------------------------------------------------------------------
def env_bool(name, default):
    """Read environment variable *name* as a boolean.

    Environment variables always arrive as strings, so passing
    ``os.getenv(name, True)`` straight into a setting makes the value
    "False" truthy.  Treat the common false-y spellings as False instead;
    return *default* when the variable is unset.
    """
    raw = os.getenv(name)
    if raw is None:
        return default
    return raw.lower() not in ("0", "false", "no", "off", "")


# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env_bool("DJANGO_SECURE_SSL_REDIRECT", True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env_bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env_bool("DJANGO_SECURE_HSTS_PRELOAD", True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env_bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", True)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = os.getenv("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = os.getenv("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = os.getenv("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
# Serve public S3 URLs without per-request query-string auth tokens.
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
# Cache lifetime for S3-served objects: one week, in seconds.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
    "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
# None = inherit the bucket's ACL instead of forcing one per object.
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = os.getenv("DJANGO_AWS_S3_REGION_NAME", None)
# STATIC
# ------------------------
# Static files are served by WhiteNoise with compression and hashed filenames.
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# User-uploaded media is stored on S3 under the media/ prefix.
DEFAULT_FILE_STORAGE = "temp.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Wrap the template loaders in the cached loader for production.
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]
# webpack
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'static/',
        # Stats file produced by the production webpack build.
        'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.production.json'),
    }
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
# NOTE(review): "chatify" looks like a mistaken env-var name — the other
# settings here use a DJANGO_* prefix (e.g. DJANGO_DEFAULT_FROM_EMAIL).
# Confirm before relying on the environment override.
DEFAULT_FROM_EMAIL = os.getenv(
    "chatify", "chatify <<EMAIL>>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = os.getenv("DJANGO_SERVER_EMAIL", DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = os.getenv(
    "DJANGO_EMAIL_SUBJECT_PREFIX", "[chatify]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
# Supplied via the environment so the admin lives at a non-default path.
ADMIN_URL = os.getenv("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/mailgun/
# Outbound mail is delivered through Mailgun via Anymail.
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = {
    "MAILGUN_API_KEY": os.getenv("MAILGUN_API_KEY"),
    "MAILGUN_SENDER_DOMAIN": os.getenv("MAILGUN_DOMAIN"),
    "MAILGUN_API_URL": os.getenv("MAILGUN_API_URL", "https://api.mailgun.net/v3"),
}
|
from .base import *  # noqa
# Production settings: everything is layered on top of base.py via the star
# import above (hence the `noqa F405` markers on names defined there, e.g.
# TEMPLATES, INSTALLED_APPS, BASE_DIR, os).
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
# NOTE(review): when DJANGO_ALLOWED_HOSTS is set, os.getenv returns a single
# string, but Django expects a list of host names — confirm and split on
# commas before relying on the env var.
ALLOWED_HOSTS = os.getenv("DJANGO_ALLOWED_HOSTS", ["prashanthg.com"])
# DATABASES
# ------------------------------------------------------------------------------
# NOTE(review): '{0}'.format(os.getenv(...)) renders an *unset* variable as
# the literal string 'None' rather than an empty value — verify this is
# intended for USER/PASSWORD/HOST/PORT.
DATABASES = {
    'default': {
        'ENGINE': '{0}'.format(os.getenv('DATABASE_ENGINE', 'django.db.backends.sqlite3')),
        # for postgres django.db.backends.postgresql
        'NAME': '{0}'.format(os.getenv('DATABASE_NAME', 'chatify')),
        'USER': '{0}'.format(os.getenv('DATABASE_USER')),
        'PASSWORD': '{0}'.format(os.getenv('DATABASE_PASSWORD')),
        'HOST': '{0}'.format(os.getenv('DATABASE_HOST')),
        'PORT': '{0}'.format(os.getenv('DATABASE_PORT')),
    }
}  # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa F405
# NOTE(review): if CONN_MAX_AGE is set in the environment it arrives as a
# string; Django expects an int (or None) here — confirm parsing.
DATABASES["default"]["CONN_MAX_AGE"] = os.getenv("CONN_MAX_AGE", 60)  # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": os.getenv("REDIS_URL"),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicing memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
            "IGNORE_EXCEPTIONS": True,
        },
    }
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
# NOTE(review): env vars read by os.getenv are strings, and any non-empty
# string (even "False") is truthy. This affects every boolean-from-env
# setting below (SSL redirect, HSTS flags, nosniff) — confirm intended
# parsing before flipping them via the environment.
SECURE_SSL_REDIRECT = os.getenv("DJANGO_SECURE_SSL_REDIRECT", True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = os.getenv(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = os.getenv("DJANGO_SECURE_HSTS_PRELOAD", True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = os.getenv(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"]  # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = os.getenv("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = os.getenv("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = os.getenv("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
# One week, in seconds, used for the S3 Cache-Control header below.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
    "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = os.getenv("DJANGO_AWS_S3_REGION_NAME", None)
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "temp.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Cache compiled templates in production (DEBUG reloading is not needed).
TEMPLATES[-1]["OPTIONS"]["loaders"] = [  # type: ignore[index] # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]
# webpack
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'static/',
        'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.production.json'),
    }
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
# NOTE(review): "chatify" looks like a slip for the env var *name* (the
# cookiecutter template uses DJANGO_DEFAULT_FROM_EMAIL) — confirm.
DEFAULT_FROM_EMAIL = os.getenv(
    "chatify", "chatify <<EMAIL>>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = os.getenv("DJANGO_SERVER_EMAIL", DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = os.getenv(
    "DJANGO_EMAIL_SUBJECT_PREFIX", "[chatify]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = os.getenv("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"]  # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/mailgun/
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = {
    "MAILGUN_API_KEY": os.getenv("MAILGUN_API_KEY"),
    "MAILGUN_SENDER_DOMAIN": os.getenv("MAILGUN_DOMAIN"),
    "MAILGUN_API_URL": os.getenv("MAILGUN_API_URL", "https://api.mailgun.net/v3"),
}
|
en
| 0.49818
|
# noqa # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts # DATABASES # ------------------------------------------------------------------------------ # for postgres django.db.backends.postgresql # noqa F405 # noqa F405 # noqa F405 # CACHES # ------------------------------------------------------------------------------ # Mimicing memcache behavior. # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior # SECURITY # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header # https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure # https://docs.djangoproject.com/en/dev/topics/security/#ssl-https # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds # TODO: set this to 60 seconds first and then to 518400 once you prove the former works # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload # https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff # STORAGES # ------------------------------------------------------------------------------ # https://django-storages.readthedocs.io/en/latest/#installation # noqa F405 # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings # DO NOT change these 
unless you know what you're doing. # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings # https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings # STATIC # ------------------------ # MEDIA # ------------------------------------------------------------------------------ # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates # type: ignore[index] # noqa F405 # webpack # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email # https://docs.djangoproject.com/en/dev/ref/settings/#server-email # https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix # ADMIN # ------------------------------------------------------------------------------ # Django Admin URL regex. # Anymail # ------------------------------------------------------------------------------ # https://anymail.readthedocs.io/en/stable/installation/#installing-anymail # noqa F405 # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend # https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference # https://anymail.readthedocs.io/en/stable/esps/mailgun/
| 2.03121
| 2
|
CodeWars/Fundamentals/CompleteThePattern/CompleteThePattern2.py
|
CajetanP/programming-exercises
| 1
|
6628906
|
<filename>CodeWars/Fundamentals/CompleteThePattern/CompleteThePattern2.py
def pattern(n):
    """Return the descending-number triangle for *n* as a string.

    Row i (0-based) counts down from n to i + 1, one row per line, with no
    trailing newline; e.g. pattern(4) == "4321\\n432\\n43\\n4".  Any value
    below 1 yields the empty string.
    """
    rows = ["".join(str(num) for num in range(n, row, -1)) for row in range(n)]
    return "\n".join(rows)
# Quick manual demo: print two sample patterns when the module is executed.
print(pattern(4))
print(pattern(11))
|
<filename>CodeWars/Fundamentals/CompleteThePattern/CompleteThePattern2.py
def pattern(n):
if n < 1:
return ""
res = ""
for i in range(0, n):
for j in range(n, i, -1):
res += str(j)
res += '\n'
if res.endswith('\n'):
res = res[:-1]
return res
print(pattern(4))
print(pattern(11))
|
none
| 1
| 3.794535
| 4
|
|
thumbor/filters/watermark.py
|
bear8421/thumbor
| 1
|
6628907
|
<filename>thumbor/filters/watermark.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com <EMAIL>
# pylint: disable=invalid-name
import math
import re
import tornado.gen
from thumbor.ext.filters import _alpha
from thumbor.filters import BaseFilter, filter_method
from thumbor.loaders import LoaderResult
from thumbor.utils import logger
class Filter(BaseFilter):
    """Watermark filter.

    Loads a watermark image (from storage, or via the configured loader on a
    cache miss), applies an alpha level, optionally resizes it relative to
    the target image, and pastes it at a fixed / centered / repeated
    position.
    """
    @staticmethod
    def detect_and_get_ratio_position(pos, length):
        """Translate an "<n>p" percentage position into absolute pixels.

        E.g. "20p" with length 500 -> "100"; a leading "-" sign is kept.
        Positions not matching the "<n>p" form are returned unchanged.
        """
        match = re.match("^(-?)([0-9]+)p$", pos)
        if not match:
            return pos
        sign, ratio = match.groups()
        # The 1e-5 epsilon presumably guards against float results landing
        # just below a .5 boundary and rounding down — TODO confirm.
        new_pos = round(length * float(ratio) / 100 + 1e-5)
        return f"{sign}{new_pos}"
    @staticmethod
    def calc_watermark_size(image_size, watermark_sz, w_ratio, h_ratio):
        """Scale watermark_sz to fit within w_ratio/h_ratio of image_size.

        Preserves the watermark's aspect ratio; returns (width, height).
        """
        wm_max_width = image_size[0] * w_ratio if w_ratio else None
        wm_max_height = image_size[1] * h_ratio if h_ratio else None
        # When only one ratio is given, derive the other bound from the
        # watermark's own aspect ratio.
        if not wm_max_width:
            wm_max_width = watermark_sz[0] * wm_max_height / watermark_sz[1]
        if not wm_max_height:
            wm_max_height = watermark_sz[1] * wm_max_width / watermark_sz[0]
        # Fit against whichever axis is the tighter constraint.
        if watermark_sz[0] / wm_max_width >= watermark_sz[1] / wm_max_height:
            wm_height = round(watermark_sz[1] * wm_max_width / watermark_sz[0])
            wm_width = round(wm_max_width)
        else:
            wm_height = round(wm_max_height)
            wm_width = round(watermark_sz[0] * wm_max_height / watermark_sz[1])
        return (wm_width, wm_height)
    # TODO: refactor this
    def on_image_ready(  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
        self, buffer
    ):
        """Blend the loaded watermark buffer onto the target image.

        Applies alpha, resizes per w_ratio/h_ratio if requested, resolves the
        x/y position specifiers ("<n>", "-<n>", "<n>p", "center", "repeat"),
        then pastes once or tiles across one or both axes.
        """
        self.watermark_engine.load(buffer, None)
        self.watermark_engine.enable_alpha()
        # Apply the requested transparency to the watermark's RGB data.
        mode, data = self.watermark_engine.image_data_as_rgb()
        imgdata = _alpha.apply(mode, self.alpha, data)
        self.watermark_engine.set_image_data(imgdata)
        image_size = self.engine.size
        watermark_sz = self.watermark_engine.size
        if self.w_ratio or self.h_ratio:
            watermark_sz = self.calc_watermark_size(
                image_size, watermark_sz, self.w_ratio, self.h_ratio
            )
            self.watermark_engine.resize(watermark_sz[0], watermark_sz[1])
        # Convert "<n>p" percentage positions into absolute pixel offsets.
        self.x = self.detect_and_get_ratio_position(self.x, image_size[0])
        self.y = self.detect_and_get_ratio_position(self.y, image_size[1])
        mos_x = self.x == "repeat"  # tile horizontally
        mos_y = self.y == "repeat"  # tile vertically
        center_x = self.x == "center"
        center_y = self.y == "center"
        if not center_x and not mos_x:
            # A leading "-" means the offset is measured from the far edge.
            inv_x = self.x[0] == "-"
            x = int(self.x)
        if not center_y and not mos_y:
            inv_y = self.y[0] == "-"
            y = int(self.y)
        if not mos_x:
            # (tile count, leftover pixels) — a single paste on this axis.
            repeat_x = (1, 0)
            if center_x:
                x = int((image_size[0] - watermark_sz[0]) / 2)
            elif inv_x:
                x = int((image_size[0] - watermark_sz[0]) + x)
        else:
            # How many whole watermarks fit horizontally, plus the remainder.
            repeat_x = divmod(image_size[0], watermark_sz[0])
            if image_size[0] * 1.0 / watermark_sz[0] < 2:
                # Fewer than two fit: force ceil(...) tiles with a fixed
                # 10px spacing instead of distributing the remainder.
                repeat_x = (
                    math.ceil(image_size[0] * 1.0 / watermark_sz[0]),
                    10,
                )
                space_x = 10
        if not mos_y:
            repeat_y = (1, 0)
            if center_y:
                y = int((image_size[1] - watermark_sz[1]) / 2)
            elif inv_y:
                y = int((image_size[1] - watermark_sz[1]) + y)
        else:
            repeat_y = divmod(image_size[1], watermark_sz[1])
            if image_size[1] * 1.0 / watermark_sz[1] < 2:
                repeat_y = (
                    math.ceil(image_size[1] * 1.0 / watermark_sz[1]),
                    10,
                )
                space_y = 10
        if not mos_x and not mos_y:
            # Single paste at the computed position.
            self.engine.paste(self.watermark_engine, (x, y), merge=True)
        elif mos_x and mos_y:
            if (repeat_x[0] * repeat_y[0]) > 100:
                # Cap the tile grid at 6x6 to bound the paste work and
                # recompute the leftover space for the reduced grid.
                tmpRepeatX = min(6, repeat_x[0])
                tmpRepeatY = min(6, repeat_y[0])
                repeat_x = (
                    tmpRepeatX,
                    image_size[0] - tmpRepeatX * watermark_sz[0],
                )
                repeat_y = (
                    tmpRepeatY,
                    image_size[1] - tmpRepeatY * watermark_sz[1],
                )
            # Spread the leftover pixels evenly between tiles (>= 1 gap).
            space_x = repeat_x[1] // (max(repeat_x[0], 2) - 1)
            space_y = repeat_y[1] // (max(repeat_y[0], 2) - 1)
            for i in range(int(repeat_x[0])):
                x = i * space_x + i * watermark_sz[0]
                for j in range(int(repeat_y[0])):
                    y = j * space_y + j * watermark_sz[1]
                    self.engine.paste(
                        self.watermark_engine, (x, y), merge=True
                    )
        elif mos_x:
            # Tile along x only, at the fixed y computed above.
            space_x = repeat_x[1] // (max(repeat_x[0], 2) - 1)
            for i in range(int(repeat_x[0])):
                x = i * space_x + i * watermark_sz[0]
                self.engine.paste(self.watermark_engine, (x, y), merge=True)
        else:
            # Tile along y only, at the fixed x computed above.
            space_y = repeat_y[1] // (max(repeat_y[0], 2) - 1)
            for j in range(int(repeat_y[0])):
                y = j * space_y + j * watermark_sz[1]
                self.engine.paste(self.watermark_engine, (x, y), merge=True)
    @filter_method(
        BaseFilter.String,
        r"(?:-?\d+p?)|center|repeat",
        r"(?:-?\d+p?)|center|repeat",
        BaseFilter.PositiveNumber,
        r"(?:-?\d+)|none",
        r"(?:-?\d+)|none",
    )
    async def watermark(self, url, x, y, alpha, w_ratio=False, h_ratio=False):
        """Filter entry point: watermark(url, x, y, alpha[, w_ratio, h_ratio]).

        Fetches the watermark from storage (falling back to the loader and
        caching the result), then delegates pasting to on_image_ready.
        Raises HTTP 400 on a failed load, HTTP 500 on unexpected errors.
        """
        self.url = url
        self.x = x
        self.y = y
        self.alpha = alpha
        # Ratios arrive as percentages (or "none"); normalize to fractions.
        self.w_ratio = (
            float(w_ratio) / 100.0 if w_ratio and w_ratio != "none" else False
        )
        self.h_ratio = (
            float(h_ratio) / 100.0 if h_ratio and h_ratio != "none" else False
        )
        # Separate engine instance so the watermark image can be manipulated
        # without touching the main image.
        self.watermark_engine = self.context.modules.engine.__class__(
            self.context
        )
        self.storage = self.context.modules.storage
        try:
            buffer = await self.storage.get(self.url)
            if buffer is not None:
                return self.on_image_ready(buffer)
            result = await self.context.modules.loader.load(
                self.context, self.url
            )
            if isinstance(result, LoaderResult) and not result.successful:
                logger.warning(
                    "bad watermark result error=%s metadata=%s",
                    result.error,
                    result.metadata,
                )
                # NOTE(review): only tornado.gen is imported at module level;
                # tornado.web being reachable here relies on a transitive
                # import — confirm, or import tornado.web explicitly.
                raise tornado.web.HTTPError(400)
            if isinstance(result, LoaderResult):
                buffer = result.buffer
            else:
                buffer = result
            # Cache the fetched watermark for subsequent requests.
            await self.storage.put(self.url, buffer)
            await self.storage.put_crypto(self.url)
            self.on_image_ready(buffer)
        except Exception as error:
            # Re-raise deliberate HTTP errors; map anything else to a 500.
            if isinstance(error, tornado.web.HTTPError):
                raise error
            logger.warning("bad watermark")
            raise tornado.web.HTTPError(500)
|
<filename>thumbor/filters/watermark.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com <EMAIL>
# pylint: disable=invalid-name
import math
import re
import tornado.gen
from thumbor.ext.filters import _alpha
from thumbor.filters import BaseFilter, filter_method
from thumbor.loaders import LoaderResult
from thumbor.utils import logger
class Filter(BaseFilter):
@staticmethod
def detect_and_get_ratio_position(pos, length):
match = re.match("^(-?)([0-9]+)p$", pos)
if not match:
return pos
sign, ratio = match.groups()
new_pos = round(length * float(ratio) / 100 + 1e-5)
return f"{sign}{new_pos}"
@staticmethod
def calc_watermark_size(image_size, watermark_sz, w_ratio, h_ratio):
wm_max_width = image_size[0] * w_ratio if w_ratio else None
wm_max_height = image_size[1] * h_ratio if h_ratio else None
if not wm_max_width:
wm_max_width = watermark_sz[0] * wm_max_height / watermark_sz[1]
if not wm_max_height:
wm_max_height = watermark_sz[1] * wm_max_width / watermark_sz[0]
if watermark_sz[0] / wm_max_width >= watermark_sz[1] / wm_max_height:
wm_height = round(watermark_sz[1] * wm_max_width / watermark_sz[0])
wm_width = round(wm_max_width)
else:
wm_height = round(wm_max_height)
wm_width = round(watermark_sz[0] * wm_max_height / watermark_sz[1])
return (wm_width, wm_height)
# TODO: refactor this
def on_image_ready( # pylint: disable=too-many-statements,too-many-branches,too-many-locals
self, buffer
):
self.watermark_engine.load(buffer, None)
self.watermark_engine.enable_alpha()
mode, data = self.watermark_engine.image_data_as_rgb()
imgdata = _alpha.apply(mode, self.alpha, data)
self.watermark_engine.set_image_data(imgdata)
image_size = self.engine.size
watermark_sz = self.watermark_engine.size
if self.w_ratio or self.h_ratio:
watermark_sz = self.calc_watermark_size(
image_size, watermark_sz, self.w_ratio, self.h_ratio
)
self.watermark_engine.resize(watermark_sz[0], watermark_sz[1])
self.x = self.detect_and_get_ratio_position(self.x, image_size[0])
self.y = self.detect_and_get_ratio_position(self.y, image_size[1])
mos_x = self.x == "repeat"
mos_y = self.y == "repeat"
center_x = self.x == "center"
center_y = self.y == "center"
if not center_x and not mos_x:
inv_x = self.x[0] == "-"
x = int(self.x)
if not center_y and not mos_y:
inv_y = self.y[0] == "-"
y = int(self.y)
if not mos_x:
repeat_x = (1, 0)
if center_x:
x = int((image_size[0] - watermark_sz[0]) / 2)
elif inv_x:
x = int((image_size[0] - watermark_sz[0]) + x)
else:
repeat_x = divmod(image_size[0], watermark_sz[0])
if image_size[0] * 1.0 / watermark_sz[0] < 2:
repeat_x = (
math.ceil(image_size[0] * 1.0 / watermark_sz[0]),
10,
)
space_x = 10
if not mos_y:
repeat_y = (1, 0)
if center_y:
y = int((image_size[1] - watermark_sz[1]) / 2)
elif inv_y:
y = int((image_size[1] - watermark_sz[1]) + y)
else:
repeat_y = divmod(image_size[1], watermark_sz[1])
if image_size[1] * 1.0 / watermark_sz[1] < 2:
repeat_y = (
math.ceil(image_size[1] * 1.0 / watermark_sz[1]),
10,
)
space_y = 10
if not mos_x and not mos_y:
self.engine.paste(self.watermark_engine, (x, y), merge=True)
elif mos_x and mos_y:
if (repeat_x[0] * repeat_y[0]) > 100:
tmpRepeatX = min(6, repeat_x[0])
tmpRepeatY = min(6, repeat_y[0])
repeat_x = (
tmpRepeatX,
image_size[0] - tmpRepeatX * watermark_sz[0],
)
repeat_y = (
tmpRepeatY,
image_size[1] - tmpRepeatY * watermark_sz[1],
)
space_x = repeat_x[1] // (max(repeat_x[0], 2) - 1)
space_y = repeat_y[1] // (max(repeat_y[0], 2) - 1)
for i in range(int(repeat_x[0])):
x = i * space_x + i * watermark_sz[0]
for j in range(int(repeat_y[0])):
y = j * space_y + j * watermark_sz[1]
self.engine.paste(
self.watermark_engine, (x, y), merge=True
)
elif mos_x:
space_x = repeat_x[1] // (max(repeat_x[0], 2) - 1)
for i in range(int(repeat_x[0])):
x = i * space_x + i * watermark_sz[0]
self.engine.paste(self.watermark_engine, (x, y), merge=True)
else:
space_y = repeat_y[1] // (max(repeat_y[0], 2) - 1)
for j in range(int(repeat_y[0])):
y = j * space_y + j * watermark_sz[1]
self.engine.paste(self.watermark_engine, (x, y), merge=True)
@filter_method(
BaseFilter.String,
r"(?:-?\d+p?)|center|repeat",
r"(?:-?\d+p?)|center|repeat",
BaseFilter.PositiveNumber,
r"(?:-?\d+)|none",
r"(?:-?\d+)|none",
)
async def watermark(self, url, x, y, alpha, w_ratio=False, h_ratio=False):
self.url = url
self.x = x
self.y = y
self.alpha = alpha
self.w_ratio = (
float(w_ratio) / 100.0 if w_ratio and w_ratio != "none" else False
)
self.h_ratio = (
float(h_ratio) / 100.0 if h_ratio and h_ratio != "none" else False
)
self.watermark_engine = self.context.modules.engine.__class__(
self.context
)
self.storage = self.context.modules.storage
try:
buffer = await self.storage.get(self.url)
if buffer is not None:
return self.on_image_ready(buffer)
result = await self.context.modules.loader.load(
self.context, self.url
)
if isinstance(result, LoaderResult) and not result.successful:
logger.warning(
"bad watermark result error=%s metadata=%s",
result.error,
result.metadata,
)
raise tornado.web.HTTPError(400)
if isinstance(result, LoaderResult):
buffer = result.buffer
else:
buffer = result
await self.storage.put(self.url, buffer)
await self.storage.put_crypto(self.url)
self.on_image_ready(buffer)
except Exception as error:
if isinstance(error, tornado.web.HTTPError):
raise error
logger.warning("bad watermark")
raise tornado.web.HTTPError(500)
|
en
| 0.677662
|
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/thumbor/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com <EMAIL> # pylint: disable=invalid-name # TODO: refactor this # pylint: disable=too-many-statements,too-many-branches,too-many-locals
| 2.245723
| 2
|
tests/unit/test_crawl_frontier.py
|
peterbencze/silene
| 0
|
6628908
|
<reponame>peterbencze/silene
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from silene.crawl_frontier import CrawlFrontier
from silene.crawl_request import CrawlRequest
from silene.crawler_configuration import CrawlerConfiguration
# Shared fixture request; NOTE(review): it is reused (and re-enqueued) across
# several tests below — confirm CrawlRequest is effectively immutable.
request = CrawlRequest(url='http://example.com')
def test_add_request_should_add_duplicate_request_to_queue_when_duplicate_request_filter_is_disabled() -> None:
    """With duplicate filtering disabled, an already-seen request is queued again."""
    crawler_configuration = CrawlerConfiguration([request], filter_duplicate_requests=False)
    crawl_frontier = CrawlFrontier(crawler_configuration)
    crawl_frontier.get_next_request()
    result = crawl_frontier.add_request(request)
    assert result is True
    assert crawl_frontier.get_next_request() is request
def test_add_request_should_not_add_duplicate_request_to_queue_when_duplicate_request_filter_is_enabled() -> None:
    """Duplicate filter rejects a URL differing only in param order and fragment."""
    crawler_configuration = CrawlerConfiguration([CrawlRequest(url='http://example.com/test?abc=def&ghi=jkl#fragment')])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    crawl_frontier.get_next_request()
    result = crawl_frontier.add_request(CrawlRequest(url='http://example.com/test?ghi=jkl&abc=def'))
    assert result is False
    assert crawl_frontier.get_next_request() is None
def test_add_request_should_add_request_to_queue_when_offsite_request_filter_is_disabled() -> None:
    """With offsite filtering disabled, a non-allowed domain is still queued."""
    crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=False, allowed_domains=['notexample.com'])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    result = crawl_frontier.add_request(request)
    assert result is True
    assert crawl_frontier.get_next_request() is request
def test_add_request_should_not_add_offsite_request_to_queue_when_offsite_request_filter_is_enabled() -> None:
    """With offsite filtering enabled, a non-allowed domain is rejected."""
    crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=True, allowed_domains=['notexample.com'])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    result = crawl_frontier.add_request(request)
    assert result is False
    assert crawl_frontier.get_next_request() is None
def test_add_request_should_add_allowed_request_to_queue_when_offsite_request_filter_is_enabled() -> None:
    """With offsite filtering enabled, an allowed domain is queued."""
    crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=True, allowed_domains=['example.com'])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    result = crawl_frontier.add_request(request)
    assert result is True
    assert crawl_frontier.get_next_request() is request
def test_has_next_request_should_return_false_when_queue_is_empty() -> None:
    """has_next_request is False for a frontier seeded with no requests."""
    crawler_configuration = CrawlerConfiguration([])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    assert crawl_frontier.has_next_request() is False
def test_has_next_request_should_return_true_when_queue_is_not_empty() -> None:
    """has_next_request is True when the frontier was seeded with a request."""
    crawler_configuration = CrawlerConfiguration([request])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    assert crawl_frontier.has_next_request() is True
def test_get_next_request_should_return_none_when_queue_is_empty() -> None:
    """get_next_request yields None (not an exception) on an empty queue."""
    crawler_configuration = CrawlerConfiguration([])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    assert crawl_frontier.get_next_request() is None
def test_get_next_request_should_return_next_request_when_queue_is_not_empty() -> None:
    """get_next_request returns the identical seeded request object."""
    crawler_configuration = CrawlerConfiguration([request])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    assert crawl_frontier.get_next_request() is request
def test_get_next_request_should_return_next_request_with_higher_priority() -> None:
    """A priority=1 request is dequeued before the default-priority one."""
    high_priority_request = CrawlRequest('http://test.com', priority=1)
    crawler_configuration = CrawlerConfiguration([request, high_priority_request])
    crawl_frontier = CrawlFrontier(crawler_configuration)
    assert crawl_frontier.get_next_request() is high_priority_request
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from silene.crawl_frontier import CrawlFrontier
from silene.crawl_request import CrawlRequest
from silene.crawler_configuration import CrawlerConfiguration
request = CrawlRequest(url='http://example.com')
def test_add_request_should_add_duplicate_request_to_queue_when_duplicate_request_filter_is_disabled() -> None:
crawler_configuration = CrawlerConfiguration([request], filter_duplicate_requests=False)
crawl_frontier = CrawlFrontier(crawler_configuration)
crawl_frontier.get_next_request()
result = crawl_frontier.add_request(request)
assert result is True
assert crawl_frontier.get_next_request() is request
def test_add_request_should_not_add_duplicate_request_to_queue_when_duplicate_request_filter_is_enabled() -> None:
crawler_configuration = CrawlerConfiguration([CrawlRequest(url='http://example.com/test?abc=def&ghi=jkl#fragment')])
crawl_frontier = CrawlFrontier(crawler_configuration)
crawl_frontier.get_next_request()
result = crawl_frontier.add_request(CrawlRequest(url='http://example.com/test?ghi=jkl&abc=def'))
assert result is False
assert crawl_frontier.get_next_request() is None
def test_add_request_should_add_request_to_queue_when_offsite_request_filter_is_disabled() -> None:
crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=False, allowed_domains=['notexample.com'])
crawl_frontier = CrawlFrontier(crawler_configuration)
result = crawl_frontier.add_request(request)
assert result is True
assert crawl_frontier.get_next_request() is request
def test_add_request_should_not_add_offsite_request_to_queue_when_offsite_request_filter_is_enabled() -> None:
crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=True, allowed_domains=['notexample.com'])
crawl_frontier = CrawlFrontier(crawler_configuration)
result = crawl_frontier.add_request(request)
assert result is False
assert crawl_frontier.get_next_request() is None
def test_add_request_should_add_allowed_request_to_queue_when_offsite_request_filter_is_enabled() -> None:
crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=True, allowed_domains=['example.com'])
crawl_frontier = CrawlFrontier(crawler_configuration)
result = crawl_frontier.add_request(request)
assert result is True
assert crawl_frontier.get_next_request() is request
def test_has_next_request_should_return_false_when_queue_is_empty() -> None:
crawler_configuration = CrawlerConfiguration([])
crawl_frontier = CrawlFrontier(crawler_configuration)
assert crawl_frontier.has_next_request() is False
def test_has_next_request_should_return_true_when_queue_is_not_empty() -> None:
crawler_configuration = CrawlerConfiguration([request])
crawl_frontier = CrawlFrontier(crawler_configuration)
assert crawl_frontier.has_next_request() is True
def test_get_next_request_should_return_none_when_queue_is_empty() -> None:
crawler_configuration = CrawlerConfiguration([])
crawl_frontier = CrawlFrontier(crawler_configuration)
assert crawl_frontier.get_next_request() is None
def test_get_next_request_should_return_next_request_when_queue_is_not_empty() -> None:
crawler_configuration = CrawlerConfiguration([request])
crawl_frontier = CrawlFrontier(crawler_configuration)
assert crawl_frontier.get_next_request() is request
def test_get_next_request_should_return_next_request_with_higher_priority() -> None:
high_priority_request = CrawlRequest('http://test.com', priority=1)
crawler_configuration = CrawlerConfiguration([request, high_priority_request])
crawl_frontier = CrawlFrontier(crawler_configuration)
assert crawl_frontier.get_next_request() is high_priority_request
|
en
| 0.84267
|
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #fragment')])
| 1.880234
| 2
|
core/migrations/0008_reserv_updated_at.py
|
justlikedev/tap
| 0
|
6628909
|
<filename>core/migrations/0008_reserv_updated_at.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2018-06-09 16:47
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: add ``updated_at`` to the ``reserv`` model."""
    dependencies = [
        ('core', '0007_auto_20180609_1625'),
    ]
    operations = [
        migrations.AddField(
            model_name='reserv',
            name='updated_at',
            # The one-off datetime default backfills existing rows;
            # preserve_default=False drops it from the model afterwards.
            field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2018, 6, 9, 16, 47, 55, 176766, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
|
<filename>core/migrations/0008_reserv_updated_at.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2018-06-09 16:47
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20180609_1625'),
]
operations = [
migrations.AddField(
model_name='reserv',
name='updated_at',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2018, 6, 9, 16, 47, 55, 176766, tzinfo=utc)),
preserve_default=False,
),
]
|
en
| 0.724958
|
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2018-06-09 16:47
| 1.659539
| 2
|
algorithms/two_sum_ii_input_array_is_sorted_test.py
|
kainonly/leetcode
| 0
|
6628910
|
import unittest
from .two_sum_ii_input_array_is_sorted import Solution
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.sol = Solution()
def test_example1(self):
self.assertEqual(self.sol.twoSum([2, 7, 11, 15], 9), [1, 2])
|
import unittest
from .two_sum_ii_input_array_is_sorted import Solution
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.sol = Solution()
def test_example1(self):
self.assertEqual(self.sol.twoSum([2, 7, 11, 15], 9), [1, 2])
|
none
| 1
| 3.303768
| 3
|
|
archapp/interactive.py
|
pcdshub/archapp
| 1
|
6628911
|
<filename>archapp/interactive.py<gh_stars>1-10
"""
interactive.py defines ipython archive interface
"""
import datetime as dt
from datetime import datetime
from typing import Any, Dict
import numpy as np
from . import config, data, mgmt
from .dates import days_map, units_rule
from .doc_sub import doc_sub, doc_sub_txt
from .print_formats import print_xarray
from .url import data_port_doc, hostname_doc, mgmt_port_doc
interactive_args = """
pvname : string or list of strings, optional
Pv or pvs to look up in the archiver. You can include glob
wildcards in the pv name and we will look up all matching pvs.
If not selected, we'll use the last pvname we looked up.
{date_args}
chunk : bool, optional
If True, chunk the data
"""
time_args = """
Time can be designated in three ways:
1. A number, to designate "units of time ago".
For example, if units='days', 30 means 30 days ago.
2. A list of integers to designate a specific date.
For example, [2016, 11, 10, 5, 0, 0] is Nov 10, 2016 at 5am.
This list must have at least 3 entries to specify a day and
may be extended up to 7 entries to specify a time.
At minimum: [year, month, day].
At maximum: [year, month, day, hour, min, sec, ms].
3. A datetime object to designate a specific date.
The list and datetime objects are in the local timezone.
"""
date_arg = """
{{0}} : number, list of int, or datetime, optional
{{1}} time in the archiver{{2}}
{time_args}
"""
date_arg = doc_sub_txt(date_arg, time_args=time_args)
date_args = """
{start}
{end}
{units_rule}
"""
date_args = doc_sub_txt(
date_args,
start=date_arg.format("start", "Start", ", default is 30 days ago."),
end=date_arg.format("end", "End", ", default is now, the present time."),
units_rule=units_rule,
)
interactive_args = doc_sub_txt(interactive_args, date_args=date_args)
class EpicsArchive(object):
"""
Interactive interface to an Archive Appliance
"""
@doc_sub(host=hostname_doc, data=data_port_doc, mgmt=mgmt_port_doc)
def __init__(
self,
hostname=config.hostname,
data_port=config.data_port,
mgmt_port=config.mgmt_port,
):
"""
Parameters
----------
{host}
{data}
{mgmt}
Methods
-------
get
Return timeseries data from the archiver as an xarray or numpy
array.
plot
Plot timeseries data from the archiver in a new figure.
prints
Print timeseries data from the archiver.
search
Queries the archiver with a PV search using glob format.
Examples
--------
To get data, use:
>>> data = archive.get("pv_name", xarray=True)
To get the data in a data frame format:
>>> data.to_dataframe()
To get values of the data frame, such as vals or stat, for example:
>>> data.to_dataframe()['pv_name']['vals']
"""
self._data = data.ArchiveData(hostname, data_port)
self._mgmt = mgmt.ArchiveMgmt(hostname, mgmt_port)
self._last_pvname = None
@doc_sub(args=interactive_args)
def get(
self, pvname=None, start=30, end=None, unit="days", chunk=False,
xarray=False
):
"""
Return timeseries data from the archiver.
Parameters
----------
{args}
xarray : bool, optional
If True, return the xarray. Otherwise, return numpy arrays.
Returns
-------
data : np.ndarray or xarray
"""
if pvname is None:
pvname = self._last_pvname
if pvname is None:
raise ValueError(
"No cached pvname. Must provide the first pvname of "
"the session."
)
else:
self._last_pvname = pvname
pvs = self._expand_pvnames(pvname)
dt_objs = sorted(interactive_args(start, end, unit))
xarr = self._data.get(pvs, dt_objs[0], dt_objs[1], chunk=chunk)
if xarray:
return xarr
# It's possible to get an empty response, so return an empty array:
if not len(xarr):
return np.zeros(0)
# Unpack the xarray as a np.ndarray of just the values
values = [np.datetime_as_string(xarr.time.values)]
for var in xarr.variables:
if var not in ("time", "field"):
values.append(xarr[var].values[0])
return np.column_stack(values)
def get_snapshot(
self,
*pvs: str,
at: datetime,
include_proxies: bool = True
) -> Dict[str, Any]:
"""
Get a snapshot of PV data for the given PVs at the provided time.
Parameters
----------
*pvs : str
The list of PV names.
at : datetime.datetime
The timestamp of the snapshot to request.
include_proxies : bool, optional
Allow the archiver appliance to use its internal proxies.
"""
pvs = sum((self._expand_pvnames(pv, validate=False) for pv in pvs), [])
if not pvs:
raise ValueError(
"Expected one or more PVs, or glob pattern did not match PVs "
"in archiver."
)
return self._data.get_snapshot(
pvs=pvs, at=at, include_proxies=include_proxies
)
def _expand_pvnames(self, pvname, *, validate: bool = True):
"""
Given globs or list of globs, expand to the full set of pvs to look up
Parameters
----------
pvname : str, list/tuple of str
"""
if isinstance(pvname, str):
if validate or ("*" in pvname or "?" in pvname):
return self.search(pvname, do_print=False)
return [pvname]
if isinstance(pvname, (list, tuple)):
pvs = []
for pv in pvname:
pvs.extend(self._expand_pvnames(pv, validate=validate))
return pvs
raise Exception("pvname must be string, list, or tuple")
@doc_sub(args=interactive_args)
def prints(self, pvname=None, start=30, end=None, unit="days",
chunk=False):
"""
Print timeseries data from the archiver.
Parameters
---------
{args}
"""
xarr = self.get(
pvname=pvname, start=start, end=end, unit=unit, chunk=chunk,
xarray=True
)
print_xarray(xarr, "vals")
@doc_sub(args=interactive_args)
def plot(self, pvname=None, start=30, end=None, unit="days", chunk=False):
"""
Plot timeseries data from the archiver in a new figure.
Parameters
----------
{args}
Returns
-------
plot_handle
"""
import matplotlib.pyplot as plt
xarr = self.get(
pvname=pvname, start=start, end=end, unit=unit, chunk=chunk,
xarray=True
)
# print (xarr)
df = xarr.to_dataframe()
# return df
values = df[pvname]["vals"]
# sevrs = df[pvname]["sevr"]
# return values
dft = xarr["time"].to_dataframe()["time"]
# return xarr.to_dataframe()
# return xarr# DOING test_plot = arch.plot("pvname")
# --> test_plot.to_dataframe yeilds the same Pandas dataframe!
# plt.plot(sevrs, values)
# plt.scatter(sevrs, values)
plt.plot_date(dft, values, linestyle="solid", marker="None")
plt.title(pvname)
plt.xlabel(unit)
plt.xticks(rotation=60)
plt.ylabel("vals")
plt.show()
def search(self, glob, do_print=True):
return self._mgmt.search_pvs(glob, do_print=do_print)
search.__doc__ = mgmt.ArchiveMgmt.search_pvs.__doc__
@doc_sub(date_args=date_args)
def interactive_args(start, end, unit):
"""
Return datetime objects given the interactive args.
Parameters
----------
{date_args}
Returns
-------
endpts : list
the start and end points of the archive search
"""
start = convert_date_arg(start, unit)
if end is None:
end = dt.datetime.now()
else:
end = convert_date_arg(end, unit)
return [start, end]
@doc_sub(date_arg=date_arg.format("arg", "Arg", ""))
def convert_date_arg(arg, unit):
"""
Return datetime object corresponding to date argument.
Parameters
----------
{date_arg}
Returns
-------
date : datetime
"""
if isinstance(arg, (list, tuple)):
return dt.datetime(*arg)
elif not isinstance(arg, dt.datetime):
days = days_map[unit] * float(arg)
delta = dt.timedelta(days)
return dt.datetime.now() - delta
return arg
|
<filename>archapp/interactive.py<gh_stars>1-10
"""
interactive.py defines ipython archive interface
"""
import datetime as dt
from datetime import datetime
from typing import Any, Dict
import numpy as np
from . import config, data, mgmt
from .dates import days_map, units_rule
from .doc_sub import doc_sub, doc_sub_txt
from .print_formats import print_xarray
from .url import data_port_doc, hostname_doc, mgmt_port_doc
interactive_args = """
pvname : string or list of strings, optional
Pv or pvs to look up in the archiver. You can include glob
wildcards in the pv name and we will look up all matching pvs.
If not selected, we'll use the last pvname we looked up.
{date_args}
chunk : bool, optional
If True, chunk the data
"""
time_args = """
Time can be designated in three ways:
1. A number, to designate "units of time ago".
For example, if units='days', 30 means 30 days ago.
2. A list of integers to designate a specific date.
For example, [2016, 11, 10, 5, 0, 0] is Nov 10, 2016 at 5am.
This list must have at least 3 entries to specify a day and
may be extended up to 7 entries to specify a time.
At minimum: [year, month, day].
At maximum: [year, month, day, hour, min, sec, ms].
3. A datetime object to designate a specific date.
The list and datetime objects are in the local timezone.
"""
date_arg = """
{{0}} : number, list of int, or datetime, optional
{{1}} time in the archiver{{2}}
{time_args}
"""
date_arg = doc_sub_txt(date_arg, time_args=time_args)
date_args = """
{start}
{end}
{units_rule}
"""
date_args = doc_sub_txt(
date_args,
start=date_arg.format("start", "Start", ", default is 30 days ago."),
end=date_arg.format("end", "End", ", default is now, the present time."),
units_rule=units_rule,
)
interactive_args = doc_sub_txt(interactive_args, date_args=date_args)
class EpicsArchive(object):
"""
Interactive interface to an Archive Appliance
"""
@doc_sub(host=hostname_doc, data=data_port_doc, mgmt=mgmt_port_doc)
def __init__(
self,
hostname=config.hostname,
data_port=config.data_port,
mgmt_port=config.mgmt_port,
):
"""
Parameters
----------
{host}
{data}
{mgmt}
Methods
-------
get
Return timeseries data from the archiver as an xarray or numpy
array.
plot
Plot timeseries data from the archiver in a new figure.
prints
Print timeseries data from the archiver.
search
Queries the archiver with a PV search using glob format.
Examples
--------
To get data, use:
>>> data = archive.get("pv_name", xarray=True)
To get the data in a data frame format:
>>> data.to_dataframe()
To get values of the data frame, such as vals or stat, for example:
>>> data.to_dataframe()['pv_name']['vals']
"""
self._data = data.ArchiveData(hostname, data_port)
self._mgmt = mgmt.ArchiveMgmt(hostname, mgmt_port)
self._last_pvname = None
@doc_sub(args=interactive_args)
def get(
self, pvname=None, start=30, end=None, unit="days", chunk=False,
xarray=False
):
"""
Return timeseries data from the archiver.
Parameters
----------
{args}
xarray : bool, optional
If True, return the xarray. Otherwise, return numpy arrays.
Returns
-------
data : np.ndarray or xarray
"""
if pvname is None:
pvname = self._last_pvname
if pvname is None:
raise ValueError(
"No cached pvname. Must provide the first pvname of "
"the session."
)
else:
self._last_pvname = pvname
pvs = self._expand_pvnames(pvname)
dt_objs = sorted(interactive_args(start, end, unit))
xarr = self._data.get(pvs, dt_objs[0], dt_objs[1], chunk=chunk)
if xarray:
return xarr
# It's possible to get an empty response, so return an empty array:
if not len(xarr):
return np.zeros(0)
# Unpack the xarray as a np.ndarray of just the values
values = [np.datetime_as_string(xarr.time.values)]
for var in xarr.variables:
if var not in ("time", "field"):
values.append(xarr[var].values[0])
return np.column_stack(values)
def get_snapshot(
self,
*pvs: str,
at: datetime,
include_proxies: bool = True
) -> Dict[str, Any]:
"""
Get a snapshot of PV data for the given PVs at the provided time.
Parameters
----------
*pvs : str
The list of PV names.
at : datetime.datetime
The timestamp of the snapshot to request.
include_proxies : bool, optional
Allow the archiver appliance to use its internal proxies.
"""
pvs = sum((self._expand_pvnames(pv, validate=False) for pv in pvs), [])
if not pvs:
raise ValueError(
"Expected one or more PVs, or glob pattern did not match PVs "
"in archiver."
)
return self._data.get_snapshot(
pvs=pvs, at=at, include_proxies=include_proxies
)
def _expand_pvnames(self, pvname, *, validate: bool = True):
"""
Given globs or list of globs, expand to the full set of pvs to look up
Parameters
----------
pvname : str, list/tuple of str
"""
if isinstance(pvname, str):
if validate or ("*" in pvname or "?" in pvname):
return self.search(pvname, do_print=False)
return [pvname]
if isinstance(pvname, (list, tuple)):
pvs = []
for pv in pvname:
pvs.extend(self._expand_pvnames(pv, validate=validate))
return pvs
raise Exception("pvname must be string, list, or tuple")
@doc_sub(args=interactive_args)
def prints(self, pvname=None, start=30, end=None, unit="days",
chunk=False):
"""
Print timeseries data from the archiver.
Parameters
---------
{args}
"""
xarr = self.get(
pvname=pvname, start=start, end=end, unit=unit, chunk=chunk,
xarray=True
)
print_xarray(xarr, "vals")
@doc_sub(args=interactive_args)
def plot(self, pvname=None, start=30, end=None, unit="days", chunk=False):
"""
Plot timeseries data from the archiver in a new figure.
Parameters
----------
{args}
Returns
-------
plot_handle
"""
import matplotlib.pyplot as plt
xarr = self.get(
pvname=pvname, start=start, end=end, unit=unit, chunk=chunk,
xarray=True
)
# print (xarr)
df = xarr.to_dataframe()
# return df
values = df[pvname]["vals"]
# sevrs = df[pvname]["sevr"]
# return values
dft = xarr["time"].to_dataframe()["time"]
# return xarr.to_dataframe()
# return xarr# DOING test_plot = arch.plot("pvname")
# --> test_plot.to_dataframe yeilds the same Pandas dataframe!
# plt.plot(sevrs, values)
# plt.scatter(sevrs, values)
plt.plot_date(dft, values, linestyle="solid", marker="None")
plt.title(pvname)
plt.xlabel(unit)
plt.xticks(rotation=60)
plt.ylabel("vals")
plt.show()
def search(self, glob, do_print=True):
return self._mgmt.search_pvs(glob, do_print=do_print)
search.__doc__ = mgmt.ArchiveMgmt.search_pvs.__doc__
@doc_sub(date_args=date_args)
def interactive_args(start, end, unit):
"""
Return datetime objects given the interactive args.
Parameters
----------
{date_args}
Returns
-------
endpts : list
the start and end points of the archive search
"""
start = convert_date_arg(start, unit)
if end is None:
end = dt.datetime.now()
else:
end = convert_date_arg(end, unit)
return [start, end]
@doc_sub(date_arg=date_arg.format("arg", "Arg", ""))
def convert_date_arg(arg, unit):
"""
Return datetime object corresponding to date argument.
Parameters
----------
{date_arg}
Returns
-------
date : datetime
"""
if isinstance(arg, (list, tuple)):
return dt.datetime(*arg)
elif not isinstance(arg, dt.datetime):
days = days_map[unit] * float(arg)
delta = dt.timedelta(days)
return dt.datetime.now() - delta
return arg
|
en
| 0.469954
|
interactive.py defines ipython archive interface pvname : string or list of strings, optional Pv or pvs to look up in the archiver. You can include glob wildcards in the pv name and we will look up all matching pvs. If not selected, we'll use the last pvname we looked up. {date_args} chunk : bool, optional If True, chunk the data Time can be designated in three ways: 1. A number, to designate "units of time ago". For example, if units='days', 30 means 30 days ago. 2. A list of integers to designate a specific date. For example, [2016, 11, 10, 5, 0, 0] is Nov 10, 2016 at 5am. This list must have at least 3 entries to specify a day and may be extended up to 7 entries to specify a time. At minimum: [year, month, day]. At maximum: [year, month, day, hour, min, sec, ms]. 3. A datetime object to designate a specific date. The list and datetime objects are in the local timezone. {{0}} : number, list of int, or datetime, optional {{1}} time in the archiver{{2}} {time_args} {start} {end} {units_rule} Interactive interface to an Archive Appliance Parameters ---------- {host} {data} {mgmt} Methods ------- get Return timeseries data from the archiver as an xarray or numpy array. plot Plot timeseries data from the archiver in a new figure. prints Print timeseries data from the archiver. search Queries the archiver with a PV search using glob format. Examples -------- To get data, use: >>> data = archive.get("pv_name", xarray=True) To get the data in a data frame format: >>> data.to_dataframe() To get values of the data frame, such as vals or stat, for example: >>> data.to_dataframe()['pv_name']['vals'] Return timeseries data from the archiver. Parameters ---------- {args} xarray : bool, optional If True, return the xarray. Otherwise, return numpy arrays. 
Returns ------- data : np.ndarray or xarray # It's possible to get an empty response, so return an empty array: # Unpack the xarray as a np.ndarray of just the values Get a snapshot of PV data for the given PVs at the provided time. Parameters ---------- *pvs : str The list of PV names. at : datetime.datetime The timestamp of the snapshot to request. include_proxies : bool, optional Allow the archiver appliance to use its internal proxies. Given globs or list of globs, expand to the full set of pvs to look up Parameters ---------- pvname : str, list/tuple of str Print timeseries data from the archiver. Parameters --------- {args} Plot timeseries data from the archiver in a new figure. Parameters ---------- {args} Returns ------- plot_handle # print (xarr) # return df # sevrs = df[pvname]["sevr"] # return values # return xarr.to_dataframe() # return xarr# DOING test_plot = arch.plot("pvname") # --> test_plot.to_dataframe yeilds the same Pandas dataframe! # plt.plot(sevrs, values) # plt.scatter(sevrs, values) Return datetime objects given the interactive args. Parameters ---------- {date_args} Returns ------- endpts : list the start and end points of the archive search Return datetime object corresponding to date argument. Parameters ---------- {date_arg} Returns ------- date : datetime
| 3.063189
| 3
|
diamondkata_200217/diamond.py
|
cjhnim/daily-kata-python
| 0
|
6628912
|
class Diamond:
START_LETTER = 'A'
def __init__(self, upTo):
self.upTo = upTo
def show(self):
top = ''
bottom = ''
for c in range(ord(self.START_LETTER), ord(self.upTo)+1):
line = self.buildLine(chr(c))
top = self.buildTop(line, top)
bottom = self.buildBottom(chr(c), line, bottom)
return top + bottom
def buildLine(self, c):
return self.indent(c) + \
self.firstLetter(c) + \
self.secondIndent(c) + \
self.secondLetter(c) + \
'\n'
def firstLetter(self, c):
return c
def secondLetter(self, c):
return c if c != self.START_LETTER else ''
def indent(self, c):
count = ord(self.upTo)-ord(c)
return self.buildSpace(count)
def buildSpace(self, count):
space = ''
for n in range(0, count):
space += ' '
return space
def secondIndent(self, c):
return ' ' if c != self.START_LETTER else ''
def buildTop(self, line, top):
return top + line
def buildBottom(self, c, line, bottom):
return line + bottom if self.upTo != c else bottom
|
class Diamond:
START_LETTER = 'A'
def __init__(self, upTo):
self.upTo = upTo
def show(self):
top = ''
bottom = ''
for c in range(ord(self.START_LETTER), ord(self.upTo)+1):
line = self.buildLine(chr(c))
top = self.buildTop(line, top)
bottom = self.buildBottom(chr(c), line, bottom)
return top + bottom
def buildLine(self, c):
return self.indent(c) + \
self.firstLetter(c) + \
self.secondIndent(c) + \
self.secondLetter(c) + \
'\n'
def firstLetter(self, c):
return c
def secondLetter(self, c):
return c if c != self.START_LETTER else ''
def indent(self, c):
count = ord(self.upTo)-ord(c)
return self.buildSpace(count)
def buildSpace(self, count):
space = ''
for n in range(0, count):
space += ' '
return space
def secondIndent(self, c):
return ' ' if c != self.START_LETTER else ''
def buildTop(self, line, top):
return top + line
def buildBottom(self, c, line, bottom):
return line + bottom if self.upTo != c else bottom
|
none
| 1
| 3.661435
| 4
|
|
osh/bool_parse.py
|
ariabuckles/oil
| 0
|
6628913
|
<reponame>ariabuckles/oil<filename>osh/bool_parse.py<gh_stars>0
#!/usr/bin/env python
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
bool_parse.py - Parse boolean expressions.
In contrast to test / [, the parsing of [[ expressions is done BEFORE
evaluation. So we are parsing a list of Word instances to an AST, rather than
a list of strings.
Grammar from http://compilers.iecc.com/crenshaw/tutor6.txt, adapted to ANTLR
syntax.
Expr : Term (OR Term)*
Term : Negated (AND Negated)*
Negated : '!'? Factor
Factor : WORD
| UNARY_OP WORD
| WORD BINARY_OP WORD
| '(' Expr ')'
OR = || -o
AND = && -a
WORD = any word
UNARY_OP: -z -n, etc.
BINARY_OP: -gt, -ot, ==, etc.
"""
from _devbuild.gen.id_kind_asdl import Id, Kind
from _devbuild.gen.types_asdl import lex_mode_t, lex_mode_e
from _devbuild.gen.syntax_asdl import (
word_t, word__CompoundWord, word__StringWord,
bool_expr, bool_expr_t,
)
from osh import word
from core.util import p_die
from core.meta import LookupKind
from typing import List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from osh.word_parse import WordParser
#try:
# import libc # for regex_parse
#except ImportError:
# from benchmarks import fake_libc as libc
class BoolParser(object):
"""Parses [[ at compile time and [ at runtime."""
def __init__(self, w_parser):
# type: (WordParser) -> None
"""
Args:
w_parser: WordParser
"""
self.w_parser = w_parser
# Either one word or two words for lookahead
self.words = [] # type: List[word_t]
self.cur_word = None # type: Optional[word_t]
self.op_id = Id.Undefined_Tok
self.b_kind = Kind.Undefined
def _NextOne(self, lex_mode=lex_mode_e.DBracket):
# type: (lex_mode_t) -> None
n = len(self.words)
if n == 2:
assert lex_mode == lex_mode_e.DBracket
self.words[0] = self.words[1]
self.cur_word = self.words[0]
del self.words[1]
elif n in (0, 1):
w = self.w_parser.ReadWord(lex_mode) # may raise
if n == 0:
self.words.append(w)
else:
self.words[0] = w
self.cur_word = w
assert self.cur_word is not None
self.op_id = word.BoolId(self.cur_word)
self.b_kind = LookupKind(self.op_id)
#log('--- word %s', self.cur_word)
#log('op_id %s %s %s', self.op_id, self.b_kind, lex_mode)
def _Next(self, lex_mode=lex_mode_e.DBracket):
# type: (lex_mode_t) -> None
"""Advance to the next token, skipping newlines.
We don't handle newlines in the lexer because we want the newline after ]]
to be Id.Op_Newline rather than Id.WS_Newline. It's more complicated if
it's Id.WS_Newline -- we might have to unread tokens, etc.
"""
while True:
self._NextOne(lex_mode=lex_mode)
if self.op_id != Id.Op_Newline:
break
def _LookAhead(self):
# type: () -> word_t
n = len(self.words)
if n != 1:
raise AssertionError(self.words)
w = self.w_parser.ReadWord(lex_mode_e.DBracket) # may raise
self.words.append(w) # Save it for _Next()
return w
def Parse(self):
# type: () -> bool_expr_t
self._Next()
node = self.ParseExpr()
if self.op_id != Id.Lit_DRightBracket:
#p_die("Expected ]], got %r", self.cur_word, word=self.cur_word)
# NOTE: This might be better as unexpected token, since ]] doesn't always
# make sense.
p_die('Expected ]]', word=self.cur_word)
return node
def _TestAtEnd(self):
# type: () -> bool
"""For unit tests only."""
return self.op_id == Id.Lit_DRightBracket
def ParseForBuiltin(self):
# type: () -> bool_expr_t
"""For test builtin."""
self._Next()
node = self.ParseExpr()
if self.op_id != Id.Eof_Real:
p_die('Unexpected trailing word in test expression: %s',
self.cur_word, word=self.cur_word)
return node
def ParseExpr(self):
# type: () -> bool_expr_t
"""
Iterative:
Expr : Term (OR Term)*
Right recursion:
Expr : Term (OR Expr)?
"""
left = self.ParseTerm()
# [[ uses || but [ uses -o
if self.op_id in (Id.Op_DPipe, Id.BoolUnary_o):
self._Next()
right = self.ParseExpr()
return bool_expr.LogicalOr(left, right)
else:
return left
def ParseTerm(self):
# type: () -> bool_expr_t
"""
Term : Negated (AND Negated)*
Right recursion:
Term : Negated (AND Term)?
"""
left = self.ParseNegatedFactor()
# [[ uses && but [ uses -a
if self.op_id in (Id.Op_DAmp, Id.BoolUnary_a):
self._Next()
right = self.ParseTerm()
return bool_expr.LogicalAnd(left, right)
else:
return left
def ParseNegatedFactor(self):
# type: () -> bool_expr_t
"""
Negated : '!'? Factor
"""
if self.op_id == Id.KW_Bang:
self._Next()
child = self.ParseFactor()
return bool_expr.LogicalNot(child)
else:
return self.ParseFactor()
def ParseFactor(self):
# type: () -> bool_expr_t
"""
Factor : WORD
| UNARY_OP WORD
| WORD BINARY_OP WORD
| '(' Expr ')'
"""
if self.b_kind == Kind.BoolUnary:
# Just save the type and not the token itself?
op = self.op_id
self._Next()
w = self.cur_word
# e.g. [[ -f < ]]. But [[ -f '<' ]] is OK
if not isinstance(w, (word__CompoundWord, word__StringWord)):
p_die('Invalid argument to unary operator', word=w)
self._Next()
node = bool_expr.BoolUnary(op, w) # type: bool_expr_t
return node
if self.b_kind == Kind.Word:
# Peek ahead another token.
t2 = self._LookAhead()
t2_op_id = word.BoolId(t2)
t2_b_kind = LookupKind(t2_op_id)
#log('t2 %s / t2_op_id %s / t2_b_kind %s', t2, t2_op_id, t2_b_kind)
# Redir pun for < and >, -a and -o pun
if t2_b_kind in (Kind.BoolBinary, Kind.Redir):
left = self.cur_word
self._Next()
op = self.op_id
# TODO: Need to change to lex_mode_e.BashRegex.
# _Next(lex_mode) then?
is_regex = t2_op_id == Id.BoolBinary_EqualTilde
if is_regex:
self._Next(lex_mode=lex_mode_e.BashRegex)
else:
self._Next()
right = self.cur_word
if is_regex:
# NOTE: StaticEval for checking regex syntax isn't enough. We could
# need to pass do_ere so that the quoted parts get escaped.
#ok, s, unused_quoted = word.StaticEval(right)
pass
self._Next()
return bool_expr.BoolBinary(op, left, right)
else:
# [[ foo ]]
w = self.cur_word
self._Next()
return bool_expr.WordTest(w)
if self.op_id == Id.Op_LParen:
self._Next()
node = self.ParseExpr()
if self.op_id != Id.Op_RParen:
p_die('Expected ), got %s', self.cur_word, word=self.cur_word)
self._Next()
return node
# It's not WORD, UNARY_OP, or '('
p_die('Unexpected token in boolean expression', word=self.cur_word)
|
#!/usr/bin/env python
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
bool_parse.py - Parse boolean expressions.
In contrast to test / [, the parsing of [[ expressions is done BEFORE
evaluation. So we are parsing a list of Word instances to an AST, rather than
a list of strings.
Grammar from http://compilers.iecc.com/crenshaw/tutor6.txt, adapted to ANTLR
syntax.
Expr : Term (OR Term)*
Term : Negated (AND Negated)*
Negated : '!'? Factor
Factor : WORD
| UNARY_OP WORD
| WORD BINARY_OP WORD
| '(' Expr ')'
OR = || -o
AND = && -a
WORD = any word
UNARY_OP: -z -n, etc.
BINARY_OP: -gt, -ot, ==, etc.
"""
from _devbuild.gen.id_kind_asdl import Id, Kind
from _devbuild.gen.types_asdl import lex_mode_t, lex_mode_e
from _devbuild.gen.syntax_asdl import (
word_t, word__CompoundWord, word__StringWord,
bool_expr, bool_expr_t,
)
from osh import word
from core.util import p_die
from core.meta import LookupKind
from typing import List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from osh.word_parse import WordParser
#try:
# import libc # for regex_parse
#except ImportError:
# from benchmarks import fake_libc as libc
class BoolParser(object):
"""Parses [[ at compile time and [ at runtime."""
def __init__(self, w_parser):
# type: (WordParser) -> None
"""
Args:
w_parser: WordParser
"""
self.w_parser = w_parser
# Either one word or two words for lookahead
self.words = [] # type: List[word_t]
self.cur_word = None # type: Optional[word_t]
self.op_id = Id.Undefined_Tok
self.b_kind = Kind.Undefined
def _NextOne(self, lex_mode=lex_mode_e.DBracket):
# type: (lex_mode_t) -> None
n = len(self.words)
if n == 2:
assert lex_mode == lex_mode_e.DBracket
self.words[0] = self.words[1]
self.cur_word = self.words[0]
del self.words[1]
elif n in (0, 1):
w = self.w_parser.ReadWord(lex_mode) # may raise
if n == 0:
self.words.append(w)
else:
self.words[0] = w
self.cur_word = w
assert self.cur_word is not None
self.op_id = word.BoolId(self.cur_word)
self.b_kind = LookupKind(self.op_id)
#log('--- word %s', self.cur_word)
#log('op_id %s %s %s', self.op_id, self.b_kind, lex_mode)
def _Next(self, lex_mode=lex_mode_e.DBracket):
# type: (lex_mode_t) -> None
"""Advance to the next token, skipping newlines.
We don't handle newlines in the lexer because we want the newline after ]]
to be Id.Op_Newline rather than Id.WS_Newline. It's more complicated if
it's Id.WS_Newline -- we might have to unread tokens, etc.
"""
while True:
self._NextOne(lex_mode=lex_mode)
if self.op_id != Id.Op_Newline:
break
def _LookAhead(self):
# type: () -> word_t
n = len(self.words)
if n != 1:
raise AssertionError(self.words)
w = self.w_parser.ReadWord(lex_mode_e.DBracket) # may raise
self.words.append(w) # Save it for _Next()
return w
def Parse(self):
# type: () -> bool_expr_t
self._Next()
node = self.ParseExpr()
if self.op_id != Id.Lit_DRightBracket:
#p_die("Expected ]], got %r", self.cur_word, word=self.cur_word)
# NOTE: This might be better as unexpected token, since ]] doesn't always
# make sense.
p_die('Expected ]]', word=self.cur_word)
return node
def _TestAtEnd(self):
# type: () -> bool
"""For unit tests only."""
return self.op_id == Id.Lit_DRightBracket
def ParseForBuiltin(self):
# type: () -> bool_expr_t
"""For test builtin."""
self._Next()
node = self.ParseExpr()
if self.op_id != Id.Eof_Real:
p_die('Unexpected trailing word in test expression: %s',
self.cur_word, word=self.cur_word)
return node
def ParseExpr(self):
# type: () -> bool_expr_t
"""
Iterative:
Expr : Term (OR Term)*
Right recursion:
Expr : Term (OR Expr)?
"""
left = self.ParseTerm()
# [[ uses || but [ uses -o
if self.op_id in (Id.Op_DPipe, Id.BoolUnary_o):
self._Next()
right = self.ParseExpr()
return bool_expr.LogicalOr(left, right)
else:
return left
def ParseTerm(self):
# type: () -> bool_expr_t
"""
Term : Negated (AND Negated)*
Right recursion:
Term : Negated (AND Term)?
"""
left = self.ParseNegatedFactor()
# [[ uses && but [ uses -a
if self.op_id in (Id.Op_DAmp, Id.BoolUnary_a):
self._Next()
right = self.ParseTerm()
return bool_expr.LogicalAnd(left, right)
else:
return left
def ParseNegatedFactor(self):
# type: () -> bool_expr_t
"""
Negated : '!'? Factor
"""
if self.op_id == Id.KW_Bang:
self._Next()
child = self.ParseFactor()
return bool_expr.LogicalNot(child)
else:
return self.ParseFactor()
def ParseFactor(self):
    # type: () -> bool_expr_t
    """Parse the smallest boolean unit.

    Factor : WORD
           | UNARY_OP WORD
           | WORD BINARY_OP WORD
           | '(' Expr ')'
    """
    # Case 1: unary operator like -f, -z, applied to the following word.
    if self.b_kind == Kind.BoolUnary:
        # Just save the type and not the token itself?
        op = self.op_id
        self._Next()
        w = self.cur_word
        # e.g. [[ -f < ]]. But [[ -f '<' ]] is OK
        if not isinstance(w, (word__CompoundWord, word__StringWord)):
            p_die('Invalid argument to unary operator', word=w)
        self._Next()
        node = bool_expr.BoolUnary(op, w)  # type: bool_expr_t
        return node
    # Case 2: a word, which is either a bare test or the left operand of a
    # binary operator -- decided by one token of lookahead.
    if self.b_kind == Kind.Word:
        # Peek ahead another token.
        t2 = self._LookAhead()
        t2_op_id = word.BoolId(t2)
        t2_b_kind = LookupKind(t2_op_id)
        #log('t2 %s / t2_op_id %s / t2_b_kind %s', t2, t2_op_id, t2_b_kind)
        # Redir pun for < and >, -a and -o pun
        if t2_b_kind in (Kind.BoolBinary, Kind.Redir):
            left = self.cur_word
            self._Next()
            op = self.op_id
            # TODO: Need to change to lex_mode_e.BashRegex.
            # _Next(lex_mode) then?
            is_regex = t2_op_id == Id.BoolBinary_EqualTilde
            if is_regex:
                # The right-hand side of =~ is lexed in regex mode.
                self._Next(lex_mode=lex_mode_e.BashRegex)
            else:
                self._Next()
            right = self.cur_word
            if is_regex:
                # NOTE: StaticEval for checking regex syntax isn't enough. We could
                # need to pass do_ere so that the quoted parts get escaped.
                #ok, s, unused_quoted = word.StaticEval(right)
                pass
            self._Next()
            return bool_expr.BoolBinary(op, left, right)
        else:
            # [[ foo ]]
            w = self.cur_word
            self._Next()
            return bool_expr.WordTest(w)
    # Case 3: parenthesized sub-expression.
    if self.op_id == Id.Op_LParen:
        self._Next()
        node = self.ParseExpr()
        if self.op_id != Id.Op_RParen:
            p_die('Expected ), got %s', self.cur_word, word=self.cur_word)
        self._Next()
        return node
    # It's not WORD, UNARY_OP, or '('
    p_die('Unexpected token in boolean expression', word=self.cur_word)
|
en
| 0.698922
|
#!/usr/bin/env python # Copyright 2016 <NAME>. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 bool_parse.py - Parse boolean expressions. In contrast to test / [, the parsing of [[ expressions is done BEFORE evaluation. So we are parsing a list of Word instances to an AST, rather than a list of strings. Grammar from http://compilers.iecc.com/crenshaw/tutor6.txt, adapted to ANTLR syntax. Expr : Term (OR Term)* Term : Negated (AND Negated)* Negated : '!'? Factor Factor : WORD | UNARY_OP WORD | WORD BINARY_OP WORD | '(' Expr ')' OR = || -o AND = && -a WORD = any word UNARY_OP: -z -n, etc. BINARY_OP: -gt, -ot, ==, etc. #try: # import libc # for regex_parse #except ImportError: # from benchmarks import fake_libc as libc Parses [[ at compile time and [ at runtime. # type: (WordParser) -> None Args: w_parser: WordParser # Either one word or two words for lookahead # type: List[word_t] # type: Optional[word_t] # type: (lex_mode_t) -> None # may raise #log('--- word %s', self.cur_word) #log('op_id %s %s %s', self.op_id, self.b_kind, lex_mode) # type: (lex_mode_t) -> None Advance to the next token, skipping newlines. We don't handle newlines in the lexer because we want the newline after ]] to be Id.Op_Newline rather than Id.WS_Newline. It's more complicated if it's Id.WS_Newline -- we might have to unread tokens, etc. # type: () -> word_t # may raise # Save it for _Next() # type: () -> bool_expr_t #p_die("Expected ]], got %r", self.cur_word, word=self.cur_word) # NOTE: This might be better as unexpected token, since ]] doesn't always # make sense. # type: () -> bool For unit tests only. # type: () -> bool_expr_t For test builtin. # type: () -> bool_expr_t Iterative: Expr : Term (OR Term)* Right recursion: Expr : Term (OR Expr)? 
# [[ uses || but [ uses -o # type: () -> bool_expr_t Term : Negated (AND Negated)* Right recursion: Term : Negated (AND Term)? # [[ uses && but [ uses -a # type: () -> bool_expr_t Negated : '!'? Factor # type: () -> bool_expr_t Factor : WORD | UNARY_OP WORD | WORD BINARY_OP WORD | '(' Expr ')' # Just save the type and not the token itself? # e.g. [[ -f < ]]. But [[ -f '<' ]] is OK # type: bool_expr_t # Peek ahead another token. #log('t2 %s / t2_op_id %s / t2_b_kind %s', t2, t2_op_id, t2_b_kind) # Redir pun for < and >, -a and -o pun # TODO: Need to change to lex_mode_e.BashRegex. # _Next(lex_mode) then? # NOTE: StaticEval for checking regex syntax isn't enough. We could # need to pass do_ere so that the quoted parts get escaped. #ok, s, unused_quoted = word.StaticEval(right) # [[ foo ]] # It's not WORD, UNARY_OP, or '('
| 2.784914
| 3
|
build/docs.py
|
wader/shaka-player
| 1
|
6628914
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the documentation from the source code. This deletes the old
documentation first.
"""
import os
import shakaBuildHelpers
import shutil
import subprocess
import sys
def buildDocs(_):
  """Delete the previously generated docs and rebuild them with jsdoc.

  Args:
    _: unused argument supplied by shakaBuildHelpers.runMain.

  Returns:
    The jsdoc subprocess exit code (0 on success).
  """
  source_base = shakaBuildHelpers.getSourceBase()
  # ignore_errors covers the first run, when docs/api does not exist yet.
  shutil.rmtree(os.path.join(source_base, 'docs', 'api'), ignore_errors=True)
  os.chdir(source_base)
  # Windows has a different command name. The Unix version does not seem to
  # work on Cygwin, but the Windows one does.
  use_cmd = shakaBuildHelpers.isWindows() or shakaBuildHelpers.isCygwin()
  jsdoc = os.path.join('third_party', 'jsdoc', 'jsdoc.cmd' if use_cmd else 'jsdoc')
  cmd_line = [jsdoc, '-c', 'jsdoc.conf.json']
  shakaBuildHelpers.printCmdLine(cmd_line)
  return subprocess.call(cmd_line)
if __name__ == '__main__':
  # Delegate argument handling and exit-code propagation to the shared helper.
  shakaBuildHelpers.runMain(buildDocs)
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds the documentation from the source code. This deletes the old
documentation first.
"""
import os
import shakaBuildHelpers
import shutil
import subprocess
import sys
def buildDocs(_):
  """Rebuild the jsdoc documentation, deleting any previous output first.

  Args:
    _: unused argument supplied by shakaBuildHelpers.runMain.

  Returns:
    The jsdoc subprocess exit code (0 on success).
  """
  base = shakaBuildHelpers.getSourceBase()
  # ignore_errors: docs/api may not exist on a first run.
  shutil.rmtree(os.path.join(base, 'docs', 'api'), ignore_errors=True)
  os.chdir(base)
  if shakaBuildHelpers.isWindows() or shakaBuildHelpers.isCygwin():
    # Windows has a different command name. The Unix version does not seem to
    # work on Cygwin, but the windows one does.
    jsdoc = os.path.join('third_party', 'jsdoc', 'jsdoc.cmd')
  else:
    jsdoc = os.path.join('third_party', 'jsdoc', 'jsdoc')
  cmdLine = [jsdoc, '-c', 'jsdoc.conf.json']
  shakaBuildHelpers.printCmdLine(cmdLine)
  return subprocess.call(cmdLine)
if __name__ == '__main__':
  # Delegate argument handling and exit-code propagation to the shared helper.
  shakaBuildHelpers.runMain(buildDocs)
|
en
| 0.8448
|
#!/usr/bin/python # # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Builds the documentation from the source code. This deletes the old documentation first. # Windows has a different command name. The Unix version does not seem to # work on Cygwin, but the windows one does.
| 2.085504
| 2
|
architecture_tool_django/modeling/signals.py
|
goldginkgo/architecture_tool_django
| 1
|
6628915
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Schema
from .tasks import schema_validation_task
@receiver(post_save, sender=Schema)
def schema_post_save_handler(sender, instance, created, **kwargs):
    """Queue asynchronous validation whenever a Schema instance is saved."""
    # Fire-and-forget task keyed by the schema's key; runs outside the
    # request/transaction (presumably Celery -- TODO confirm broker config).
    schema_validation_task.delay(instance.key)
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Schema
from .tasks import schema_validation_task
@receiver(post_save, sender=Schema)
def schema_post_save_handler(sender, instance, created, **kwargs):
    """Queue asynchronous validation whenever a Schema instance is saved."""
    # Fire-and-forget task keyed by the schema's key; runs outside the
    # request/transaction (presumably Celery -- TODO confirm broker config).
    schema_validation_task.delay(instance.key)
|
none
| 1
| 1.973365
| 2
|
|
benchmarks/operator_benchmark/benchmark_core.py
|
stungkit/pytorch
| 2
|
6628916
|
import functools
import numpy as np
import timeit
import json
import torch
import copy
import ast
# needs to be imported after torch
import torch.utils.cpp_extension as cpp_extension # noqa: F401
import benchmark_utils
from collections import namedtuple
"""Performance microbenchmarks.
This module contains core functionalities for performance microbenchmark tests.
"""
"""
This is used to store configs of tests
An example input is:
TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
tag='long', run_backward=False)
"""
# Metadata for one generated benchmark case, e.g.
#   TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
#              tag='long', run_backward=False)
TestConfig = namedtuple("TestConfig", "test_name input_config tag run_backward")

# Global registry: _register_test appends metainfo tuples here and
# BenchmarkRunner.run consumes them via _build_test.
BENCHMARK_TESTER = []
def _register_test(*test_metainfo):
    """Save the metainfo needed to create a test. Currently test_metainfo
    takes two different inputs:
    1) This input when adding a single op to the benchmark
       _register_test(configs, pt_bench_op, create_pytorch_op_test_case,
                      run_backward=True)
    2) This input when adding a list of ops to the benchmark
       _register_test(configs, pt_bench_op, create_pytorch_op_test_case,
                      run_backward=False,
                      op_name_function=op)
    """
    # Registration is lazy: the tuple is expanded by _build_test at run time.
    BENCHMARK_TESTER.append(test_metainfo)
def _create_test(bench_op_obj, orig_test_attrs, tags, OperatorTestCase, run_backward, bwd_input):
    """Create tests with the benchmark backend.
    Args:
        bench_op_obj: an object which instantiated from a subclass of
            Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
            creation and operator execution.
        orig_test_attrs: a dictionary includes test configs.
        tags: a attribute in test config to filter inputs
        OperatorTestCase: a named tuple to save the metadata of an test
        run_backward: a bool parameter indicating backward path
        bwd_input: name of the auto_set() input being differentiated, or a
            falsy value for plain tests.
    """
    # Copy so the caller's dict is not mutated; stringify values so they
    # survive the json round-trip below.
    test_attrs = copy.deepcopy(orig_test_attrs)
    test_attrs = {k: str(v) for k, v in test_attrs.items()}
    # json dump + literal_eval normalizes the dict to plain ascii strings.
    ascii_test_attrs = ast.literal_eval(json.dumps(test_attrs))
    # Render as "M: 8, N: 2" style: strip the braces and quotes.
    input_config = str(ascii_test_attrs)[1:-1].replace('\'', '')
    if bwd_input:
        # When auto_set is used, the test name needs to include input.
        test_attrs.update({'bwd': bwd_input})
    test_name = bench_op_obj.test_name(**test_attrs)
    test_config = TestConfig(test_name, input_config, tags, run_backward)
    return OperatorTestCase(bench_op_obj, test_config)
def _build_test(configs, bench_op, OperatorTestCase, run_backward, op_name_function=None):
    """Generate PyTorch/Caffe2 tests of operators with different inputs.
    Args:
        configs: a dictionary that has the input shapes
        bench_op: a subclass of Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
            creation and operator execution
        OperatorTestCase: a named tuple to save the metadata of an test
        run_backward: a bool parameter indicating backward path
        op_name_function: a dictionary includes operator name and function
    Yields:
        Whatever OperatorTestCase(...) produces for each kept config (and one
        extra per auto_set() input when gradients are tracked individually).
    """
    for config in configs:
        test_attrs = {}
        tags = None
        keep_config = True
        for attr in config:
            # tags is only used in our benchmark backend to filter tests and
            # it will be removed from config which is then passed to the init function
            # an example of config and attr is:
            # config: [{'M': 16}, {'N': 16}, {'K': 64}, {'tags': 'short'}]
            # attr: {'tags': 'short'}
            if "tags" in attr:
                tags = attr["tags"]
                continue
            # if 'cuda' is specified in input shape but the testing machine doesn't
            # support it, we will skip this input
            if 'cuda' in attr.values():
                if not torch.cuda.is_available():
                    keep_config = False
                    break
            test_attrs.update(attr)
        if not keep_config:
            continue
        if tags is None:
            raise ValueError("Missing tags in configs")
        input_config = str(test_attrs)[1:-1].replace('\'', '')
        op = bench_op()
        assert op is not None, "Can't create test"
        tensor_error_info = None
        # op_name_function is a dictionary which has op_name and op_function.
        # an example of op_name_function is:
        # {'op_name' : 'abs', 'op_function' : torch.abs}
        # op_function is concatenated with the input dict then passed to the init function
        # op_name is passed to the set_module_name function
        init_dict = copy.deepcopy(test_attrs)
        if op_name_function is not None:
            op_name = op_name_function['op_name']
            init_dict.update({'op_func' : op_name_function['op_func']})
            op.set_module_name(op_name)
        op._set_backward_test(run_backward)
        op.init(**init_dict)
        op.extract_inputs_tuple()
        if not run_backward:
            # Forward-only: freeze module parameters so autograd does no
            # bookkeeping during timing.
            for _, attr in vars(op).items():
                if isinstance(attr, torch.nn.Module):
                    for param in attr.parameters():
                        param.requires_grad = False
        input_name = None
        # _num_inputs_require_grads is used to track the number of tensors
        # which use auto_set().
        if op._num_inputs_require_grads > 0:
            input_name = 'all'
        yield _create_test(op, test_attrs, tags, OperatorTestCase, run_backward, input_name)
        # This for loop is only used when auto_set is used.
        # _pass_count counts how many times init has been called.
        # _auto_set_counter is reset after init is called.
        for i in range(op._num_inputs_require_grads):
            op._pass_count += 1
            op._auto_set_counter = 0
            # TODO(mingzhe09088): remove this deepcopy when we encounter
            # performance issue.
            new_op = copy.deepcopy(op)
            new_op.init(**init_dict)
            # Input name index will start from input1
            input_name = i + 1
            yield _create_test(new_op, test_attrs, tags, OperatorTestCase, run_backward, input_name)
class BenchmarkRunner(object):
    """BenchmarkRunner is responsible for benchmarking all the registered
    benchmark test groups.

    Attributes:
        tag_filter (str): control the benchmarks which matches the tag.
        operator (str): only run benchmark test cases that contains
            this filter string in the test case's id.
        test_name (str): only run benchmark test cases that matches this filter,
            this is a case-sensitive substring match and it happens in
            the _keep_test method.
    """
    def __init__(self, args):
        # TODO: consider time-bound constraints as well.
        self.args = args
        self.iters = 100  # starting iteration count, grown until significant
        self.has_explicit_iteration_count = False
        self.multiplier = 2  # growth factor used by _predict_num_iter_needed
        self.predefined_minimum_secs = 1
        self.max_iters = 1e6
        self.use_jit = args.use_jit
        self.num_runs = args.num_runs
        self.print_per_iter = False
        self.operator_range = benchmark_utils.get_operator_range(args.operator_range)
        # 100 is the default warmup iterations
        if self.args.warmup_iterations == -1:
            self.args.warmup_iterations = 100
        if self.args.iterations and self.args.iterations != -1:
            self.has_explicit_iteration_count = True
            self.iters = self.args.iterations
        # when a specific test is selected by a user, we don't need
        # to match the tag anymore
        if self.args.test_name is not None:
            self.args.tag_filter = None

    def _print_header(self):
        # Banner, plus list-mode headers when --list_tests/--list_ops is set.
        DASH_LINE = '-' * 40
        print("# {}\n"
              "# PyTorch/Caffe2 Operator Micro-benchmarks\n"
              "# {}\n"
              "# Tag : {}\n".format(DASH_LINE, DASH_LINE, self.args.tag_filter))
        if self.args.list_tests:
            print("# List of tests:")
        elif self.args.list_ops:
            print("# List of Operators to run:")
            # Used by _print_test_case_info to de-duplicate operator names.
            self.printed_ops_list = set()
            if self.args.operators:
                print("# {}".format(self.args.operators))

    def _print_perf_result(self, reported_run_time_us, test_case):
        """Print the measured latency for one test case."""
        if self.args.report_aibench:
            # Output for AIBench
            # Print out per iteration execution time instead of avg time
            return
            # NOTE(review): everything below this `return` up to the `else`
            # is unreachable dead code -- confirm whether the early return
            # (per-iteration output happens in _measure_time) is intended.
            test_name = '_'.join([test_case.framework, test_case.test_config.test_name])
            for run in range(self.num_runs):
                print("{}Observer ".format(test_case.framework) + json.dumps(
                    {
                        "type": test_name,
                        "metric": "latency",
                        "unit": "us",
                        "value": str(reported_run_time_us[run]),
                    }
                ))
        else:
            if test_case.framework == "PyTorch":
                print("# Mode: {}".format("JIT" if self.use_jit else "Eager"))
            print("# Name: {}\n"
                  "# Input: {}".format(
                      test_case.test_config.test_name,
                      test_case.test_config.input_config))
            mode = "Backward" if test_case.test_config.run_backward else "Forward"
            if self.num_runs > 1:
                for run in range(self.num_runs):
                    print("Run: {}, {} Execution Time (us) : {:.3f}".format(
                        run,
                        mode, reported_run_time_us[run]))
                print()
            else:
                print("{} Execution Time (us) : {:.3f}\n".format(
                    mode, reported_run_time_us[0]))

    def _predict_num_iter_needed(self, i):
        # Geometric growth of the iteration count between attempts.
        return (i * self.multiplier)

    def _iteration_result_is_significant(self, iters, run_time_sec, curr_test_total_time, has_explicit_iteration_count):
        """This function decides whether the measured time can be reported based on the
        following conditions: 1) the number of iterations is larger than the max_iters.
        2) the execution time is larger than the predefined minimum_time
        3) the execution time is larger than user defined minimum_time
        """
        return ((iters > self.max_iters or
                 run_time_sec > self.predefined_minimum_secs or
                 has_explicit_iteration_count) and
                curr_test_total_time > self.args.min_time_per_test)

    def _launch_forward(self, test_case, iters, print_per_iter):
        """Use Python's timeit module to measure execution time (unit: second).
        """
        # cuda_sync is inferred from the test name containing 'cuda'.
        cuda_sync = 'cuda' in test_case.test_config.test_name
        func = test_case.run_forward
        if self.use_jit:
            func = test_case.run_jit_forward
        forward_time = timeit.timeit(functools.partial(func, iters, print_per_iter, cuda_sync), number=1)
        return forward_time

    def _launch_backward(self, test_case, iters, print_per_iter=False):
        """This function runs the forward path of an op to get an output. Then the
        backward path is executed and the execution time is reported.
        """
        test_case.run_forward(num_runs=1, print_per_iter=False, cuda_sync=False)
        if test_case.framework == "PyTorch":
            test_case._output_mean()
        backward_time = timeit.timeit(functools.partial(test_case.run_backward, iters,
                                                        print_per_iter),
                                      number=1)
        return backward_time

    def _measure_time(self, launch_test, test_case, iters, print_per_iter):
        """
        This function executes the operator for <iters> iterations then looks at the time.
        If it's not significant, the number of iterations will be increased before rerun.
        The execution stops when the time becomes significant.
        """
        curr_test_total_time = 0
        time_trace = []
        while True:
            run_time_sec = launch_test(test_case, iters, print_per_iter)
            curr_test_total_time += run_time_sec
            # Analyze time after each run to decide if the result is stable
            results_are_significant = self._iteration_result_is_significant(
                iters, run_time_sec, curr_test_total_time, self.has_explicit_iteration_count)
            report_run_time = 1e6 * run_time_sec / iters
            time_trace.append(report_run_time)
            # Print out the time spent in each epoch in ms
            if self.args.report_aibench:
                mode = "JIT" if self.use_jit else "Eager"
                test_name = '_'.join([test_case.framework, test_case.test_config.test_name, mode])
                print("PyTorchObserver " + json.dumps(
                    {
                        "type": test_name,
                        "metric": "latency",
                        "unit": "ms",
                        "value": str(report_run_time / 1e3),
                    }
                ))
            if results_are_significant:
                break
            # Re-estimate the hopefully-sufficient
            # iteration count, and run the benchmark again...
            iters = self._predict_num_iter_needed(iters)
        # Report the median (50th percentile) of the per-attempt times, in us.
        reported_run_time_us = np.percentile(np.array(time_trace), 50)
        return reported_run_time_us

    def _check_keep(self, test_flag, cmd_flag):
        # Keep when no filter was given or the value matches exactly.
        return (cmd_flag is None or test_flag == cmd_flag)

    def _check_operator_first_char(self, test_flag, cmd_flag):
        # Operator-range filter matches on the first letter of the name.
        if cmd_flag is None or test_flag[:1].lower() in cmd_flag:
            return True
        return False

    def _check_keep_list(self, test_flag, cmd_flag_list):
        if (cmd_flag_list is None or
                any(test_flag == cmd_flag for cmd_flag in cmd_flag_list)):
            return True
        return False

    def _keep_test(self, test_case):
        # TODO: consider regex matching for test filtering.
        # Currently, this is a sub-string matching.
        op_test_config = test_case.test_config
        # NOTE(review): 'frameworks' is only bound when args.framework is
        # truthy, but it is read unconditionally below -- confirm that the
        # CLI always supplies a default for --framework.
        if self.args.framework:
            frameworks = benchmark_utils.process_arg_list(self.args.framework)
        operators = benchmark_utils.process_arg_list(self.args.operators) if self.args.operators else None
        # Filter framework, operator, test_name, tag, forward_only
        if (self._check_keep(op_test_config.test_name, self.args.test_name) and
                self._check_keep_list(test_case.op_bench.module_name(), operators) and
                self._check_keep_list(test_case.framework, frameworks) and
                self._check_operator_first_char(test_case.op_bench.module_name(), self.operator_range) and
                (self.args.tag_filter == 'all' or
                    self._check_keep(op_test_config.tag, self.args.tag_filter)) and
                (not self.args.forward_only or op_test_config.run_backward != self.args.forward_only) and
                (self.args.device == 'None' or 'device' not in test_case.test_config.input_config or
                    self.args.device in op_test_config.test_name)):
            return True
        return False

    def _print_test_case_info(self, test_case):
        # Print out the test name and skip the real execution
        if self.args.list_tests:
            print("# {}".format(test_case.test_config.test_name))
            return True
        elif self.args.list_ops:
            if self.args.operators is None:
                op_name = test_case.op_bench.module_name()
                if op_name not in self.printed_ops_list:
                    print("# {}".format(op_name))
                    self.printed_ops_list.add(op_name)
            return True
        return False

    def run(self):
        """Build, filter, warm up, measure, and report every registered test."""
        self._print_header()
        for test_metainfo in BENCHMARK_TESTER:
            for test in _build_test(*test_metainfo):
                full_test_id, test_case = test
                op_test_config = test_case.test_config
                if self._print_test_case_info(test_case):
                    continue
                if not self._keep_test(test_case):
                    continue
                # To reduce variance, fix a numpy randseed to the test case,
                # so that the randomly generated input tensors remain the
                # same for each test case.
                # The random seed is limited to 32-bit because of numpy
                # requirement.
                np.random.seed(seed=hash(full_test_id) & ((1 << 32) - 1))
                print("# Benchmarking {}: {}".format(
                    test_case.framework,
                    test_case.op_bench.module_name()))
                if op_test_config.run_backward:
                    launch_func = self._launch_backward
                else:
                    launch_func = self._launch_forward
                # Warmup
                launch_func(test_case, self.args.warmup_iterations, print_per_iter=False)
                # Actual Execution
                reported_time = [self._measure_time(launch_func, test_case,
                                                    self.iters, self.print_per_iter)
                                 for _ in range(self.num_runs)]
                self._print_perf_result(reported_time, test_case)
|
import functools
import numpy as np
import timeit
import json
import torch
import copy
import ast
# needs to be imported after torch
import torch.utils.cpp_extension as cpp_extension # noqa: F401
import benchmark_utils
from collections import namedtuple
"""Performance microbenchmarks.
This module contains core functionalities for performance microbenchmark tests.
"""
"""
This is used to store configs of tests
An example input is:
TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
tag='long', run_backward=False)
"""
# Metadata for one generated benchmark case, e.g.
#   TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
#              tag='long', run_backward=False)
TestConfig = namedtuple("TestConfig", "test_name input_config tag run_backward")

# Global registry: _register_test appends metainfo tuples here and
# BenchmarkRunner.run consumes them via _build_test.
BENCHMARK_TESTER = []
def _register_test(*test_metainfo):
    """Save the metainfo needed to create a test. Currently test_metainfo
    takes two different inputs:
    1) This input when adding a single op to the benchmark
       _register_test(configs, pt_bench_op, create_pytorch_op_test_case,
                      run_backward=True)
    2) This input when adding a list of ops to the benchmark
       _register_test(configs, pt_bench_op, create_pytorch_op_test_case,
                      run_backward=False,
                      op_name_function=op)
    """
    # Registration is lazy: the tuple is expanded by _build_test at run time.
    BENCHMARK_TESTER.append(test_metainfo)
def _create_test(bench_op_obj, orig_test_attrs, tags, OperatorTestCase, run_backward, bwd_input):
    """Create tests with the benchmark backend.
    Args:
        bench_op_obj: an object which instantiated from a subclass of
            Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
            creation and operator execution.
        orig_test_attrs: a dictionary includes test configs.
        tags: a attribute in test config to filter inputs
        OperatorTestCase: a named tuple to save the metadata of an test
        run_backward: a bool parameter indicating backward path
        bwd_input: name of the auto_set() input being differentiated, or a
            falsy value for plain tests.
    """
    # Copy so the caller's dict is not mutated; stringify values so they
    # survive the json round-trip below.
    test_attrs = copy.deepcopy(orig_test_attrs)
    test_attrs = {k: str(v) for k, v in test_attrs.items()}
    # json dump + literal_eval normalizes the dict to plain ascii strings.
    ascii_test_attrs = ast.literal_eval(json.dumps(test_attrs))
    # Render as "M: 8, N: 2" style: strip the braces and quotes.
    input_config = str(ascii_test_attrs)[1:-1].replace('\'', '')
    if bwd_input:
        # When auto_set is used, the test name needs to include input.
        test_attrs.update({'bwd': bwd_input})
    test_name = bench_op_obj.test_name(**test_attrs)
    test_config = TestConfig(test_name, input_config, tags, run_backward)
    return OperatorTestCase(bench_op_obj, test_config)
def _build_test(configs, bench_op, OperatorTestCase, run_backward, op_name_function=None):
    """Generate PyTorch/Caffe2 tests of operators with different inputs.
    Args:
        configs: a dictionary that has the input shapes
        bench_op: a subclass of Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
            creation and operator execution
        OperatorTestCase: a named tuple to save the metadata of an test
        run_backward: a bool parameter indicating backward path
        op_name_function: a dictionary includes operator name and function
    Yields:
        Whatever OperatorTestCase(...) produces for each kept config (and one
        extra per auto_set() input when gradients are tracked individually).
    """
    for config in configs:
        test_attrs = {}
        tags = None
        keep_config = True
        for attr in config:
            # tags is only used in our benchmark backend to filter tests and
            # it will be removed from config which is then passed to the init function
            # an example of config and attr is:
            # config: [{'M': 16}, {'N': 16}, {'K': 64}, {'tags': 'short'}]
            # attr: {'tags': 'short'}
            if "tags" in attr:
                tags = attr["tags"]
                continue
            # if 'cuda' is specified in input shape but the testing machine doesn't
            # support it, we will skip this input
            if 'cuda' in attr.values():
                if not torch.cuda.is_available():
                    keep_config = False
                    break
            test_attrs.update(attr)
        if not keep_config:
            continue
        if tags is None:
            raise ValueError("Missing tags in configs")
        input_config = str(test_attrs)[1:-1].replace('\'', '')
        op = bench_op()
        assert op is not None, "Can't create test"
        tensor_error_info = None
        # op_name_function is a dictionary which has op_name and op_function.
        # an example of op_name_function is:
        # {'op_name' : 'abs', 'op_function' : torch.abs}
        # op_function is concatenated with the input dict then passed to the init function
        # op_name is passed to the set_module_name function
        init_dict = copy.deepcopy(test_attrs)
        if op_name_function is not None:
            op_name = op_name_function['op_name']
            init_dict.update({'op_func' : op_name_function['op_func']})
            op.set_module_name(op_name)
        op._set_backward_test(run_backward)
        op.init(**init_dict)
        op.extract_inputs_tuple()
        if not run_backward:
            # Forward-only: freeze module parameters so autograd does no
            # bookkeeping during timing.
            for _, attr in vars(op).items():
                if isinstance(attr, torch.nn.Module):
                    for param in attr.parameters():
                        param.requires_grad = False
        input_name = None
        # _num_inputs_require_grads is used to track the number of tensors
        # which use auto_set().
        if op._num_inputs_require_grads > 0:
            input_name = 'all'
        yield _create_test(op, test_attrs, tags, OperatorTestCase, run_backward, input_name)
        # This for loop is only used when auto_set is used.
        # _pass_count counts how many times init has been called.
        # _auto_set_counter is reset after init is called.
        for i in range(op._num_inputs_require_grads):
            op._pass_count += 1
            op._auto_set_counter = 0
            # TODO(mingzhe09088): remove this deepcopy when we encounter
            # performance issue.
            new_op = copy.deepcopy(op)
            new_op.init(**init_dict)
            # Input name index will start from input1
            input_name = i + 1
            yield _create_test(new_op, test_attrs, tags, OperatorTestCase, run_backward, input_name)
class BenchmarkRunner(object):
"""BenchmarkRunner is responsible for benchmarking all the registered
benchmark test groups.
Attributes:
tag_filter (str): control the benchmarks which matches the tag.
operator (str): only run benchmark test cases that contains
this filter string in the test case's id.
test_name (str): only run benchmark test cases that matches this filter,
this is a case-sensitive substring match and it happens in
the _keep_test method.
"""
def __init__(self, args):
# TODO: consider time-bound constraints as well.
self.args = args
self.iters = 100
self.has_explicit_iteration_count = False
self.multiplier = 2
self.predefined_minimum_secs = 1
self.max_iters = 1e6
self.use_jit = args.use_jit
self.num_runs = args.num_runs
self.print_per_iter = False
self.operator_range = benchmark_utils.get_operator_range(args.operator_range)
# 100 is the default warmup iterations
if self.args.warmup_iterations == -1:
self.args.warmup_iterations = 100
if self.args.iterations and self.args.iterations != -1:
self.has_explicit_iteration_count = True
self.iters = self.args.iterations
# when a specific test is selected by a user, we don't need
# to match the tag anymore
if self.args.test_name is not None:
self.args.tag_filter = None
def _print_header(self):
DASH_LINE = '-' * 40
print("# {}\n"
"# PyTorch/Caffe2 Operator Micro-benchmarks\n"
"# {}\n"
"# Tag : {}\n".format(DASH_LINE, DASH_LINE, self.args.tag_filter))
if self.args.list_tests:
print("# List of tests:")
elif self.args.list_ops:
print("# List of Operators to run:")
self.printed_ops_list = set()
if self.args.operators:
print("# {}".format(self.args.operators))
def _print_perf_result(self, reported_run_time_us, test_case):
if self.args.report_aibench:
# Output for AIBench
# Print out per iteration execution time instead of avg time
return
test_name = '_'.join([test_case.framework, test_case.test_config.test_name])
for run in range(self.num_runs):
print("{}Observer ".format(test_case.framework) + json.dumps(
{
"type": test_name,
"metric": "latency",
"unit": "us",
"value": str(reported_run_time_us[run]),
}
))
else:
if test_case.framework == "PyTorch":
print("# Mode: {}".format("JIT" if self.use_jit else "Eager"))
print("# Name: {}\n"
"# Input: {}".format(
test_case.test_config.test_name,
test_case.test_config.input_config))
mode = "Backward" if test_case.test_config.run_backward else "Forward"
if self.num_runs > 1:
for run in range(self.num_runs):
print("Run: {}, {} Execution Time (us) : {:.3f}".format(
run,
mode, reported_run_time_us[run]))
print()
else:
print("{} Execution Time (us) : {:.3f}\n".format(
mode, reported_run_time_us[0]))
def _predict_num_iter_needed(self, i):
return (i * self.multiplier)
def _iteration_result_is_significant(self, iters, run_time_sec, curr_test_total_time, has_explicit_iteration_count):
""" This function decides whether the measured time can be reported based on the
following conditions: 1) the number of iterations is larger than the max_iters.
2) the execution time is larger than the predefined minimum_time
3) the execution time is larger than user defined minimum_time
"""
return ((iters > self.max_iters or
run_time_sec > self.predefined_minimum_secs or
has_explicit_iteration_count) and
curr_test_total_time > self.args.min_time_per_test)
def _launch_forward(self, test_case, iters, print_per_iter):
""" Use Python's timeit module to measure execution time (unit: second).
"""
cuda_sync = 'cuda' in test_case.test_config.test_name
func = test_case.run_forward
if self.use_jit:
func = test_case.run_jit_forward
forward_time = timeit.timeit(functools.partial(func, iters, print_per_iter, cuda_sync), number=1)
return forward_time
def _launch_backward(self, test_case, iters, print_per_iter=False):
""" This function runs forward path of an op to get an output. Then the backward path is executed
and the execution time is reported
"""
test_case.run_forward(num_runs=1, print_per_iter=False, cuda_sync=False)
if test_case.framework == "PyTorch":
test_case._output_mean()
backward_time = timeit.timeit(functools.partial(test_case.run_backward, iters,
print_per_iter),
number=1)
return backward_time
def _measure_time(self, launch_test, test_case, iters, print_per_iter):
"""
This function execute the operator for <iters> iterations then look at the time.
If it's not significant, the number of iterations will be increased before rerun.
The execution stops when the time becomes significant.
"""
curr_test_total_time = 0
time_trace = []
while True:
run_time_sec = launch_test(test_case, iters, print_per_iter)
curr_test_total_time += run_time_sec
# Analyze time after each run to decide if the result is stable
results_are_significant = self._iteration_result_is_significant(
iters, run_time_sec, curr_test_total_time, self.has_explicit_iteration_count)
report_run_time = 1e6 * run_time_sec / iters
time_trace.append(report_run_time)
# Print out the time spent in each epoch in ms
if self.args.report_aibench:
mode = "JIT" if self.use_jit else "Eager"
test_name = '_'.join([test_case.framework, test_case.test_config.test_name, mode])
print("PyTorchObserver " + json.dumps(
{
"type": test_name,
"metric": "latency",
"unit": "ms",
"value": str(report_run_time / 1e3),
}
))
if results_are_significant:
break
# Re-estimate the hopefully-sufficient
# iteration count, and run the benchmark again...
iters = self._predict_num_iter_needed(iters)
reported_run_time_us = np.percentile(np.array(time_trace), 50)
return reported_run_time_us
def _check_keep(self, test_flag, cmd_flag):
return (cmd_flag is None or test_flag == cmd_flag)
def _check_operator_first_char(self, test_flag, cmd_flag):
if cmd_flag is None or test_flag[:1].lower() in cmd_flag:
return True
return False
def _check_keep_list(self, test_flag, cmd_flag_list):
if (cmd_flag_list is None or
any(test_flag == cmd_flag for cmd_flag in cmd_flag_list)):
return True
return False
def _keep_test(self, test_case):
    """Decide whether ``test_case`` survives every user-supplied filter.

    Filters applied (all must pass): test_name, operator list, framework
    list, operator first-character range, tag, forward/backward mode, and
    device substring.  A ``None`` filter means "keep everything".
    """
    # TODO: consider regex matching for test filtering.
    # Currently, this is a sub-string matching.
    op_test_config = test_case.test_config
    # BUGFIX: the original bound `frameworks` only inside
    # `if self.args.framework:`, then read it unconditionally below,
    # raising NameError whenever --framework was unset/empty.  Bind it to
    # None (i.e. "keep all frameworks") in that case instead.
    frameworks = (benchmark_utils.process_arg_list(self.args.framework)
                  if self.args.framework else None)
    operators = benchmark_utils.process_arg_list(self.args.operators) if self.args.operators else None
    # Filter framework, operator, test_name, tag, forward_only
    if (self._check_keep(op_test_config.test_name, self.args.test_name) and
            self._check_keep_list(test_case.op_bench.module_name(), operators) and
            self._check_keep_list(test_case.framework, frameworks) and
            self._check_operator_first_char(test_case.op_bench.module_name(), self.operator_range) and
            (self.args.tag_filter == 'all' or
                self._check_keep(op_test_config.tag, self.args.tag_filter)) and
            (not self.args.forward_only or op_test_config.run_backward != self.args.forward_only) and
            (self.args.device == 'None' or 'device' not in test_case.test_config.input_config or
                self.args.device in op_test_config.test_name)):
        return True
    return False
def _print_test_case_info(self, test_case):
# Print out the test name and skip the real execution
if self.args.list_tests:
print("# {}".format(test_case.test_config.test_name))
return True
elif self.args.list_ops:
if self.args.operators is None:
op_name = test_case.op_bench.module_name()
if op_name not in self.printed_ops_list:
print("# {}".format(op_name))
self.printed_ops_list.add(op_name)
return True
return False
def run(self):
    """Build, filter and execute every registered benchmark test case,
    printing a header first and per-test timing results as it goes."""
    self._print_header()
    # BENCHMARK_TESTER holds the metainfo registered via _register_test;
    # _build_test expands each entry into (full_test_id, test_case) pairs.
    for test_metainfo in BENCHMARK_TESTER:
        for test in _build_test(*test_metainfo):
            full_test_id, test_case = test
            op_test_config = test_case.test_config
            # List-only modes (--list_tests / --list_ops) print and skip.
            if self._print_test_case_info(test_case):
                continue
            # Apply the user-supplied filters (framework, operator, tag, ...).
            if not self._keep_test(test_case):
                continue
            # To reduce variance, fix a numpy randseed to the test case,
            # so that the randomly generated input tensors remain the
            # same for each test case.
            # The random seed is limited to 32-bit because of numpy
            # requirement.
            np.random.seed(seed=hash(full_test_id) & ((1 << 32) - 1))
            print("# Benchmarking {}: {}".format(
                test_case.framework,
                test_case.op_bench.module_name()))
            if op_test_config.run_backward:
                launch_func = self._launch_backward
            else:
                launch_func = self._launch_forward
            # Warmup
            launch_func(test_case, self.args.warmup_iterations, print_per_iter=False)
            # Actual Execution
            reported_time = [self._measure_time(launch_func, test_case,
                                                self.iters, self.print_per_iter)
                             for _ in range(self.num_runs)]
            self._print_perf_result(reported_time, test_case)
|
en
| 0.760557
|
# needs to be imported after torch # noqa: F401 Performance microbenchmarks. This module contains core functionalities for performance microbenchmark tests. This is used to store configs of tests An example input is: TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1', tag='long', run_backward=False) save the metainfo needed to create a test. Currently test_metainfo takes two different inputs: 1) This input when adds single op to the benchmark _register_test(configs, pt_bench_op, create_pytorch_op_test_case, run_backward=True) 2) This input when addes a list of ops to the benchmark _register_test(configs, pt_bench_op, create_pytorch_op_test_case, run_backward=False, op_name_function=op) Create tests with the benchmark backend. Args: bench_op_obj: an object which instantiated from a subclass of Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor creation and operator execution. test_attrs: a dictionary includes test configs. tags: a attribute in test config to filter inputs OperatorTestCase: a named tuple to save the metadata of an test run_backward: a bool parameter indicating backward path # When auto_set is used, the test name needs to include input. Generate PyTorch/Caffe2 tests of operators with different inputs. 
Args: configs: a dictionary that has the input shapes bench_op: a subclass of Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor creation and operator execution OperatorTestCase: a named tuple to save the metadata of an test run_backward: a bool parameter indicating backward path op_name_function: a dictionary includes operator name and function # tags is only used in our benchmark backend to filter tests and # it will be removed from config which is then passed to the init function # an example of config and atrr is: # config: [{'M': 16}, {'N': 16}, {'K': 64}, {'tags': 'short'}] # attr: {'tags': 'short'} # if 'cuda' is specified in input shape but the testing machines doesn't # support, we will skip this input # op_name_function is a dictionary which has op_name and op_function. # an example of op_name_function is: # {'op_name' : 'abs', 'op_function' : torch.abs} # op_function is concatenated with the input dict then passed to the init function # op_name is passed to the set_module_name function # _num_inputs_require_grads is used to track the number of tensors # which use auto_set(). # This for loop is only used when auto_set is used. # _pass_count counts how many times init has been called. # _auto_set_counter is reset after init is called. # TODO(mingzhe09088): remove this deepcopy when we encounter # performance issue. # Input name index will start from input1 BenchmarkRunner is responsible for benchmarking all the registered benchmark test groups. Attributes: tag_filter (str): control the benchmarks which matches the tag. operator (str): only run benchmark test cases that contains this filter string in the test case's id. test_name (str): only run benchmark test cases that matches this filter, this is a case-sensitive substring match and it happens in the _keep_test method. # TODO: consider time-bound constraints as well. 
# 100 is the default warmup iterations # when a specific test is selected by a user, we don't need # to match the tag anymore # Output for AIBench # Print out per iteration execution time instead of avg time This function decides whether the measured time can be reported based on the following conditions: 1) the number of iterations is larger than the max_iters. 2) the execution time is larger than the predefined minimum_time 3) the execution time is larger than user defined minimum_time Use Python's timeit module to measure execution time (unit: second). This function runs forward path of an op to get an output. Then the backward path is executed and the execution time is reported This function execute the operator for <iters> iterations then look at the time. If it's not significant, the number of iterations will be increased before rerun. The execution stops when the time becomes significant. # Analyze time after each run to decide if the result is stable # Print out the time spent in each epoch in ms # Re-estimate the hopefully-sufficient # iteration count, and run the benchmark again... # TODO: consider regex matching for test filtering. # Currently, this is a sub-string matching. # Filter framework, operator, test_name, tag, forward_only # Print out the test name and skip the real execution # To reduce variance, fix a numpy randseed to the test case, # so that the randomly generated input tensors remain the # same for each test case. # The random seed is limited to 32-bit because of numpy # requirement. # Warmup # Actual Execution
| 2.487978
| 2
|
azure-devops/azext_devops/devops_sdk/v6_0/audit/audit_client.py
|
dhilmathy/azure-devops-cli-extension
| 248
|
6628917
|
<reponame>dhilmathy/azure-devops-cli-extension<gh_stars>100-1000
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
# NOTE(review): this file is marked "Generated file, DO NOT EDIT" — code is
# kept byte-identical; only explanatory comments are added.
class AuditClient(Client):
    """Audit
    :param str base_url: Service URL
    :param Authentication creds: Authenticated credentials.
    """

    def __init__(self, base_url=None, creds=None):
        super(AuditClient, self).__init__(base_url, creds)
        # Collect every model class exported by the generated models module
        # so msrest can (de)serialize request/response bodies by type name.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # Well-known resource area GUID for the Audit service.
    resource_area_identifier = '94ff054d-5ee1-413d-9341-3f4a7827de2e'

    def get_actions(self, area_name=None):
        """GetActions.
        [Preview API] Get all auditable actions filterable by area.
        :param str area_name: Optional. Get actions scoped to area
        :rtype: [AuditActionInfo]
        """
        query_parameters = {}
        if area_name is not None:
            query_parameters['areaName'] = self._serialize.query('area_name', area_name, 'str')
        response = self._send(http_method='GET',
                              location_id='6fa30b9a-9558-4e3b-a95f-a12572caa6e6',
                              version='6.0-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('[AuditActionInfo]', self._unwrap_collection(response))

    def query_log(self, start_time=None, end_time=None, batch_size=None, continuation_token=None, skip_aggregation=None):
        """QueryLog.
        [Preview API] Queries audit log entries
        :param datetime start_time: Start time of download window. Optional
        :param datetime end_time: End time of download window. Optional
        :param int batch_size: Max number of results to return. Optional
        :param str continuation_token: Token used for returning next set of results from previous query. Optional
        :param bool skip_aggregation: Skips aggregating events and leaves them as individual entries instead. By default events are aggregated. Event types that are aggregated: AuditLog.AccessLog.
        :rtype: :class:`<AuditLogQueryResult> <azure.devops.v6_0.audit.models.AuditLogQueryResult>`
        """
        # Only explicitly supplied arguments become query parameters; the
        # service applies its own defaults for the rest.
        query_parameters = {}
        if start_time is not None:
            query_parameters['startTime'] = self._serialize.query('start_time', start_time, 'iso-8601')
        if end_time is not None:
            query_parameters['endTime'] = self._serialize.query('end_time', end_time, 'iso-8601')
        if batch_size is not None:
            query_parameters['batchSize'] = self._serialize.query('batch_size', batch_size, 'int')
        if continuation_token is not None:
            query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
        if skip_aggregation is not None:
            query_parameters['skipAggregation'] = self._serialize.query('skip_aggregation', skip_aggregation, 'bool')
        response = self._send(http_method='GET',
                              location_id='4e5fa14f-7097-4b73-9c85-00abc7353c61',
                              version='6.0-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('AuditLogQueryResult', response)

    def download_log(self, format, start_time=None, end_time=None, **kwargs):
        """DownloadLog.
        [Preview API] Downloads audit log entries.
        :param str format: File format for download. Can be "json" or "csv".
        :param datetime start_time: Start time of download window. Optional
        :param datetime end_time: End time of download window. Optional
        :rtype: object
        """
        query_parameters = {}
        if format is not None:
            query_parameters['format'] = self._serialize.query('format', format, 'str')
        if start_time is not None:
            query_parameters['startTime'] = self._serialize.query('start_time', start_time, 'iso-8601')
        if end_time is not None:
            query_parameters['endTime'] = self._serialize.query('end_time', end_time, 'iso-8601')
        response = self._send(http_method='GET',
                              location_id='b7b98a76-04e8-4f4d-ac72-9d46492caaac',
                              version='6.0-preview.1',
                              query_parameters=query_parameters,
                              accept_media_type='application/octet-stream')
        # The body is streamed; an optional "callback" kwarg receives
        # download progress notifications.
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)

    def create_stream(self, stream, days_to_backfill):
        """CreateStream.
        [Preview API] Create new Audit Stream
        :param :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` stream: Stream entry
        :param int days_to_backfill: The number of days of previously recorded audit data that will be replayed into the stream. A value of zero will result in only new events being streamed.
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        query_parameters = {}
        if days_to_backfill is not None:
            query_parameters['daysToBackfill'] = self._serialize.query('days_to_backfill', days_to_backfill, 'int')
        content = self._serialize.body(stream, 'AuditStream')
        response = self._send(http_method='POST',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('AuditStream', response)

    def delete_stream(self, stream_id):
        """DeleteStream.
        [Preview API] Delete Audit Stream
        :param int stream_id: Id of stream entry to delete
        """
        route_values = {}
        if stream_id is not None:
            route_values['streamId'] = self._serialize.url('stream_id', stream_id, 'int')
        # DELETE returns no body, so there is nothing to deserialize.
        self._send(http_method='DELETE',
                   location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                   version='6.0-preview.1',
                   route_values=route_values)

    def query_all_streams(self):
        """QueryAllStreams.
        [Preview API] Return all Audit Streams scoped to an organization
        :rtype: [AuditStream]
        """
        response = self._send(http_method='GET',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1')
        return self._deserialize('[AuditStream]', self._unwrap_collection(response))

    def query_stream_by_id(self, stream_id):
        """QueryStreamById.
        [Preview API] Return Audit Stream with id of streamId if one exists otherwise throw
        :param int stream_id: Id of stream entry to retrieve
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        route_values = {}
        if stream_id is not None:
            route_values['streamId'] = self._serialize.url('stream_id', stream_id, 'int')
        response = self._send(http_method='GET',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              route_values=route_values)
        return self._deserialize('AuditStream', response)

    def update_status(self, stream_id, status):
        """UpdateStatus.
        [Preview API] Update existing Audit Stream status
        :param int stream_id: Id of stream entry to be updated
        :param str status: Status of the stream
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        route_values = {}
        if stream_id is not None:
            route_values['streamId'] = self._serialize.url('stream_id', stream_id, 'int')
        query_parameters = {}
        if status is not None:
            query_parameters['status'] = self._serialize.query('status', status, 'str')
        response = self._send(http_method='PUT',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('AuditStream', response)

    def update_stream(self, stream):
        """UpdateStream.
        [Preview API] Update existing Audit Stream
        :param :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` stream: Stream entry
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        content = self._serialize.body(stream, 'AuditStream')
        response = self._send(http_method='PUT',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              content=content)
        return self._deserialize('AuditStream', response)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
# NOTE(review): this file is marked "Generated file, DO NOT EDIT" — code is
# kept byte-identical; only explanatory comments are added.
class AuditClient(Client):
    """Audit
    :param str base_url: Service URL
    :param Authentication creds: Authenticated credentials.
    """

    def __init__(self, base_url=None, creds=None):
        super(AuditClient, self).__init__(base_url, creds)
        # Collect every model class exported by the generated models module
        # so msrest can (de)serialize request/response bodies by type name.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # Well-known resource area GUID for the Audit service.
    resource_area_identifier = '94ff054d-5ee1-413d-9341-3f4a7827de2e'

    def get_actions(self, area_name=None):
        """GetActions.
        [Preview API] Get all auditable actions filterable by area.
        :param str area_name: Optional. Get actions scoped to area
        :rtype: [AuditActionInfo]
        """
        query_parameters = {}
        if area_name is not None:
            query_parameters['areaName'] = self._serialize.query('area_name', area_name, 'str')
        response = self._send(http_method='GET',
                              location_id='6fa30b9a-9558-4e3b-a95f-a12572caa6e6',
                              version='6.0-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('[AuditActionInfo]', self._unwrap_collection(response))

    def query_log(self, start_time=None, end_time=None, batch_size=None, continuation_token=None, skip_aggregation=None):
        """QueryLog.
        [Preview API] Queries audit log entries
        :param datetime start_time: Start time of download window. Optional
        :param datetime end_time: End time of download window. Optional
        :param int batch_size: Max number of results to return. Optional
        :param str continuation_token: Token used for returning next set of results from previous query. Optional
        :param bool skip_aggregation: Skips aggregating events and leaves them as individual entries instead. By default events are aggregated. Event types that are aggregated: AuditLog.AccessLog.
        :rtype: :class:`<AuditLogQueryResult> <azure.devops.v6_0.audit.models.AuditLogQueryResult>`
        """
        # Only explicitly supplied arguments become query parameters; the
        # service applies its own defaults for the rest.
        query_parameters = {}
        if start_time is not None:
            query_parameters['startTime'] = self._serialize.query('start_time', start_time, 'iso-8601')
        if end_time is not None:
            query_parameters['endTime'] = self._serialize.query('end_time', end_time, 'iso-8601')
        if batch_size is not None:
            query_parameters['batchSize'] = self._serialize.query('batch_size', batch_size, 'int')
        if continuation_token is not None:
            query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
        if skip_aggregation is not None:
            query_parameters['skipAggregation'] = self._serialize.query('skip_aggregation', skip_aggregation, 'bool')
        response = self._send(http_method='GET',
                              location_id='4e5fa14f-7097-4b73-9c85-00abc7353c61',
                              version='6.0-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('AuditLogQueryResult', response)

    def download_log(self, format, start_time=None, end_time=None, **kwargs):
        """DownloadLog.
        [Preview API] Downloads audit log entries.
        :param str format: File format for download. Can be "json" or "csv".
        :param datetime start_time: Start time of download window. Optional
        :param datetime end_time: End time of download window. Optional
        :rtype: object
        """
        query_parameters = {}
        if format is not None:
            query_parameters['format'] = self._serialize.query('format', format, 'str')
        if start_time is not None:
            query_parameters['startTime'] = self._serialize.query('start_time', start_time, 'iso-8601')
        if end_time is not None:
            query_parameters['endTime'] = self._serialize.query('end_time', end_time, 'iso-8601')
        response = self._send(http_method='GET',
                              location_id='b7b98a76-04e8-4f4d-ac72-9d46492caaac',
                              version='6.0-preview.1',
                              query_parameters=query_parameters,
                              accept_media_type='application/octet-stream')
        # The body is streamed; an optional "callback" kwarg receives
        # download progress notifications.
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)

    def create_stream(self, stream, days_to_backfill):
        """CreateStream.
        [Preview API] Create new Audit Stream
        :param :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` stream: Stream entry
        :param int days_to_backfill: The number of days of previously recorded audit data that will be replayed into the stream. A value of zero will result in only new events being streamed.
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        query_parameters = {}
        if days_to_backfill is not None:
            query_parameters['daysToBackfill'] = self._serialize.query('days_to_backfill', days_to_backfill, 'int')
        content = self._serialize.body(stream, 'AuditStream')
        response = self._send(http_method='POST',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('AuditStream', response)

    def delete_stream(self, stream_id):
        """DeleteStream.
        [Preview API] Delete Audit Stream
        :param int stream_id: Id of stream entry to delete
        """
        route_values = {}
        if stream_id is not None:
            route_values['streamId'] = self._serialize.url('stream_id', stream_id, 'int')
        # DELETE returns no body, so there is nothing to deserialize.
        self._send(http_method='DELETE',
                   location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                   version='6.0-preview.1',
                   route_values=route_values)

    def query_all_streams(self):
        """QueryAllStreams.
        [Preview API] Return all Audit Streams scoped to an organization
        :rtype: [AuditStream]
        """
        response = self._send(http_method='GET',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1')
        return self._deserialize('[AuditStream]', self._unwrap_collection(response))

    def query_stream_by_id(self, stream_id):
        """QueryStreamById.
        [Preview API] Return Audit Stream with id of streamId if one exists otherwise throw
        :param int stream_id: Id of stream entry to retrieve
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        route_values = {}
        if stream_id is not None:
            route_values['streamId'] = self._serialize.url('stream_id', stream_id, 'int')
        response = self._send(http_method='GET',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              route_values=route_values)
        return self._deserialize('AuditStream', response)

    def update_status(self, stream_id, status):
        """UpdateStatus.
        [Preview API] Update existing Audit Stream status
        :param int stream_id: Id of stream entry to be updated
        :param str status: Status of the stream
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        route_values = {}
        if stream_id is not None:
            route_values['streamId'] = self._serialize.url('stream_id', stream_id, 'int')
        query_parameters = {}
        if status is not None:
            query_parameters['status'] = self._serialize.query('status', status, 'str')
        response = self._send(http_method='PUT',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('AuditStream', response)

    def update_stream(self, stream):
        """UpdateStream.
        [Preview API] Update existing Audit Stream
        :param :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` stream: Stream entry
        :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
        """
        content = self._serialize.body(stream, 'AuditStream')
        response = self._send(http_method='PUT',
                              location_id='77d60bf9-1882-41c5-a90d-3a6d3c13fd3b',
                              version='6.0-preview.1',
                              content=content)
        return self._deserialize('AuditStream', response)
|
en
| 0.631841
|
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- Audit :param str base_url: Service URL :param Authentication creds: Authenticated credentials. GetActions. [Preview API] Get all auditable actions filterable by area. :param str area_name: Optional. Get actions scoped to area :rtype: [AuditActionInfo] QueryLog. [Preview API] Queries audit log entries :param datetime start_time: Start time of download window. Optional :param datetime end_time: End time of download window. Optional :param int batch_size: Max number of results to return. Optional :param str continuation_token: Token used for returning next set of results from previous query. Optional :param bool skip_aggregation: Skips aggregating events and leaves them as individual entries instead. By default events are aggregated. Event types that are aggregated: AuditLog.AccessLog. :rtype: :class:`<AuditLogQueryResult> <azure.devops.v6_0.audit.models.AuditLogQueryResult>` DownloadLog. [Preview API] Downloads audit log entries. :param str format: File format for download. Can be "json" or "csv". :param datetime start_time: Start time of download window. Optional :param datetime end_time: End time of download window. Optional :rtype: object CreateStream. [Preview API] Create new Audit Stream :param :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` stream: Stream entry :param int days_to_backfill: The number of days of previously recorded audit data that will be replayed into the stream. 
A value of zero will result in only new events being streamed. :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` DeleteStream. [Preview API] Delete Audit Stream :param int stream_id: Id of stream entry to delete QueryAllStreams. [Preview API] Return all Audit Streams scoped to an organization :rtype: [AuditStream] QueryStreamById. [Preview API] Return Audit Stream with id of streamId if one exists otherwise throw :param int stream_id: Id of stream entry to retrieve :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` UpdateStatus. [Preview API] Update existing Audit Stream status :param int stream_id: Id of stream entry to be updated :param str status: Status of the stream :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` UpdateStream. [Preview API] Update existing Audit Stream :param :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>` stream: Stream entry :rtype: :class:`<AuditStream> <azure.devops.v6_0.audit.models.AuditStream>`
| 2.054092
| 2
|
cdk-code/tests/test_static_website_stack.py
|
KMK-Git/kaustubhk
| 0
|
6628918
|
"""
Test Static Website Stack.
"""
# pylint: disable=line-too-long
import aws_cdk as cdk
from aws_cdk import assertions
from application_stacks.static_website_stack import StaticWebsiteStack
def test_static_website_stack() -> None:
"""
Test Static Website Stack.
"""
app = cdk.App()
stack = StaticWebsiteStack(
app,
"StaticWebsiteStack",
hostedzone_domain_name="example.com",
website_subdomain="",
alternative_subdomains=["www"],
env=cdk.Environment(account="123456789012", region="ap-south-1"),
)
template = assertions.Template.from_stack(stack)
template.has_resource_properties(
"AWS::S3::Bucket",
{
"PublicAccessBlockConfiguration": {
"BlockPublicAcls": True,
"BlockPublicPolicy": True,
"IgnorePublicAcls": True,
"RestrictPublicBuckets": True,
},
"Tags": [{"Key": "aws-cdk:cr-owned:33647188", "Value": "true"}],
},
)
template.has_resource_properties(
"AWS::S3::BucketPolicy",
{
"Bucket": {"Ref": "WebsiteBucket75C24D94"},
"PolicyDocument": {
"Statement": [
{
"Action": "s3:GetObject",
"Effect": "Allow",
"Principal": {
"CanonicalUser": {
"Fn::GetAtt": [
"WebsiteDistributionOrigin1S3Origin432B5882",
"S3CanonicalUserId",
]
}
},
"Resource": {
"Fn::Join": [
"",
[
{"Fn::GetAtt": ["WebsiteBucket75C24D94", "Arn"]},
"/*",
],
]
},
}
],
"Version": "2012-10-17",
},
},
)
template.has_resource_properties(
"AWS::IAM::Role",
{
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {"Service": "lambda.amazonaws.com"},
}
],
"Version": "2012-10-17",
},
"ManagedPolicyArns": [
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
],
]
}
],
},
)
template.has_resource_properties(
"AWS::IAM::Policy",
{
"PolicyDocument": {
"Statement": [
{
"Action": [
"acm:RequestCertificate",
"acm:DescribeCertificate",
"acm:DeleteCertificate",
"acm:AddTagsToCertificate",
],
"Effect": "Allow",
"Resource": "*",
},
{"Action": "route53:GetChange", "Effect": "Allow", "Resource": "*"},
{
"Action": "route53:changeResourceRecordSets",
"Effect": "Allow",
"Resource": {
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":route53:::hostedzone/DUMMY",
],
]
},
},
],
"Version": "2012-10-17",
},
"PolicyName": "SiteCertificateCertificateRequestorFunctionServiceRoleDefaultPolicy96ED5C9C",
"Roles": [
{
"Ref": "SiteCertificateCertificateRequestorFunctionServiceRole645E891D"
}
],
},
)
template.has_resource_properties(
"AWS::Lambda::Function",
{
"Code": {
"S3Bucket": "cdk-hnb659fds-assets-123456789012-ap-south-1",
"S3Key": "4d3f21fe611d8ebfd4f1f69754b7f986fed4ecf648d4fafe941cd81ede6cf60c.zip",
},
"Role": {
"Fn::GetAtt": [
"SiteCertificateCertificateRequestorFunctionServiceRole645E891D",
"Arn",
]
},
"Handler": "index.certificateRequestHandler",
"Runtime": "nodejs12.x",
"Timeout": 900,
},
)
template.has_resource_properties(
"AWS::CloudFormation::CustomResource",
{
"ServiceToken": {
"Fn::GetAtt": [
"SiteCertificateCertificateRequestorFunction7CFA7DEA",
"Arn",
]
},
"DomainName": "example.com",
"SubjectAlternativeNames": ["www.example.com"],
"HostedZoneId": "DUMMY",
"Region": "us-east-1",
},
)
template.has_resource_properties(
"AWS::CloudFront::Function",
{
"AutoPublish": True,
"FunctionCode": "// https://github.com/aws-samples/amazon-cloudfront-functions/tree/main/url-rewrite-single-page-apps\nfunction handler(event) {\n var request = event.request;\n var uri = request.uri;\n\n // Check whether the URI is missing a file name.\n if (uri.endsWith('/')) {\n request.uri += 'index.html';\n }\n // Check whether the URI is missing a file extension.\n else if (!uri.includes('.')) {\n request.uri += '/index.html';\n }\n\n return request;\n}",
"FunctionConfig": {
"Runtime": "cloudfront-js-1.0",
},
},
)
template.has_resource_properties(
"AWS::CloudFront::CloudFrontOriginAccessIdentity",
{"CloudFrontOriginAccessIdentityConfig": {}},
)
template.has_resource_properties(
"AWS::CloudFront::ResponseHeadersPolicy",
{
"ResponseHeadersPolicyConfig": {
"Comment": "Security Headers",
"Name": "kaustubhk-SecurityHeadersPolicy",
"SecurityHeadersConfig": {
"ContentSecurityPolicy": {
"ContentSecurityPolicy": "default-src 'self'; img-src https://*; child-src 'none'; object-src 'none'; script-src 'unsafe-inline' 'self'; style-src 'unsafe-inline' 'self'; font-src 'self' data:;",
"Override": True,
},
"ContentTypeOptions": {"Override": True},
"FrameOptions": {"FrameOption": "DENY", "Override": True},
"ReferrerPolicy": {
"Override": True,
"ReferrerPolicy": "no-referrer",
},
"StrictTransportSecurity": {
"AccessControlMaxAgeSec": 63072000,
"IncludeSubdomains": True,
"Override": True,
"Preload": True,
},
"XSSProtection": {
"ModeBlock": True,
"Override": True,
"Protection": True,
},
},
}
},
)
template.has_resource_properties(
"AWS::CloudFront::Distribution",
{
"DistributionConfig": {
"Aliases": ["example.com", "www.example.com"],
"CustomErrorResponses": [
{
"ErrorCode": 403,
"ResponseCode": 404,
"ResponsePagePath": "/404.html",
}
],
"DefaultCacheBehavior": {
"CachePolicyId": "658327ea-f89d-4fab-a63d-7e88639e58f6",
"Compress": True,
"FunctionAssociations": [
{
"EventType": "viewer-request",
"FunctionARN": {
"Fn::GetAtt": [
"CloudfrontFunction11FEE36B",
"FunctionARN",
]
},
}
],
"ResponseHeadersPolicyId": {"Ref": "ResponseHeadersPolicy13DBF9E0"},
"ViewerProtocolPolicy": "redirect-to-https",
},
"Enabled": True,
"HttpVersion": "http2",
"IPV6Enabled": True,
"Origins": [
{
"DomainName": {
"Fn::GetAtt": [
"WebsiteBucket75C24D94",
"RegionalDomainName",
]
},
"S3OriginConfig": {
"OriginAccessIdentity": {
"Fn::Join": [
"",
[
"origin-access-identity/cloudfront/",
{
"Ref": "WebsiteDistributionOrigin1S3Origin432B5882"
},
],
]
}
},
}
],
"ViewerCertificate": {
"AcmCertificateArn": {
"Fn::GetAtt": [
"SiteCertificateCertificateRequestorResource6021082A",
"Arn",
]
},
"MinimumProtocolVersion": "TLSv1.2_2021",
"SslSupportMethod": "sni-only",
},
}
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "example.com.",
"Type": "A",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "www.example.com.",
"Type": "A",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "example.com.",
"Type": "AAAA",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "www.example.com.",
"Type": "AAAA",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Lambda::LayerVersion",
{
"Content": {
"S3Bucket": "cdk-hnb659fds-assets-123456789012-ap-south-1",
"S3Key": "<KEY>",
},
"Description": "/opt/awscli/aws",
},
)
template.has_resource_properties(
"Custom::CDKBucketDeployment",
{
"ServiceToken": {
"Fn::GetAtt": [
"CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756C81C01536",
"Arn",
]
},
"SourceBucketNames": ["cdk-hnb659fds-assets-123456789012-ap-south-1"],
"DestinationBucketName": {"Ref": "WebsiteBucket75C24D94"},
"Prune": True,
"DistributionId": {"Ref": "WebsiteDistribution75DCDA0B"},
"DistributionPaths": ["/*"],
},
)
template.has_resource_properties(
"AWS::IAM::Role",
{
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {"Service": "lambda.amazonaws.com"},
}
],
"Version": "2012-10-17",
},
"ManagedPolicyArns": [
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
],
]
}
],
},
)
template.has_resource_properties(
"AWS::IAM::Policy",
{
"PolicyDocument": {
"Statement": [
{
"Action": ["s3:GetObject*", "s3:GetBucket*", "s3:List*"],
"Effect": "Allow",
"Resource": [
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":s3:::cdk-hnb659fds-assets-123456789012-ap-south-1",
],
]
},
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":s3:::cdk-hnb659fds-assets-123456789012-ap-south-1/*",
],
]
},
],
},
{
"Action": [
"s3:GetObject*",
"s3:GetBucket*",
"s3:List*",
"s3:DeleteObject*",
"s3:PutObject",
"s3:PutObjectLegalHold",
"s3:PutObjectRetention",
"s3:PutObjectTagging",
"s3:PutObjectVersionTagging",
"s3:Abort*",
],
"Effect": "Allow",
"Resource": [
{"Fn::GetAtt": ["WebsiteBucket75C24D94", "Arn"]},
{
"Fn::Join": [
"",
[
{
"Fn::GetAtt": [
"WebsiteBucket75C24D94",
"Arn",
]
},
"/*",
],
]
},
],
},
{
"Action": [
"cloudfront:GetInvalidation",
"cloudfront:CreateInvalidation",
],
"Effect": "Allow",
"Resource": "*",
},
],
"Version": "2012-10-17",
},
"PolicyName": "CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756CServiceRoleDefaultPolicy88902FDF",
"Roles": [
{
"Ref": "CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756CServiceRole89A01265"
}
],
},
)
template.has_resource_properties(
"AWS::Lambda::Function",
{
"Code": {
"S3Bucket": "cdk-hnb659fds-assets-123456789012-ap-south-1",
"S3Key": "f98b78092dcdd31f5e6d47489beb5f804d4835ef86a8085d0a2053cb9ae711da.zip",
},
"Role": {
"Fn::GetAtt": [
"CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756CServiceRole89A01265",
"Arn",
]
},
"Handler": "index.handler",
"Layers": [{"Ref": "S3DeploymentAwsCliLayer8AAFE44F"}],
"Runtime": "python3.7",
"Timeout": 900,
},
)
|
"""
Test Static Website Stack.
"""
# pylint: disable=line-too-long
import aws_cdk as cdk
from aws_cdk import assertions
from application_stacks.static_website_stack import StaticWebsiteStack
def test_static_website_stack() -> None:
"""
Test Static Website Stack.
"""
app = cdk.App()
stack = StaticWebsiteStack(
app,
"StaticWebsiteStack",
hostedzone_domain_name="example.com",
website_subdomain="",
alternative_subdomains=["www"],
env=cdk.Environment(account="123456789012", region="ap-south-1"),
)
template = assertions.Template.from_stack(stack)
template.has_resource_properties(
"AWS::S3::Bucket",
{
"PublicAccessBlockConfiguration": {
"BlockPublicAcls": True,
"BlockPublicPolicy": True,
"IgnorePublicAcls": True,
"RestrictPublicBuckets": True,
},
"Tags": [{"Key": "aws-cdk:cr-owned:33647188", "Value": "true"}],
},
)
template.has_resource_properties(
"AWS::S3::BucketPolicy",
{
"Bucket": {"Ref": "WebsiteBucket75C24D94"},
"PolicyDocument": {
"Statement": [
{
"Action": "s3:GetObject",
"Effect": "Allow",
"Principal": {
"CanonicalUser": {
"Fn::GetAtt": [
"WebsiteDistributionOrigin1S3Origin432B5882",
"S3CanonicalUserId",
]
}
},
"Resource": {
"Fn::Join": [
"",
[
{"Fn::GetAtt": ["WebsiteBucket75C24D94", "Arn"]},
"/*",
],
]
},
}
],
"Version": "2012-10-17",
},
},
)
template.has_resource_properties(
"AWS::IAM::Role",
{
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {"Service": "lambda.amazonaws.com"},
}
],
"Version": "2012-10-17",
},
"ManagedPolicyArns": [
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
],
]
}
],
},
)
template.has_resource_properties(
"AWS::IAM::Policy",
{
"PolicyDocument": {
"Statement": [
{
"Action": [
"acm:RequestCertificate",
"acm:DescribeCertificate",
"acm:DeleteCertificate",
"acm:AddTagsToCertificate",
],
"Effect": "Allow",
"Resource": "*",
},
{"Action": "route53:GetChange", "Effect": "Allow", "Resource": "*"},
{
"Action": "route53:changeResourceRecordSets",
"Effect": "Allow",
"Resource": {
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":route53:::hostedzone/DUMMY",
],
]
},
},
],
"Version": "2012-10-17",
},
"PolicyName": "SiteCertificateCertificateRequestorFunctionServiceRoleDefaultPolicy96ED5C9C",
"Roles": [
{
"Ref": "SiteCertificateCertificateRequestorFunctionServiceRole645E891D"
}
],
},
)
template.has_resource_properties(
"AWS::Lambda::Function",
{
"Code": {
"S3Bucket": "cdk-hnb659fds-assets-123456789012-ap-south-1",
"S3Key": "4d3f21fe611d8ebfd4f1f69754b7f986fed4ecf648d4fafe941cd81ede6cf60c.zip",
},
"Role": {
"Fn::GetAtt": [
"SiteCertificateCertificateRequestorFunctionServiceRole645E891D",
"Arn",
]
},
"Handler": "index.certificateRequestHandler",
"Runtime": "nodejs12.x",
"Timeout": 900,
},
)
template.has_resource_properties(
"AWS::CloudFormation::CustomResource",
{
"ServiceToken": {
"Fn::GetAtt": [
"SiteCertificateCertificateRequestorFunction7CFA7DEA",
"Arn",
]
},
"DomainName": "example.com",
"SubjectAlternativeNames": ["www.example.com"],
"HostedZoneId": "DUMMY",
"Region": "us-east-1",
},
)
template.has_resource_properties(
"AWS::CloudFront::Function",
{
"AutoPublish": True,
"FunctionCode": "// https://github.com/aws-samples/amazon-cloudfront-functions/tree/main/url-rewrite-single-page-apps\nfunction handler(event) {\n var request = event.request;\n var uri = request.uri;\n\n // Check whether the URI is missing a file name.\n if (uri.endsWith('/')) {\n request.uri += 'index.html';\n }\n // Check whether the URI is missing a file extension.\n else if (!uri.includes('.')) {\n request.uri += '/index.html';\n }\n\n return request;\n}",
"FunctionConfig": {
"Runtime": "cloudfront-js-1.0",
},
},
)
template.has_resource_properties(
"AWS::CloudFront::CloudFrontOriginAccessIdentity",
{"CloudFrontOriginAccessIdentityConfig": {}},
)
template.has_resource_properties(
"AWS::CloudFront::ResponseHeadersPolicy",
{
"ResponseHeadersPolicyConfig": {
"Comment": "Security Headers",
"Name": "kaustubhk-SecurityHeadersPolicy",
"SecurityHeadersConfig": {
"ContentSecurityPolicy": {
"ContentSecurityPolicy": "default-src 'self'; img-src https://*; child-src 'none'; object-src 'none'; script-src 'unsafe-inline' 'self'; style-src 'unsafe-inline' 'self'; font-src 'self' data:;",
"Override": True,
},
"ContentTypeOptions": {"Override": True},
"FrameOptions": {"FrameOption": "DENY", "Override": True},
"ReferrerPolicy": {
"Override": True,
"ReferrerPolicy": "no-referrer",
},
"StrictTransportSecurity": {
"AccessControlMaxAgeSec": 63072000,
"IncludeSubdomains": True,
"Override": True,
"Preload": True,
},
"XSSProtection": {
"ModeBlock": True,
"Override": True,
"Protection": True,
},
},
}
},
)
template.has_resource_properties(
"AWS::CloudFront::Distribution",
{
"DistributionConfig": {
"Aliases": ["example.com", "www.example.com"],
"CustomErrorResponses": [
{
"ErrorCode": 403,
"ResponseCode": 404,
"ResponsePagePath": "/404.html",
}
],
"DefaultCacheBehavior": {
"CachePolicyId": "658327ea-f89d-4fab-a63d-7e88639e58f6",
"Compress": True,
"FunctionAssociations": [
{
"EventType": "viewer-request",
"FunctionARN": {
"Fn::GetAtt": [
"CloudfrontFunction11FEE36B",
"FunctionARN",
]
},
}
],
"ResponseHeadersPolicyId": {"Ref": "ResponseHeadersPolicy13DBF9E0"},
"ViewerProtocolPolicy": "redirect-to-https",
},
"Enabled": True,
"HttpVersion": "http2",
"IPV6Enabled": True,
"Origins": [
{
"DomainName": {
"Fn::GetAtt": [
"WebsiteBucket75C24D94",
"RegionalDomainName",
]
},
"S3OriginConfig": {
"OriginAccessIdentity": {
"Fn::Join": [
"",
[
"origin-access-identity/cloudfront/",
{
"Ref": "WebsiteDistributionOrigin1S3Origin432B5882"
},
],
]
}
},
}
],
"ViewerCertificate": {
"AcmCertificateArn": {
"Fn::GetAtt": [
"SiteCertificateCertificateRequestorResource6021082A",
"Arn",
]
},
"MinimumProtocolVersion": "TLSv1.2_2021",
"SslSupportMethod": "sni-only",
},
}
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "example.com.",
"Type": "A",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "www.example.com.",
"Type": "A",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "example.com.",
"Type": "AAAA",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Route53::RecordSet",
{
"Name": "www.example.com.",
"Type": "AAAA",
"AliasTarget": {
"DNSName": {
"Fn::GetAtt": ["WebsiteDistribution75DCDA0B", "DomainName"]
},
"HostedZoneId": {
"Fn::FindInMap": [
"AWSCloudFrontPartitionHostedZoneIdMap",
{"Ref": "AWS::Partition"},
"zoneId",
]
},
},
"HostedZoneId": "DUMMY",
},
)
template.has_resource_properties(
"AWS::Lambda::LayerVersion",
{
"Content": {
"S3Bucket": "cdk-hnb659fds-assets-123456789012-ap-south-1",
"S3Key": "<KEY>",
},
"Description": "/opt/awscli/aws",
},
)
template.has_resource_properties(
"Custom::CDKBucketDeployment",
{
"ServiceToken": {
"Fn::GetAtt": [
"CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756C81C01536",
"Arn",
]
},
"SourceBucketNames": ["cdk-hnb659fds-assets-123456789012-ap-south-1"],
"DestinationBucketName": {"Ref": "WebsiteBucket75C24D94"},
"Prune": True,
"DistributionId": {"Ref": "WebsiteDistribution75DCDA0B"},
"DistributionPaths": ["/*"],
},
)
template.has_resource_properties(
"AWS::IAM::Role",
{
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {"Service": "lambda.amazonaws.com"},
}
],
"Version": "2012-10-17",
},
"ManagedPolicyArns": [
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
],
]
}
],
},
)
template.has_resource_properties(
"AWS::IAM::Policy",
{
"PolicyDocument": {
"Statement": [
{
"Action": ["s3:GetObject*", "s3:GetBucket*", "s3:List*"],
"Effect": "Allow",
"Resource": [
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":s3:::cdk-hnb659fds-assets-123456789012-ap-south-1",
],
]
},
{
"Fn::Join": [
"",
[
"arn:",
{"Ref": "AWS::Partition"},
":s3:::cdk-hnb659fds-assets-123456789012-ap-south-1/*",
],
]
},
],
},
{
"Action": [
"s3:GetObject*",
"s3:GetBucket*",
"s3:List*",
"s3:DeleteObject*",
"s3:PutObject",
"s3:PutObjectLegalHold",
"s3:PutObjectRetention",
"s3:PutObjectTagging",
"s3:PutObjectVersionTagging",
"s3:Abort*",
],
"Effect": "Allow",
"Resource": [
{"Fn::GetAtt": ["WebsiteBucket75C24D94", "Arn"]},
{
"Fn::Join": [
"",
[
{
"Fn::GetAtt": [
"WebsiteBucket75C24D94",
"Arn",
]
},
"/*",
],
]
},
],
},
{
"Action": [
"cloudfront:GetInvalidation",
"cloudfront:CreateInvalidation",
],
"Effect": "Allow",
"Resource": "*",
},
],
"Version": "2012-10-17",
},
"PolicyName": "CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756CServiceRoleDefaultPolicy88902FDF",
"Roles": [
{
"Ref": "CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756CServiceRole89A01265"
}
],
},
)
template.has_resource_properties(
"AWS::Lambda::Function",
{
"Code": {
"S3Bucket": "cdk-hnb659fds-assets-123456789012-ap-south-1",
"S3Key": "f98b78092dcdd31f5e6d47489beb5f804d4835ef86a8085d0a2053cb9ae711da.zip",
},
"Role": {
"Fn::GetAtt": [
"CustomCDKBucketDeployment8693BB64968944B69AAFB0CC9EB8756CServiceRole89A01265",
"Arn",
]
},
"Handler": "index.handler",
"Layers": [{"Ref": "S3DeploymentAwsCliLayer8AAFE44F"}],
"Runtime": "python3.7",
"Timeout": 900,
},
)
|
en
| 0.640722
|
Test Static Website Stack. # pylint: disable=line-too-long Test Static Website Stack.
| 2.35971
| 2
|
test.py
|
abhimanoj/101-Days-Of-MLCode
| 0
|
6628919
|
from utils import *
def test_error():
"""
raise exception..
"""
raise DataError("error is here")
"""
this will close the block here..
"""
try:
test_error()
except (DataError) as e:
print("errro",e)
|
from utils import *
def test_error():
"""
raise exception..
"""
raise DataError("error is here")
"""
this will close the block here..
"""
try:
test_error()
except (DataError) as e:
print("errro",e)
|
en
| 0.659761
|
raise exception.. this will close the block here..
| 2.515329
| 3
|
catalyst/support/ccxt_issue_1358.py
|
guilhermeprokisch/catalyst
| 0
|
6628920
|
import ccxt
bitfinex = ccxt.bitfinex()
bitfinex.verbose = True
ohlcvs = bitfinex.fetch_ohlcv('ETH/BTC', '30m', 1504224000000)
dt = bitfinex.iso8601(ohlcvs[0][0])
print(dt) # should print '2017-09-01T00:00:00.000Z'
|
import ccxt
bitfinex = ccxt.bitfinex()
bitfinex.verbose = True
ohlcvs = bitfinex.fetch_ohlcv('ETH/BTC', '30m', 1504224000000)
dt = bitfinex.iso8601(ohlcvs[0][0])
print(dt) # should print '2017-09-01T00:00:00.000Z'
|
en
| 0.419676
|
# should print '2017-09-01T00:00:00.000Z'
| 2.248142
| 2
|
qiskit_experiments/library/characterization/ef_spectroscopy.py
|
eliarbel/qiskit-experiments
| 1
|
6628921
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Spectroscopy for the e-f transition."""
from qiskit import QuantumCircuit
from qiskit.circuit import Gate
from qiskit_experiments.curve_analysis import ParameterRepr
from qiskit_experiments.library.characterization.qubit_spectroscopy import QubitSpectroscopy
from qiskit_experiments.framework import Options
class EFSpectroscopy(QubitSpectroscopy):
"""Class that runs spectroscopy on the e-f transition by scanning the frequency.
The circuits produced by spectroscopy, i.e.
.. parsed-literal::
┌───┐┌────────────┐ ░ ┌─┐
q_0: ┤ X ├┤ Spec(freq) ├─░─┤M├
└───┘└────────────┘ ░ └╥┘
measure: 1/═══════════════════════╩═
0
"""
@classmethod
def _default_analysis_options(cls) -> Options:
"""Default analysis options."""
options = super()._default_analysis_options()
options.result_parameters = [ParameterRepr("freq", "f12", "Hz")]
return options
def _template_circuit(self, freq_param) -> QuantumCircuit:
"""Return the template quantum circuit."""
circuit = QuantumCircuit(1)
circuit.x(0)
circuit.append(Gate(name=self.__spec_gate_name__, num_qubits=1, params=[freq_param]), (0,))
circuit.measure_active()
return circuit
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Spectroscopy for the e-f transition."""
from qiskit import QuantumCircuit
from qiskit.circuit import Gate
from qiskit_experiments.curve_analysis import ParameterRepr
from qiskit_experiments.library.characterization.qubit_spectroscopy import QubitSpectroscopy
from qiskit_experiments.framework import Options
class EFSpectroscopy(QubitSpectroscopy):
"""Class that runs spectroscopy on the e-f transition by scanning the frequency.
The circuits produced by spectroscopy, i.e.
.. parsed-literal::
┌───┐┌────────────┐ ░ ┌─┐
q_0: ┤ X ├┤ Spec(freq) ├─░─┤M├
└───┘└────────────┘ ░ └╥┘
measure: 1/═══════════════════════╩═
0
"""
@classmethod
def _default_analysis_options(cls) -> Options:
"""Default analysis options."""
options = super()._default_analysis_options()
options.result_parameters = [ParameterRepr("freq", "f12", "Hz")]
return options
def _template_circuit(self, freq_param) -> QuantumCircuit:
"""Return the template quantum circuit."""
circuit = QuantumCircuit(1)
circuit.x(0)
circuit.append(Gate(name=self.__spec_gate_name__, num_qubits=1, params=[freq_param]), (0,))
circuit.measure_active()
return circuit
|
en
| 0.786369
|
# This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. Spectroscopy for the e-f transition. Class that runs spectroscopy on the e-f transition by scanning the frequency. The circuits produced by spectroscopy, i.e. .. parsed-literal:: ┌───┐┌────────────┐ ░ ┌─┐ q_0: ┤ X ├┤ Spec(freq) ├─░─┤M├ └───┘└────────────┘ ░ └╥┘ measure: 1/═══════════════════════╩═ 0 Default analysis options. Return the template quantum circuit.
| 2.426081
| 2
|
SLpackage/private/pacbio/pythonpkgs/pbcore/lib/python2.7/site-packages/pbcore/util/statistics.py
|
fanglab/6mASCOPE
| 5
|
6628922
|
<reponame>fanglab/6mASCOPE
import math
import numpy as np
class Constants(object):
MAX_QV = 60
def accuracy_as_phred_qv(accuracy, max_qv=Constants.MAX_QV):
"""
Convert fractional accuracy to Phred QV: 0.999 --> 30
returns: float or numpy array
"""
if isinstance(accuracy, (float, int)):
assert 0 <= accuracy <= 1.0
if accuracy == 1:
return max_qv
return -10 * math.log10(1 - accuracy)
else:
if isinstance(accuracy, (tuple, list)):
accuracy = np.array(accuracy)
error_rate = 1.0 - accuracy
min_error_rate = 10 ** (-max_qv / 10.0)
zero_error = error_rate < min_error_rate
error_rate[zero_error] = min_error_rate
return -10 * np.log10(error_rate)
|
import math
import numpy as np
class Constants(object):
MAX_QV = 60
def accuracy_as_phred_qv(accuracy, max_qv=Constants.MAX_QV):
"""
Convert fractional accuracy to Phred QV: 0.999 --> 30
returns: float or numpy array
"""
if isinstance(accuracy, (float, int)):
assert 0 <= accuracy <= 1.0
if accuracy == 1:
return max_qv
return -10 * math.log10(1 - accuracy)
else:
if isinstance(accuracy, (tuple, list)):
accuracy = np.array(accuracy)
error_rate = 1.0 - accuracy
min_error_rate = 10 ** (-max_qv / 10.0)
zero_error = error_rate < min_error_rate
error_rate[zero_error] = min_error_rate
return -10 * np.log10(error_rate)
|
en
| 0.383677
|
Convert fractional accuracy to Phred QV: 0.999 --> 30 returns: float or numpy array
| 3.199331
| 3
|
mnelab/dialogs/findeventsdialog.py
|
rob-luke/mnelab
| 156
|
6628923
|
<gh_stars>100-1000
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from qtpy.QtWidgets import (QDialog, QVBoxLayout, QGridLayout, QLabel, QCheckBox, QSpinBox,
QDialogButtonBox, QComboBox)
MAX_INT = 2147483647
class FindEventsDialog(QDialog):
def __init__(self, parent, channels, default_stim):
super().__init__(parent)
self.setWindowTitle("Find Events")
vbox = QVBoxLayout(self)
grid = QGridLayout()
grid.addWidget(QLabel("Stim channel:"), 0, 0)
self.stimchan = QComboBox()
self.stimchan.addItems(channels)
self.stimchan.setCurrentIndex(default_stim)
grid.addWidget(self.stimchan, 0, 1)
grid.addWidget(QLabel("Consecutive"), 1, 0)
self.consecutive = QCheckBox()
self.consecutive.setChecked(True)
grid.addWidget(self.consecutive, 1, 1)
grid.addWidget(QLabel("Initial event"), 2, 0)
self.initial_event = QCheckBox()
self.initial_event.setChecked(True)
grid.addWidget(self.initial_event, 2, 1)
grid.addWidget(QLabel("Cast to unsigned integer"), 3, 0)
self.uint_cast = QCheckBox()
self.uint_cast.setChecked(True)
grid.addWidget(self.uint_cast, 3, 1)
grid.addWidget(QLabel("Minimum duration:"), 4, 0)
self.minduredit = QSpinBox()
self.minduredit.setMaximum(MAX_INT)
grid.addWidget(self.minduredit, 4, 1)
grid.addWidget(QLabel("Shortest event:"), 5, 0)
self.shortesteventedit = QSpinBox()
self.shortesteventedit.setMaximum(MAX_INT)
grid.addWidget(self.shortesteventedit, 5, 1)
vbox.addLayout(grid)
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
vbox.addWidget(buttonbox)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
vbox.setSizeConstraint(QVBoxLayout.SetFixedSize)
|
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from qtpy.QtWidgets import (QDialog, QVBoxLayout, QGridLayout, QLabel, QCheckBox, QSpinBox,
QDialogButtonBox, QComboBox)
MAX_INT = 2147483647
class FindEventsDialog(QDialog):
def __init__(self, parent, channels, default_stim):
super().__init__(parent)
self.setWindowTitle("Find Events")
vbox = QVBoxLayout(self)
grid = QGridLayout()
grid.addWidget(QLabel("Stim channel:"), 0, 0)
self.stimchan = QComboBox()
self.stimchan.addItems(channels)
self.stimchan.setCurrentIndex(default_stim)
grid.addWidget(self.stimchan, 0, 1)
grid.addWidget(QLabel("Consecutive"), 1, 0)
self.consecutive = QCheckBox()
self.consecutive.setChecked(True)
grid.addWidget(self.consecutive, 1, 1)
grid.addWidget(QLabel("Initial event"), 2, 0)
self.initial_event = QCheckBox()
self.initial_event.setChecked(True)
grid.addWidget(self.initial_event, 2, 1)
grid.addWidget(QLabel("Cast to unsigned integer"), 3, 0)
self.uint_cast = QCheckBox()
self.uint_cast.setChecked(True)
grid.addWidget(self.uint_cast, 3, 1)
grid.addWidget(QLabel("Minimum duration:"), 4, 0)
self.minduredit = QSpinBox()
self.minduredit.setMaximum(MAX_INT)
grid.addWidget(self.minduredit, 4, 1)
grid.addWidget(QLabel("Shortest event:"), 5, 0)
self.shortesteventedit = QSpinBox()
self.shortesteventedit.setMaximum(MAX_INT)
grid.addWidget(self.shortesteventedit, 5, 1)
vbox.addLayout(grid)
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
vbox.addWidget(buttonbox)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
vbox.setSizeConstraint(QVBoxLayout.SetFixedSize)
|
en
| 0.4752
|
# Authors: <NAME> <<EMAIL>> # # License: BSD (3-clause)
| 1.997694
| 2
|
icarus_simulator/sat_core/orbit_util.py
|
RubenFr/ICARUS-framework
| 5
|
6628924
|
<reponame>RubenFr/ICARUS-framework
# 2020 <NAME> and <NAME>
from datetime import datetime, timedelta
import ephem
import numpy as np
from .planetary_const import *
def right_ascension_ascending_node(orbit_idx: int, num_orbits: int) -> ephem.Angle:
"""Compute the right ascension of the ascending node (raan).
Args:
orbit_idx: Index of the orbit for which the raan is computed.
num_orbits: Number of orbits in the constellation.
Returns:
ephem.Angle: The raan in degrees.
"""
if num_orbits < 1:
raise ValueError
raan = float(orbit_idx * 360 / num_orbits)
raan = ephem.degrees(raan)
return raan
def mean_anomaly(
sat_idx_in_orbit: int, num_sat_per_orbit: int, orbit_shift: float = 0.0
) -> ephem.Angle:
"""Compute the mean anomaly for the current satellite.
Args:
sat_idx_in_orbit: Index of the satellite inside its orbit.
num_sat_per_orbit: Total number of satellites in each orbit of the
constellation.
orbit_shift: Shift of the orbit from the equatorial plane. This
can be used to create more complex walker-type constellations.
Returns:
ephem.Angle: The mean anomaly for the current satellite.
"""
if num_sat_per_orbit < 1:
raise ValueError
ma = orbit_shift + sat_idx_in_orbit * 360 / num_sat_per_orbit
ma = ephem.degrees(ma)
return ma
def elevation_to_period(elevation: float) -> float:
"""Compute the period of an orbit in seconds.
Returns:
float: The orbital period of a satellite at the given elevation in
seconds.
"""
assert elevation > 0
elevation = float(elevation)
radius = elevation + EARTH_RADIUS
period = 2 * np.pi * np.sqrt(np.power(radius, 3) / MU)
return period
def elevation_to_mean_motion(elevation: float, unit: str = "revs") -> float:
"""Compute the mean motion of the satellite given its elevation.
Args:
elevation: The elevation of the satellite, in meters from sea level.
unit: The unit in which to compute the mean motion. Can be either
"radians", "degrees", or "revs" for revolutions.
Returns:
float: The mean motion of a satellite orbiting at the given elevation.
The measure is `unit / day`.
"""
# Get the period in days
period = elevation_to_period(elevation) / SEC_IN_DAY
if unit == "radians":
return 2 * np.pi / period
elif unit == "degrees":
return 360 / period
elif unit == "revs":
return 1 / period
else:
raise ValueError("Specify a valid unit for mean motion")
def epoch_offset_to_date(
epoch: str, hours: int = 0, minutes: int = 0, seconds: int = 0, millisecs: int = 0
) -> ephem.Date:
"""Compute the date obtained by adding an offset to epoch."""
epoch_datetime = datetime.strptime(epoch, "%Y/%m/%d %H:%M:%S")
delta = timedelta(
hours=hours, minutes=minutes, seconds=seconds, milliseconds=millisecs
)
target = epoch_datetime + delta
targetstr = target.strftime("%Y/%m/%d %H:%M:%S.%f")
return targetstr
|
# 2020 <NAME> and <NAME>
from datetime import datetime, timedelta
import ephem
import numpy as np
from .planetary_const import *
def right_ascension_ascending_node(orbit_idx: int, num_orbits: int) -> ephem.Angle:
"""Compute the right ascension of the ascending node (raan).
Args:
orbit_idx: Index of the orbit for which the raan is computed.
num_orbits: Number of orbits in the constellation.
Returns:
ephem.Angle: The raan in degrees.
"""
if num_orbits < 1:
raise ValueError
raan = float(orbit_idx * 360 / num_orbits)
raan = ephem.degrees(raan)
return raan
def mean_anomaly(
sat_idx_in_orbit: int, num_sat_per_orbit: int, orbit_shift: float = 0.0
) -> ephem.Angle:
"""Compute the mean anomaly for the current satellite.
Args:
sat_idx_in_orbit: Index of the satellite inside its orbit.
num_sat_per_orbit: Total number of satellites in each orbit of the
constellation.
orbit_shift: Shift of the orbit from the equatorial plane. This
can be used to create more complex walker-type constellations.
Returns:
ephem.Angle: The mean anomaly for the current satellite.
"""
if num_sat_per_orbit < 1:
raise ValueError
ma = orbit_shift + sat_idx_in_orbit * 360 / num_sat_per_orbit
ma = ephem.degrees(ma)
return ma
def elevation_to_period(elevation: float) -> float:
"""Compute the period of an orbit in seconds.
Returns:
float: The orbital period of a satellite at the given elevation in
seconds.
"""
assert elevation > 0
elevation = float(elevation)
radius = elevation + EARTH_RADIUS
period = 2 * np.pi * np.sqrt(np.power(radius, 3) / MU)
return period
def elevation_to_mean_motion(elevation: float, unit: str = "revs") -> float:
"""Compute the mean motion of the satellite given its elevation.
Args:
elevation: The elevation of the satellite, in meters from sea level.
unit: The unit in which to compute the mean motion. Can be either
"radians", "degrees", or "revs" for revolutions.
Returns:
float: The mean motion of a satellite orbiting at the given elevation.
The measure is `unit / day`.
"""
# Get the period in days
period = elevation_to_period(elevation) / SEC_IN_DAY
if unit == "radians":
return 2 * np.pi / period
elif unit == "degrees":
return 360 / period
elif unit == "revs":
return 1 / period
else:
raise ValueError("Specify a valid unit for mean motion")
def epoch_offset_to_date(
epoch: str, hours: int = 0, minutes: int = 0, seconds: int = 0, millisecs: int = 0
) -> ephem.Date:
"""Compute the date obtained by adding an offset to epoch."""
epoch_datetime = datetime.strptime(epoch, "%Y/%m/%d %H:%M:%S")
delta = timedelta(
hours=hours, minutes=minutes, seconds=seconds, milliseconds=millisecs
)
target = epoch_datetime + delta
targetstr = target.strftime("%Y/%m/%d %H:%M:%S.%f")
return targetstr
|
en
| 0.792939
|
# 2020 <NAME> and <NAME> Compute the right ascension of the ascending node (raan). Args: orbit_idx: Index of the orbit for which the raan is computed. num_orbits: Number of orbits in the constellation. Returns: ephem.Angle: The raan in degrees. Compute the mean anomaly for the current satellite. Args: sat_idx_in_orbit: Index of the satellite inside its orbit. num_sat_per_orbit: Total number of satellites in each orbit of the constellation. orbit_shift: Shift of the orbit from the equatorial plane. This can be used to create more complex walker-type constellations. Returns: ephem.Angle: The mean anomaly for the current satellite. Compute the period of an orbit in seconds. Returns: float: The orbital period of a satellite at the given elevation in seconds. Compute the mean motion of the satellite given its elevation. Args: elevation: The elevation of the satellite, in meters from sea level. unit: The unit in which to compute the mean motion. Can be either "radians", "degrees", or "revs" for revolutions. Returns: float: The mean motion of a satellite orbiting at the given elevation. The measure is `unit / day`. # Get the period in days Compute the date obtained by adding an offset to epoch.
| 3.253297
| 3
|
tests/test_iiasa.py
|
mabudz/pyam
| 2
|
6628925
|
<filename>tests/test_iiasa.py
import os
import pytest
import pandas as pd
import numpy as np
import numpy.testing as npt
import pandas.testing as pdt
from pyam import IamDataFrame, iiasa, read_iiasa, META_IDX
from pyam.testing import assert_iamframe_equal
from .conftest import IIASA_UNAVAILABLE, META_COLS, TEST_API, TEST_API_NAME
if IIASA_UNAVAILABLE:
    pytest.skip("IIASA database API unavailable", allow_module_level=True)
# check to see if we can do online testing of db authentication
TEST_ENV_USER = "IIASA_CONN_TEST_USER"
TEST_ENV_PW = "IIASA_CONN_TEST_PW"
CONN_ENV_AVAILABLE = TEST_ENV_USER in os.environ and TEST_ENV_PW in os.environ
CONN_ENV_REASON = "Requires env variables defined: {} and {}".format(
    TEST_ENV_USER, TEST_ENV_PW
)
# columns identifying a scenario version in the test database
VERSION_COLS = ["version", "is_default"]
# expected meta indicators of the scenarios in the test database,
# indexed by model/scenario (META_IDX); note `scen_a` has a non-default
# second version (is_default=False)
META_DF = pd.DataFrame(
    [
        ["model_a", "scen_a", 1, True, 1, "foo"],
        ["model_a", "scen_b", 1, True, 2, np.nan],
        ["model_a", "scen_a", 2, False, 1, "bar"],
        ["model_b", "scen_a", 1, True, 3, "baz"],
    ],
    columns=META_IDX + VERSION_COLS + META_COLS,
).set_index(META_IDX)
# expected timeseries data for `model_b` (includes subannual resolution)
MODEL_B_DF = pd.DataFrame(
    [
        ["Primary Energy", "EJ/yr", "Summer", 1, 3],
        ["Primary Energy", "EJ/yr", "Year", 3, 8],
        ["Primary Energy|Coal", "EJ/yr", "Summer", 0.4, 2],
        ["Primary Energy|Coal", "EJ/yr", "Year", 0.9, 5],
    ],
    columns=["variable", "unit", "subannual", 2005, 2010],
)
# expected timeseries data when querying non-default scenario versions
NON_DEFAULT_DF = pd.DataFrame(
    [
        ["model_a", "scen_a", 2, "Primary Energy", "EJ/yr", "Year", 2, 7],
        ["model_a", "scen_a", 2, "Primary Energy|Coal", "EJ/yr", "Year", 0.8, 4],
        ["model_b", "scen_a", 1, "Primary Energy", "EJ/yr", "Summer", 1, 3],
        ["model_b", "scen_a", 1, "Primary Energy", "EJ/yr", "Year", 3, 8],
        ["model_b", "scen_a", 1, "Primary Energy|Coal", "EJ/yr", "Summer", 0.4, 2],
        ["model_b", "scen_a", 1, "Primary Energy|Coal", "EJ/yr", "Year", 0.9, 5],
    ],
    columns=META_IDX + ["version", "variable", "unit", "subannual", 2005, 2010],
)
def test_unknown_conn():
    # instantiating a Connection with an unrecognized API name must fail
    with pytest.raises(ValueError):
        iiasa.Connection("foo")
def test_valid_connections():
    # the test API must be listed among the known valid connections
    # (the previous comment was copy-pasted from `test_unknown_conn`)
    assert TEST_API in iiasa.Connection().valid_connections
def test_anon_conn(conn):
    # an anonymous (no-credentials) connection resolves to the test API name
    assert conn.current_connection == TEST_API_NAME
@pytest.mark.skipif(not CONN_ENV_AVAILABLE, reason=CONN_ENV_REASON)
def test_conn_creds_config():
    # credentials stored via `set_config` are picked up by a new Connection
    iiasa.set_config(os.environ[TEST_ENV_USER], os.environ[TEST_ENV_PW])
    conn = iiasa.Connection(TEST_API)
    assert conn.current_connection == TEST_API_NAME
@pytest.mark.skipif(not CONN_ENV_AVAILABLE, reason=CONN_ENV_REASON)
def test_conn_creds_tuple():
    # credentials can be passed as a (username, password) tuple
    user, pw = os.environ[TEST_ENV_USER], os.environ[TEST_ENV_PW]
    conn = iiasa.Connection(TEST_API, creds=(user, pw))
    assert conn.current_connection == TEST_API_NAME
@pytest.mark.skipif(not CONN_ENV_AVAILABLE, reason=CONN_ENV_REASON)
def test_conn_creds_dict():
    # credentials can be passed as a dict with `username`/`password` keys
    user, pw = os.environ[TEST_ENV_USER], os.environ[TEST_ENV_PW]
    conn = iiasa.Connection(TEST_API, creds={"username": user, "password": pw})
    assert conn.current_connection == TEST_API_NAME
def test_conn_cleartext_raises():
    # passing cleartext credentials directly is deprecated and must raise
    # (the check is for DeprecationWarning, not for invalid credentials)
    creds = ("_foo", "_bar")
    pytest.raises(DeprecationWarning, iiasa.Connection, TEST_API, creds=creds)
def test_variables(conn):
    # the connection must expose exactly the expected variables
    expected = ["Primary Energy", "Primary Energy|Coal"]
    npt.assert_array_equal(conn.variables(), expected)
def test_regions(conn):
    # the connection must expose exactly the expected regions
    expected = ["World", "region_a"]
    npt.assert_array_equal(conn.regions(), expected)
def test_regions_with_synonyms(conn):
    # requesting synonyms returns a region/synonym dataframe
    obs = conn.regions(include_synonyms=True)
    exp = pd.DataFrame(
        [["World", None], ["region_a", "ISO_a"]], columns=["region", "synonym"]
    )
    pdt.assert_frame_equal(obs, exp)
def test_regions_empty_response():
    # an empty JSON payload converts to an empty dataframe
    result = iiasa.Connection.convert_regions_payload("[]", include_synonyms=True)
    assert result.empty
def test_regions_no_synonyms_response():
    # a payload without a `synonyms` field still converts to a dataframe
    json = '[{"id":1,"name":"World","parent":"World","hierarchy":"common"}]'
    obs = iiasa.Connection.convert_regions_payload(json, include_synonyms=True)
    assert not obs.empty
def test_regions_with_synonyms_response():
    # a payload with synonym lists expands into region/synonym rows
    # (local renamed from `json`, which shadowed the stdlib module name)
    payload = """
    [
        {
            "id":1,"name":"World","parent":"World","hierarchy":"common",
            "synonyms":[]
        },
        {
            "id":2,"name":"USA","parent":"World","hierarchy":"country",
            "synonyms":["US","United States"]
        },
        {
            "id":3,"name":"Germany","parent":"World","hierarchy":"country",
            "synonyms":["Deutschland","DE"]
        }
    ]
    """
    result = iiasa.Connection.convert_regions_payload(payload, include_synonyms=True)
    assert not result.empty
    assert result[result.region == "USA"].synonym.isin(["US", "United States"]).all()
    assert result[result.region == "Germany"].synonym.isin(["Deutschland", "DE"]).all()
def test_meta_columns(conn):
    # test that connection returns the correct list of meta indicators
    # note: `meta_columns` is accessed without parentheses (a property)
    npt.assert_array_equal(conn.meta_columns, META_COLS)
@pytest.mark.parametrize("default", [True, False])
def test_index(conn, default):
    # test that connection returns the correct index
    if default:
        # default=True: only default versions, with a `version` column
        exp = META_DF.loc[META_DF.is_default, ["version"]]
    else:
        # default=False: all versions, including the `is_default` flag
        exp = META_DF[VERSION_COLS]
    pdt.assert_frame_equal(conn.index(default=default), exp, check_dtype=False)
@pytest.mark.parametrize("default", [True, False])
def test_meta(conn, default):
    # test that connection returns the correct meta dataframe
    v = "version"
    if default:
        # default=True: meta of default versions only
        exp = META_DF.loc[META_DF.is_default, [v] + META_COLS]
    else:
        # default=False: all versions, with `version` added to the index
        exp = META_DF[VERSION_COLS + META_COLS].set_index(v, append=True)
    pdt.assert_frame_equal(conn.meta(default=default), exp, check_dtype=False)
@pytest.mark.parametrize("default", [True, False])
def test_properties(conn, default):
    # test that connection returns the correct properties dataframe
    obs = conn.properties(default=default)
    if default:
        exp_cols = ["version"]
        exp = META_DF.loc[META_DF.is_default, exp_cols]
    else:
        exp_cols = VERSION_COLS
        exp = META_DF[exp_cols]
    # assert that the expected audit columns are included
    for col in ["create_user", "create_date", "update_user", "update_date"]:
        assert col in obs.columns
    # assert that the values of some columns is as expected
    # (audit values themselves are not checked -- they vary per database)
    pdt.assert_frame_equal(obs[exp_cols], exp, check_dtype=False)
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        dict(variable="Primary Energy"),
        dict(scenario="scen_a", variable="Primary Energy"),
    ],
)
def test_query_year(conn, test_df_year, kwargs):
    # test reading timeseries data (`model_a` has only yearly data)
    exp = test_df_year.copy()
    # attach the expected meta indicators (META_DF rows 0-1 are `model_a`)
    for i in ["version"] + META_COLS:
        exp.set_meta(META_DF.iloc[[0, 1]][i])
    # test method via Connection
    df = conn.query(model="model_a", **kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
    # test top-level method
    df = read_iiasa(TEST_API, model="model_a", **kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        dict(variable="Primary Energy"),
        dict(scenario="scen_a", variable="Primary Energy"),
    ],
)
def test_query_with_subannual(conn, test_pd_df, kwargs):
    # test reading timeseries data (including subannual data)
    # expected data: yearly `model_a` data plus subannual `model_b` data
    exp = IamDataFrame(test_pd_df, subannual="Year").append(
        MODEL_B_DF, model="model_b", scenario="scen_a", region="World"
    )
    for i in ["version"] + META_COLS:
        exp.set_meta(META_DF.iloc[[0, 1, 3]][i])
    # test method via Connection
    df = conn.query(**kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
    # test top-level method
    df = read_iiasa(TEST_API, **kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
@pytest.mark.parametrize(
    "meta",
    [
        ["string"],  # version column is added whether or not stated explicitly
        ["string", "version"],
    ],
)
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        dict(variable="Primary Energy"),
        dict(scenario="scen_a", variable="Primary Energy"),
    ],
)
def test_query_with_meta_arg(conn, test_pd_df, meta, kwargs):
    # test reading timeseries data with an explicit `meta` column selection
    exp = IamDataFrame(test_pd_df, subannual="Year").append(
        MODEL_B_DF, model="model_b", scenario="scen_a", region="World"
    )
    # only `version` and `string` are expected regardless of `meta` arg
    for i in ["version", "string"]:
        exp.set_meta(META_DF.iloc[[0, 1, 3]][i])
    # test method via Connection
    df = conn.query(meta=meta, **kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
    # test top-level method
    df = read_iiasa(TEST_API, meta=meta, **kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        dict(variable="Primary Energy"),
        dict(scenario="scen_a", variable="Primary Energy"),
    ],
)
def test_query_with_meta_false(conn, test_pd_df, kwargs):
    # test reading timeseries data with `meta=False` (no meta indicators
    # attached to the expected frame)
    exp = IamDataFrame(test_pd_df, subannual="Year").append(
        MODEL_B_DF, model="model_b", scenario="scen_a", region="World"
    )
    # test method via Connection
    df = conn.query(meta=False, **kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
    # test top-level method
    df = read_iiasa(TEST_API, meta=False, **kwargs)
    assert_iamframe_equal(df, exp.filter(**kwargs))
def test_query_non_default(conn, test_pd_df):
    # test reading timeseries data with non-default versions & index
    test_pd_df["subannual"] = "Year"
    test_pd_df["version"] = 1
    # expected data: default-version data plus the non-default rows
    df = pd.concat([test_pd_df[NON_DEFAULT_DF.columns], NON_DEFAULT_DF])
    # `version` becomes part of the index when querying all versions
    meta = META_DF.set_index("version", append=True)
    index = ["model", "scenario", "version"]
    exp = IamDataFrame(df, meta=meta, index=index, region="World")
    # test method via Connection
    df = conn.query(default=False)
    assert_iamframe_equal(df, exp)
    # test top-level method
    df = read_iiasa(TEST_API, default=False)
    assert_iamframe_equal(df, exp)
|
<filename>tests/test_iiasa.py
import os
import pytest
import pandas as pd
import numpy as np
import numpy.testing as npt
import pandas.testing as pdt
from pyam import IamDataFrame, iiasa, read_iiasa, META_IDX
from pyam.testing import assert_iamframe_equal
from .conftest import IIASA_UNAVAILABLE, META_COLS, TEST_API, TEST_API_NAME
if IIASA_UNAVAILABLE:
pytest.skip("IIASA database API unavailable", allow_module_level=True)
# check to see if we can do online testing of db authentication
TEST_ENV_USER = "IIASA_CONN_TEST_USER"
TEST_ENV_PW = "IIASA_CONN_TEST_PW"
CONN_ENV_AVAILABLE = TEST_ENV_USER in os.environ and TEST_ENV_PW in os.environ
CONN_ENV_REASON = "Requires env variables defined: {} and {}".format(
TEST_ENV_USER, TEST_ENV_PW
)
VERSION_COLS = ["version", "is_default"]
META_DF = pd.DataFrame(
[
["model_a", "scen_a", 1, True, 1, "foo"],
["model_a", "scen_b", 1, True, 2, np.nan],
["model_a", "scen_a", 2, False, 1, "bar"],
["model_b", "scen_a", 1, True, 3, "baz"],
],
columns=META_IDX + VERSION_COLS + META_COLS,
).set_index(META_IDX)
MODEL_B_DF = pd.DataFrame(
[
["Primary Energy", "EJ/yr", "Summer", 1, 3],
["Primary Energy", "EJ/yr", "Year", 3, 8],
["Primary Energy|Coal", "EJ/yr", "Summer", 0.4, 2],
["Primary Energy|Coal", "EJ/yr", "Year", 0.9, 5],
],
columns=["variable", "unit", "subannual", 2005, 2010],
)
NON_DEFAULT_DF = pd.DataFrame(
[
["model_a", "scen_a", 2, "Primary Energy", "EJ/yr", "Year", 2, 7],
["model_a", "scen_a", 2, "Primary Energy|Coal", "EJ/yr", "Year", 0.8, 4],
["model_b", "scen_a", 1, "Primary Energy", "EJ/yr", "Summer", 1, 3],
["model_b", "scen_a", 1, "Primary Energy", "EJ/yr", "Year", 3, 8],
["model_b", "scen_a", 1, "Primary Energy|Coal", "EJ/yr", "Summer", 0.4, 2],
["model_b", "scen_a", 1, "Primary Energy|Coal", "EJ/yr", "Year", 0.9, 5],
],
columns=META_IDX + ["version", "variable", "unit", "subannual", 2005, 2010],
)
def test_unknown_conn():
# connecting to an unknown API raises an error
pytest.raises(ValueError, iiasa.Connection, "foo")
def test_valid_connections():
# connecting to an unknown API raises an error
assert TEST_API in iiasa.Connection().valid_connections
def test_anon_conn(conn):
assert conn.current_connection == TEST_API_NAME
@pytest.mark.skipif(not CONN_ENV_AVAILABLE, reason=CONN_ENV_REASON)
def test_conn_creds_config():
iiasa.set_config(os.environ[TEST_ENV_USER], os.environ[TEST_ENV_PW])
conn = iiasa.Connection(TEST_API)
assert conn.current_connection == TEST_API_NAME
@pytest.mark.skipif(not CONN_ENV_AVAILABLE, reason=CONN_ENV_REASON)
def test_conn_creds_tuple():
user, pw = os.environ[TEST_ENV_USER], os.environ[TEST_ENV_PW]
conn = iiasa.Connection(TEST_API, creds=(user, pw))
assert conn.current_connection == TEST_API_NAME
@pytest.mark.skipif(not CONN_ENV_AVAILABLE, reason=CONN_ENV_REASON)
def test_conn_creds_dict():
user, pw = os.environ[TEST_ENV_USER], os.environ[TEST_ENV_PW]
conn = iiasa.Connection(TEST_API, creds={"username": user, "password": pw})
assert conn.current_connection == TEST_API_NAME
def test_conn_cleartext_raises():
# connecting with invalid credentials raises an error
creds = ("_foo", "_bar")
pytest.raises(DeprecationWarning, iiasa.Connection, TEST_API, creds=creds)
def test_variables(conn):
# check that connection returns the correct variables
npt.assert_array_equal(conn.variables(), ["Primary Energy", "Primary Energy|Coal"])
def test_regions(conn):
# check that connection returns the correct regions
npt.assert_array_equal(conn.regions(), ["World", "region_a"])
def test_regions_with_synonyms(conn):
obs = conn.regions(include_synonyms=True)
exp = pd.DataFrame(
[["World", None], ["region_a", "ISO_a"]], columns=["region", "synonym"]
)
pdt.assert_frame_equal(obs, exp)
def test_regions_empty_response():
obs = iiasa.Connection.convert_regions_payload("[]", include_synonyms=True)
assert obs.empty
def test_regions_no_synonyms_response():
json = '[{"id":1,"name":"World","parent":"World","hierarchy":"common"}]'
obs = iiasa.Connection.convert_regions_payload(json, include_synonyms=True)
assert not obs.empty
def test_regions_with_synonyms_response():
json = """
[
{
"id":1,"name":"World","parent":"World","hierarchy":"common",
"synonyms":[]
},
{
"id":2,"name":"USA","parent":"World","hierarchy":"country",
"synonyms":["US","United States"]
},
{
"id":3,"name":"Germany","parent":"World","hierarchy":"country",
"synonyms":["Deutschland","DE"]
}
]
"""
obs = iiasa.Connection.convert_regions_payload(json, include_synonyms=True)
assert not obs.empty
assert (obs[obs.region == "USA"].synonym.isin(["US", "United States"])).all()
assert (obs[obs.region == "Germany"].synonym.isin(["Deutschland", "DE"])).all()
def test_meta_columns(conn):
# test that connection returns the correct list of meta indicators
npt.assert_array_equal(conn.meta_columns, META_COLS)
@pytest.mark.parametrize("default", [True, False])
def test_index(conn, default):
# test that connection returns the correct index
if default:
exp = META_DF.loc[META_DF.is_default, ["version"]]
else:
exp = META_DF[VERSION_COLS]
pdt.assert_frame_equal(conn.index(default=default), exp, check_dtype=False)
@pytest.mark.parametrize("default", [True, False])
def test_meta(conn, default):
# test that connection returns the correct meta dataframe
v = "version"
if default:
exp = META_DF.loc[META_DF.is_default, [v] + META_COLS]
else:
exp = META_DF[VERSION_COLS + META_COLS].set_index(v, append=True)
pdt.assert_frame_equal(conn.meta(default=default), exp, check_dtype=False)
@pytest.mark.parametrize("default", [True, False])
def test_properties(conn, default):
# test that connection returns the correct properties dataframe
obs = conn.properties(default=default)
if default:
exp_cols = ["version"]
exp = META_DF.loc[META_DF.is_default, exp_cols]
else:
exp_cols = VERSION_COLS
exp = META_DF[exp_cols]
# assert that the expected audit columns are included
for col in ["create_user", "create_date", "update_user", "update_date"]:
assert col in obs.columns
# assert that the values of some columns is as expected
pdt.assert_frame_equal(obs[exp_cols], exp, check_dtype=False)
@pytest.mark.parametrize(
"kwargs",
[
{},
dict(variable="Primary Energy"),
dict(scenario="scen_a", variable="Primary Energy"),
],
)
def test_query_year(conn, test_df_year, kwargs):
# test reading timeseries data (`model_a` has only yearly data)
exp = test_df_year.copy()
for i in ["version"] + META_COLS:
exp.set_meta(META_DF.iloc[[0, 1]][i])
# test method via Connection
df = conn.query(model="model_a", **kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
# test top-level method
df = read_iiasa(TEST_API, model="model_a", **kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
@pytest.mark.parametrize(
"kwargs",
[
{},
dict(variable="Primary Energy"),
dict(scenario="scen_a", variable="Primary Energy"),
],
)
def test_query_with_subannual(conn, test_pd_df, kwargs):
# test reading timeseries data (including subannual data)
exp = IamDataFrame(test_pd_df, subannual="Year").append(
MODEL_B_DF, model="model_b", scenario="scen_a", region="World"
)
for i in ["version"] + META_COLS:
exp.set_meta(META_DF.iloc[[0, 1, 3]][i])
# test method via Connection
df = conn.query(**kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
# test top-level method
df = read_iiasa(TEST_API, **kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
@pytest.mark.parametrize(
"meta",
[
["string"], # version column is added whether or not stated explicitly
["string", "version"],
],
)
@pytest.mark.parametrize(
"kwargs",
[
{},
dict(variable="Primary Energy"),
dict(scenario="scen_a", variable="Primary Energy"),
],
)
def test_query_with_meta_arg(conn, test_pd_df, meta, kwargs):
# test reading timeseries data (including subannual data)
exp = IamDataFrame(test_pd_df, subannual="Year").append(
MODEL_B_DF, model="model_b", scenario="scen_a", region="World"
)
for i in ["version", "string"]:
exp.set_meta(META_DF.iloc[[0, 1, 3]][i])
# test method via Connection
df = conn.query(meta=meta, **kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
# test top-level method
df = read_iiasa(TEST_API, meta=meta, **kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
@pytest.mark.parametrize(
"kwargs",
[
{},
dict(variable="Primary Energy"),
dict(scenario="scen_a", variable="Primary Energy"),
],
)
def test_query_with_meta_false(conn, test_pd_df, kwargs):
# test reading timeseries data (including subannual data)
exp = IamDataFrame(test_pd_df, subannual="Year").append(
MODEL_B_DF, model="model_b", scenario="scen_a", region="World"
)
# test method via Connection
df = conn.query(meta=False, **kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
# test top-level method
df = read_iiasa(TEST_API, meta=False, **kwargs)
assert_iamframe_equal(df, exp.filter(**kwargs))
def test_query_non_default(conn, test_pd_df):
# test reading timeseries data with non-default versions & index
test_pd_df["subannual"] = "Year"
test_pd_df["version"] = 1
df = pd.concat([test_pd_df[NON_DEFAULT_DF.columns], NON_DEFAULT_DF])
meta = META_DF.set_index("version", append=True)
index = ["model", "scenario", "version"]
exp = IamDataFrame(df, meta=meta, index=index, region="World")
# test method via Connection
df = conn.query(default=False)
assert_iamframe_equal(df, exp)
# test top-level method
df = read_iiasa(TEST_API, default=False)
assert_iamframe_equal(df, exp)
|
en
| 0.795477
|
# check to see if we can do online testing of db authentication # connecting to an unknown API raises an error # connecting to an unknown API raises an error # connecting with invalid credentials raises an error # check that connection returns the correct variables # check that connection returns the correct regions [ { "id":1,"name":"World","parent":"World","hierarchy":"common", "synonyms":[] }, { "id":2,"name":"USA","parent":"World","hierarchy":"country", "synonyms":["US","United States"] }, { "id":3,"name":"Germany","parent":"World","hierarchy":"country", "synonyms":["Deutschland","DE"] } ] # test that connection returns the correct list of meta indicators # test that connection returns the correct index # test that connection returns the correct meta dataframe # test that connection returns the correct properties dataframe # assert that the expected audit columns are included # assert that the values of some columns is as expected # test reading timeseries data (`model_a` has only yearly data) # test method via Connection # test top-level method # test reading timeseries data (including subannual data) # test method via Connection # test top-level method # version column is added whether or not stated explicitly # test reading timeseries data (including subannual data) # test method via Connection # test top-level method # test reading timeseries data (including subannual data) # test method via Connection # test top-level method # test reading timeseries data with non-default versions & index # test method via Connection # test top-level method
| 2.156795
| 2
|
sanitychecks/basic_shape_dataset2d.py
|
Chumbyte/DiGS
| 0
|
6628926
|
# <NAME> (Itzik) <<EMAIL>>
# <NAME> <<EMAIL>>
import torch.utils.data as data
import numpy as np
import scipy.spatial as spatial
import utils.visualizations as vis
from abc import ABC, abstractmethod
from matplotlib.path import Path
import torch
class BasicShape2D(data.Dataset):
    """Synthetic 2D shape dataset.

    Generates point clouds sampled on a 2D manifold (implemented by
    subclasses) together with non-manifold sample points and their
    (approximate) signed distances and normals to the shape.

    Args:
        n_points: number of manifold points per sample.
        n_samples: number of samples (dataset length).
        res: resolution of the evaluation grid (res x res).
        sample_type: non-manifold sampling strategy:
            'grid' | 'uniform' | 'gaussian' | 'combined'.
        sapmling_std: std of the Gaussian noise for 'gaussian'/'combined'
            sampling (parameter-name typo kept for interface compatibility).
        grid_range: half-extent of the grid, spanning
            [-grid_range, grid_range] in x and y.
    """
    def __init__(self, n_points, n_samples=128, res=128, sample_type='grid', sapmling_std=0.005,
                 grid_range=1.2):
        self.grid_range = grid_range
        self.n_points = n_points
        self.n_samples = n_samples
        self.grid_res = res
        self.sample_type = sample_type  # grid | uniform | gaussian | combined
        self.sampling_std = sapmling_std
        # Generate shape (manifold) points -- implemented by subclasses.
        self.points = self.get_mnfld_points()
        # Generate grid points covering [-grid_range, grid_range]^2.
        x, y = np.linspace(-grid_range, grid_range, self.grid_res), np.linspace(-grid_range, grid_range, self.grid_res)
        xx, yy = np.meshgrid(x, y)
        xx, yy = xx.ravel(), yy.ravel()
        self.grid_points = np.stack([xx, yy], axis=1).astype('f')
        self.nonmnfld_points = self.get_nonmnfld_points()
        # Ground-truth manifold normals and signed distances of the grid
        # and non-manifold points to the shape.
        self.mnfld_n = self.get_mnfld_n()
        self.grid_dist, self.grid_n = self.get_points_distances_and_normals(self.grid_points)
        self.nonmnfld_dist, self.nonmnfld_n = self.get_points_distances_and_normals(self.nonmnfld_points)
        self.dist_img = np.reshape(self.grid_dist, [self.grid_res, self.grid_res])
        # Index pools used for batch generation.
        self.point_idxs = np.arange(self.points.shape[1])
        self.grid_points_idxs = np.arange(self.grid_points.shape[0])
        self.nonmnfld_points_idxs = np.arange(self.nonmnfld_points.shape[0])
        self.sample_probs = np.ones_like(self.grid_points_idxs) / self.grid_points.shape[0]
        self.generate_batch_indices()

    @abstractmethod
    def get_mnfld_points(self):
        """Return points on the manifold, shape (n_samples, n_points, 2)."""
        pass

    @abstractmethod
    def get_mnfld_n(self):
        """Return unit normal vectors for the manifold points."""
        pass

    @abstractmethod
    def get_points_distances_and_normals(self, points):
        """Compute (approximate) signed distances and normals of `points`.

        Default implementation: for each sample, find the nearest manifold
        point and return its normal and the signed distance to it -- a
        coarse approximation; subclasses may override with exact formulas.
        (The @abstractmethod marker is kept for interface compatibility,
        although a default implementation is provided.)
        """
        distances = []
        normals = []
        # compute distance and normal (general case)
        for i, point_cloud in enumerate(self.points):
            kdtree = spatial.cKDTree(point_cloud)
            # BUGFIX: the query result used to be assigned to `distances`,
            # clobbering the accumulator list and crashing on `.append`
            # (numpy arrays have no `append` method).
            nn_dist, nn_idx = kdtree.query(points, k=1)
            # Sign from which side of the nearest normal the point lies on.
            signs = np.sign(np.einsum('ij,ij->i', points - point_cloud[nn_idx],
                                      self.mnfld_n[i, nn_idx]))
            normals.append(self.mnfld_n[i, nn_idx])
            distances.append(signs * nn_dist)
        distances = np.stack(distances).astype('f')
        normals = np.stack(normals).astype('f')
        return distances, normals

    def get_grid_divergence(self):
        """Approximate the divergence of the grid normal field (2D only)."""
        n_img = np.reshape(self.grid_n, [self.grid_res, self.grid_res, -1])
        frac_45 = 1. / np.sqrt(2)
        # Directional difference kernel, indexed [y, x, (ny, nx)];
        # local renamed from `filter` to avoid shadowing the builtin.
        kernel = np.array([[[frac_45, -frac_45], [1., 0.], [frac_45, frac_45]],
                           [[0., -1.], [0., 0.], [0., 1.]],
                           [[-frac_45, -frac_45], [-1., 0.], [-frac_45, frac_45]]])  # [y, x]
        padding = self.get_padding(n_img, kernel, strides=[1, 1])
        n_img = torch.nn.functional.pad(torch.tensor(n_img, dtype=torch.float32), padding)
        div_img = torch.nn.functional.conv2d(n_img.permute([2, 0, 1]).unsqueeze(0),
                                             torch.tensor(kernel, dtype=torch.float32).permute([2, 0, 1]).unsqueeze(0),
                                             ).squeeze().numpy()
        return div_img.flatten()

    def get_offgrid_divergnce(self, off_grid_points, method='nn'):
        """Divergence at off-grid points (method-name typo kept for compat).

        TODO: implement an interpolation method?
        """
        if method == 'nn':
            # find the nearest grid point and return its divergence
            kdtree = spatial.cKDTree(self.grid_points)
            _, nn_idx = kdtree.query(off_grid_points, k=1)
        else:
            raise Warning('unsupported offgrid div computeation method')
        # NOTE(review): `self.grid_div` is never assigned in this class --
        # callers appear to be expected to set it first; confirm.
        return self.grid_div[nn_idx]

    def get_padding(self, img, filter, strides=[1, 1]):
        """Compute 'same' padding (for torch.nn.functional.pad) for a conv.

        from https://discuss.pytorch.org/t/same-padding-equivalent-in-pytorch/85121/3
        """
        in_height, in_width, _ = img.shape
        filter_height, filter_width, _ = filter.shape
        # The total padding applied along the height and width is computed as:
        if (in_height % strides[0] == 0):
            pad_along_height = max(filter_height - strides[0], 0)
        else:
            pad_along_height = max(filter_height - (in_height % strides[0]), 0)
        if (in_width % strides[1] == 0):
            pad_along_width = max(filter_width - strides[1], 0)
        else:
            pad_along_width = max(filter_width - (in_width % strides[1]), 0)
        # Split padding between the two sides (extra pixel on the far side).
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left
        return (0, 0, pad_left, pad_right, pad_top, pad_bottom)

    def get_nonmnfld_points(self):
        """Draw non-manifold sample points according to `self.sample_type`."""
        if self.sample_type == 'grid':
            nonmnfld_points = self.grid_points
        elif self.sample_type == 'uniform':
            nonmnfld_points = np.random.uniform(-self.grid_range, self.grid_range,
                                                size=(self.grid_res * self.grid_res, 2)).astype(np.float32)
        elif self.sample_type == 'gaussian':
            # Gaussian perturbations of the manifold points, subsampled to
            # the grid size.
            nonmnfld_points = self.sample_gaussian_noise_around_shape()
            idx = np.random.choice(range(nonmnfld_points.shape[1]), self.grid_res * self.grid_res)
            sample_idx = np.random.choice(range(nonmnfld_points.shape[0]), self.grid_res * self.grid_res)
            nonmnfld_points = nonmnfld_points[sample_idx, idx]
        elif self.sample_type == 'combined':
            # Half Gaussian perturbations, half grid points.
            nonmnfld_points1 = self.sample_gaussian_noise_around_shape()
            nonmnfld_points2 = self.grid_points
            idx1 = np.random.choice(range(nonmnfld_points1.shape[1]), int(np.ceil(self.grid_res * self.grid_res / 2)))
            idx2 = np.random.choice(range(nonmnfld_points2.shape[0]), int(np.floor(self.grid_res * self.grid_res / 2)))
            sample_idx = np.random.choice(range(nonmnfld_points1.shape[0]), int(np.ceil(self.grid_res * self.grid_res / 2)))
            nonmnfld_points = np.concatenate([nonmnfld_points1[sample_idx, idx1], nonmnfld_points2[idx2]], axis=0)
        else:
            raise Warning("Unsupported non manfold sampling type {}".format(self.sample_type))
        return nonmnfld_points

    def sample_gaussian_noise_around_shape(self):
        """Jitter each manifold point with isotropic Gaussian noise."""
        n_noisy_points = int(np.round(self.grid_res * self.grid_res / self.n_points))
        noise = np.random.multivariate_normal([0, 0], [[self.sampling_std, 0], [0, self.sampling_std]],
                                              size=(self.n_samples, self.n_points, n_noisy_points)).astype(np.float32)
        nonmnfld_points = np.tile(self.points[:, :, None, :], [1, 1, n_noisy_points, 1]) + noise
        nonmnfld_points = nonmnfld_points.reshape([nonmnfld_points.shape[0], -1, nonmnfld_points.shape[-1]])
        return nonmnfld_points

    def generate_batch_indices(self):
        """Pre-draw per-sample index sets for manifold/non-manifold batches."""
        mnfld_idx = []
        nonmnfld_idx = []
        for i in range(self.n_samples):
            mnfld_idx.append(np.random.choice(self.point_idxs, self.n_points))
            nonmnfld_idx.append(np.random.choice(self.nonmnfld_points_idxs, self.n_points))
        self.mnfld_idx = np.array(mnfld_idx)
        self.nonmnfld_idx = np.array(nonmnfld_idx)

    def __getitem__(self, index):
        """Return one sample as a dict of manifold / non-manifold tensors."""
        nonmnfld_idx = self.nonmnfld_idx[index]
        mnfld_idx = self.mnfld_idx[index]
        if self.nonmnfld_dist is not None:
            nonmnfld_dist = self.nonmnfld_dist[nonmnfld_idx]
        else:
            nonmnfld_dist = torch.tensor(0)
        return {'points': self.points[index, mnfld_idx, :], 'mnfld_n': self.mnfld_n[index, mnfld_idx, :],
                'nonmnfld_dist': nonmnfld_dist, 'nonmnfld_n': self.nonmnfld_n[nonmnfld_idx],
                'nonmnfld_points': self.nonmnfld_points[nonmnfld_idx],
                }

    def __len__(self):
        return self.n_samples
class Circle(BasicShape2D):
    """A circle of radius ``r`` centred at the origin."""

    def __init__(self, *args, r=0.5):
        self.r = r
        BasicShape2D.__init__(self, *args)

    def get_mnfld_points(self):
        # Draw uniformly-distributed angles and map them onto the circle.
        angles = np.random.uniform(0, 2 * np.pi, size=(self.n_samples, self.n_points)).astype('f')
        return np.stack([self.r * np.sin(angles), self.r * np.cos(angles)], axis=2)

    def get_mnfld_n(self):
        # Outward unit normals are just the normalized point coordinates.
        norms = np.linalg.norm(self.points, axis=2, keepdims=True)
        return self.points / norms

    def get_points_distances_and_normals(self, points):
        # Exact signed distance to a circle: |p| - r; normal is radial.
        radial = np.linalg.norm(points, axis=1, keepdims=True)
        return radial - self.r, points / radial
class Polygon(BasicShape2D):
def __init__(self, *args, vertices=[], line_sample_type='uniform'):
# vertices: x,y points specifying the polygon
self.vertices = np.array(vertices)
self.lines = self.get_line_props()
self.line_sample_type = line_sample_type
BasicShape2D.__init__(self, *args)
def get_mnfld_points(self):
# sample points on the lines
n_points_to_sample = self.n_points - len(self.vertices)
if n_points_to_sample < 0:
raise Warning("Fewer points to sample than polygon vertices. Please change the number of points")
sample_prob = self.lines['line_length'] / np.sum(self.lines['line_length'])
points_per_segment = np.floor(n_points_to_sample * sample_prob).astype(np.int32)
points_leftover = int(n_points_to_sample - points_per_segment.sum())
if not points_leftover == 0:
for j in range(points_leftover):
actual_prob = points_per_segment / points_per_segment.sum()
prob_diff = sample_prob - actual_prob
add_idx = np.argmax(prob_diff)
points_per_segment[add_idx] = points_per_segment[add_idx] + 1
points = []
self.point_normal = []
for point_idx, point in enumerate(self.vertices):
l1_idx = len(self.vertices) - 1 if point_idx == 0 else point_idx - 1
l2_idx = point_idx
n = self.lines['nl'][l1_idx] + self.lines['nl'][l2_idx]
self.point_normal.append(n / np.linalg.norm(n))
points.append(point)
points = np.repeat(np.array(points)[None, :], self.n_samples, axis=0)
self.point_normal = np.repeat(np.array(self.point_normal)[None, :], self.n_samples, axis=0)
for line_idx in range(len(self.lines['A'])):
if self.line_sample_type == 'uniform':
t = np.linspace(0, 1, points_per_segment[line_idx] + 1, endpoint=False)[1:]
t = np.repeat(t[None, :], self.n_samples, axis=0)
else:
t = np.random.uniform(0, 1, [self.n_samples, points_per_segment[line_idx]])
p1 = np.array(self.vertices[self.lines['start_idx'][line_idx]])
p2 = np.array(self.vertices[self.lines['end_idx'][line_idx]])
points = np.concatenate([points, p1 + t[:, :, None]*(p2 - p1)], axis=1)
self.point_normal = np.concatenate([self.point_normal,
np.tile(self.lines['nl'][line_idx][None, None, :],
[self.n_samples, points_per_segment[line_idx], 1])], axis=1)
return points.astype('f')
def get_mnfld_n(self):
return self.point_normal
def get_points_distances_and_normals(self, points):
# iterate over all the lines and finds the minimum distance between all points and line segments
# good explenation ref : https://stackoverflow.com/questions/10983872/distance-from-a-point-to-a-polygon
n_grid_points = len(points)
p1x = np.vstack(self.vertices[self.lines['start_idx']][:, 0])
p1y = np.vstack(self.vertices[self.lines['start_idx']][:, 1])
p2x = np.vstack(self.vertices[self.lines['end_idx']][:, 0])
p2y = np.vstack(self.vertices[self.lines['end_idx']][:, 1])
p1p2 = np.array(self.lines['direction'])
px = points[:, 0]
py = points[:, 1]
pp1 = np.vstack([px - np.tile(p1x, [1, 1, n_grid_points]), py - np.tile(p1y, [1, 1, n_grid_points])])
pp2 = np.vstack([px - np.tile(p2x, [1, 1, n_grid_points]), py - np.tile(p2y, [1, 1, n_grid_points])])
r = (p1p2[:, 0, None] * pp1[0, :, :] + p1p2[:, 1, None] * pp1[1, :, :]) / np.array(self.lines['line_length'])[:, None]
d1 = np.linalg.norm(pp1, axis=0)
d2 = np.linalg.norm(pp2, axis=0)
dp = np.sqrt(np.square(d1) - np.square(r * np.array(self.lines['line_length'])[:, None]))
d = np.where(r < 0, d1, np.where(r > 1, d2, dp))
distances = np.min(d, axis=0)
idx = np.argmin(d, axis=0)
# compute normal vector
polygon_path = Path(self.vertices)
point_in_polygon = polygon_path.contains_points(points)
point_sign = np.where(point_in_polygon, -1, 1)
n = np.where(r < 0, pp1, np.where(r > 1, pp2, point_sign *
np.tile(np.array(self.lines['nl']).transpose()[:, :, None],
[1, 1, n_grid_points])))
normals = np.take_along_axis(n, idx[None, None, :], axis=1).squeeze().transpose()
normals = point_sign[:, None] * normals / np.linalg.norm(normals, axis=1, keepdims=True)
distances = point_sign * distances
return distances, normals
    def get_line_props(self):
        # Precompute per-edge properties: standard-form coefficients (A, B, C)
        # of the line Ax + By + C = 0, unit normal nl, edge length, endpoint
        # vertex indices, and unit direction vector.
        lines = {'A': [], 'B': [], 'C': [], 'nl': [], 'line_length': [], 'start_idx': [], 'end_idx': [], 'direction': []}
        for start_idx, start_point in enumerate(self.vertices):
            # last vertex connects back to the first (closed polygon)
            end_idx = 0 if start_idx == len(self.vertices)-1 else start_idx + 1
            end_point = self.vertices[end_idx]
            # Compute standard form coefficients
            A = start_point[1] - end_point[1]
            B = end_point[0] - start_point[0]
            C = - (A * start_point[0] + B * start_point[1])
            line_length = np.sqrt(np.square(A) + np.square(B))
            direction = [end_point[0] - start_point[0], end_point[1] - start_point[1]] / line_length
            nl = [A, B]
            nl = nl / np.linalg.norm(nl)
            line_props = {'A': A, 'B': B, 'C': C, 'nl': nl, 'line_length': line_length,
                          'start_idx': start_idx, 'end_idx': end_idx, 'direction': direction}
            for key in lines.keys():
                lines[key].append(line_props[key])
        return lines
def koch_line(start, end, factor):
    """
    Segments a line to Koch line, creating fractals.

    :param tuple start: (x, y) coordinates of the starting point
    :param tuple end: (x, y) coordinates of the end point
    :param float factor: the multiple of sixty degrees to rotate
    :returns dict: the five segmentation points 'a'..'e' plus the 'factor'
    """
    x1, y1 = start[0], start[1]
    x2, y2 = end[0], end[1]
    dx, dy = x2 - x1, y2 - y1
    # segment length, used to place the rotated peak point 'c'
    length = np.sqrt(dx ** 2 + dy ** 2)
    angle = factor * np.pi / 3.
    a = (x1, y1)                                  # same as the start
    b = (x1 + dx / 3., y1 + dy / 3.)              # one third along
    c = (b[0] + length / 3. * np.cos(angle),      # peak, rotated by factor*60deg
         b[1] + length / 3. * np.sin(angle))
    d = (x1 + 2. * dx / 3., y1 + 2. * dy / 3.)    # two thirds along
    return {'a': a, 'b': b, 'c': c, 'd': d, 'e': end, 'factor': factor}
def koch_snowflake(degree, s=1.0):
    """Generates all lines for a Koch Snowflake with a given degree.
    code from: https://github.com/IlievskiV/Amusive-Blogging-N-Coding/blob/master/Visualizations/snowflake.ipynb

    :param int degree: how deep to go in the branching process
    :param float s: the length of the initial equilateral triangle
    :returns list: list of all lines that form the snowflake
    """
    sixty_degrees = np.pi / 3.
    # corners of the initial equilateral triangle
    A = (0., 0.)
    B = (s, 0.)
    C = (s * np.cos(sixty_degrees), s * np.sin(sixty_degrees))
    # seed rotation factors differ for degree 0 vs. deeper recursions
    seed_factors = (0, 2, 4) if degree == 0 else (5, 1, 3)
    lines = [koch_line(p, q, f)
             for (p, q), f in zip(((A, B), (B, C), (C, A)), seed_factors)]
    for level in range(1, degree):
        # every line produces 4 new lines
        for _ in range(3 * 4 ** (level - 1)):
            seg = lines.pop(0)
            f = seg['factor']
            lines.append(koch_line(seg['a'], seg['b'], f % 6))        # a to b
            lines.append(koch_line(seg['b'], seg['c'], (f - 1) % 6))  # b to c
            lines.append(koch_line(seg['c'], seg['d'], (f + 1) % 6))  # c to d
            lines.append(koch_line(seg['d'], seg['e'], f % 6))        # d to e
    return lines
def get_koch_points(degree, s=1.0):
    # Flatten the snowflake's segments into one point array. Each segment
    # contributes its 'a'..'d' points; 'e' is skipped ('factor' is metadata).
    segments = koch_snowflake(degree, s=s)
    pts = [seg[key] for seg in segments for key in ('a', 'b', 'c', 'd')]
    # shift so the base triangle's centroid (s/2, s*tan(30deg)/2) is at origin
    pts = np.array(pts) - np.array([s/2, (s/2)*np.tan(np.pi/6)])
    return np.flipud(pts)  # reorder the points clockwise
def get2D_dataset(*args, shape_type='circle'):
    """Factory for 2D shape datasets.

    :param args: positional arguments forwarded to the shape constructor
                 (n_points, n_samples, res, sample_type, sampling_std, ...).
    :param str shape_type: one of 'circle', 'L', 'square', 'snowflake'.
    :returns: a BasicShape2D subclass instance for the requested shape.
    :raises ValueError: if shape_type is not recognised.
    """
    if shape_type == 'circle':
        out_shape = Circle(*args)
    elif shape_type == 'L':
        out_shape = Polygon(*args, vertices=[[0., 0.], [0.5, 0.], [0.5, -0.5],
                                             [-0.5, -0.5], [-0.5, 0.5], [0, 0.5]])
    elif shape_type == 'square':
        out_shape = Polygon(*args, vertices=[[-0.5, 0.5], [0.5, 0.5], [0.5, -0.5], [-0.5, -0.5]])
    elif shape_type == 'snowflake':
        vertices = get_koch_points(degree=2, s=1.0)
        out_shape = Polygon(*args, vertices=vertices)
    else:
        # bug fix: an invalid argument should raise ValueError, not a raised
        # Warning instance (also fixes the 'Unsupportaed' typo)
        raise ValueError("Unsupported shape type: {}".format(shape_type))
    return out_shape
# Demo / visual sanity check: build an L-shaped dataset, pull one batch,
# and plot the SDF / indicator function.
if __name__ == "__main__":
    np.random.seed(0)
    shape_type = 'L'
    res = 128 # has to be even
    example_idx = 0
    sample_type = 'grid'
    n_samples = 2
    n_points = 24
    dataset = get2D_dataset(n_points, n_samples, res, sample_type, 0.005, shape_type=shape_type) # BasicShape2D(100, 20, res=50)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=3, pin_memory=True)
    data = next(iter(dataloader))
    # extract one example of the first batch for plotting
    clean_points_gt = data['points'][example_idx].detach().cpu().numpy()
    n_gt = data['mnfld_n'][example_idx].detach().cpu().numpy()
    nonmnfld_points = data['nonmnfld_points'][example_idx].detach().cpu().numpy()
    grid_normals = dataset.grid_n
    vis.plot_sdf_indicator(dataset.vertices, dataset.grid_points[:, 0], dataset.grid_points[:, 1],
                           dataset.dist_img.flatten(), title_text='', show_ax=False, output_path='./vis/') # plot sdf, indicator function, and points
    # vis.plot_shape_data(dataset.grid_points[:, 0], dataset.grid_points[:, 1], dataset.dist_img.flatten(),
    #                     clean_points_gt, n_gt=n_gt, show_ax=True, show_bar=True,
    #                     title_text='', colorscale='Geyser', nonmnfld_points=nonmnfld_points, divergence=None,
    #                     grid_normals=grid_normals) # plot shape, sdf and other data
    # vis.plot_paper_teaser_images(dataset.grid_points[:, 0], dataset.grid_points[:, 1] ,dataset.dist_img.flatten(),
    #                              clean_points_gt, grid_normals) # plot images for DiGS paper teaser image
|
# <NAME> (Itzik) <<EMAIL>>
# <NAME> <<EMAIL>>
import torch.utils.data as data
import numpy as np
import scipy.spatial as spatial
import utils.visualizations as vis
from abc import ABC, abstractmethod
from matplotlib.path import Path
import torch
class BasicShape2D(data.Dataset):
    """Synthetic 2D shape dataset.

    Generates clean point clouds sampled on a shape's boundary (the
    "manifold"), plus off-manifold query points (grid / uniform / gaussian /
    combined) together with their signed distance and normals.
    """
    def __init__(self, n_points, n_samples=128, res=128, sample_type='grid', sapmling_std=0.005,
                 grid_range=1.2):
        # NOTE(review): the 'sapmling_std' typo is kept for interface
        # compatibility; the value is stored as self.sampling_std.
        self.grid_range = grid_range
        self.n_points = n_points
        self.n_samples = n_samples
        self.grid_res = res
        self.sample_type = sample_type  # grid | uniform | gaussian | combined
        self.sampling_std = sapmling_std
        # Sample points on the shape boundary (implemented by subclasses).
        self.points = self.get_mnfld_points()
        # Build a res x res evaluation grid over [-grid_range, grid_range]^2.
        x, y = np.linspace(-grid_range, grid_range, self.grid_res), np.linspace(-grid_range, grid_range, self.grid_res)
        xx, yy = np.meshgrid(x, y)
        xx, yy = xx.ravel(), yy.ravel()
        self.grid_points = np.stack([xx, yy], axis=1).astype('f')
        self.nonmnfld_points = self.get_nonmnfld_points()
        # Ground-truth manifold normals and signed distances.
        self.mnfld_n = self.get_mnfld_n()
        self.grid_dist, self.grid_n = self.get_points_distances_and_normals(self.grid_points)
        self.nonmnfld_dist, self.nonmnfld_n = self.get_points_distances_and_normals(self.nonmnfld_points)
        self.dist_img = np.reshape(self.grid_dist, [self.grid_res, self.grid_res])
        self.point_idxs = np.arange(self.points.shape[1])
        self.grid_points_idxs = np.arange(self.grid_points.shape[0])
        self.nonmnfld_points_idxs = np.arange(self.nonmnfld_points.shape[0])
        self.sample_probs = np.ones_like(self.grid_points_idxs) / self.grid_points.shape[0]
        self.generate_batch_indices()

    @abstractmethod
    def get_mnfld_points(self):
        # Return boundary samples, shape (n_samples, n_points, 2).
        pass

    @abstractmethod
    def get_mnfld_n(self):
        # Return normal vectors for the boundary samples (same leading shape).
        pass

    @abstractmethod
    def get_points_distances_and_normals(self, points):
        # Default implementation: approximate each query point's signed
        # distance/normal with its nearest boundary sample — a coarse
        # approximation; subclasses override this with exact formulas.
        # Bug fix: the original rebound the `distances` list to the kd-tree
        # query result (an ndarray) and then called list.append on it, which
        # raised AttributeError. Distinct names are used here.
        distances = []
        normals = []
        for i, point_cloud in enumerate(self.points):
            kdtree = spatial.cKDTree(point_cloud)
            nn_dist, nn_idx = kdtree.query(points, k=1)
            # sign: which side of the nearest sample's normal the point lies on
            signs = np.sign(np.einsum('ij,ij->i', points - point_cloud[nn_idx],
                                      self.mnfld_n[i, nn_idx]))
            normals.append(self.mnfld_n[i, nn_idx])
            distances.append(signs * nn_dist)
        distances = np.stack(distances).astype('f')
        normals = np.stack(normals).astype('f')
        return distances, normals

    def get_grid_divergence(self):
        # 2D implementation: approximate div(n) on the grid by convolving the
        # normal image with a fixed 3x3 stencil.
        n_img = np.reshape(self.grid_n, [self.grid_res, self.grid_res, -1])
        frac_45 = 1./np.sqrt(2)
        filter = np.array([[[frac_45, -frac_45], [1., 0.], [frac_45, frac_45]],
                           [[0., -1.], [0., 0.], [0., 1.]],
                           [[-frac_45, -frac_45], [-1., 0.], [-frac_45, frac_45]]])  # [y, x]
        padding = self.get_padding(n_img, filter, strides=[1, 1])
        n_img = torch.nn.functional.pad(torch.tensor(n_img, dtype=torch.float32), padding)
        div_img = torch.nn.functional.conv2d(n_img.permute([2, 0, 1]).unsqueeze(0),
                                             torch.tensor(filter, dtype=torch.float32).permute([2, 0, 1]).unsqueeze(0),
                                             ).squeeze().numpy()
        return div_img.flatten()

    def get_offgrid_divergnce(self, off_grid_points, method='nn'):
        # NOTE(review): method name typo kept for interface compatibility.
        # NOTE(review): relies on self.grid_div which is not set in __init__ —
        # presumably the caller assigns get_grid_divergence()'s result; verify.
        # TODO implement interpolation method?
        if method == 'nn':
            # find the nearest grid point and return its divergence
            kdtree = spatial.cKDTree(self.grid_points)
            _, nn_idx = kdtree.query(off_grid_points, k=1)
        else:
            raise Warning('unsupported offgrid div computeation method')
        return self.grid_div[nn_idx]

    def get_padding(self, img, filter, strides=[1, 1]):
        # TensorFlow-style 'SAME' padding for conv2d, returned in the order
        # torch.nn.functional.pad expects: (0, 0, left, right, top, bottom).
        # from https://discuss.pytorch.org/t/same-padding-equivalent-in-pytorch/85121/3
        in_height, in_width, _ = img.shape
        filter_height, filter_width, _ = filter.shape
        # The total padding applied along the height and width is computed as:
        if (in_height % strides[0] == 0):
            pad_along_height = max(filter_height - strides[0], 0)
        else:
            pad_along_height = max(filter_height - (in_height % strides[0]), 0)
        if (in_width % strides[1] == 0):
            pad_along_width = max(filter_width - strides[1], 0)
        else:
            pad_along_width = max(filter_width - (in_width % strides[1]), 0)
        # Finally, the padding on the top, bottom, left and right are:
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left
        return (0, 0, pad_left, pad_right, pad_top, pad_bottom)

    def get_nonmnfld_points(self):
        # Draw off-manifold query points according to self.sample_type.
        if self.sample_type == 'grid':
            nonmnfld_points = self.grid_points
        elif self.sample_type == 'uniform':
            nonmnfld_points = np.random.uniform(-self.grid_range, self.grid_range,
                                                size=(self.grid_res * self.grid_res, 2)).astype(np.float32)
        elif self.sample_type == 'gaussian':
            nonmnfld_points = self.sample_gaussian_noise_around_shape()
            idx = np.random.choice(range(nonmnfld_points.shape[1]), self.grid_res * self.grid_res)
            sample_idx = np.random.choice(range(nonmnfld_points.shape[0]), self.grid_res * self.grid_res)
            nonmnfld_points = nonmnfld_points[sample_idx, idx]
        elif self.sample_type == 'combined':
            # half gaussian-perturbed boundary samples, half grid points
            nonmnfld_points1 = self.sample_gaussian_noise_around_shape()
            nonmnfld_points2 = self.grid_points
            idx1 = np.random.choice(range(nonmnfld_points1.shape[1]), int(np.ceil(self.grid_res * self.grid_res / 2)))
            idx2 = np.random.choice(range(nonmnfld_points2.shape[0]), int(np.floor(self.grid_res * self.grid_res / 2)))
            sample_idx = np.random.choice(range(nonmnfld_points1.shape[0]), int(np.ceil(self.grid_res * self.grid_res / 2)))
            nonmnfld_points = np.concatenate([nonmnfld_points1[sample_idx, idx1], nonmnfld_points2[idx2]], axis=0)
        else:
            raise Warning("Unsupported non manfold sampling type {}".format(self.sample_type))
        return nonmnfld_points

    def sample_gaussian_noise_around_shape(self):
        # Perturb every boundary sample with isotropic gaussian noise;
        # roughly grid_res^2 / n_points noisy copies per sample.
        n_noisy_points = int(np.round(self.grid_res * self.grid_res / self.n_points))
        noise = np.random.multivariate_normal([0, 0], [[self.sampling_std, 0], [0, self.sampling_std]],
                                              size=(self.n_samples, self.n_points, n_noisy_points)).astype(np.float32)
        nonmnfld_points = np.tile(self.points[:, :, None, :], [1, 1, n_noisy_points, 1]) + noise
        nonmnfld_points = nonmnfld_points.reshape([nonmnfld_points.shape[0], -1, nonmnfld_points.shape[-1]])
        return nonmnfld_points

    def generate_batch_indices(self):
        # Pre-draw, per sample, random subsets of manifold / off-manifold
        # point indices used by __getitem__.
        mnfld_idx = []
        nonmnfld_idx = []
        for i in range(self.n_samples):
            mnfld_idx.append(np.random.choice(self.point_idxs, self.n_points))
            nonmnfld_idx.append(np.random.choice(self.nonmnfld_points_idxs, self.n_points))
        self.mnfld_idx = np.array(mnfld_idx)
        self.nonmnfld_idx = np.array(nonmnfld_idx)

    def __getitem__(self, index):
        nonmnfld_idx = self.nonmnfld_idx[index]
        mnfld_idx = self.mnfld_idx[index]
        if self.nonmnfld_dist is not None:
            nonmnfld_dist = self.nonmnfld_dist[nonmnfld_idx]
        else:
            nonmnfld_dist = torch.tensor(0)
        return {'points': self.points[index, mnfld_idx, :], 'mnfld_n': self.mnfld_n[index, mnfld_idx, :],
                'nonmnfld_dist': nonmnfld_dist, 'nonmnfld_n': self.nonmnfld_n[nonmnfld_idx],
                'nonmnfld_points': self.nonmnfld_points[nonmnfld_idx],
                }

    def __len__(self):
        return self.n_samples
class Circle(BasicShape2D):
    """Circle of radius r centered at the origin."""
    def __init__(self, *args, r=0.5):
        # radius must exist before the base constructor triggers sampling
        self.r = r
        BasicShape2D.__init__(self, *args)

    def get_mnfld_points(self):
        # Draw uniform angles and map them onto the circle.
        angles = np.random.uniform(0, 2*np.pi, size=(self.n_samples, self.n_points)).astype('f')
        return np.stack([self.r * np.sin(angles), self.r * np.cos(angles)], axis=2)

    def get_mnfld_n(self):
        # Outward normals: boundary points scaled to unit length.
        return self.points / np.linalg.norm(self.points, axis=2, keepdims=True)

    def get_points_distances_and_normals(self, points):
        # Exact signed distance to a circle is |p| - r; the normal is radial.
        radial = np.linalg.norm(points, axis=1, keepdims=True)
        return radial - self.r, points / radial
class Polygon(BasicShape2D):
    """Closed polygon shape; vertices are ordered (x, y) pairs."""
    def __init__(self, *args, vertices=[], line_sample_type='uniform'):
        # vertices: x,y points specifying the polygon
        # NOTE(review): mutable default `vertices=[]` is a smell, but it is
        # only read (converted to an array), never mutated, so it is safe.
        self.vertices = np.array(vertices)
        self.lines = self.get_line_props()
        self.line_sample_type = line_sample_type
        BasicShape2D.__init__(self, *args)
    def get_mnfld_points(self):
        # Sample boundary points: all vertices first, then points distributed
        # along the edges proportionally to edge length.
        n_points_to_sample = self.n_points - len(self.vertices)
        if n_points_to_sample < 0:
            raise Warning("Fewer points to sample than polygon vertices. Please change the number of points")
        sample_prob = self.lines['line_length'] / np.sum(self.lines['line_length'])
        points_per_segment = np.floor(n_points_to_sample * sample_prob).astype(np.int32)
        points_leftover = int(n_points_to_sample - points_per_segment.sum())
        # distribute flooring leftovers to the most under-sampled edges
        if not points_leftover == 0:
            for j in range(points_leftover):
                actual_prob = points_per_segment / points_per_segment.sum()
                prob_diff = sample_prob - actual_prob
                add_idx = np.argmax(prob_diff)
                points_per_segment[add_idx] = points_per_segment[add_idx] + 1
        points = []
        self.point_normal = []
        # vertex normals: normalized sum of the two adjacent edge normals
        for point_idx, point in enumerate(self.vertices):
            l1_idx = len(self.vertices) - 1 if point_idx == 0 else point_idx - 1
            l2_idx = point_idx
            n = self.lines['nl'][l1_idx] + self.lines['nl'][l2_idx]
            self.point_normal.append(n / np.linalg.norm(n))
            points.append(point)
        points = np.repeat(np.array(points)[None, :], self.n_samples, axis=0)
        self.point_normal = np.repeat(np.array(self.point_normal)[None, :], self.n_samples, axis=0)
        for line_idx in range(len(self.lines['A'])):
            if self.line_sample_type == 'uniform':
                # evenly spaced interior parameters, endpoints excluded
                t = np.linspace(0, 1, points_per_segment[line_idx] + 1, endpoint=False)[1:]
                t = np.repeat(t[None, :], self.n_samples, axis=0)
            else:
                t = np.random.uniform(0, 1, [self.n_samples, points_per_segment[line_idx]])
            p1 = np.array(self.vertices[self.lines['start_idx'][line_idx]])
            p2 = np.array(self.vertices[self.lines['end_idx'][line_idx]])
            points = np.concatenate([points, p1 + t[:, :, None]*(p2 - p1)], axis=1)
            self.point_normal = np.concatenate([self.point_normal,
                                                np.tile(self.lines['nl'][line_idx][None, None, :],
                                                        [self.n_samples, points_per_segment[line_idx], 1])], axis=1)
        return points.astype('f')
    def get_mnfld_n(self):
        # Normals were computed alongside the samples in get_mnfld_points().
        return self.point_normal
    def get_points_distances_and_normals(self, points):
        # Vectorized signed point-to-polygon distance: for each query point,
        # take the minimum distance over all edge segments.
        # good explanation ref : https://stackoverflow.com/questions/10983872/distance-from-a-point-to-a-polygon
        n_grid_points = len(points)
        p1x = np.vstack(self.vertices[self.lines['start_idx']][:, 0])
        p1y = np.vstack(self.vertices[self.lines['start_idx']][:, 1])
        p2x = np.vstack(self.vertices[self.lines['end_idx']][:, 0])
        p2y = np.vstack(self.vertices[self.lines['end_idx']][:, 1])
        p1p2 = np.array(self.lines['direction'])
        px = points[:, 0]
        py = points[:, 1]
        pp1 = np.vstack([px - np.tile(p1x, [1, 1, n_grid_points]), py - np.tile(p1y, [1, 1, n_grid_points])])
        pp2 = np.vstack([px - np.tile(p2x, [1, 1, n_grid_points]), py - np.tile(p2y, [1, 1, n_grid_points])])
        # r: normalized projection of the point onto each edge (0..1 = inside)
        r = (p1p2[:, 0, None] * pp1[0, :, :] + p1p2[:, 1, None] * pp1[1, :, :]) / np.array(self.lines['line_length'])[:, None]
        d1 = np.linalg.norm(pp1, axis=0)
        d2 = np.linalg.norm(pp2, axis=0)
        dp = np.sqrt(np.square(d1) - np.square(r * np.array(self.lines['line_length'])[:, None]))
        # endpoint distance when the projection is outside the edge, else
        # perpendicular distance
        d = np.where(r < 0, d1, np.where(r > 1, d2, dp))
        distances = np.min(d, axis=0)
        idx = np.argmin(d, axis=0)
        # compute normal vector; points inside the polygon get negative sign
        polygon_path = Path(self.vertices)
        point_in_polygon = polygon_path.contains_points(points)
        point_sign = np.where(point_in_polygon, -1, 1)
        n = np.where(r < 0, pp1, np.where(r > 1, pp2, point_sign *
                                          np.tile(np.array(self.lines['nl']).transpose()[:, :, None],
                                                  [1, 1, n_grid_points])))
        normals = np.take_along_axis(n, idx[None, None, :], axis=1).squeeze().transpose()
        normals = point_sign[:, None] * normals / np.linalg.norm(normals, axis=1, keepdims=True)
        distances = point_sign * distances
        return distances, normals
    def get_line_props(self):
        # Precompute per-edge properties: standard-form coefficients (A, B, C)
        # of Ax + By + C = 0, unit normal nl, edge length, endpoint vertex
        # indices, and unit direction vector.
        lines = {'A': [], 'B': [], 'C': [], 'nl': [], 'line_length': [], 'start_idx': [], 'end_idx': [], 'direction': []}
        for start_idx, start_point in enumerate(self.vertices):
            # last vertex connects back to the first (closed polygon)
            end_idx = 0 if start_idx == len(self.vertices)-1 else start_idx + 1
            end_point = self.vertices[end_idx]
            # Compute standard form coefficients
            A = start_point[1] - end_point[1]
            B = end_point[0] - start_point[0]
            C = - (A * start_point[0] + B * start_point[1])
            line_length = np.sqrt(np.square(A) + np.square(B))
            direction = [end_point[0] - start_point[0], end_point[1] - start_point[1]] / line_length
            nl = [A, B]
            nl = nl / np.linalg.norm(nl)
            line_props = {'A': A, 'B': B, 'C': C, 'nl': nl, 'line_length': line_length,
                          'start_idx': start_idx, 'end_idx': end_idx, 'direction': direction}
            for key in lines.keys():
                lines[key].append(line_props[key])
        return lines
def koch_line(start, end, factor):
    """
    Segments a line to Koch line, creating fractals.

    :param tuple start: (x, y) coordinates of the starting point
    :param tuple end: (x, y) coordinates of the end point
    :param float factor: the multiple of sixty degrees to rotate
    :returns dict: the five segmentation points 'a'..'e' plus the 'factor'
    """
    x1, y1 = start[0], start[1]
    x2, y2 = end[0], end[1]
    dx, dy = x2 - x1, y2 - y1
    # segment length, used to place the rotated peak point 'c'
    length = np.sqrt(dx ** 2 + dy ** 2)
    angle = factor * np.pi / 3.
    a = (x1, y1)                                  # same as the start
    b = (x1 + dx / 3., y1 + dy / 3.)              # one third along
    c = (b[0] + length / 3. * np.cos(angle),      # peak, rotated by factor*60deg
         b[1] + length / 3. * np.sin(angle))
    d = (x1 + 2. * dx / 3., y1 + 2. * dy / 3.)    # two thirds along
    return {'a': a, 'b': b, 'c': c, 'd': d, 'e': end, 'factor': factor}
def koch_snowflake(degree, s=1.0):
    """Generates all lines for a Koch Snowflake with a given degree.
    code from: https://github.com/IlievskiV/Amusive-Blogging-N-Coding/blob/master/Visualizations/snowflake.ipynb

    :param int degree: how deep to go in the branching process
    :param float s: the length of the initial equilateral triangle
    :returns list: list of all lines that form the snowflake
    """
    sixty_degrees = np.pi / 3.
    # corners of the initial equilateral triangle
    A = (0., 0.)
    B = (s, 0.)
    C = (s * np.cos(sixty_degrees), s * np.sin(sixty_degrees))
    # seed rotation factors differ for degree 0 vs. deeper recursions
    seed_factors = (0, 2, 4) if degree == 0 else (5, 1, 3)
    lines = [koch_line(p, q, f)
             for (p, q), f in zip(((A, B), (B, C), (C, A)), seed_factors)]
    for level in range(1, degree):
        # every line produces 4 new lines
        for _ in range(3 * 4 ** (level - 1)):
            seg = lines.pop(0)
            f = seg['factor']
            lines.append(koch_line(seg['a'], seg['b'], f % 6))        # a to b
            lines.append(koch_line(seg['b'], seg['c'], (f - 1) % 6))  # b to c
            lines.append(koch_line(seg['c'], seg['d'], (f + 1) % 6))  # c to d
            lines.append(koch_line(seg['d'], seg['e'], f % 6))        # d to e
    return lines
def get_koch_points(degree, s=1.0):
    # Flatten the snowflake's segments into one point array. Each segment
    # contributes its 'a'..'d' points; 'e' is skipped ('factor' is metadata).
    segments = koch_snowflake(degree, s=s)
    pts = [seg[key] for seg in segments for key in ('a', 'b', 'c', 'd')]
    # shift so the base triangle's centroid (s/2, s*tan(30deg)/2) is at origin
    pts = np.array(pts) - np.array([s/2, (s/2)*np.tan(np.pi/6)])
    return np.flipud(pts)  # reorder the points clockwise
def get2D_dataset(*args, shape_type='circle'):
    """Factory for 2D shape datasets.

    :param args: positional arguments forwarded to the shape constructor
                 (n_points, n_samples, res, sample_type, sampling_std, ...).
    :param str shape_type: one of 'circle', 'L', 'square', 'snowflake'.
    :returns: a BasicShape2D subclass instance for the requested shape.
    :raises ValueError: if shape_type is not recognised.
    """
    if shape_type == 'circle':
        out_shape = Circle(*args)
    elif shape_type == 'L':
        out_shape = Polygon(*args, vertices=[[0., 0.], [0.5, 0.], [0.5, -0.5],
                                             [-0.5, -0.5], [-0.5, 0.5], [0, 0.5]])
    elif shape_type == 'square':
        out_shape = Polygon(*args, vertices=[[-0.5, 0.5], [0.5, 0.5], [0.5, -0.5], [-0.5, -0.5]])
    elif shape_type == 'snowflake':
        vertices = get_koch_points(degree=2, s=1.0)
        out_shape = Polygon(*args, vertices=vertices)
    else:
        # bug fix: an invalid argument should raise ValueError, not a raised
        # Warning instance (also fixes the 'Unsupportaed' typo)
        raise ValueError("Unsupported shape type: {}".format(shape_type))
    return out_shape
# Demo / visual sanity check: build an L-shaped dataset, pull one batch,
# and plot the SDF / indicator function.
if __name__ == "__main__":
    np.random.seed(0)
    shape_type = 'L'
    res = 128 # has to be even
    example_idx = 0
    sample_type = 'grid'
    n_samples = 2
    n_points = 24
    dataset = get2D_dataset(n_points, n_samples, res, sample_type, 0.005, shape_type=shape_type) # BasicShape2D(100, 20, res=50)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=3, pin_memory=True)
    data = next(iter(dataloader))
    # extract one example of the first batch for plotting
    clean_points_gt = data['points'][example_idx].detach().cpu().numpy()
    n_gt = data['mnfld_n'][example_idx].detach().cpu().numpy()
    nonmnfld_points = data['nonmnfld_points'][example_idx].detach().cpu().numpy()
    grid_normals = dataset.grid_n
    vis.plot_sdf_indicator(dataset.vertices, dataset.grid_points[:, 0], dataset.grid_points[:, 1],
                           dataset.dist_img.flatten(), title_text='', show_ax=False, output_path='./vis/') # plot sdf, indicator function, and points
    # vis.plot_shape_data(dataset.grid_points[:, 0], dataset.grid_points[:, 1], dataset.dist_img.flatten(),
    #                     clean_points_gt, n_gt=n_gt, show_ax=True, show_bar=True,
    #                     title_text='', colorscale='Geyser', nonmnfld_points=nonmnfld_points, divergence=None,
    #                     grid_normals=grid_normals) # plot shape, sdf and other data
    # vis.plot_paper_teaser_images(dataset.grid_points[:, 0], dataset.grid_points[:, 1] ,dataset.dist_img.flatten(),
    #                              clean_points_gt, grid_normals) # plot images for DiGS paper teaser image
|
en
| 0.773691
|
# <NAME> (Itzik) <<EMAIL>> # <NAME> <<EMAIL>> # A class to generate synthetic examples of basic shapes. # Generates clean and noisy point clouds sampled on Jets + samples no a grid with their distance to the surface #grid | gaussian | combined # Generate shape # generate grid points and find distance to closest point on the line # Compute gt mnfld normals # implement a function that returns points on the manifold #implement a function that returns normal vectors for points on the manifold # implement a function that computes the distance and normal vectors of nonmanifold points. # default implementation finds the nearest neighbor and return its normal and the distance to it. # which is a coarse approxiamation # compute distance and normal (general case) # 2D implementation # [y, x] #TODO implement interpulation method? # find the nearest grid point and return its divergence # from https://discuss.pytorch.org/t/same-padding-equivalent-in-pytorch/85121/3 # The total padding applied along the height and width is computed as: # Finally, the padding on the top, bottom, left and right are: # vertices: x,y points specifying the polygon # sample points on the lines # iterate over all the lines and finds the minimum distance between all points and line segments # good explenation ref : https://stackoverflow.com/questions/10983872/distance-from-a-point-to-a-polygon # compute normal vector # Compute standard form coefficients Segments a line to Koch line, creating fractals. 
:param tuple start: (x, y) coordinates of the starting point :param tuple end: (x, y) coordinates of the end point :param float factor: the multiple of sixty degrees to rotate :returns tuple: tuple of all points of segmentation # coordinates of the start # coordinates of the end # the length of the line # first point: same as the start # second point: one third in each direction from the first point # third point: rotation for multiple of 60 degrees # fourth point: two thirds in each direction from the first point # the last point Generates all lines for a Koch Snowflake with a given degree. code from: https://github.com/IlievskiV/Amusive-Blogging-N-Coding/blob/master/Visualizations/snowflake.ipynb :param int degree: how deep to go in the branching process :param float s: the length of the initial equilateral triangle :returns list: list of all lines that form the snowflake # all lines of the snowflake # we rotate in multiples of 60 degrees # vertices of the initial equilateral triangle # set the initial lines # every lines produce 4 more lines # a to b # b to c # d to c # d to e #reorder the points clockwise # has to be even # BasicShape2D(100, 20, res=50) # plot sdf, indicator function, and points # vis.plot_shape_data(dataset.grid_points[:, 0], dataset.grid_points[:, 1], dataset.dist_img.flatten(), # clean_points_gt, n_gt=n_gt, show_ax=True, show_bar=True, # title_text='', colorscale='Geyser', nonmnfld_points=nonmnfld_points, divergence=None, # grid_normals=grid_normals) # plot shape, sdf and other data # vis.plot_paper_teaser_images(dataset.grid_points[:, 0], dataset.grid_points[:, 1] ,dataset.dist_img.flatten(), # clean_points_gt, grid_normals) # plot images for DiGS paper teaser image
| 2.688491
| 3
|
web/web_tracker/migrations/0014_auto_20180905_0016.py
|
igorxxl8/Calistra
| 0
|
6628927
|
<reponame>igorxxl8/Calistra
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-04 21:16
from __future__ import unicode_literals
from django.db import migrations, models
import web_tracker.validators
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11): alter Task.reminder to a TextField with
    reminder-format help text and the custom validate_reminder validator."""
    dependencies = [
        ('web_tracker', '0013_auto_20180905_0011'),
    ]
    operations = [
        migrations.AlterField(
            model_name='task',
            name='reminder',
            field=models.TextField(default='', help_text='Use format: frequency:time, where frequency - every_day, every_week, or day of week', validators=[web_tracker.validators.validate_reminder]),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-04 21:16
from __future__ import unicode_literals
from django.db import migrations, models
import web_tracker.validators
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11): alter Task.reminder to a TextField with
    reminder-format help text and the custom validate_reminder validator."""
    dependencies = [
        ('web_tracker', '0013_auto_20180905_0011'),
    ]
    operations = [
        migrations.AlterField(
            model_name='task',
            name='reminder',
            field=models.TextField(default='', help_text='Use format: frequency:time, where frequency - every_day, every_week, or day of week', validators=[web_tracker.validators.validate_reminder]),
        ),
    ]
|
en
| 0.658481
|
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-09-04 21:16
| 1.749299
| 2
|
io_scene_xray/dm/ops.py
|
clayne/blender-xray
| 0
|
6628928
|
<gh_stars>0
# standart modules
import os
# blender modules
import bpy
import bpy_extras
# addon modules
from . import imp
from . import exp
from .. import contexts
from .. import icons
from .. import log
from .. import utils
from .. import ie_props
from .. import version_utils
class ImportDmContext(contexts.ImportMeshContext):
    # Import-time settings holder for .dm files; currently adds nothing
    # beyond the generic mesh-import context.
    def __init__(self):
        super().__init__()
class ExportDmContext(contexts.ExportMeshContext):
    # Export-time settings holder for .dm files; currently adds nothing
    # beyond the generic mesh-export context.
    def __init__(self):
        super().__init__()
def menu_func_import(self, context):
    # Draw the .dm import operator entry (with the addon icon);
    # `self` is the Blender menu being drawn.
    icon = icons.get_stalker_icon()
    self.layout.operator(
        XRAY_OT_import_dm.bl_idname,
        text=utils.build_op_label(XRAY_OT_import_dm),
        icon_value=icon
    )
def menu_func_export(self, context):
    # Draw the .dm export operator entry (with the addon icon);
    # `self` is the Blender menu being drawn.
    icon = icons.get_stalker_icon()
    self.layout.operator(
        XRAY_OT_export_dm.bl_idname,
        text=utils.build_op_label(XRAY_OT_export_dm),
        icon_value=icon
    )
# Shared file extension and operator label for the .dm import/export operators.
filename_ext = '.dm'
op_text = 'Detail Model'
# Import-operator properties, kept in a dict so they can be attached either as
# class attributes (Blender < 2.80, via exec in the class body) or assigned
# later for 2.80+ (see version_utils.assign_props in register()).
op_import_dm_props = {
    'filter_glob': bpy.props.StringProperty(
        default='*.dm', options={'HIDDEN'}
    ),
    'directory': bpy.props.StringProperty(
        subtype="DIR_PATH", options={'SKIP_SAVE'}
    ),
    'filepath': bpy.props.StringProperty(
        subtype="FILE_PATH", options={'SKIP_SAVE'}
    ),
    'files': bpy.props.CollectionProperty(
        type=bpy.types.OperatorFileListElement, options={'SKIP_SAVE', 'HIDDEN'}
    )
}
class XRAY_OT_import_dm(
    ie_props.BaseOperator,
    bpy_extras.io_utils.ImportHelper
):
    """File > Import operator for X-Ray detail model (.dm) files."""
    bl_idname = 'xray_import.dm'
    bl_label = 'Import .dm'
    bl_description = 'Imports X-Ray Detail Models (.dm)'
    bl_options = {'REGISTER', 'UNDO'}
    draw_fun = menu_func_import
    text = op_text
    ext = filename_ext
    filename_ext = filename_ext
    # Blender < 2.80 requires operator properties as plain class attributes;
    # on 2.80+ they are assigned later via version_utils.assign_props.
    if not version_utils.IS_28:
        for prop_name, prop_value in op_import_dm_props.items():
            exec('{0} = op_import_dm_props.get("{0}")'.format(prop_name))
    @utils.execute_with_logger
    @utils.set_cursor_state
    def execute(self, context):
        # Import every selected .dm file; per-file extension mismatches are
        # reported immediately, import errors are collected and logged at the end.
        textures_folder = version_utils.get_preferences().textures_folder_auto
        if not textures_folder:
            self.report({'WARNING'}, 'No textures folder specified')
        if not self.files:
            self.report({'ERROR'}, 'No files selected')
            return {'CANCELLED'}
        import_context = ImportDmContext()
        import_context.textures_folder = textures_folder
        import_context.operator = self
        for file in self.files:
            file_ext = os.path.splitext(file.name)[-1].lower()
            if file_ext == filename_ext:
                try:
                    imp.import_file(
                        os.path.join(self.directory, file.name),
                        import_context
                    )
                except utils.AppError as err:
                    import_context.errors.append(err)
            else:
                self.report(
                    {'ERROR'},
                    # bug fix: report the file name, not the property object
                    'Format of {} not recognised'.format(file.name)
                )
        for err in import_context.errors:
            log.err(err)
        return {'FINISHED'}
# Properties for the multi-object export operator (XRAY_OT_export_dm).
op_export_dms_props = {
    'detail_models': bpy.props.StringProperty(options={'HIDDEN'}),
    'directory': bpy.props.StringProperty(subtype="FILE_PATH"),
    'texture_name_from_image_path': ie_props.PropObjectTextureNamesFromPath()
}
class XRAY_OT_export_dm(ie_props.BaseOperator):
    """Batch exporter: writes each selected mesh object to its own .dm file."""
    bl_idname = 'xray_export.dm'
    bl_label = 'Export .dm'
    bl_options = {'REGISTER', 'UNDO', 'PRESET'}
    draw_fun = menu_func_export
    text = op_text
    ext = filename_ext
    filename_ext = filename_ext
    # Blender < 2.80 requires operator properties as plain class attributes.
    if not version_utils.IS_28:
        for prop_name, prop_value in op_export_dms_props.items():
            exec('{0} = op_export_dms_props.get("{0}")'.format(prop_name))
    def draw(self, context):
        self.layout.prop(self, 'texture_name_from_image_path')
    @utils.execute_with_logger
    @utils.set_cursor_state
    def execute(self, context):
        # Export every object named in the comma-separated detail_models list
        # into self.directory; errors are collected and logged at the end.
        # NOTE(review): export_context.errors is presumably initialized by the
        # base ExportMeshContext — confirm, since only unique_errors is set here.
        export_context = ExportDmContext()
        export_context.texname_from_path = self.texture_name_from_image_path
        export_context.unique_errors = set()
        for name in self.detail_models.split(','):
            detail_model = context.scene.objects[name]
            if not name.lower().endswith(filename_ext):
                name += filename_ext
            path = self.directory
            try:
                exp.export_file(
                    detail_model,
                    os.path.join(path, name),
                    export_context
                )
            except utils.AppError as err:
                export_context.errors.append(err)
        for err in export_context.errors:
            log.err(err)
        return {'FINISHED'}
    def invoke(self, context, event):
        # Single mesh selection delegates to the single-file dialog operator;
        # multiple selection opens a directory picker for batch export.
        prefs = version_utils.get_preferences()
        self.texture_name_from_image_path = prefs.dm_texture_names_from_path
        objs = context.selected_objects
        if not objs:
            self.report({'ERROR'}, 'Cannot find selected object')
            return {'CANCELLED'}
        if len(objs) == 1:
            if objs[0].type != 'MESH':
                self.report({'ERROR'}, 'The select object is not mesh')
                return {'CANCELLED'}
            else:
                bpy.ops.xray_export.dm_file('INVOKE_DEFAULT')
        else:
            # only mesh objects are exported; others are silently skipped
            self.detail_models = ','.join(
                [obj.name for obj in objs if obj.type == 'MESH']
            )
            context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
# Properties for the single-file export operator (XRAY_OT_export_dm_file).
op_export_dm_props = {
    'detail_model': bpy.props.StringProperty(options={'HIDDEN'}),
    'filter_glob': bpy.props.StringProperty(
        default='*'+filename_ext, options={'HIDDEN'}
    ),
    'texture_name_from_image_path': ie_props.PropObjectTextureNamesFromPath()
}
class XRAY_OT_export_dm_file(
ie_props.BaseOperator,
bpy_extras.io_utils.ExportHelper
):
bl_idname = 'xray_export.dm_file'
bl_label = 'Export .dm'
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
filename_ext = filename_ext
if not version_utils.IS_28:
for prop_name, prop_value in op_export_dm_props.items():
exec('{0} = op_export_dm_props.get("{0}")'.format(prop_name))
@utils.execute_with_logger
@utils.set_cursor_state
def execute(self, context):
try:
self.exp(context.scene.objects[self.detail_model], context)
except utils.AppError as err:
log.err(err)
return {'FINISHED'}
def draw(self, context):
self.layout.prop(self, 'texture_name_from_image_path')
def exp(self, bpy_obj, context):
export_context = ExportDmContext()
export_context.texname_from_path = self.texture_name_from_image_path
export_context.unique_errors = set()
exp.export_file(bpy_obj, self.filepath, export_context)
def invoke(self, context, event):
prefs = version_utils.get_preferences()
self.texture_name_from_image_path = prefs.dm_texture_names_from_path
objs = context.selected_objects
if not objs:
self.report({'ERROR'}, 'Cannot find selected object')
return {'CANCELLED'}
if len(objs) > 1:
self.report({'ERROR'}, 'Too many selected objects found')
return {'CANCELLED'}
if objs[0].type != 'MESH':
self.report({'ERROR'}, 'The selected object is not mesh')
return {'CANCELLED'}
self.detail_model = objs[0].name
self.filepath = self.detail_model
return super().invoke(context, event)
classes = (
(XRAY_OT_import_dm, op_import_dm_props),
(XRAY_OT_export_dm, op_export_dms_props),
(XRAY_OT_export_dm_file, op_export_dm_props)
)
def register():
for operator, props in classes:
version_utils.assign_props([(props, operator), ])
bpy.utils.register_class(operator)
def unregister():
import_menu, export_menu = version_utils.get_import_export_menus()
export_menu.remove(menu_func_export)
import_menu.remove(menu_func_import)
for operator, props in reversed(classes):
bpy.utils.unregister_class(operator)
|
# standart modules
import os
# blender modules
import bpy
import bpy_extras
# addon modules
from . import imp
from . import exp
from .. import contexts
from .. import icons
from .. import log
from .. import utils
from .. import ie_props
from .. import version_utils
class ImportDmContext(contexts.ImportMeshContext):
def __init__(self):
super().__init__()
class ExportDmContext(contexts.ExportMeshContext):
def __init__(self):
super().__init__()
def menu_func_import(self, context):
icon = icons.get_stalker_icon()
self.layout.operator(
XRAY_OT_import_dm.bl_idname,
text=utils.build_op_label(XRAY_OT_import_dm),
icon_value=icon
)
def menu_func_export(self, context):
icon = icons.get_stalker_icon()
self.layout.operator(
XRAY_OT_export_dm.bl_idname,
text=utils.build_op_label(XRAY_OT_export_dm),
icon_value=icon
)
filename_ext = '.dm'
op_text = 'Detail Model'
op_import_dm_props = {
'filter_glob': bpy.props.StringProperty(
default='*.dm', options={'HIDDEN'}
),
'directory': bpy.props.StringProperty(
subtype="DIR_PATH", options={'SKIP_SAVE'}
),
'filepath': bpy.props.StringProperty(
subtype="FILE_PATH", options={'SKIP_SAVE'}
),
'files': bpy.props.CollectionProperty(
type=bpy.types.OperatorFileListElement, options={'SKIP_SAVE', 'HIDDEN'}
)
}
class XRAY_OT_import_dm(
ie_props.BaseOperator,
bpy_extras.io_utils.ImportHelper
):
bl_idname = 'xray_import.dm'
bl_label = 'Import .dm'
bl_description = 'Imports X-Ray Detail Models (.dm)'
bl_options = {'REGISTER', 'UNDO'}
draw_fun = menu_func_import
text = op_text
ext = filename_ext
filename_ext = filename_ext
if not version_utils.IS_28:
for prop_name, prop_value in op_import_dm_props.items():
exec('{0} = op_import_dm_props.get("{0}")'.format(prop_name))
@utils.execute_with_logger
@utils.set_cursor_state
def execute(self, context):
textures_folder = version_utils.get_preferences().textures_folder_auto
if not textures_folder:
self.report({'WARNING'}, 'No textures folder specified')
if not self.files:
self.report({'ERROR'}, 'No files selected')
return {'CANCELLED'}
import_context = ImportDmContext()
import_context.textures_folder=textures_folder
import_context.operator=self
for file in self.files:
file_ext = os.path.splitext(file.name)[-1].lower()
if file_ext == filename_ext:
try:
imp.import_file(
os.path.join(self.directory, file.name),
import_context
)
except utils.AppError as err:
import_context.errors.append(err)
else:
self.report(
{'ERROR'},
'Format of {} not recognised'.format(file)
)
for err in import_context.errors:
log.err(err)
return {'FINISHED'}
op_export_dms_props = {
'detail_models': bpy.props.StringProperty(options={'HIDDEN'}),
'directory': bpy.props.StringProperty(subtype="FILE_PATH"),
'texture_name_from_image_path': ie_props.PropObjectTextureNamesFromPath()
}
class XRAY_OT_export_dm(ie_props.BaseOperator):
bl_idname = 'xray_export.dm'
bl_label = 'Export .dm'
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
draw_fun = menu_func_export
text = op_text
ext = filename_ext
filename_ext = filename_ext
if not version_utils.IS_28:
for prop_name, prop_value in op_export_dms_props.items():
exec('{0} = op_export_dms_props.get("{0}")'.format(prop_name))
def draw(self, context):
self.layout.prop(self, 'texture_name_from_image_path')
@utils.execute_with_logger
@utils.set_cursor_state
def execute(self, context):
export_context = ExportDmContext()
export_context.texname_from_path = self.texture_name_from_image_path
export_context.unique_errors = set()
for name in self.detail_models.split(','):
detail_model = context.scene.objects[name]
if not name.lower().endswith(filename_ext):
name += filename_ext
path = self.directory
try:
exp.export_file(
detail_model,
os.path.join(path, name),
export_context
)
except utils.AppError as err:
export_context.errors.append(err)
for err in export_context.errors:
log.err(err)
return {'FINISHED'}
def invoke(self, context, event):
prefs = version_utils.get_preferences()
self.texture_name_from_image_path = prefs.dm_texture_names_from_path
objs = context.selected_objects
if not objs:
self.report({'ERROR'}, 'Cannot find selected object')
return {'CANCELLED'}
if len(objs) == 1:
if objs[0].type != 'MESH':
self.report({'ERROR'}, 'The select object is not mesh')
return {'CANCELLED'}
else:
bpy.ops.xray_export.dm_file('INVOKE_DEFAULT')
else:
self.detail_models = ','.join(
[obj.name for obj in objs if obj.type == 'MESH']
)
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
op_export_dm_props = {
'detail_model': bpy.props.StringProperty(options={'HIDDEN'}),
'filter_glob': bpy.props.StringProperty(
default='*'+filename_ext, options={'HIDDEN'}
),
'texture_name_from_image_path': ie_props.PropObjectTextureNamesFromPath()
}
class XRAY_OT_export_dm_file(
ie_props.BaseOperator,
bpy_extras.io_utils.ExportHelper
):
bl_idname = 'xray_export.dm_file'
bl_label = 'Export .dm'
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
filename_ext = filename_ext
if not version_utils.IS_28:
for prop_name, prop_value in op_export_dm_props.items():
exec('{0} = op_export_dm_props.get("{0}")'.format(prop_name))
@utils.execute_with_logger
@utils.set_cursor_state
def execute(self, context):
try:
self.exp(context.scene.objects[self.detail_model], context)
except utils.AppError as err:
log.err(err)
return {'FINISHED'}
def draw(self, context):
self.layout.prop(self, 'texture_name_from_image_path')
def exp(self, bpy_obj, context):
export_context = ExportDmContext()
export_context.texname_from_path = self.texture_name_from_image_path
export_context.unique_errors = set()
exp.export_file(bpy_obj, self.filepath, export_context)
def invoke(self, context, event):
prefs = version_utils.get_preferences()
self.texture_name_from_image_path = prefs.dm_texture_names_from_path
objs = context.selected_objects
if not objs:
self.report({'ERROR'}, 'Cannot find selected object')
return {'CANCELLED'}
if len(objs) > 1:
self.report({'ERROR'}, 'Too many selected objects found')
return {'CANCELLED'}
if objs[0].type != 'MESH':
self.report({'ERROR'}, 'The selected object is not mesh')
return {'CANCELLED'}
self.detail_model = objs[0].name
self.filepath = self.detail_model
return super().invoke(context, event)
classes = (
(XRAY_OT_import_dm, op_import_dm_props),
(XRAY_OT_export_dm, op_export_dms_props),
(XRAY_OT_export_dm_file, op_export_dm_props)
)
def register():
for operator, props in classes:
version_utils.assign_props([(props, operator), ])
bpy.utils.register_class(operator)
def unregister():
import_menu, export_menu = version_utils.get_import_export_menus()
export_menu.remove(menu_func_export)
import_menu.remove(menu_func_import)
for operator, props in reversed(classes):
bpy.utils.unregister_class(operator)
|
en
| 0.063826
|
# standart modules # blender modules # addon modules
| 1.956119
| 2
|
examples/fci/10-spin.py
|
gmwang18/pyscf
| 0
|
6628929
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
Assign spin state for FCI wavefunction.
By default, the FCI solver will take Mole attribute spin for the spin state.
It can be overwritten by passing kwarg ``nelec`` to the kernel function of FCI
solver. The nelec argument is a two-element tuple. The first is the number
of alpha electrons; the second is the number of beta electrons.
If spin-contamination is observed on FCI wavefunction, we can use the
decoration function :func:`fci.addons.fix_spin_` to level shift the energy of
states which do not have the target spin.
'''
import numpy
from pyscf import gto, scf, fci
mol = gto.M(atom='Ne 0 0 0', basis='631g', spin=2)
m = scf.RHF(mol)
m.kernel()
norb = m.mo_energy.size
fs = fci.FCI(mol, m.mo_coeff)
e, c = fs.kernel()
print('E = %.12f 2S+1 = %.7f' %
(e, fci.spin_op.spin_square0(c, norb, (6,4))[1]))
e, c = fs.kernel(nelec=(5,5))
print('E = %.12f 2S+1 = %.7f' %
(e, fci.spin_op.spin_square0(c, norb, (5,5))[1]))
fs = fci.addons.fix_spin_(fci.FCI(mol, m.mo_coeff), shift=.5)
e, c = fs.kernel()
print('E = %.12f 2S+1 = %.7f' %
(e, fci.spin_op.spin_square0(c, norb, (6,4))[1]))
#
# Example 2: Oxygen molecule singlet state
#
nelec = (8,8)
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
symmetry=1, verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff)
mci.wfnsym = 'A1g'
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('A1g singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
mci.wfnsym = 'A2g'
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('A2g singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff)
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('Singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
Assign spin state for FCI wavefunction.
By default, the FCI solver will take Mole attribute spin for the spin state.
It can be overwritten by passing kwarg ``nelec`` to the kernel function of FCI
solver. The nelec argument is a two-element tuple. The first is the number
of alpha electrons; the second is the number of beta electrons.
If spin-contamination is observed on FCI wavefunction, we can use the
decoration function :func:`fci.addons.fix_spin_` to level shift the energy of
states which do not have the target spin.
'''
import numpy
from pyscf import gto, scf, fci
mol = gto.M(atom='Ne 0 0 0', basis='631g', spin=2)
m = scf.RHF(mol)
m.kernel()
norb = m.mo_energy.size
fs = fci.FCI(mol, m.mo_coeff)
e, c = fs.kernel()
print('E = %.12f 2S+1 = %.7f' %
(e, fci.spin_op.spin_square0(c, norb, (6,4))[1]))
e, c = fs.kernel(nelec=(5,5))
print('E = %.12f 2S+1 = %.7f' %
(e, fci.spin_op.spin_square0(c, norb, (5,5))[1]))
fs = fci.addons.fix_spin_(fci.FCI(mol, m.mo_coeff), shift=.5)
e, c = fs.kernel()
print('E = %.12f 2S+1 = %.7f' %
(e, fci.spin_op.spin_square0(c, norb, (6,4))[1]))
#
# Example 2: Oxygen molecule singlet state
#
nelec = (8,8)
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
symmetry=1, verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff)
mci.wfnsym = 'A1g'
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('A1g singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
mci.wfnsym = 'A2g'
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('A2g singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff)
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('Singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
|
en
| 0.731974
|
#!/usr/bin/env python # # Author: <NAME> <<EMAIL>> # Assign spin state for FCI wavefunction. By default, the FCI solver will take Mole attribute spin for the spin state. It can be overwritten by passing kwarg ``nelec`` to the kernel function of FCI solver. The nelec argument is a two-element tuple. The first is the number of alpha electrons; the second is the number of beta electrons. If spin-contamination is observed on FCI wavefunction, we can use the decoration function :func:`fci.addons.fix_spin_` to level shift the energy of states which do not have the target spin. # # Example 2: Oxygen molecule singlet state #
| 2.681807
| 3
|
power/coding-challenges/leetcode/217-contains-duplicate.py
|
TuxedoMeow/Hello-World
| 0
|
6628930
|
<filename>power/coding-challenges/leetcode/217-contains-duplicate.py
"""
Notes:
Check if the list contains a duplicate or not.
"""
class Solution:
def contains_duplicate1(self, nums: [int]) -> bool:
nums.sort()
for index in range(len(nums)-1):
if nums[index] == nums[index+1]:
return True
return False
def contains_duplicate2(self, nums: [int]) -> bool:
# Time complexity: O(n^2)
# Space complexity: O(1)
for i_index in range(len(nums)):
for j_index in range(i_index + 1, len(nums)):
if nums[j_index] == nums[i_index]:
return True
return False
test = Solution()
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]
print("contains_duplicate():", test.contains_duplicate1(nums))
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("contains_duplicate():", test.contains_duplicate1(nums))
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]
print("contains_duplicate():", test.contains_duplicate2(nums))
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("contains_duplicate():", test.contains_duplicate2(nums))
"""
contains_duplicate(): True
contains_duplicate(): False
contains_duplicate(): True
contains_duplicate(): False
"""
"""
Time complexity: O(n log n). Due to sorting time complexity will always add O(n log n).
Space complexity: O(1). This is assuming the built-in sort uses heapsort for example.
"""
|
<filename>power/coding-challenges/leetcode/217-contains-duplicate.py
"""
Notes:
Check if the list contains a duplicate or not.
"""
class Solution:
def contains_duplicate1(self, nums: [int]) -> bool:
nums.sort()
for index in range(len(nums)-1):
if nums[index] == nums[index+1]:
return True
return False
def contains_duplicate2(self, nums: [int]) -> bool:
# Time complexity: O(n^2)
# Space complexity: O(1)
for i_index in range(len(nums)):
for j_index in range(i_index + 1, len(nums)):
if nums[j_index] == nums[i_index]:
return True
return False
test = Solution()
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]
print("contains_duplicate():", test.contains_duplicate1(nums))
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("contains_duplicate():", test.contains_duplicate1(nums))
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]
print("contains_duplicate():", test.contains_duplicate2(nums))
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("contains_duplicate():", test.contains_duplicate2(nums))
"""
contains_duplicate(): True
contains_duplicate(): False
contains_duplicate(): True
contains_duplicate(): False
"""
"""
Time complexity: O(n log n). Due to sorting time complexity will always add O(n log n).
Space complexity: O(1). This is assuming the built-in sort uses heapsort for example.
"""
|
en
| 0.644143
|
Notes: Check if the list contains a duplicate or not. # Time complexity: O(n^2) # Space complexity: O(1) contains_duplicate(): True contains_duplicate(): False contains_duplicate(): True contains_duplicate(): False Time complexity: O(n log n). Due to sorting time complexity will always add O(n log n). Space complexity: O(1). This is assuming the built-in sort uses heapsort for example.
| 4.011123
| 4
|
pola/logic.py
|
KlubJagiellonski/pola-backend
| 30
|
6628931
|
<gh_stars>10-100
import locale
import re
from django.conf import settings
from company.models import Brand, Company
from pola import produkty_w_sieci_api
from pola.produkty_w_sieci_api import Client, is_code_supported_by_gs1_api
from product.models import Product
from report.models import Report
def get_result_from_code(code, multiple_company_supported=False, report_as_object=False):
result = DEFAULT_RESULT.copy()
stats = DEFAULT_STATS.copy()
report = DEFAULT_REPORT_DATA.copy()
product = None
result['code'] = code
if not multiple_company_supported:
result.update(DEFAULT_COMPANY_DATA)
if code.isdigit() and (len(code) == 8 or len(code) == 13):
# code is EAN8 or EAN13
product = get_by_code(code)
product_company = product.company
brand_company = product.brand.company if product.brand else None
companies = []
if product_company:
companies.append(product_company)
result['name'] = product_company.common_name or product_company.official_name or product_company.name
if brand_company:
companies.append(brand_company)
companies = list(({c.pk: c for c in companies}).values())
result['product_id'] = product.id
stats['was_590'] = code.startswith('590')
if not product_company:
handle_unknown_company(code, report, result)
elif multiple_company_supported:
handle_multiple_companies(companies, result, stats)
else:
handle_companies_when_multiple_companies_are_not_supported(
companies, multiple_company_supported, result, stats
)
else:
# not an EAN8 nor EAN13 code. Probably QR code or some error
result['name'] = 'Nieprawidłowy kod'
result['altText'] = (
'Pola rozpoznaje tylko kody kreskowe typu EAN8 i '
'EAN13. Zeskanowany przez Ciebie kod jest innego '
'typu. Spróbuj zeskanować kod z czegoś innego'
)
if report_as_object:
result['report'] = report
else:
result.update({("report_" + k, v) for k, v in report.items()})
return result, stats, product
def handle_companies_when_multiple_companies_are_not_supported(companies, multiple_company_supported, result, stats):
company = companies[0]
company_data = serialize_company(company)
stats['was_plScore'] = bool(get_plScore(company))
result.update(company_data)
stats['was_verified'] = company.verified
result['card_type'] = TYPE_WHITE if company.verified else TYPE_GREY
def handle_multiple_companies(companies, result, stats):
companies_data = []
for company in companies:
company_data = serialize_company(company)
stats['was_plScore'] = all(get_plScore(c) for c in companies)
companies_data.append(company_data)
if len(companies) > 1:
result['name'] = "<NAME> - <NAME>"
result['companies'] = companies_data
def handle_unknown_company(code, report, result):
# we don't know the manufacturer
if code.startswith('590'):
# the code is registered in Poland, we want more data!
result['name'] = "Tego produktu nie mamy jeszcze w bazie"
result['altText'] = (
"Każde skanowanie jest rejestrowane. Najczęściej skanowane firmy i produkty, "
"których nie mamy jeszcze w bazie, są weryfikowane w pierwszej kolejności. "
"Nie pobieramy przy tym żadnych informacji o użytkowniku.\n"
"\n"
"Jeśli chcesz zgłosić błąd lub wyrazić opinię, prosimy o kontakt: <EMAIL>"
)
result['card_type'] = TYPE_GREY
report['text'] = "Bardzo prosimy o zgłoszenie nam tego produktu"
report['button_type'] = TYPE_RED
elif code.startswith('977') or code.startswith('978') or code.startswith('979'):
# this is an ISBN/ISSN/ISMN number
# (book, music album or magazine)
result['name'] = 'Kod ISBN/ISSN/ISMN'
result['altText'] = (
'Zeskanowany kod jest kodem '
'ISBN/ISSN/ISMN dotyczącym książki, '
'czasopisma lub albumu muzycznego. '
'Wydawnictwa tego typu nie są aktualnie '
'w obszarze zainteresowań Poli.'
)
report['text'] = "To nie jest książka, czasopismo lub album muzyczny? Prosimy o zgłoszenie"
else:
# let's try to associate the code with a country
for prefix in CODE_PREFIX_TO_COUNTRY.keys():
if code.startswith(prefix):
result['plScore'] = 0
result['card_type'] = TYPE_GREY
result['name'] = f'Miejsce rejestracji: {CODE_PREFIX_TO_COUNTRY[prefix]}'
result['altText'] = (
f'Ten produkt został wyprodukowany przez zagraniczną firmę, '
f'której miejscem rejestracji jest: {CODE_PREFIX_TO_COUNTRY[prefix]}.'
)
break
else:
# Ups. It seems to be an internal code
result['name'] = 'Kod wewnętrzny'
result['altText'] = (
'Zeskanowany kod jest wewnętrznym '
'kodem sieci handlowej. Pola nie '
'potrafi powiedzieć o nim nic więcej'
)
def serialize_company(company):
plScore = get_plScore(company)
company_data = DEFAULT_COMPANY_DATA.copy()
# we know the manufacturer of the product
company_data['name'] = company.common_name or company.official_name or company.name
company_data['plCapital'] = company.plCapital
company_data['plCapital_notes'] = company.plCapital_notes
company_data['plWorkers'] = company.plWorkers
company_data['plWorkers_notes'] = company.plWorkers_notes
company_data['plRnD'] = company.plRnD
company_data['plRnD_notes'] = company.plRnD_notes
company_data['plRegistered'] = company.plRegistered
company_data['plRegistered_notes'] = company.plRegistered_notes
company_data['plNotGlobEnt'] = company.plNotGlobEnt
company_data['plNotGlobEnt_notes'] = company.plNotGlobEnt_notes
company_data['is_friend'] = company.is_friend
if company.is_friend:
company_data['friend_text'] = 'To jest <NAME>'
if company.description:
company_data['description'] = company.description
else:
desc = ''
if company.plCapital_notes:
desc += strip_urls_newlines(company.plCapital_notes) + '\n'
if company.plWorkers_notes:
desc += strip_urls_newlines(company.plWorkers_notes) + '\n'
if company.plRnD_notes:
desc += strip_urls_newlines(company.plRnD_notes) + '\n'
if company.plRegistered_notes:
desc += strip_urls_newlines(company.plRegistered_notes) + '\n'
if company.plNotGlobEnt_notes:
desc += strip_urls_newlines(company.plNotGlobEnt_notes) + '\n'
company_data['description'] = desc
brands = company.brand_set.all()
if brands:
company_data['description'] += "\n"
brands_list = ', '.join(sorted(str(d) for d in brands))
if len(brands) == 1:
company_data['description'] += f"<NAME>aca do firmy: {brands_list}."
else:
company_data['description'] += f"Marki naależace do firmy: {brands_list}."
company_data['sources'] = company.get_sources(raise_exp=False)
if plScore:
company_data['plScore'] = plScore
return company_data
ENABLE_GS1_API = False
def get_by_code(code):
try:
return Product.objects.get(code=code)
except Product.DoesNotExist:
pass
try:
if is_code_supported_by_gs1_api(code) and ENABLE_GS1_API:
client = Client(settings.PRODUKTY_W_SIECI_API_USERNAME, settings.PRODUKTY_W_SIECI_API_PASSWORD)
product_info = client.get_product_by_gtin(code)
return create_from_api(code, product_info)
except produkty_w_sieci_api.ApiError:
pass
return Product.objects.create(code=code)
def create_from_api(code, obj, product=None):
obj_owner_name = None
obj_product_name = None
if obj:
obj_owner_name = obj.get('BrandOwner', None)
obj_product_name = obj.get('ProductName', None)
obj_brand = obj.get('Brand', None)
company_created = False
if obj_owner_name:
company, company_created = Company.objects.get_or_create(
name=obj_owner_name, commit_desc='Firma utworzona automatycznie na podstawie API' ' ILiM'
)
else:
company = None
commit_desc = ""
if not product:
product = Product.objects.create(
name=obj_product_name,
code=code,
company=company,
commit_desc="Produkt utworzony automatycznie na podstawie skanu " "użytkownika",
)
else:
if product.name:
if obj_product_name and product.name != obj_product_name:
create_bot_report(
product,
f"Wg. najnowszego odpytania w bazie ILiM nazwa tego produktu to:\"{obj_product_name}\"",
check_if_already_exists=not company_created,
)
else:
if obj_product_name != code:
commit_desc += 'Nazwa produktu zmieniona na podstawie bazy GS1. '
product.name = obj_product_name
if product.company:
if (
company
and product.company.name
and obj_owner_name
and not ilim_compare_str(product.company.name, obj_owner_name)
):
create_bot_report(
product,
f"Wg. najnowszego odpytania w bazie ILiM producent tego produktu to:\"{obj_owner_name}\"",
check_if_already_exists=not company_created,
)
else:
commit_desc += 'Producent produktu zmieniony na podstawie bazy GS1. '
product.company = company
if product.company and obj and obj_brand:
if product.brand:
if product.brand.name != obj_brand:
create_bot_report(
product,
f"Wg. najnowszego odpytania w bazie ILiM marka tego produktu to:\"{obj_brand}\"",
check_if_already_exists=True,
)
else:
brand, _ = Brand.objects.get_or_create(
name=obj_brand,
company=product.company,
commit_desc='Marka utworzona automatycznie na podstawie API' ' ILiM',
)
product.brand = brand
commit_desc += 'Marka produktu zmieniona na podstawie bazy GS1. '
product.save(commit_desc=commit_desc)
return product
def create_bot_report(product, description, check_if_already_exists=False):
if (
check_if_already_exists
and Report.objects.filter(product=product, client='krs-bot', description=description).exists()
):
return
report = Report(description=description)
report.product = product
report.client = 'krs-bot'
report.save()
def get_plScore(company):
if (
company.plCapital is not None
and company.plWorkers is not None
and company.plRnD is not None
and company.plRegistered is not None
and company.plNotGlobEnt is not None
):
return int(
0.35 * company.plCapital
+ 0.30 * company.plWorkers
+ 0.15 * company.plRnD
+ 0.10 * company.plRegistered
+ 0.10 * company.plNotGlobEnt
)
else:
return None
def shareholders_to_str(krs, id, indent):
str = ''
json = krs.query_shareholders(id)
data = json['data']
kapital_zakladowy = data['krs_podmioty.wartosc_kapital_zakladowy']
wspolnicy = json['layers']['wspolnicy']
for wspolnik in wspolnicy:
udzialy_wartosc = wspolnik.get('udzialy_wartosc', None)
if udzialy_wartosc is None:
str += f"{indent}* {wspolnik['nazwa']} -------\n"
else:
str += '{}* {} {}/{} {:.0f}%\n'.format(
indent,
wspolnik['nazwa'],
udzialy_wartosc,
kapital_zakladowy,
100 * locale.atof(udzialy_wartosc) / kapital_zakladowy,
)
if wspolnik['krs_id'] is not None:
str += shareholders_to_str(krs, wspolnik['krs_id'], indent + ' ')
return str
def rem_dbl_newlines(str):
return str.replace('\r\n\r\n', '\r\n').replace('\n\n', '\n')
def strip_dbl_spaces(str):
return re.sub(' +', ' ', str).strip()
def ilim_compare_str(s1, s2):
s1 = strip_dbl_spaces(s1)
s2 = strip_dbl_spaces(s2)
return s1.upper() == s2.upper()
def strip_urls_newlines(str):
s = re.sub(
r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|'
r'(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’'
r']))',
'',
str,
)
s = rem_dbl_newlines(s)
s = s.strip(' \t\n\r')
return s
TYPE_RED = 'type_red'
TYPE_WHITE = 'type_white'
TYPE_GREY = 'type_grey'
DEFAULT_COMPANY_DATA = {
'name': None,
'plCapital': None,
'plCapital_notes': None,
'plWorkers': None,
'plWorkers_notes': None,
'plRnD': None,
'plRnD_notes': None,
'plRegistered': None,
'plRegistered_notes': None,
'plNotGlobEnt': None,
'plNotGlobEnt_notes': None,
'plScore': None,
}
DEFAULT_REPORT_DATA = {
'text': 'Zgłoś jeśli posiadasz bardziej aktualne dane na temat tego produktu',
'button_text': 'Zgłoś',
'button_type': TYPE_WHITE,
}
DEFAULT_RESULT = {
'product_id': None,
'code': None,
'name': None,
'card_type': TYPE_WHITE,
'altText': None,
}
DEFAULT_STATS = {'was_verified': False, 'was_590': False, 'was_plScore': False}
CODE_PREFIX_TO_COUNTRY = {
"30": "Francja",
"31": "Francja",
"32": "Francja",
"33": "Francja",
"34": "Francja",
"35": "Francja",
"36": "Francja",
"37": "Francja",
"380": "Bułgaria",
"383": "Słowenia",
"385": "Chorwacja",
"387": "Bośnia-Hercegowina",
"40": "Niemcy",
"41": "Niemcy",
"42": "Niemcy",
"43": "Niemcy",
"44": "Niemcy",
"45": "Japonia",
"46": "Federacja Rosyjska",
"470": "Kirgistan",
"471": "Taiwan",
"474": "Estonia",
"475": "Łotwa",
"476": "Azerbejdżan",
"477": "Litwa",
"478": "Uzbekistan",
"479": "Sri Lanka",
"480": "Filipiny",
"481": "Białoruś",
"482": "Ukraina",
"484": "Mołdova",
"485": "Armenia",
"486": "Gruzja",
"487": "Kazachstan",
"489": "Hong Kong",
"49": "Japonia",
"50": "Wielka Brytania",
"520": "Grecja",
"528": "Liban",
"529": "Cypr",
"531": "Macedonia",
"535": "Malta",
"539": "Irlandia",
"54": "Belgia & Luksemburg",
"560": "Portugalia",
"569": "Islandia",
"57": "Dania",
# "590": "Polska",
"594": "Rumunia",
"599": "Węgry",
"600": "Południowa Afryka",
"601": "Południowa Afryka",
"608": "Bahrain",
"609": "Mauritius",
"611": "Maroko",
"613": "Algeria",
"619": "Tunezja",
"621": "Syria",
"622": "Egipt",
"624": "Libia",
"625": "Jordania",
"626": "Iran",
"627": "Kuwejt",
"628": "Arabia Saudyjska",
"64": "Finlandia",
"690": "Chiny",
"691": "Chiny",
"692": "Chiny",
"70": "Norwegia",
"729": "Izrael",
"73": "Szwecja",
"740": "Gwatemala",
"741": "Salwador",
"742": "Honduras",
"743": "Nikaragua",
"744": "Kostaryka",
"745": "Panama",
"746": "Dominikana",
"750": "Meksyk",
"759": "Wenezuela",
"76": "Szwajcaria",
"770": "Kolumbia",
"773": "Urugwaj",
"775": "Peru",
"777": "Boliwia",
"779": "Argentyna",
"780": "Chile",
"784": "Paragwaj",
"786": "Ekwador",
"789": "Brazylia",
"790": "Brazylia",
"80": "Włochy",
"81": "Włochy",
"82": "Włochy",
"83": "Włochy",
"84": "Hiszpania",
"850": "Kuba",
"858": "Słowacja",
"859": "Czechy",
"860": "Jugosławia",
"867": "Korea Północna",
"869": "Turcja",
"87": "Holandia",
"880": "Korea Południowa",
"885": "Tajlandia",
"888": "Singapur",
"890": "Indie",
"893": "Wietnam",
"899": "Indonezja",
"90": "Austria",
"91": "Austria",
"93": "Australia",
"94": "Nowa Zelandia",
"950": "EAN - IDA",
"955": "Malezja",
"958": "Makao",
}
|
import locale
import re
from django.conf import settings
from company.models import Brand, Company
from pola import produkty_w_sieci_api
from pola.produkty_w_sieci_api import Client, is_code_supported_by_gs1_api
from product.models import Product
from report.models import Report
def get_result_from_code(code, multiple_company_supported=False, report_as_object=False):
result = DEFAULT_RESULT.copy()
stats = DEFAULT_STATS.copy()
report = DEFAULT_REPORT_DATA.copy()
product = None
result['code'] = code
if not multiple_company_supported:
result.update(DEFAULT_COMPANY_DATA)
if code.isdigit() and (len(code) == 8 or len(code) == 13):
# code is EAN8 or EAN13
product = get_by_code(code)
product_company = product.company
brand_company = product.brand.company if product.brand else None
companies = []
if product_company:
companies.append(product_company)
result['name'] = product_company.common_name or product_company.official_name or product_company.name
if brand_company:
companies.append(brand_company)
companies = list(({c.pk: c for c in companies}).values())
result['product_id'] = product.id
stats['was_590'] = code.startswith('590')
if not product_company:
handle_unknown_company(code, report, result)
elif multiple_company_supported:
handle_multiple_companies(companies, result, stats)
else:
handle_companies_when_multiple_companies_are_not_supported(
companies, multiple_company_supported, result, stats
)
else:
# not an EAN8 nor EAN13 code. Probably QR code or some error
result['name'] = 'Nieprawidłowy kod'
result['altText'] = (
'Pola rozpoznaje tylko kody kreskowe typu EAN8 i '
'EAN13. Zeskanowany przez Ciebie kod jest innego '
'typu. Spróbuj zeskanować kod z czegoś innego'
)
if report_as_object:
result['report'] = report
else:
result.update({("report_" + k, v) for k, v in report.items()})
return result, stats, product
def handle_companies_when_multiple_companies_are_not_supported(companies, multiple_company_supported, result, stats):
    """Flatten the first company's serialized data into ``result`` (legacy single-company mode)."""
    first_company = companies[0]
    # Merge the serialized company fields directly into the result dict.
    result.update(serialize_company(first_company))
    stats['was_plScore'] = bool(get_plScore(first_company))
    stats['was_verified'] = first_company.verified
    if first_company.verified:
        result['card_type'] = TYPE_WHITE
    else:
        result['card_type'] = TYPE_GREY
def handle_multiple_companies(companies, result, stats):
    """Attach serialized data for every company associated with a product.

    Populates ``result['companies']`` with one serialized record per company
    and sets ``stats['was_plScore']`` when every company has a truthy plScore.
    """
    # Loop-invariant: evaluate once. The original recomputed this inside the
    # loop, calling get_plScore() for every company on every iteration (O(n^2)).
    stats['was_plScore'] = all(get_plScore(c) for c in companies)
    companies_data = [serialize_company(company) for company in companies]
    if len(companies) > 1:
        # Placeholder title shown when a product maps to several companies.
        result['name'] = "<NAME> - <NAME>"
    result['companies'] = companies_data
def handle_unknown_company(code, report, result):
    """Fill ``result``/``report`` for a code whose manufacturer is unknown.

    Distinguishes Polish-registered codes (prefix '590'), ISBN/ISSN/ISMN
    publications (977-979), codes with another known country prefix, and
    internal store codes.
    """
    # we don't know the manufacturer
    if code.startswith('590'):
        # the code is registered in Poland, we want more data!
        result['name'] = "Tego produktu nie mamy jeszcze w bazie"
        result['altText'] = (
            "Każde skanowanie jest rejestrowane. Najczęściej skanowane firmy i produkty, "
            "których nie mamy jeszcze w bazie, są weryfikowane w pierwszej kolejności. "
            "Nie pobieramy przy tym żadnych informacji o użytkowniku.\n"
            "\n"
            "Jeśli chcesz zgłosić błąd lub wyrazić opinię, prosimy o kontakt: <EMAIL>"
        )
        result['card_type'] = TYPE_GREY
        report['text'] = "Bardzo prosimy o zgłoszenie nam tego produktu"
        report['button_type'] = TYPE_RED
    elif code.startswith('977') or code.startswith('978') or code.startswith('979'):
        # this is an ISBN/ISSN/ISMN number
        # (book, music album or magazine)
        result['name'] = 'Kod ISBN/ISSN/ISMN'
        result['altText'] = (
            'Zeskanowany kod jest kodem '
            'ISBN/ISSN/ISMN dotyczącym książki, '
            'czasopisma lub albumu muzycznego. '
            'Wydawnictwa tego typu nie są aktualnie '
            'w obszarze zainteresowań Poli.'
        )
        report['text'] = "To nie jest książka, czasopismo lub album muzyczny? Prosimy o zgłoszenie"
    else:
        # let's try to associate the code with a country
        for prefix in CODE_PREFIX_TO_COUNTRY.keys():
            if code.startswith(prefix):
                result['plScore'] = 0
                result['card_type'] = TYPE_GREY
                result['name'] = f'Miejsce rejestracji: {CODE_PREFIX_TO_COUNTRY[prefix]}'
                result['altText'] = (
                    f'Ten produkt został wyprodukowany przez zagraniczną firmę, '
                    f'której miejscem rejestracji jest: {CODE_PREFIX_TO_COUNTRY[prefix]}.'
                )
                break
        else:
            # No known prefix matched: it seems to be an internal store code.
            result['name'] = 'Kod wewnętrzny'
            result['altText'] = (
                'Zeskanowany kod jest wewnętrznym '
                'kodem sieci handlowej. Pola nie '
                'potrafi powiedzieć o nim nic więcej'
            )
def serialize_company(company):
    """Serialize a Company model into the flat dict used by the scan API.

    Includes the pl* criteria values with their notes, a description (the
    stored one, or one assembled from the notes with URLs stripped), the
    company's brands and the computed plScore.
    """
    plScore = get_plScore(company)
    company_data = DEFAULT_COMPANY_DATA.copy()
    # we know the manufacturer of the product
    company_data['name'] = company.common_name or company.official_name or company.name
    company_data['plCapital'] = company.plCapital
    company_data['plCapital_notes'] = company.plCapital_notes
    company_data['plWorkers'] = company.plWorkers
    company_data['plWorkers_notes'] = company.plWorkers_notes
    company_data['plRnD'] = company.plRnD
    company_data['plRnD_notes'] = company.plRnD_notes
    company_data['plRegistered'] = company.plRegistered
    company_data['plRegistered_notes'] = company.plRegistered_notes
    company_data['plNotGlobEnt'] = company.plNotGlobEnt
    company_data['plNotGlobEnt_notes'] = company.plNotGlobEnt_notes
    company_data['is_friend'] = company.is_friend
    if company.is_friend:
        company_data['friend_text'] = 'To jest <NAME>'
    if company.description:
        company_data['description'] = company.description
    else:
        # No stored description: assemble one from the per-criterion notes,
        # with URLs and doubled newlines stripped.
        desc = ''
        if company.plCapital_notes:
            desc += strip_urls_newlines(company.plCapital_notes) + '\n'
        if company.plWorkers_notes:
            desc += strip_urls_newlines(company.plWorkers_notes) + '\n'
        if company.plRnD_notes:
            desc += strip_urls_newlines(company.plRnD_notes) + '\n'
        if company.plRegistered_notes:
            desc += strip_urls_newlines(company.plRegistered_notes) + '\n'
        if company.plNotGlobEnt_notes:
            desc += strip_urls_newlines(company.plNotGlobEnt_notes) + '\n'
        company_data['description'] = desc
    brands = company.brand_set.all()
    if brands:
        company_data['description'] += "\n"
        brands_list = ', '.join(sorted(str(d) for d in brands))
        if len(brands) == 1:
            company_data['description'] += f"<NAME>aca do firmy: {brands_list}."
        else:
            # NOTE(review): "naależace" looks like a typo of "należące" in this
            # user-facing string — confirm with product before changing it.
            company_data['description'] += f"Marki naależace do firmy: {brands_list}."
    company_data['sources'] = company.get_sources(raise_exp=False)
    if plScore:
        # NOTE: a plScore of exactly 0 is also treated as "no score" here (falsy).
        company_data['plScore'] = plScore
    return company_data
ENABLE_GS1_API = False
def get_by_code(code):
    """Fetch the Product for ``code``, creating one if necessary.

    Falls back to the external GS1 ("produkty w sieci") API when enabled,
    and finally creates an empty Product record.
    """
    try:
        return Product.objects.get(code=code)
    except Product.DoesNotExist:
        # Unknown product: try the external registry before creating a stub.
        pass
    try:
        if is_code_supported_by_gs1_api(code) and ENABLE_GS1_API:
            api_client = Client(
                settings.PRODUKTY_W_SIECI_API_USERNAME,
                settings.PRODUKTY_W_SIECI_API_PASSWORD,
            )
            return create_from_api(code, api_client.get_product_by_gtin(code))
    except produkty_w_sieci_api.ApiError:
        # Best effort only — API failures fall through to local creation.
        pass
    return Product.objects.create(code=code)
def create_from_api(code, obj, product=None):
    """Create or update a Product from an ILiM/GS1 API response.

    Args:
        code: the scanned EAN code.
        obj: dict returned by the API (may be falsy); relevant keys are
            'BrandOwner', 'ProductName' and 'Brand'.
        product: existing Product to update; when None a new one is created.

    Returns:
        The created or updated Product instance.

    When locally stored data disagrees with the API response, a bot report is
    filed instead of overwriting the local data.
    """
    obj_owner_name = None
    obj_product_name = None
    if obj:
        obj_owner_name = obj.get('BrandOwner', None)
        obj_product_name = obj.get('ProductName', None)
        obj_brand = obj.get('Brand', None)
    company_created = False
    if obj_owner_name:
        company, company_created = Company.objects.get_or_create(
            name=obj_owner_name, commit_desc='Firma utworzona automatycznie na podstawie API' ' ILiM'
        )
    else:
        company = None
    commit_desc = ""
    if not product:
        product = Product.objects.create(
            name=obj_product_name,
            code=code,
            company=company,
            commit_desc="Produkt utworzony automatycznie na podstawie skanu " "użytkownika",
        )
    else:
        if product.name:
            # Stored name differs from the API's: report it, do not overwrite.
            if obj_product_name and product.name != obj_product_name:
                create_bot_report(
                    product,
                    f"Wg. najnowszego odpytania w bazie ILiM nazwa tego produktu to:\"{obj_product_name}\"",
                    check_if_already_exists=not company_created,
                )
        else:
            # NOTE(review): when the API returned no data, obj_product_name is
            # None here and the (empty) product name is set to None — confirm
            # this is intended.
            if obj_product_name != code:
                commit_desc += 'Nazwa produktu zmieniona na podstawie bazy GS1. '
                product.name = obj_product_name
        if product.company:
            # Stored producer disagrees with the API's: report it.
            if (
                company
                and product.company.name
                and obj_owner_name
                and not ilim_compare_str(product.company.name, obj_owner_name)
            ):
                create_bot_report(
                    product,
                    f"Wg. najnowszego odpytania w bazie ILiM producent tego produktu to:\"{obj_owner_name}\"",
                    check_if_already_exists=not company_created,
                )
        else:
            commit_desc += 'Producent produktu zmieniony na podstawie bazy GS1. '
            product.company = company
    # Brand handling: ``obj and obj_brand`` short-circuits, so obj_brand is
    # only evaluated when obj was truthy (and obj_brand therefore bound).
    if product.company and obj and obj_brand:
        if product.brand:
            if product.brand.name != obj_brand:
                create_bot_report(
                    product,
                    f"Wg. najnowszego odpytania w bazie ILiM marka tego produktu to:\"{obj_brand}\"",
                    check_if_already_exists=True,
                )
        else:
            brand, _ = Brand.objects.get_or_create(
                name=obj_brand,
                company=product.company,
                commit_desc='Marka utworzona automatycznie na podstawie API' ' ILiM',
            )
            product.brand = brand
            commit_desc += 'Marka produktu zmieniona na podstawie bazy GS1. '
    product.save(commit_desc=commit_desc)
    return product
def create_bot_report(product, description, check_if_already_exists=False):
    """File an automatic ('krs-bot') report against ``product``.

    When ``check_if_already_exists`` is True, an identical existing bot report
    suppresses the new one.
    """
    duplicate_exists = (
        check_if_already_exists
        and Report.objects.filter(product=product, client='krs-bot', description=description).exists()
    )
    if duplicate_exists:
        return
    bot_report = Report(description=description)
    bot_report.product = product
    bot_report.client = 'krs-bot'
    bot_report.save()
def get_plScore(company):
    """Return the weighted 0-100 'plScore' for ``company``.

    Weights: 35% Polish capital, 30% Polish workers, 15% R&D in Poland,
    10% registered in Poland, 10% not a global entity. Returns None when
    any component is missing.
    """
    components = (
        company.plCapital,
        company.plWorkers,
        company.plRnD,
        company.plRegistered,
        company.plNotGlobEnt,
    )
    if any(value is None for value in components):
        return None
    return int(
        0.35 * company.plCapital
        + 0.30 * company.plWorkers
        + 0.15 * company.plRnD
        + 0.10 * company.plRegistered
        + 0.10 * company.plNotGlobEnt
    )
def shareholders_to_str(krs, id, indent):
    """Recursively render a company's shareholder tree as indented text.

    Args:
        krs: client exposing ``query_shareholders(id)``.
        id: KRS identifier of the company to expand.
        indent: leading whitespace prefix for this nesting level.

    Returns:
        One line per shareholder ('* name value/capital percent%', or dashes
        when the share value is unknown), with corporate shareholders'
        own shareholders expanded one indent level deeper.

    Fixes vs. the original: the accumulator no longer shadows the builtin
    ``str`` (which the function itself could otherwise not call), and the
    two formatting styles are unified as f-strings.
    """
    result = ''
    response = krs.query_shareholders(id)
    # Share capital of the queried company; denominator of the percentage.
    share_capital = response['data']['krs_podmioty.wartosc_kapital_zakladowy']
    for shareholder in response['layers']['wspolnicy']:
        share_value = shareholder.get('udzialy_wartosc', None)
        if share_value is None:
            result += f"{indent}* {shareholder['nazwa']} -------\n"
        else:
            # Share values arrive as locale-formatted numeric strings.
            percentage = 100 * locale.atof(share_value) / share_capital
            result += f"{indent}* {shareholder['nazwa']} {share_value}/{share_capital} {percentage:.0f}%\n"
        if shareholder['krs_id'] is not None:
            # Corporate shareholder: recurse into its own shareholder list.
            result += shareholders_to_str(krs, shareholder['krs_id'], indent + ' ')
    return result
def rem_dbl_newlines(text):
    """Collapse doubled newlines (CRLF or LF) in *text* to single ones.

    Parameter renamed from ``str`` to avoid shadowing the builtin; all call
    sites in this module pass it positionally.
    """
    return text.replace('\r\n\r\n', '\r\n').replace('\n\n', '\n')
def strip_dbl_spaces(text):
    """Collapse runs of spaces in *text* to a single space and trim the ends.

    Parameter renamed from ``str`` to avoid shadowing the builtin; all call
    sites in this module pass it positionally.
    """
    return re.sub(' +', ' ', text).strip()
def ilim_compare_str(s1, s2):
    """Case- and whitespace-insensitive string comparison used for ILiM names."""
    normalized_1 = strip_dbl_spaces(s1).upper()
    normalized_2 = strip_dbl_spaces(s2).upper()
    return normalized_1 == normalized_2
def strip_urls_newlines(text):
    """Remove URLs from *text*, collapse doubled newlines, and trim whitespace.

    Parameter renamed from ``str`` to avoid shadowing the builtin; all call
    sites in this module pass it positionally.
    """
    # Strip anything that looks like a URL (http(s)://, www., or a bare
    # domain followed by a path), including parenthesized parts.
    without_urls = re.sub(
        r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|'
        r'(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’'
        r']))',
        '',
        text,
    )
    collapsed = rem_dbl_newlines(without_urls)
    return collapsed.strip(' \t\n\r')
# Card/button colour types understood by the mobile client.
TYPE_RED = 'type_red'
TYPE_WHITE = 'type_white'
TYPE_GREY = 'type_grey'
# Template of per-company fields merged into scan results.
DEFAULT_COMPANY_DATA = {
    'name': None,
    'plCapital': None,
    'plCapital_notes': None,
    'plWorkers': None,
    'plWorkers_notes': None,
    'plRnD': None,
    'plRnD_notes': None,
    'plRegistered': None,
    'plRegistered_notes': None,
    'plNotGlobEnt': None,
    'plNotGlobEnt_notes': None,
    'plScore': None,
}
# Default "report this product" prompt shown with every scan.
DEFAULT_REPORT_DATA = {
    'text': 'Zgłoś jeśli posiadasz bardziej aktualne dane na temat tego produktu',
    'button_text': 'Zgłoś',
    'button_type': TYPE_WHITE,
}
# Skeleton of the scan result returned to the client.
DEFAULT_RESULT = {
    'product_id': None,
    'code': None,
    'name': None,
    'card_type': TYPE_WHITE,
    'altText': None,
}
# Per-scan statistics counters.
DEFAULT_STATS = {'was_verified': False, 'was_590': False, 'was_plScore': False}
CODE_PREFIX_TO_COUNTRY = {
"30": "Francja",
"31": "Francja",
"32": "Francja",
"33": "Francja",
"34": "Francja",
"35": "Francja",
"36": "Francja",
"37": "Francja",
"380": "Bułgaria",
"383": "Słowenia",
"385": "Chorwacja",
"387": "Bośnia-Hercegowina",
"40": "Niemcy",
"41": "Niemcy",
"42": "Niemcy",
"43": "Niemcy",
"44": "Niemcy",
"45": "Japonia",
"46": "Federacja Rosyjska",
"470": "Kirgistan",
"471": "Taiwan",
"474": "Estonia",
"475": "Łotwa",
"476": "Azerbejdżan",
"477": "Litwa",
"478": "Uzbekistan",
"479": "Sri Lanka",
"480": "Filipiny",
"481": "Białoruś",
"482": "Ukraina",
"484": "Mołdova",
"485": "Armenia",
"486": "Gruzja",
"487": "Kazachstan",
"489": "Hong Kong",
"49": "Japonia",
"50": "Wielka Brytania",
"520": "Grecja",
"528": "Liban",
"529": "Cypr",
"531": "Macedonia",
"535": "Malta",
"539": "Irlandia",
"54": "Belgia & Luksemburg",
"560": "Portugalia",
"569": "Islandia",
"57": "Dania",
# "590": "Polska",
"594": "Rumunia",
"599": "Węgry",
"600": "Południowa Afryka",
"601": "Południowa Afryka",
"608": "Bahrain",
"609": "Mauritius",
"611": "Maroko",
"613": "Algeria",
"619": "Tunezja",
"621": "Syria",
"622": "Egipt",
"624": "Libia",
"625": "Jordania",
"626": "Iran",
"627": "Kuwejt",
"628": "Arabia Saudyjska",
"64": "Finlandia",
"690": "Chiny",
"691": "Chiny",
"692": "Chiny",
"70": "Norwegia",
"729": "Izrael",
"73": "Szwecja",
"740": "Gwatemala",
"741": "Salwador",
"742": "Honduras",
"743": "Nikaragua",
"744": "Kostaryka",
"745": "Panama",
"746": "Dominikana",
"750": "Meksyk",
"759": "Wenezuela",
"76": "Szwajcaria",
"770": "Kolumbia",
"773": "Urugwaj",
"775": "Peru",
"777": "Boliwia",
"779": "Argentyna",
"780": "Chile",
"784": "Paragwaj",
"786": "Ekwador",
"789": "Brazylia",
"790": "Brazylia",
"80": "Włochy",
"81": "Włochy",
"82": "Włochy",
"83": "Włochy",
"84": "Hiszpania",
"850": "Kuba",
"858": "Słowacja",
"859": "Czechy",
"860": "Jugosławia",
"867": "Korea Północna",
"869": "Turcja",
"87": "Holandia",
"880": "Korea Południowa",
"885": "Tajlandia",
"888": "Singapur",
"890": "Indie",
"893": "Wietnam",
"899": "Indonezja",
"90": "Austria",
"91": "Austria",
"93": "Australia",
"94": "Nowa Zelandia",
"950": "EAN - IDA",
"955": "Malezja",
"958": "Makao",
}
|
en
| 0.914063
|
# code is EAN8 or EAN13 # not an EAN8 nor EAN13 code. Probably QR code or some error # we don't know the manufacturer # the code is registered in Poland, we want more data! # this is an ISBN/ISSN/ISMN number # (book, music album or magazine) # let's try to associate the code with a country # Ups. It seems to be an internal code # we know the manufacturer of the product # "590": "Polska",
| 1.949825
| 2
|
oct-qiskit-pulse/src/qoc_instruction_schedule_map.py
|
brosand/qiskit-terra
| 0
|
6628932
|
<gh_stars>0
from collections import defaultdict
from typing import Tuple, Iterable, Union
from qiskit import schedule, pulse
from qiskit.circuit import Gate
from qiskit.pulse import Play, Acquire
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.schedule import Schedule
class QOCInstructionScheduleMap(InstructionScheduleMap):
    """InstructionScheduleMap that synthesizes missing gate schedules with a
    quantum-optimal-control (QOC) optimizer instead of failing the lookup."""

    def __init__(self, qoc_optimizer):
        super().__init__()
        # Optimizer used to generate pulse sequences for gates that have no
        # predefined schedule in the map.
        self.qoc_optimizer = qoc_optimizer
        # FIXME don't use protected member
        # self._new_map =

    def get(self,
            # TODO figure out type hints for gate below
            instruction: Union[str, Gate],
            qubits: Union[int, Iterable[int]],
            *params: Union[int, float, complex],
            **kwparams: Union[int, float, complex]) -> Schedule:
        """Return the defined :py:class:`~qiskit.pulse.Schedule` for the given instruction on
        the given qubits.

        Args:
            instruction: Name of the instruction, or the Gate object itself.
            qubits: The qubits for the instruction.
            *params: Command parameters for generating the output schedule.
            **kwparams: Keyworded command parameters for generating the schedule.

        Returns:
            The Schedule defined for the input.
        """
        # TODO: fix below
        # schedule.draw()
        if isinstance(instruction, Gate):
            if self._map[instruction.name]:
                # A schedule is already registered under this gate name.
                # self.assert_has(instruction.name, qubits)
                # TODO: copied to_tuple because protected but feels redundent?
                schedule_generator = self._map[instruction.name].get(_to_tuple(qubits))
            else:
                # No predefined schedule: synthesize one with the QOC optimizer.
                print('gate hit')
                pulse_seq = (self.qoc_optimizer.get_pulse_schedule(instruction))
                out_schedule = pulse.Schedule()
                # NOTE(review): hard-coded to DriveChannel(1) — presumably a
                # single-qubit prototype; confirm before multi-qubit use.
                drive_chan = pulse.DriveChannel(1)
                # Figure out universal version for more drive channels
                out_schedule += Play(pulse.SamplePulse(pulse_seq), drive_chan) << out_schedule.duration
                schedule_generator = out_schedule
        else:
            self.assert_has(instruction, qubits)
            # TODO: copied to_tuple because protected but feels redundent?
            schedule_generator = self._map[instruction].get(_to_tuple(qubits))
        # don't forget in here to use _gate.to_matrix
        if callable(schedule_generator):
            return schedule_generator(*params, **kwparams)
        # otherwise this is just a Schedule
        return schedule_generator

    @classmethod
    def from_inst_map(cls, grape_optimizer, instruction_schedule_map, default_inst=['measure']):
        """Build a QOCInstructionScheduleMap from an existing map, copying only
        the ``default_inst`` entries; everything else is QOC-generated on demand.

        NOTE(review): the mutable default ``default_inst=['measure']`` is shared
        between calls — safe only as long as it is never mutated.
        """
        # probably replace this using get for measurement
        imap = {gate: instruction_schedule_map._map[gate] for gate in default_inst}
        imap = defaultdict(str, imap)
        qubit_instructions = instruction_schedule_map._qubit_instructions
        qoc_map = QOCInstructionScheduleMap(grape_optimizer)
        qoc_map._map = imap
        qoc_map._qubit_instructions = qubit_instructions
        return qoc_map
def _to_tuple(values: Union[int, Iterable[int]]) -> Tuple[int, ...]:
"""Return the input as a tuple.
Args:
values: An integer, or iterable of integers.
Returns:
The input values as a sorted tuple.
"""
try:
return tuple(values)
except TypeError:
return (values,)
|
from collections import defaultdict
from typing import Tuple, Iterable, Union
from qiskit import schedule, pulse
from qiskit.circuit import Gate
from qiskit.pulse import Play, Acquire
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.schedule import Schedule
class QOCInstructionScheduleMap(InstructionScheduleMap):
def __init__(self, qoc_optimizer):
super().__init__()
self.qoc_optimizer = qoc_optimizer
# FIXME don't use protected member
# self._new_map =
def get(self,
# TODO figure out type hints for gate below
instruction: Union[str, Gate],
qubits: Union[int, Iterable[int]],
*params: Union[int, float, complex],
**kwparams: Union[int, float, complex]) -> Schedule:
"""Return the defined :py:class:`~qiskit.pulse.Schedule` for the given instruction on
the given qubits.
Args:
instruction: Name of the instruction.
qubits: The qubits for the instruction.
*params: Command parameters for generating the output schedule.
**kwparams: Keyworded command parameters for generating the schedule.
Returns:
The Schedule defined for the input.
"""
# TODO: fix below
# schedule.draw()
if isinstance(instruction, Gate):
if self._map[instruction.name]:
# self.assert_has(instruction.name, qubits)
# TODO: copied to_tuple because protected but feels redundent?
schedule_generator = self._map[instruction.name].get(_to_tuple(qubits))
else:
print('gate hit')
pulse_seq = (self.qoc_optimizer.get_pulse_schedule(instruction))
out_schedule = pulse.Schedule()
drive_chan = pulse.DriveChannel(1)
# Figure out universal version for more drive channels
out_schedule += Play(pulse.SamplePulse(pulse_seq), drive_chan) << out_schedule.duration
schedule_generator = out_schedule
else:
self.assert_has(instruction, qubits)
# TODO: copied to_tuple because protected but feels redundent?
schedule_generator = self._map[instruction].get(_to_tuple(qubits))
# don't forget in here to use _gate.to_matrix
if callable(schedule_generator):
return schedule_generator(*params, **kwparams)
# otherwise this is just a Schedule
return schedule_generator
@classmethod
def from_inst_map(cls, grape_optimizer, instruction_schedule_map, default_inst=['measure']):
# probably replace this using get for measurement
imap = {gate: instruction_schedule_map._map[gate] for gate in default_inst}
imap = defaultdict(str, imap)
qubit_instructions = instruction_schedule_map._qubit_instructions
qoc_map = QOCInstructionScheduleMap(grape_optimizer)
qoc_map._map = imap
qoc_map._qubit_instructions = qubit_instructions
return qoc_map
def _to_tuple(values: Union[int, Iterable[int]]) -> Tuple[int, ...]:
"""Return the input as a tuple.
Args:
values: An integer, or iterable of integers.
Returns:
The input values as a sorted tuple.
"""
try:
return tuple(values)
except TypeError:
return (values,)
|
en
| 0.682232
|
# FIXME don't use protected member # self._new_map = # TODO figure out type hints for gate below Return the defined :py:class:`~qiskit.pulse.Schedule` for the given instruction on the given qubits. Args: instruction: Name of the instruction. qubits: The qubits for the instruction. *params: Command parameters for generating the output schedule. **kwparams: Keyworded command parameters for generating the schedule. Returns: The Schedule defined for the input. # TODO: fix below # schedule.draw() # self.assert_has(instruction.name, qubits) # TODO: copied to_tuple because protected but feels redundent? # Figure out universal version for more drive channels # TODO: copied to_tuple because protected but feels redundent? # don't forget in here to use _gate.to_matrix # otherwise this is just a Schedule # probably replace this using get for measurement Return the input as a tuple. Args: values: An integer, or iterable of integers. Returns: The input values as a sorted tuple.
| 2.492472
| 2
|
examples/paths.py
|
NicolasGensollen/streamfig
| 0
|
6628933
|
from Drawing import *
# Build a small stream-graph figure: nodes with presence intervals, links
# between them, highlighted rectangles, and a time line.
s = Drawing()
# Custom colours (name -> hex value).
s.addColor("grey", "#BBBBBB")
s.addColor("white", "#FFFFFF")
s.addColor("red", "#ff0000")
# Nodes with their presence intervals on the time axis
# (node "b" disappears between t=4 and t=5).
s.addNode("a", [(0,10)])
s.addNode("b", [(0,4),(5,10)])
s.addNode("c", [(4,9)])
s.addNode("d", [(1,3)])
# Links between two nodes over a time interval; ``height`` presumably
# controls the curvature/offset of the drawn link — confirm in Drawing docs.
s.addLink("a","b",1,3)
s.addLink("a","b",7,8)
s.addLink("b","c",6,9)
s.addLink("b","d",2,3,height=0.4)
s.addLink("a","c",4.5,7.5,height=0.4)
# Highlight rectangles spanning a node pair over a time interval
# (colour given either as an index or a registered colour name).
s.addRectangle("a","c",2,4,color=11)
s.addRectangle("b","d",7,8,color="red")
# Time axis with a tick every 2 units.
s.addTimeLine(ticks=2)
|
from Drawing import *
s = Drawing()
s.addColor("grey", "#BBBBBB")
s.addColor("white", "#FFFFFF")
s.addColor("red", "#ff0000")
s.addNode("a", [(0,10)])
s.addNode("b", [(0,4),(5,10)])
s.addNode("c", [(4,9)])
s.addNode("d", [(1,3)])
s.addLink("a","b",1,3)
s.addLink("a","b",7,8)
s.addLink("b","c",6,9)
s.addLink("b","d",2,3,height=0.4)
s.addLink("a","c",4.5,7.5,height=0.4)
s.addRectangle("a","c",2,4,color=11)
s.addRectangle("b","d",7,8,color="red")
s.addTimeLine(ticks=2)
|
none
| 1
| 2.55449
| 3
|
|
Chapter7_CNN/Chapter7_3_CNN_Optimization/cnnNormAugment.py
|
thisisjako/UdemyTF
| 0
|
6628934
|
import os
from typing import Tuple
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from mnistData import MNIST
LOGS_DIR = os.path.abspath("C:/Users/Jan/Dropbox/_Programmieren/UdemyTF/logs/")
if not os.path.exists(LOGS_DIR):
os.mkdir(LOGS_DIR)
MODEL_LOG_DIR = os.path.join(LOGS_DIR, "cnn_norm_augment")
def build_model(img_shape: Tuple[int, int, int], num_classes: int) -> Model:
    """Build a small VGG-style CNN.

    Two convolution blocks (32 then 64 filters; each block is two 3x3
    same-padded convolutions with ReLU followed by max-pooling), then a
    flatten and a softmax classification head.
    """
    input_img = Input(shape=img_shape)
    x = input_img
    # Two conv blocks: Conv-ReLU-Conv-ReLU-MaxPool with 32 resp. 64 filters.
    for num_filters in (32, 64):
        for _ in range(2):
            x = Conv2D(filters=num_filters, kernel_size=3, padding="same")(x)
            x = Activation("relu")(x)
        x = MaxPool2D()(x)
    x = Flatten()(x)
    x = Dense(units=num_classes)(x)
    y_pred = Activation("softmax")(x)
    model = Model(
        inputs=[input_img],
        outputs=[y_pred]
    )
    model.summary()
    return model
if __name__ == "__main__":
    # Load normalized MNIST and enlarge the training set with 5k augmented samples.
    data = MNIST(with_normalization=True)
    data.data_augmentation(augment_size=5_000)
    x_train_, x_val_, y_train_, y_val_ = data.get_splitted_train_validation_set()
    model = build_model(data.img_shape, data.num_classes)
    model.compile(
        loss="categorical_crossentropy",
        optimizer=Adam(learning_rate=0.0005),
        metrics=["accuracy"]
    )
    # Log training curves and the model graph for TensorBoard.
    tb_callback = TensorBoard(
        log_dir=MODEL_LOG_DIR,
        write_graph=True
    )
    model.fit(
        x=x_train_,
        y=y_train_,
        epochs=40,
        batch_size=128,
        verbose=1,
        validation_data=(x_val_, y_val_),
        callbacks=[tb_callback]
    )
|
import os
from typing import Tuple
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from mnistData import MNIST
LOGS_DIR = os.path.abspath("C:/Users/Jan/Dropbox/_Programmieren/UdemyTF/logs/")
if not os.path.exists(LOGS_DIR):
os.mkdir(LOGS_DIR)
MODEL_LOG_DIR = os.path.join(LOGS_DIR, "cnn_norm_augment")
def build_model(img_shape: Tuple[int, int, int], num_classes: int) -> Model:
input_img = Input(shape=img_shape)
x = Conv2D(filters=32, kernel_size=3, padding="same")(input_img)
x = Activation("relu")(x)
x = Conv2D(filters=32, kernel_size=3, padding="same")(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
x = Conv2D(filters=64, kernel_size=3, padding="same")(x)
x = Activation("relu")(x)
x = Conv2D(filters=64, kernel_size=3, padding="same")(x)
x = Activation("relu")(x)
x = MaxPool2D()(x)
x = Flatten()(x)
x = Dense(units=num_classes)(x)
y_pred = Activation("softmax")(x)
model = Model(
inputs=[input_img],
outputs=[y_pred]
)
model.summary()
return model
if __name__ == "__main__":
data = MNIST(with_normalization=True)
data.data_augmentation(augment_size=5_000)
x_train_, x_val_, y_train_, y_val_ = data.get_splitted_train_validation_set()
model = build_model(data.img_shape, data.num_classes)
model.compile(
loss="categorical_crossentropy",
optimizer=Adam(learning_rate=0.0005),
metrics=["accuracy"]
)
tb_callback = TensorBoard(
log_dir=MODEL_LOG_DIR,
write_graph=True
)
model.fit(
x=x_train_,
y=y_train_,
epochs=40,
batch_size=128,
verbose=1,
validation_data=(x_val_, y_val_),
callbacks=[tb_callback]
)
|
none
| 1
| 2.854217
| 3
|
|
train.py
|
Kash6/AnimeBot
| 0
|
6628935
|
<filename>train.py
#Train function
def train():
    """Train the CartoonGAN generator/discriminator pair.

    Resumes from the epoch-65 checkpoints, trains up to 100 epochs, and saves
    (and downloads, via Colab ``files``) both networks plus an iteration
    marker after every epoch.

    NOTE(review): relies on module-level names imported elsewhere in the
    notebook/script (Generator, Discriminator, ContentLoss, AdversialLoss,
    get_dataloader, AdamW, nn, vutils, plt, np, pickle, files, torch).

    Bug fixed vs. the original: the snapshot condition referenced an
    undefined name ``dataloader`` (NameError on the last epoch); it now uses
    ``real_dataloader``.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    # Config
    batch_size = 4
    image_size = 256
    learning_rate = 1e-3
    beta1, beta2 = (.5, .99)
    weight_decay = 1e-3
    epochs = 100
    # Models
    netD = Discriminator().to(device)
    netG = Generator().to(device)
    optimizerD = AdamW(netD.parameters(), lr=learning_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    optimizerG = AdamW(netG.parameters(), lr=learning_rate, betas=(beta1, beta2), weight_decay=weight_decay)
    # Labels: PatchGAN-style targets sized to the discriminator output grid.
    cartoon_labels = torch.ones(batch_size, 1, image_size // 4, image_size // 4).to(device)
    fake_labels = torch.zeros(batch_size, 1, image_size // 4, image_size // 4).to(device)
    # Loss functions
    content_loss = ContentLoss().to(device)
    adv_loss = AdversialLoss(cartoon_labels, fake_labels).to(device)
    BCE_loss = nn.BCELoss().to(device)
    # Dataloaders: real photos, cartoon frames, and edge-smoothed cartoons.
    real_dataloader = get_dataloader("/content/drive/MyDrive/Colab/pic2anime/trainA", size = image_size, bs = batch_size)
    cartoon_dataloader = get_dataloader("/content/drive/MyDrive/Colab/pic2anime/trainB", size = image_size, bs = batch_size)
    edge_dataloader = get_dataloader("/content/drive/MyDrive/Colab/pic2anime/trainB_smooth", size = image_size, bs = batch_size)
    last_epoch = 0
    last_i = 0
    # --------------------------------------------------------------------- #
    # Training Loop
    # Fixed batch used to visualize generator progress over time.
    tracked_images = next(iter(real_dataloader))[0].to(device)
    original_images = tracked_images.detach().cpu()
    grid = vutils.make_grid(original_images, padding=2, normalize=True, nrow=3)
    plt.imsave(f"/content/cartoon-gan/results/original.png", np.transpose(grid, (1,2,0)).numpy())
    # Resume from the epoch-65 checkpoints and iteration marker.
    netG.load_state_dict(torch.load("_trained_netG65.pth"))
    netD.load_state_dict(torch.load("_trained_netD65.pth"))
    with open("iter_data65.pickle", "rb") as handle:
        last_epoch, last_i = pickle.load(handle)
    # Lists to keep track of progress
    img_list = []
    G_losses = []
    D_losses = []
    iters = 0
    start_epoch = last_epoch
    start_i = last_i
    print("Starting Training Loop...")
    for epoch in range(start_epoch, epochs):
        real_dl_iter = iter(real_dataloader)
        cartoon_dl_iter = iter(cartoon_dataloader)
        edge_dl_iter = iter(edge_dataloader)
        # NOTE(review): edge_dl_iter's length is not considered here —
        # presumably it is at least as long as the cartoon loader; confirm.
        iterations = min(len(real_dl_iter), len(cartoon_dl_iter))
        for i in range(start_i, iterations):
            real_data = next(real_dl_iter)
            cartoon_data = next(cartoon_dl_iter)
            edge_data = next(edge_dl_iter)
            # ---- Discriminator update ----
            netD.zero_grad()
            # Format batch.
            cartoon_data = cartoon_data.to(device)
            edge_data = edge_data.to(device)
            real_data = real_data.to(device)
            # Generate image
            generated_data = netG(real_data)
            # Forward pass all batches through D.
            cartoon_pred = netD(cartoon_data)  # .view(-1)
            edge_pred = netD(edge_data)  # .view(-1)
            generated_pred = netD(generated_data)  # .view(-1)
            print(generated_data.is_cuda, real_data.is_cuda)
            errD = adv_loss(cartoon_pred, generated_pred, edge_pred)
            # retain_graph: generated_data's graph is reused for the G update below.
            errD.backward(retain_graph=True)
            D_x = cartoon_pred.mean().item()  # Should be close to 1
            optimizerD.step()
            # ---- Generator update ----
            netG.zero_grad()
            generated_pred = netD(generated_data)  # .view(-1)
            print(generated_data.is_cuda, real_data.is_cuda)
            print("generated_pred:", generated_pred.is_cuda, "cartoon_labels:", cartoon_labels.is_cuda)
            errG = BCE_loss(generated_pred, cartoon_labels) + content_loss(generated_data, real_data)
            errG.backward()
            D_G_z2 = generated_pred.mean().item()  # Should be close to 1
            optimizerG.step()
            if i % 50 == 0:
                print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): null / %.4f'
                      % (epoch, epochs, i, len(real_dataloader),
                         errD.item(), errG.item(), D_x, D_G_z2))
            G_losses.append(errG.item())
            D_losses.append(errD.item())
            # BUG FIX: the original referenced an undefined name 'dataloader'.
            if (iters % 500 == 0) or ((epoch == epochs-1) and (i == len(real_dataloader)-1)):
                with torch.no_grad():
                    # NOTE(review): if the dataloader yields batches,
                    # tracked_images is already 4-D and unsqueeze(0) makes this
                    # 5-D — confirm against get_dataloader's output shape.
                    fake = netG(tracked_images.unsqueeze(0)).detach().cpu()
                img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
            iters += 1
        # End of epoch: checkpoint both networks and the resume marker.
        last_i = i
        start_i = 0
        last_epoch = epoch
        torch.save(netG.state_dict(), "_trained_netG"+str(epoch)+".pth")
        torch.save(netD.state_dict(), "_trained_netD"+str(epoch)+".pth")
        files.download("_trained_netG"+str(epoch)+".pth")
        files.download("_trained_netD"+str(epoch)+".pth")
        with open("iter_data"+str(epoch)+".pickle", "wb") as handle:
            pickle.dump([last_epoch, last_i], handle)
        print("file saved")
        files.download("iter_data"+str(epoch)+".pickle")
|
<filename>train.py
#Train function
def train():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Config
batch_size = 4
image_size = 256
learning_rate = 1e-3
beta1, beta2 = (.5, .99)
weight_decay = 1e-3
epochs = 100
# Models
netD = Discriminator().to(device)
netG = Generator().to(device)
optimizerD = AdamW(netD.parameters(), lr=learning_rate, betas=(beta1, beta2), weight_decay=weight_decay)
optimizerG = AdamW(netG.parameters(), lr=learning_rate, betas=(beta1, beta2), weight_decay=weight_decay)
# Labels
cartoon_labels = torch.ones (batch_size, 1, image_size // 4, image_size // 4).to(device)
fake_labels = torch.zeros(batch_size, 1, image_size // 4, image_size // 4).to(device)
# Loss functions
content_loss = ContentLoss().to(device)
adv_loss = AdversialLoss(cartoon_labels, fake_labels).to(device)
BCE_loss = nn.BCELoss().to(device)
# Dataloaders
real_dataloader = get_dataloader("/content/drive/MyDrive/Colab/pic2anime/trainA", size = image_size, bs = batch_size)
cartoon_dataloader = get_dataloader("/content/drive/MyDrive/Colab/pic2anime/trainB", size = image_size, bs = batch_size)
edge_dataloader = get_dataloader("/content/drive/MyDrive/Colab/pic2anime/trainB_smooth", size = image_size, bs = batch_size)
last_epoch = 0
last_i = 0
# --------------------------------------------------------------------------------------------- #
# Training Loop
tracked_images = next(iter(real_dataloader))[0].to(device)
original_images = tracked_images.detach().cpu()
grid = vutils.make_grid(original_images, padding=2, normalize=True, nrow=3)
plt.imsave(f"/content/cartoon-gan/results/original.png", np.transpose(grid, (1,2,0)).numpy())
netG.load_state_dict(torch.load("_trained_netG65.pth"))
netD.load_state_dict(torch.load("_trained_netD65.pth"))
with open("iter_data65.pickle", "rb") as handle:
last_epoch, last_i = pickle.load(handle)
# Lists to keep track of progress"
img_list = []
G_losses = []
D_losses = []
iters = 0
start_epoch = last_epoch
start_i = last_i
print("Starting Training Loop...")
# For each epoch.
for epoch in range(start_epoch,epochs):
real_dl_iter = iter(real_dataloader)
cartoon_dl_iter = iter(cartoon_dataloader)
edge_dl_iter = iter(edge_dataloader)
iterations = min(len(real_dl_iter), len(cartoon_dl_iter))
for i in range(start_i,iterations):
real_data = next(real_dl_iter)
cartoon_data = next(cartoon_dl_iter)
edge_data = next(edge_dl_iter)
netD.zero_grad()
# Format batch.
cartoon_data = cartoon_data.to(device)
edge_data = edge_data.to(device)
real_data = real_data.to(device)
# Generate image
generated_data = netG(real_data)
# Forward pass all batches through D.
cartoon_pred = netD(cartoon_data) #.view(-1)
edge_pred = netD(edge_data) #.view(-1)
generated_pred = netD(generated_data) #.view(-1)
print(generated_data.is_cuda, real_data.is_cuda)
errD = adv_loss(cartoon_pred, generated_pred, edge_pred)
errD.backward(retain_graph=True)
D_x = cartoon_pred.mean().item() # Should be close to 1
optimizerD.step()
netG.zero_grad()
generated_pred = netD(generated_data) #.view(-1)
print(generated_data.is_cuda, real_data.is_cuda)
print("generated_pred:", generated_pred.is_cuda, "cartoon_labels:", cartoon_labels.is_cuda)
errG = BCE_loss(generated_pred, cartoon_labels) + content_loss(generated_data, real_data)
errG.backward()
D_G_z2 = generated_pred.mean().item() # Should be close to 1
optimizerG.step()
if i % 50 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): null / %.4f'
% (epoch, epochs, i, len(real_dataloader),
errD.item(), errG.item(), D_x, D_G_z2))
G_losses.append(errG.item())
D_losses.append(errD.item())
if (iters % 500 == 0) or ((epoch == epochs-1) and (i == len(dataloader)-1)):
with torch.no_grad():
fake = netG(tracked_images.unsqueeze(0)).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
last_i = i
start_i = 0
last_epoch = epoch
torch.save(netG.state_dict(), "_trained_netG"+str(epoch)+".pth")
torch.save(netD.state_dict(), "_trained_netD"+str(epoch)+".pth")
files.download("_trained_netG"+str(epoch)+".pth")
files.download("_trained_netD"+str(epoch)+".pth")
with open("iter_data"+str(epoch)+".pickle", "wb") as handle:
pickle.dump([last_epoch, last_i], handle)
print("file saved")
files.download("iter_data"+str(epoch)+".pickle")
|
en
| 0.548263
|
#Train function # Config # Models # Labels # Loss functions # Dataloaders # --------------------------------------------------------------------------------------------- # # Training Loop # Lists to keep track of progress" # For each epoch. # Format batch. # Generate image # Forward pass all batches through D. #.view(-1) #.view(-1) #.view(-1) # Should be close to 1 #.view(-1) # Should be close to 1
| 2.472012
| 2
|
cadnano/extras/wrapapi.py
|
mctrinh/cadnano2.5
| 69
|
6628936
|
import inspect
from types import FunctionType
def getPublicMethods(cls):
return [(x, y) for x, y in cls.__dict__.items()
if (isinstance(y, FunctionType) and not x.startswith('_'))
]
# end def
def copyWrapAPI(cls_from, cls_to, attr_str='model'):
"""Use same `eval` trick as decorator module on PyPi to match
function signatures
see also: https://emptysqua.re/blog/copying-a-python-functions-signature/
But this supports type annotations too now
maybe try to use functools.update_wrapper in the future
"""
to_name = cls_to.__qualname__
module = cls_to.__module__
self_attr_str = 'self.%s' % (attr_str)
for name, f in getPublicMethods(cls_from):
# 1. Copy call signature Python 3 only use getargspec, formatargspec for 2.7
argspec_1 = inspect.signature(f)
formatted_args_1 = str(argspec_1)
# print(formatted_args_1)
# strip annotations from parameters
argspec_2 = list(argspec_1.parameters)
argspec_2[0] = (self_attr_str, ) # swap in reference
# print(argspec_2)
formatted_args_2 = ', '.join([x[0] if isinstance(x, tuple) else x for x in argspec_2])
# print(formatted_args_2)
f_wrapper_str = 'def func_wrapper%s: \r\n return func(%s)' % (formatted_args_1,
formatted_args_2)
# print(f_wrapper_str)
# 2. Create wrapper function
code = compile(f_wrapper_str, '<string>', 'exec')
ns = {'func': f}
exec(code, ns)
f_wrapper = ns['func_wrapper']
# 3. set wrapper function attributes
f_wrapper.__name__ = name
f_wrapper.__qualname__ = '%s.%s' % (to_name, name)
f_wrapper.__module__ = module
for attr in ['__doc__', '__annotations__']:
setattr(f_wrapper, attr, getattr(f, attr))
# print("anno", f.__annotations__)
# 4. Assign wrapper function to the class
setattr(cls_to, name, f_wrapper)
# end def
if __name__ == '__main__':
class Foo(object):
def awesome(self, a: int): # noqa
print(7777)
def dope(self):
return 9999
def _soPrivate(self):
pass
class Bar(object):
def __init__(self, model):
self.model = model
copyWrapAPI(Foo, Bar)
print(getPublicMethods(Foo))
print(getPublicMethods(Bar))
print(Foo.dope(8))
|
import inspect
from types import FunctionType
def getPublicMethods(cls):
return [(x, y) for x, y in cls.__dict__.items()
if (isinstance(y, FunctionType) and not x.startswith('_'))
]
# end def
def copyWrapAPI(cls_from, cls_to, attr_str='model'):
"""Use same `eval` trick as decorator module on PyPi to match
function signatures
see also: https://emptysqua.re/blog/copying-a-python-functions-signature/
But this supports type annotations too now
maybe try to use functools.update_wrapper in the future
"""
to_name = cls_to.__qualname__
module = cls_to.__module__
self_attr_str = 'self.%s' % (attr_str)
for name, f in getPublicMethods(cls_from):
# 1. Copy call signature Python 3 only use getargspec, formatargspec for 2.7
argspec_1 = inspect.signature(f)
formatted_args_1 = str(argspec_1)
# print(formatted_args_1)
# strip annotations from parameters
argspec_2 = list(argspec_1.parameters)
argspec_2[0] = (self_attr_str, ) # swap in reference
# print(argspec_2)
formatted_args_2 = ', '.join([x[0] if isinstance(x, tuple) else x for x in argspec_2])
# print(formatted_args_2)
f_wrapper_str = 'def func_wrapper%s: \r\n return func(%s)' % (formatted_args_1,
formatted_args_2)
# print(f_wrapper_str)
# 2. Create wrapper function
code = compile(f_wrapper_str, '<string>', 'exec')
ns = {'func': f}
exec(code, ns)
f_wrapper = ns['func_wrapper']
# 3. set wrapper function attributes
f_wrapper.__name__ = name
f_wrapper.__qualname__ = '%s.%s' % (to_name, name)
f_wrapper.__module__ = module
for attr in ['__doc__', '__annotations__']:
setattr(f_wrapper, attr, getattr(f, attr))
# print("anno", f.__annotations__)
# 4. Assign wrapper function to the class
setattr(cls_to, name, f_wrapper)
# end def
if __name__ == '__main__':
class Foo(object):
def awesome(self, a: int): # noqa
print(7777)
def dope(self):
return 9999
def _soPrivate(self):
pass
class Bar(object):
def __init__(self, model):
self.model = model
copyWrapAPI(Foo, Bar)
print(getPublicMethods(Foo))
print(getPublicMethods(Bar))
print(Foo.dope(8))
|
en
| 0.394074
|
# end def Use same `eval` trick as decorator module on PyPi to match function signatures see also: https://emptysqua.re/blog/copying-a-python-functions-signature/ But this supports type annotations too now maybe try to use functools.update_wrapper in the future # 1. Copy call signature Python 3 only use getargspec, formatargspec for 2.7 # print(formatted_args_1) # strip annotations from parameters # swap in reference # print(argspec_2) # print(formatted_args_2) # print(f_wrapper_str) # 2. Create wrapper function # 3. set wrapper function attributes # print("anno", f.__annotations__) # 4. Assign wrapper function to the class # end def # noqa
| 2.466385
| 2
|
raybot/actions/messages.py
|
Zverik/bot_na_rayone
| 32
|
6628937
|
<reponame>Zverik/bot_na_rayone
from raybot import config
from raybot.bot import bot
from raybot.util import get_user, tr
from raybot.model import db
from asyncio import sleep
from aiogram import types
async def broadcast(message: types.Message):
mods = [config.ADMIN] + (await db.get_role_users('moderator'))
for user_id in mods:
await bot.send_message(user_id, tr('do_reply'))
await message.forward(user_id)
await sleep(0.5)
async def broadcast_str(message: str, except_id: int = None):
mods = [config.ADMIN] + (await db.get_role_users('moderator'))
for user_id in mods:
if user_id != except_id:
await bot.send_message(user_id, message)
await sleep(0.5)
async def process_reply(message: types.Message):
info = await get_user(message.from_user)
to = await get_user(message.reply_to_message.forward_from)
if info.is_moderator():
# Notify other moderators that it has been replied
# TODO: can we do it just once per user?
await broadcast_str(tr('reply_sent', to.name),
info.id, disable_notification=True)
await bot.send_message(to.id, tr('do_reply'))
await message.forward(to.id)
|
from raybot import config
from raybot.bot import bot
from raybot.util import get_user, tr
from raybot.model import db
from asyncio import sleep
from aiogram import types
async def broadcast(message: types.Message):
mods = [config.ADMIN] + (await db.get_role_users('moderator'))
for user_id in mods:
await bot.send_message(user_id, tr('do_reply'))
await message.forward(user_id)
await sleep(0.5)
async def broadcast_str(message: str, except_id: int = None):
mods = [config.ADMIN] + (await db.get_role_users('moderator'))
for user_id in mods:
if user_id != except_id:
await bot.send_message(user_id, message)
await sleep(0.5)
async def process_reply(message: types.Message):
info = await get_user(message.from_user)
to = await get_user(message.reply_to_message.forward_from)
if info.is_moderator():
# Notify other moderators that it has been replied
# TODO: can we do it just once per user?
await broadcast_str(tr('reply_sent', to.name),
info.id, disable_notification=True)
await bot.send_message(to.id, tr('do_reply'))
await message.forward(to.id)
|
en
| 0.909027
|
# Notify other moderators that it has been replied # TODO: can we do it just once per user?
| 2.33035
| 2
|
{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/views.py
|
bahattincinic/cookiecutter-apistar
| 2
|
6628938
|
from apistar import Response
from apistar.backends.sqlalchemy_backend import Session
from .models import Example
from .schema import ExampleCreate, ExampleList
def list_view(session: Session) -> Response:
return Response(content=[
ExampleList(item)
for item in session.query(Example).all()
])
def create_view(session: Session, data: ExampleCreate) -> Response:
instance = Example(name=data['name'])
session.add(instance)
session.flush()
return Response(status=201, content=ExampleList(instance))
|
from apistar import Response
from apistar.backends.sqlalchemy_backend import Session
from .models import Example
from .schema import ExampleCreate, ExampleList
def list_view(session: Session) -> Response:
return Response(content=[
ExampleList(item)
for item in session.query(Example).all()
])
def create_view(session: Session, data: ExampleCreate) -> Response:
instance = Example(name=data['name'])
session.add(instance)
session.flush()
return Response(status=201, content=ExampleList(instance))
|
none
| 1
| 2.506975
| 3
|
|
aasaan/aasaan/urls.py
|
deepakkt/aasaan
| 0
|
6628939
|
<reponame>deepakkt/aasaan<filename>aasaan/aasaan/urls.py
import contacts.urls
import travels.urls
import ipcaccounts.urls
import schedulemaster.urls
from django.contrib import admin
from django.urls import include, path
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from api.aasaan_api import (ScheduleResource, ZoneResource,
RoleResource, ProgramResource)
from tastypie.api import Api
from django.shortcuts import redirect
aasaan_v1_api = Api(api_name='v1')
aasaan_v1_api.register(ScheduleResource())
aasaan_v1_api.register(ZoneResource())
aasaan_v1_api.register(RoleResource())
aasaan_v1_api.register(ProgramResource())
urlpatterns = [
path('', lambda _: redirect('/accounts/google/login/?process=login'), name="login"),
path('admin/', admin.site.urls),
path('aasaan_api/', include(aasaan_v1_api.urls)),
path('contacts/', include(contacts.urls, namespace='contacts'), name='contacts'),
path('admin/ipcaccounts/', include(ipcaccounts.urls, namespace='ipcaccounts'), name='ipcaccounts'),
path('schedules/', include(schedulemaster.urls, namespace='schedules'), name='schedules'),
path('admin/travels/', include(travels.urls, namespace='travels'), name='travels'),
url('tinymce/', include('tinymce.urls')),
url('accounts/', include('allauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.ASYNC:
urlpatterns += [url('django-rq/', include('django_rq.urls'))]
|
import contacts.urls
import travels.urls
import ipcaccounts.urls
import schedulemaster.urls
from django.contrib import admin
from django.urls import include, path
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from api.aasaan_api import (ScheduleResource, ZoneResource,
RoleResource, ProgramResource)
from tastypie.api import Api
from django.shortcuts import redirect
aasaan_v1_api = Api(api_name='v1')
aasaan_v1_api.register(ScheduleResource())
aasaan_v1_api.register(ZoneResource())
aasaan_v1_api.register(RoleResource())
aasaan_v1_api.register(ProgramResource())
urlpatterns = [
path('', lambda _: redirect('/accounts/google/login/?process=login'), name="login"),
path('admin/', admin.site.urls),
path('aasaan_api/', include(aasaan_v1_api.urls)),
path('contacts/', include(contacts.urls, namespace='contacts'), name='contacts'),
path('admin/ipcaccounts/', include(ipcaccounts.urls, namespace='ipcaccounts'), name='ipcaccounts'),
path('schedules/', include(schedulemaster.urls, namespace='schedules'), name='schedules'),
path('admin/travels/', include(travels.urls, namespace='travels'), name='travels'),
url('tinymce/', include('tinymce.urls')),
url('accounts/', include('allauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.ASYNC:
urlpatterns += [url('django-rq/', include('django_rq.urls'))]
|
none
| 1
| 1.808372
| 2
|
|
sympy/core/tests/test_random.py
|
utkarshdeorah/sympy
| 1
|
6628940
|
<reponame>utkarshdeorah/sympy
import random
from sympy.core.symbol import Symbol, symbols
from sympy.functions.elementary.trigonometric import sin, acos
from sympy.abc import x
def test_random():
random.seed(42)
a = random.random()
random.seed(42)
Symbol('z').is_finite
b = random.random()
assert a == b
got = set()
for i in range(2):
random.seed(28)
m0, m1 = symbols('m_0 m_1', real=True)
_ = acos(-m0/m1)
got.add(random.uniform(0,1))
assert len(got) == 1
random.seed(10)
y = 0
for i in range(4):
y += sin(random.uniform(-10,10) * x)
random.seed(10)
z = 0
for i in range(4):
z += sin(random.uniform(-10,10) * x)
assert y == z
|
import random
from sympy.core.symbol import Symbol, symbols
from sympy.functions.elementary.trigonometric import sin, acos
from sympy.abc import x
def test_random():
random.seed(42)
a = random.random()
random.seed(42)
Symbol('z').is_finite
b = random.random()
assert a == b
got = set()
for i in range(2):
random.seed(28)
m0, m1 = symbols('m_0 m_1', real=True)
_ = acos(-m0/m1)
got.add(random.uniform(0,1))
assert len(got) == 1
random.seed(10)
y = 0
for i in range(4):
y += sin(random.uniform(-10,10) * x)
random.seed(10)
z = 0
for i in range(4):
z += sin(random.uniform(-10,10) * x)
assert y == z
|
none
| 1
| 3.204187
| 3
|
|
test-mq.py
|
yuyongpeng/raspberrypi
| 0
|
6628941
|
# -*- coding:UTF-8 -*-
import time
import os
import commands
import subprocess
import pika
parameters = pika.URLParameters('amqp://hardchain:pswHd@localhost:5672/%2F')
connection = pika.BlockingConnection(parameters)
channelr = connection.channel()
#声明queue
channelr.queue_declare(queue='electron3', durable=False) # 若声明过,则换一个名字
#n RabbitMQ a message can never be sent directly to the queue, it always needs to go through an exchange.
channelr.basic_publish(exchange='xxx',
routing_key='electron3',
body='test',
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
)
)
print(" [x] Sent 'Hello World!'")
connection.close()
|
# -*- coding:UTF-8 -*-
import time
import os
import commands
import subprocess
import pika
parameters = pika.URLParameters('amqp://hardchain:pswHd@localhost:5672/%2F')
connection = pika.BlockingConnection(parameters)
channelr = connection.channel()
#声明queue
channelr.queue_declare(queue='electron3', durable=False) # 若声明过,则换一个名字
#n RabbitMQ a message can never be sent directly to the queue, it always needs to go through an exchange.
channelr.basic_publish(exchange='xxx',
routing_key='electron3',
body='test',
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
)
)
print(" [x] Sent 'Hello World!'")
connection.close()
|
en
| 0.528594
|
# -*- coding:UTF-8 -*- #声明queue # 若声明过,则换一个名字 #n RabbitMQ a message can never be sent directly to the queue, it always needs to go through an exchange. # make message persistent
| 2.662588
| 3
|
pynailgun/test_ng.py
|
mwhittaker/nailgun
| 0
|
6628942
|
import subprocess
import os
import time
import unittest
import tempfile
import shutil
import uuid
import sys
from pynailgun import NailgunException, NailgunConnection
is_py2 = sys.version[0] == '2'
if is_py2:
from StringIO import StringIO
else:
from io import StringIO
if os.name == 'posix':
def transport_exists(transport_file):
return os.path.exists(transport_file)
if os.name == 'nt':
import ctypes
from ctypes.wintypes import WIN32_FIND_DATAW as WIN32_FIND_DATA
INVALID_HANDLE_VALUE = -1
FindFirstFile = ctypes.windll.kernel32.FindFirstFileW
FindClose = ctypes.windll.kernel32.FindClose
# on windows os.path.exists doen't allow to check reliably that a pipe exists
# (os.path.exists tries to open connection to a pipe)
def transport_exists(transport_path):
wfd = WIN32_FIND_DATA()
handle = FindFirstFile(transport_path, ctypes.byref(wfd))
result = handle != INVALID_HANDLE_VALUE
FindClose(handle)
return result
class TestNailgunConnection(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestNailgunConnection, self).__init__(*args, **kwargs)
self.heartbeat_timeout_ms = 10000
def setUp(self):
self.setUpTransport()
self.startNailgun()
def setUpTransport(self):
self.tmpdir = tempfile.mkdtemp()
if os.name == 'posix':
self.transport_file = os.path.join(self.tmpdir, 'sock')
self.transport_address = 'local:{0}'.format(self.transport_file)
else:
pipe_name = u'nailgun-test-{0}'.format(uuid.uuid4().hex)
self.transport_address = u'local:{0}'.format(pipe_name)
self.transport_file = u'\\\\.\\pipe\{0}'.format(pipe_name)
def getClassPath(self):
cp = [
'nailgun-server/target/nailgun-server-0.9.3-SNAPSHOT-uber.jar',
'nailgun-examples/target/nailgun-examples-0.9.3-SNAPSHOT.jar',
]
if os.name == 'nt':
return ';'.join(cp)
return ':'.join(cp)
def startNailgun(self):
if os.name == 'posix':
def preexec_fn():
# Close any open file descriptors to further separate buckd from its
# invoking context (e.g. otherwise we'd hang when running things like
# `ssh localhost buck clean`).
dev_null_fd = os.open("/dev/null", os.O_RDWR)
os.dup2(dev_null_fd, 0)
os.dup2(dev_null_fd, 2)
os.close(dev_null_fd)
creationflags = 0
else:
preexec_fn = None
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx#DETACHED_PROCESS
DETACHED_PROCESS = 0x00000008
creationflags = DETACHED_PROCESS
stdout = None
if os.name == 'posix':
stdout=subprocess.PIPE
log_config_file = os.path.join(self.tmpdir, 'logging.properties')
self.log_file = os.path.join(self.tmpdir, 'test_ng.log')
with open(log_config_file, 'w') as config_file:
config_file.write('handlers = java.util.logging.FileHandler\n')
config_file.write('.level = ALL\n')
config_file.write('java.util.logging.FileHandler.level = ALL\n')
config_file.write('java.util.logging.FileHandler.pattern = ' + self.log_file + '\n')
config_file.write('java.util.logging.FileHandler.count = 1\n')
config_file.write('java.util.logging.FileHandler.formatter = java.util.logging.SimpleFormatter\n')
cmd = ['java', '-Djna.nosys=true', '-Djava.util.logging.config.file=' + log_config_file, '-classpath', self.getClassPath()]
debug_mode = os.environ.get('DEBUG_MODE') or ''
if debug_mode != '':
suspend = 'n' if debug_mode == '2' else 'y'
cmd.append('-agentlib:jdwp=transport=dt_socket,address=localhost:8888,server=y,suspend=' + suspend)
cmd = cmd + ['com.martiansoftware.nailgun.NGServer', self.transport_address, str(self.heartbeat_timeout_ms)]
self.ng_server_process = subprocess.Popen(
cmd,
preexec_fn=preexec_fn,
creationflags=creationflags,
stdout=stdout,
)
self.assertIsNone(self.ng_server_process.poll())
if os.name == 'posix':
# on *nix we have to wait for server to be ready to accept connections
while True:
the_first_line = str(self.ng_server_process.stdout.readline().strip())
if "NGServer" in the_first_line and "started" in the_first_line:
break
if the_first_line is None or the_first_line == '':
break
else:
for _ in range(0, 600):
# on windows it is OK to rely on existence of the pipe file
if not transport_exists(self.transport_file):
time.sleep(0.01)
else:
break
self.assertTrue(transport_exists(self.transport_file))
def tearDown(self):
try:
with NailgunConnection(
self.transport_address,
cwd=os.getcwd(),
stderr=None,
stdin=None,
stdout=None) as c:
c.send_command('ng-stop')
except NailgunException as e:
# stopping server is a best effort
# if something wrong has happened, we will kill it anyways
pass
# Python2 compatible wait with timeout
process_exit_code = None
for _ in range(0, 500):
process_exit_code = self.ng_server_process.poll()
if process_exit_code is not None:
break
time.sleep(0.02) # 1 second total
if process_exit_code is None:
# some test has failed, ng-server was not stopped. killing it
self.ng_server_process.kill()
debug_logs = os.environ.get('DEBUG_LOGS') or ''
if debug_logs != '':
with open(self.log_file, 'r') as log_file:
print('NAILGUN SERVER LOG:\n')
print(log_file.read())
shutil.rmtree(self.tmpdir)
class TestNailgunConnectionMain(TestNailgunConnection):
def __init__(self, *args, **kwargs):
super(TestNailgunConnectionMain, self).__init__(*args, **kwargs)
def test_nailgun_stats(self):
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output) as c:
exit_code = c.send_command('ng-stats')
self.assertEqual(exit_code, 0)
actual_out = output.getvalue().strip()
expected_out = 'com.martiansoftware.nailgun.builtins.NGServerStats: 1/1'
self.assertEqual(actual_out, expected_out)
def test_nailgun_exit_code(self):
output = StringIO()
expected_exit_code = 10
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Exit', [str(expected_exit_code)])
self.assertEqual(exit_code, expected_exit_code)
def test_nailgun_stdin(self):
lines = [str(i) for i in range(100)]
echo = '\n'.join(lines)
output = StringIO()
input = StringIO(echo)
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=input,
stdout=output) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Echo')
self.assertEqual(exit_code, 0)
actual_out = output.getvalue().strip()
self.assertEqual(actual_out, echo)
def test_nailgun_default_streams(self):
with NailgunConnection(self.transport_address) as c:
exit_code = c.send_command('ng-stats')
self.assertEqual(exit_code, 0)
def test_nailgun_heartbeats(self):
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=0.1) as c:
# just run Heartbeat nail for 5 seconds. During this period there should be
# heartbeats received and printed back
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['5000'])
self.assertTrue(output.getvalue().count('H') > 10)
def test_nailgun_no_heartbeat(self):
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=0) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['3000'])
self.assertTrue(output.getvalue().count('H') == 0)
def test_stress_nailgun_socket_close_without_race_condition(self):
output = StringIO()
for i in range (1000):
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=0.001) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['10'])
self.assertEqual(exit_code, 0)
class TestNailgunConnectionSmallHeartbeatTimeout(TestNailgunConnection):
def __init__(self, *args, **kwargs):
super(TestNailgunConnectionSmallHeartbeatTimeout, self).__init__(*args, **kwargs)
def setUp(self):
self.heartbeat_timeout_ms = 1000
super(TestNailgunConnectionSmallHeartbeatTimeout, self).setUp()
def test_nailgun_disconnect(self):
"""
We should disconnect before time elapses because of configuration:
Heartbeats are sent every 5 secs
Server expects to look for disconnects if no hearbeat is received in 1 sec
Server runs for 30 sec given we still have heartbeats, so it should output about 6 'H'
We assert that number of 'H' is smaller
"""
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=5) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['30000'])
self.assertTrue(output.getvalue().count('H') < 3)
if __name__ == '__main__':
was_successful = unittest.main(exit=False).result.wasSuccessful()
if not was_successful:
sys.exit(1)
|
import subprocess
import os
import time
import unittest
import tempfile
import shutil
import uuid
import sys
from pynailgun import NailgunException, NailgunConnection
is_py2 = sys.version[0] == '2'
if is_py2:
from StringIO import StringIO
else:
from io import StringIO
if os.name == 'posix':
def transport_exists(transport_file):
return os.path.exists(transport_file)
if os.name == 'nt':
import ctypes
from ctypes.wintypes import WIN32_FIND_DATAW as WIN32_FIND_DATA
INVALID_HANDLE_VALUE = -1
FindFirstFile = ctypes.windll.kernel32.FindFirstFileW
FindClose = ctypes.windll.kernel32.FindClose
# on windows os.path.exists doen't allow to check reliably that a pipe exists
# (os.path.exists tries to open connection to a pipe)
def transport_exists(transport_path):
wfd = WIN32_FIND_DATA()
handle = FindFirstFile(transport_path, ctypes.byref(wfd))
result = handle != INVALID_HANDLE_VALUE
FindClose(handle)
return result
class TestNailgunConnection(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestNailgunConnection, self).__init__(*args, **kwargs)
self.heartbeat_timeout_ms = 10000
def setUp(self):
self.setUpTransport()
self.startNailgun()
def setUpTransport(self):
self.tmpdir = tempfile.mkdtemp()
if os.name == 'posix':
self.transport_file = os.path.join(self.tmpdir, 'sock')
self.transport_address = 'local:{0}'.format(self.transport_file)
else:
pipe_name = u'nailgun-test-{0}'.format(uuid.uuid4().hex)
self.transport_address = u'local:{0}'.format(pipe_name)
self.transport_file = u'\\\\.\\pipe\{0}'.format(pipe_name)
def getClassPath(self):
cp = [
'nailgun-server/target/nailgun-server-0.9.3-SNAPSHOT-uber.jar',
'nailgun-examples/target/nailgun-examples-0.9.3-SNAPSHOT.jar',
]
if os.name == 'nt':
return ';'.join(cp)
return ':'.join(cp)
def startNailgun(self):
if os.name == 'posix':
def preexec_fn():
# Close any open file descriptors to further separate buckd from its
# invoking context (e.g. otherwise we'd hang when running things like
# `ssh localhost buck clean`).
dev_null_fd = os.open("/dev/null", os.O_RDWR)
os.dup2(dev_null_fd, 0)
os.dup2(dev_null_fd, 2)
os.close(dev_null_fd)
creationflags = 0
else:
preexec_fn = None
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx#DETACHED_PROCESS
DETACHED_PROCESS = 0x00000008
creationflags = DETACHED_PROCESS
stdout = None
if os.name == 'posix':
stdout=subprocess.PIPE
log_config_file = os.path.join(self.tmpdir, 'logging.properties')
self.log_file = os.path.join(self.tmpdir, 'test_ng.log')
with open(log_config_file, 'w') as config_file:
config_file.write('handlers = java.util.logging.FileHandler\n')
config_file.write('.level = ALL\n')
config_file.write('java.util.logging.FileHandler.level = ALL\n')
config_file.write('java.util.logging.FileHandler.pattern = ' + self.log_file + '\n')
config_file.write('java.util.logging.FileHandler.count = 1\n')
config_file.write('java.util.logging.FileHandler.formatter = java.util.logging.SimpleFormatter\n')
cmd = ['java', '-Djna.nosys=true', '-Djava.util.logging.config.file=' + log_config_file, '-classpath', self.getClassPath()]
debug_mode = os.environ.get('DEBUG_MODE') or ''
if debug_mode != '':
suspend = 'n' if debug_mode == '2' else 'y'
cmd.append('-agentlib:jdwp=transport=dt_socket,address=localhost:8888,server=y,suspend=' + suspend)
cmd = cmd + ['com.martiansoftware.nailgun.NGServer', self.transport_address, str(self.heartbeat_timeout_ms)]
self.ng_server_process = subprocess.Popen(
cmd,
preexec_fn=preexec_fn,
creationflags=creationflags,
stdout=stdout,
)
self.assertIsNone(self.ng_server_process.poll())
if os.name == 'posix':
# on *nix we have to wait for server to be ready to accept connections
while True:
the_first_line = str(self.ng_server_process.stdout.readline().strip())
if "NGServer" in the_first_line and "started" in the_first_line:
break
if the_first_line is None or the_first_line == '':
break
else:
for _ in range(0, 600):
# on windows it is OK to rely on existence of the pipe file
if not transport_exists(self.transport_file):
time.sleep(0.01)
else:
break
self.assertTrue(transport_exists(self.transport_file))
def tearDown(self):
try:
with NailgunConnection(
self.transport_address,
cwd=os.getcwd(),
stderr=None,
stdin=None,
stdout=None) as c:
c.send_command('ng-stop')
except NailgunException as e:
# stopping server is a best effort
# if something wrong has happened, we will kill it anyways
pass
# Python2 compatible wait with timeout
process_exit_code = None
for _ in range(0, 500):
process_exit_code = self.ng_server_process.poll()
if process_exit_code is not None:
break
time.sleep(0.02) # 1 second total
if process_exit_code is None:
# some test has failed, ng-server was not stopped. killing it
self.ng_server_process.kill()
debug_logs = os.environ.get('DEBUG_LOGS') or ''
if debug_logs != '':
with open(self.log_file, 'r') as log_file:
print('NAILGUN SERVER LOG:\n')
print(log_file.read())
shutil.rmtree(self.tmpdir)
class TestNailgunConnectionMain(TestNailgunConnection):
def __init__(self, *args, **kwargs):
super(TestNailgunConnectionMain, self).__init__(*args, **kwargs)
def test_nailgun_stats(self):
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output) as c:
exit_code = c.send_command('ng-stats')
self.assertEqual(exit_code, 0)
actual_out = output.getvalue().strip()
expected_out = 'com.martiansoftware.nailgun.builtins.NGServerStats: 1/1'
self.assertEqual(actual_out, expected_out)
def test_nailgun_exit_code(self):
output = StringIO()
expected_exit_code = 10
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Exit', [str(expected_exit_code)])
self.assertEqual(exit_code, expected_exit_code)
def test_nailgun_stdin(self):
lines = [str(i) for i in range(100)]
echo = '\n'.join(lines)
output = StringIO()
input = StringIO(echo)
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=input,
stdout=output) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Echo')
self.assertEqual(exit_code, 0)
actual_out = output.getvalue().strip()
self.assertEqual(actual_out, echo)
def test_nailgun_default_streams(self):
with NailgunConnection(self.transport_address) as c:
exit_code = c.send_command('ng-stats')
self.assertEqual(exit_code, 0)
def test_nailgun_heartbeats(self):
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=0.1) as c:
# just run Heartbeat nail for 5 seconds. During this period there should be
# heartbeats received and printed back
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['5000'])
self.assertTrue(output.getvalue().count('H') > 10)
def test_nailgun_no_heartbeat(self):
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=0) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['3000'])
self.assertTrue(output.getvalue().count('H') == 0)
def test_stress_nailgun_socket_close_without_race_condition(self):
output = StringIO()
for i in range (1000):
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=0.001) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['10'])
self.assertEqual(exit_code, 0)
class TestNailgunConnectionSmallHeartbeatTimeout(TestNailgunConnection):
def __init__(self, *args, **kwargs):
super(TestNailgunConnectionSmallHeartbeatTimeout, self).__init__(*args, **kwargs)
def setUp(self):
self.heartbeat_timeout_ms = 1000
super(TestNailgunConnectionSmallHeartbeatTimeout, self).setUp()
def test_nailgun_disconnect(self):
"""
We should disconnect before time elapses because of configuration:
Heartbeats are sent every 5 secs
Server expects to look for disconnects if no hearbeat is received in 1 sec
Server runs for 30 sec given we still have heartbeats, so it should output about 6 'H'
We assert that number of 'H' is smaller
"""
output = StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output,
heartbeat_interval_sec=5) as c:
exit_code = c.send_command('com.martiansoftware.nailgun.examples.Heartbeat', ['30000'])
self.assertTrue(output.getvalue().count('H') < 3)
if __name__ == '__main__':
was_successful = unittest.main(exit=False).result.wasSuccessful()
if not was_successful:
sys.exit(1)
|
en
| 0.917393
|
# on windows os.path.exists doen't allow to check reliably that a pipe exists # (os.path.exists tries to open connection to a pipe) # Close any open file descriptors to further separate buckd from its # invoking context (e.g. otherwise we'd hang when running things like # `ssh localhost buck clean`). # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx#DETACHED_PROCESS # on *nix we have to wait for server to be ready to accept connections # on windows it is OK to rely on existence of the pipe file # stopping server is a best effort # if something wrong has happened, we will kill it anyways # Python2 compatible wait with timeout # 1 second total # some test has failed, ng-server was not stopped. killing it # just run Heartbeat nail for 5 seconds. During this period there should be # heartbeats received and printed back We should disconnect before time elapses because of configuration: Heartbeats are sent every 5 secs Server expects to look for disconnects if no hearbeat is received in 1 sec Server runs for 30 sec given we still have heartbeats, so it should output about 6 'H' We assert that number of 'H' is smaller
| 2.112868
| 2
|
_scrap/_naver_stock.py
|
hopelife/mp_scraper
| 0
|
6628943
|
<reponame>hopelife/mp_scraper<gh_stars>0
import os, sys
import math
import copy
import time
from datetime import datetime
import re
# import requests
import urllib
import lxml.html as ht
# import lxml.etree as et
##------------------------------------------------------------
sys.path.append(os.path.join(os.path.dirname(__file__), '../_public')) ## Note: 현재 디렉토리 기준 상대 경로 설정
from utils_basic import (
_create_folder,
_read_file,
_file_to_json,
_json_to_file,
_to_lists,
_to_digit,
_divide_list,
_fn
)
from utils_scraping import (
_root,
_remove_punc,
_pages_by_pagination,
_scrape_list_pages,
_extract_values,
_scrape_detail_page,
_scrape_full_html
)
from scrap_selenium import (
_selenium,
_source,
_wait,
_login,
)
# sys.path.append(os.path.join(os.path.abspath('../staff')))
# from ScrapBySelenium import ScrapBySelenium
_base_url = 'https://m.stock.naver.com'
TODAY = datetime.now().strftime("%Y%m%d")
##
##----------------------------------------------------------
def scrap_naver_total(shcode='336370'):
"""
shcode의 종목에 대한 '종합/투자자별 매매동향/...' 데이터 scrap
"""
# url = f"https://m.stock.naver.com/item/main.nhn#/stocks/{shcode}/total"
url = f"https://m.stock.naver.com/index.html#/domestic/stock/{shcode}/total"
browser = _selenium(url=url, headless=False)
button = _wait(xpath='.//*[@id="content"]//div[@class="VStockInfo_article__3dWiQ"]/a', driver=browser)
if not button:
print(f"페이지 로딩 실패")
return False
button.click() ## 종목 정보 더보기
html = _source(driver=browser, xpath='.//*[@id="content"]')
root = _root(html)
# ## NOTE: N증권 / 국내증시 / 종합
# info = root.xpath('.//ul[@class="VStockInfo_list__1Hfnb"]')[0]
# values = {
# 'keys': {
# 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/strong',
# 'target': 'text',
# },
# 'vals': {
# 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/span',
# 'target': 'text',
# },
# }
# r = _extract_values(info, values, _zip=None)
# print({key: _to_digit(val) for key, val in zip(r['keys'], r['vals'])})
## NOTE: N증권 / 국내증시 / 투자자별 매매동향
button = _wait(xpath='.//*[@id="content"]//div[@class="VTableTrend_boxMore__1EVMo"]/a[1]', driver=browser)
if not button:
print(f"페이지 로딩 실패")
return False
button.click() ## 매매동향 더보기
info = root.xpath('.//div[@class="VTableTrend_inner__1Crkx"]')[0]
values = {
'keys': {
'xpath': './table/thead/tr/th',
'target': 'text'
},
'vals': {
'xpath': './table/tbody/tr/td',
'target': 'content'
}
}
r = _extract_values(info, values, _zip=None)
n = len(r['keys']) ## NOTE: 열column 수
vals = [val if i%n == 0 else _to_digit(val[:len(val)//2]) if i%n==n-2 else _to_digit(val) for i, val in enumerate(r['vals'])]
rows = [r['keys']] + _divide_list(vals, n)
print(f"투자동향: {rows}")
# ## NOTE: 동일 업종 비교
# xpath = '//div[contains(@class, "compare")]/a'
# if s.wait(xpath, max_wait=3) != -1: # '동일 업종 비교'가 있는 경우
# upjong = s.attribute_value(xpath, "href").split('=')[-1]
# output['업종번호'] = upjong
# ## 컨센서스
# xpath = '//span[contains(@class, "data_lyr")]'
# if s.check_element(xpath): # NOTE: 컨센서스가 있는 경우
# trade_weight = s._convert_to_float(s.find_element(xpath).text) # NOTE: 매수.매도 점수
# goal_price = s._convert_to_float(s.find_element('//span[@class="goal_stock"]/em').text) # NOTE: 목표가
# output['매매추천'] = trade_weight
# output['목표주가'] = goal_price
# s.close() # NOTE: selenium browser close
# return output
# def scrap_naver_upjong():
# """
# 업종 상승률
# """
# url = "https://m.stock.naver.com/sise/siseList.nhn?menu=upjong"
# s = ScrapBySelenium(url=url)
# # wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# wait_xpath = '//span[@class="u_pg_total"]'
# s.wait(wait_xpath)
# total = s._convert_to_float(s.find_element_text(wait_xpath))
# wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# s.click(xpath=wait_xpath) # 버튼 펼치기
# output = []
# for i in range(0, total):
# gap_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//span[1]'
# name_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//strong[@class="stock_item"]'
# no_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//a[1]'
# # <a href="/sise/siseGroupDetail.nhn?menu=upjong&no=218" class="btn_detail" onclick="nclk(this, 'mil.cat', '', '');">상세 목록 보기</a>
# name = s.find_element(name_xpath).text
# no = s.attribute_value(no_xpath, 'href').split('=')[-1]
# gap = s._convert_to_float(s.find_element(gap_xpath).text)
# print(f"{name}, {no}, {gap}")
# output.append({'업종명': name, '업종번호': no, '업종상승률': gap})
# s.close()
# return output
if __name__ == '__main__':
## NOTE: 테스트
scrap_naver_total(shcode='336370')
## NOTE: shcode의 종목에 대한 '종합/투자자별 매매동향/업종번호/'
# t = scrap_naver_total(shcode='336370')
# print(f"{t}")
## NOTE: 업종별 업종명/업종번호/상승률
# u = scrap_naver_upjong()
# print(f"{u}")
## NOTE: file
# path = './naver_sise_rise_table_bak.html'
# path = './naver_sise_rise_table.html'
# root = _tree_from_file(path=path)
# # text = _text_by_xpath(root, xpath='.//div[@class="choice_lt"]/div')
# # text = _text_by_xpath(root, xpath='.//th')
# result = []
# for i in range(3, 13):
# texts = _texts_by_xpath(root, xpath=f'.//table[@class="type_2"]/tbody/tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
# # print(f"{[el.text for el in root.findall('.//country//rank')]}")
# ## NOTE: naver_stock_m_domestic_upper_kospi
# path = './naver_stock_m_domestic_upper_kospi.html'
# root = _tree_from_file(path=path)
# result = []
# for i in range(1, 10):
# texts = _texts_by_xpath(root, xpath=f'.//table/tbody//tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
## TODO:
## naver 업종 코드(page serial)
# https://m.stock.naver.com/sise/siseGroupDetail.nhn?menu=upjong&no=218
# # 네이버
# N증권 > 국내증시
# ### 종합
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/total
# 전일
# 시가
# 고가
# 저가
# 거래량
# 대금
# 시총
# 외인소진율
# 52주최고
# 52주최저
# PER
# EPS
# BPS
# 배당수익률
# 주당배당금
# ### 토론
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/discuss
# ### 뉴스.공시
# #### 종목뉴스
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/news
# #### 공시정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/notice
# #### IR정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ir
# ### 시세.호가
# #### 일별시세
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/price
# #### 5단계 호가
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ask
# ### 재무
# #### 연간실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/annual
# #### 분기실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/quarter
# #### 비재무정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/nonfinance
# ## 홈
# ### 관심종목
# ### 트렌드 랭킹
# ## 시장지표
# ### 주요
# ### 환율
# ### 에너지
# ### 금속
# ### 금리
# ### 농축산물
# ## 국내
# ### 시가총액
# ### 업종
# ### 테마
# ### 그룹
# ### 인기검색
# ### 배당
# ### 거래상위
# ### 상한가
# ###
# 컨센서스
# 컨센서스
# 업종
# 테마
# 그룹
# 거래상위
# https://m.stock.naver.com/sise/siseList.nhn?menu=quant&sosok=0
# 상한가
# 상승
# 하락
# 관리
|
import os, sys
import math
import copy
import time
from datetime import datetime
import re
# import requests
import urllib
import lxml.html as ht
# import lxml.etree as et
##------------------------------------------------------------
sys.path.append(os.path.join(os.path.dirname(__file__), '../_public')) ## Note: 현재 디렉토리 기준 상대 경로 설정
from utils_basic import (
_create_folder,
_read_file,
_file_to_json,
_json_to_file,
_to_lists,
_to_digit,
_divide_list,
_fn
)
from utils_scraping import (
_root,
_remove_punc,
_pages_by_pagination,
_scrape_list_pages,
_extract_values,
_scrape_detail_page,
_scrape_full_html
)
from scrap_selenium import (
_selenium,
_source,
_wait,
_login,
)
# sys.path.append(os.path.join(os.path.abspath('../staff')))
# from ScrapBySelenium import ScrapBySelenium
_base_url = 'https://m.stock.naver.com'
TODAY = datetime.now().strftime("%Y%m%d")
##
##----------------------------------------------------------
def scrap_naver_total(shcode='336370'):
"""
shcode의 종목에 대한 '종합/투자자별 매매동향/...' 데이터 scrap
"""
# url = f"https://m.stock.naver.com/item/main.nhn#/stocks/{shcode}/total"
url = f"https://m.stock.naver.com/index.html#/domestic/stock/{shcode}/total"
browser = _selenium(url=url, headless=False)
button = _wait(xpath='.//*[@id="content"]//div[@class="VStockInfo_article__3dWiQ"]/a', driver=browser)
if not button:
print(f"페이지 로딩 실패")
return False
button.click() ## 종목 정보 더보기
html = _source(driver=browser, xpath='.//*[@id="content"]')
root = _root(html)
# ## NOTE: N증권 / 국내증시 / 종합
# info = root.xpath('.//ul[@class="VStockInfo_list__1Hfnb"]')[0]
# values = {
# 'keys': {
# 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/strong',
# 'target': 'text',
# },
# 'vals': {
# 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/span',
# 'target': 'text',
# },
# }
# r = _extract_values(info, values, _zip=None)
# print({key: _to_digit(val) for key, val in zip(r['keys'], r['vals'])})
## NOTE: N증권 / 국내증시 / 투자자별 매매동향
button = _wait(xpath='.//*[@id="content"]//div[@class="VTableTrend_boxMore__1EVMo"]/a[1]', driver=browser)
if not button:
print(f"페이지 로딩 실패")
return False
button.click() ## 매매동향 더보기
info = root.xpath('.//div[@class="VTableTrend_inner__1Crkx"]')[0]
values = {
'keys': {
'xpath': './table/thead/tr/th',
'target': 'text'
},
'vals': {
'xpath': './table/tbody/tr/td',
'target': 'content'
}
}
r = _extract_values(info, values, _zip=None)
n = len(r['keys']) ## NOTE: 열column 수
vals = [val if i%n == 0 else _to_digit(val[:len(val)//2]) if i%n==n-2 else _to_digit(val) for i, val in enumerate(r['vals'])]
rows = [r['keys']] + _divide_list(vals, n)
print(f"투자동향: {rows}")
# ## NOTE: 동일 업종 비교
# xpath = '//div[contains(@class, "compare")]/a'
# if s.wait(xpath, max_wait=3) != -1: # '동일 업종 비교'가 있는 경우
# upjong = s.attribute_value(xpath, "href").split('=')[-1]
# output['업종번호'] = upjong
# ## 컨센서스
# xpath = '//span[contains(@class, "data_lyr")]'
# if s.check_element(xpath): # NOTE: 컨센서스가 있는 경우
# trade_weight = s._convert_to_float(s.find_element(xpath).text) # NOTE: 매수.매도 점수
# goal_price = s._convert_to_float(s.find_element('//span[@class="goal_stock"]/em').text) # NOTE: 목표가
# output['매매추천'] = trade_weight
# output['목표주가'] = goal_price
# s.close() # NOTE: selenium browser close
# return output
# def scrap_naver_upjong():
# """
# 업종 상승률
# """
# url = "https://m.stock.naver.com/sise/siseList.nhn?menu=upjong"
# s = ScrapBySelenium(url=url)
# # wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# wait_xpath = '//span[@class="u_pg_total"]'
# s.wait(wait_xpath)
# total = s._convert_to_float(s.find_element_text(wait_xpath))
# wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# s.click(xpath=wait_xpath) # 버튼 펼치기
# output = []
# for i in range(0, total):
# gap_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//span[1]'
# name_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//strong[@class="stock_item"]'
# no_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//a[1]'
# # <a href="/sise/siseGroupDetail.nhn?menu=upjong&no=218" class="btn_detail" onclick="nclk(this, 'mil.cat', '', '');">상세 목록 보기</a>
# name = s.find_element(name_xpath).text
# no = s.attribute_value(no_xpath, 'href').split('=')[-1]
# gap = s._convert_to_float(s.find_element(gap_xpath).text)
# print(f"{name}, {no}, {gap}")
# output.append({'업종명': name, '업종번호': no, '업종상승률': gap})
# s.close()
# return output
if __name__ == '__main__':
## NOTE: 테스트
scrap_naver_total(shcode='336370')
## NOTE: shcode의 종목에 대한 '종합/투자자별 매매동향/업종번호/'
# t = scrap_naver_total(shcode='336370')
# print(f"{t}")
## NOTE: 업종별 업종명/업종번호/상승률
# u = scrap_naver_upjong()
# print(f"{u}")
## NOTE: file
# path = './naver_sise_rise_table_bak.html'
# path = './naver_sise_rise_table.html'
# root = _tree_from_file(path=path)
# # text = _text_by_xpath(root, xpath='.//div[@class="choice_lt"]/div')
# # text = _text_by_xpath(root, xpath='.//th')
# result = []
# for i in range(3, 13):
# texts = _texts_by_xpath(root, xpath=f'.//table[@class="type_2"]/tbody/tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
# # print(f"{[el.text for el in root.findall('.//country//rank')]}")
# ## NOTE: naver_stock_m_domestic_upper_kospi
# path = './naver_stock_m_domestic_upper_kospi.html'
# root = _tree_from_file(path=path)
# result = []
# for i in range(1, 10):
# texts = _texts_by_xpath(root, xpath=f'.//table/tbody//tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
## TODO:
## naver 업종 코드(page serial)
# https://m.stock.naver.com/sise/siseGroupDetail.nhn?menu=upjong&no=218
# # 네이버
# N증권 > 국내증시
# ### 종합
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/total
# 전일
# 시가
# 고가
# 저가
# 거래량
# 대금
# 시총
# 외인소진율
# 52주최고
# 52주최저
# PER
# EPS
# BPS
# 배당수익률
# 주당배당금
# ### 토론
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/discuss
# ### 뉴스.공시
# #### 종목뉴스
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/news
# #### 공시정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/notice
# #### IR정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ir
# ### 시세.호가
# #### 일별시세
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/price
# #### 5단계 호가
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ask
# ### 재무
# #### 연간실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/annual
# #### 분기실적
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/quarter
# #### 비재무정보
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/nonfinance
# ## 홈
# ### 관심종목
# ### 트렌드 랭킹
# ## 시장지표
# ### 주요
# ### 환율
# ### 에너지
# ### 금속
# ### 금리
# ### 농축산물
# ## 국내
# ### 시가총액
# ### 업종
# ### 테마
# ### 그룹
# ### 인기검색
# ### 배당
# ### 거래상위
# ### 상한가
# ###
# 컨센서스
# 컨센서스
# 업종
# 테마
# 그룹
# 거래상위
# https://m.stock.naver.com/sise/siseList.nhn?menu=quant&sosok=0
# 상한가
# 상승
# 하락
# 관리
|
ko
| 0.242533
|
# import requests # import lxml.etree as et ##------------------------------------------------------------ ## Note: 현재 디렉토리 기준 상대 경로 설정 # sys.path.append(os.path.join(os.path.abspath('../staff'))) # from ScrapBySelenium import ScrapBySelenium ## ##---------------------------------------------------------- shcode의 종목에 대한 '종합/투자자별 매매동향/...' 데이터 scrap # url = f"https://m.stock.naver.com/item/main.nhn#/stocks/{shcode}/total" #/domestic/stock/{shcode}/total" ## 종목 정보 더보기 # ## NOTE: N증권 / 국내증시 / 종합 # info = root.xpath('.//ul[@class="VStockInfo_list__1Hfnb"]')[0] # values = { # 'keys': { # 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/strong', # 'target': 'text', # }, # 'vals': { # 'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/span', # 'target': 'text', # }, # } # r = _extract_values(info, values, _zip=None) # print({key: _to_digit(val) for key, val in zip(r['keys'], r['vals'])}) ## NOTE: N증권 / 국내증시 / 투자자별 매매동향 ## 매매동향 더보기 ## NOTE: 열column 수 # ## NOTE: 동일 업종 비교 # xpath = '//div[contains(@class, "compare")]/a' # if s.wait(xpath, max_wait=3) != -1: # '동일 업종 비교'가 있는 경우 # upjong = s.attribute_value(xpath, "href").split('=')[-1] # output['업종번호'] = upjong # ## 컨센서스 # xpath = '//span[contains(@class, "data_lyr")]' # if s.check_element(xpath): # NOTE: 컨센서스가 있는 경우 # trade_weight = s._convert_to_float(s.find_element(xpath).text) # NOTE: 매수.매도 점수 # goal_price = s._convert_to_float(s.find_element('//span[@class="goal_stock"]/em').text) # NOTE: 목표가 # output['매매추천'] = trade_weight # output['목표주가'] = goal_price # s.close() # NOTE: selenium browser close # return output # def scrap_naver_upjong(): # """ # 업종 상승률 # """ # url = "https://m.stock.naver.com/sise/siseList.nhn?menu=upjong" # s = ScrapBySelenium(url=url) # # wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]' # wait_xpath = '//span[@class="u_pg_total"]' # s.wait(wait_xpath) # total = s._convert_to_float(s.find_element_text(wait_xpath)) # wait_xpath = 
'//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]' # s.click(xpath=wait_xpath) # 버튼 펼치기 # output = [] # for i in range(0, total): # gap_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//span[1]' # name_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//strong[@class="stock_item"]' # no_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//a[1]' # # <a href="/sise/siseGroupDetail.nhn?menu=upjong&no=218" class="btn_detail" onclick="nclk(this, 'mil.cat', '', '');">상세 목록 보기</a> # name = s.find_element(name_xpath).text # no = s.attribute_value(no_xpath, 'href').split('=')[-1] # gap = s._convert_to_float(s.find_element(gap_xpath).text) # print(f"{name}, {no}, {gap}") # output.append({'업종명': name, '업종번호': no, '업종상승률': gap}) # s.close() # return output ## NOTE: 테스트 ## NOTE: shcode의 종목에 대한 '종합/투자자별 매매동향/업종번호/' # t = scrap_naver_total(shcode='336370') # print(f"{t}") ## NOTE: 업종별 업종명/업종번호/상승률 # u = scrap_naver_upjong() # print(f"{u}") ## NOTE: file # path = './naver_sise_rise_table_bak.html' # path = './naver_sise_rise_table.html' # root = _tree_from_file(path=path) # # text = _text_by_xpath(root, xpath='.//div[@class="choice_lt"]/div') # # text = _text_by_xpath(root, xpath='.//th') # result = [] # for i in range(3, 13): # texts = _texts_by_xpath(root, xpath=f'.//table[@class="type_2"]/tbody/tr[{i}]/td') # if len(texts) > 2: # result.append(texts) # print(f"result: {result}") # # print(f"{[el.text for el in root.findall('.//country//rank')]}") # ## NOTE: naver_stock_m_domestic_upper_kospi # path = './naver_stock_m_domestic_upper_kospi.html' # root = _tree_from_file(path=path) # result = [] # for i in range(1, 10): # texts = _texts_by_xpath(root, xpath=f'.//table/tbody//tr[{i}]/td') # if len(texts) > 2: # result.append(texts) # print(f"result: {result}") ## TODO: ## naver 업종 코드(page serial) # https://m.stock.naver.com/sise/siseGroupDetail.nhn?menu=upjong&no=218 # # 네이버 # N증권 > 국내증시 # ### 종합 # - 
https://m.stock.naver.com/item/main.nhn#/stocks/336370/total # 전일 # 시가 # 고가 # 저가 # 거래량 # 대금 # 시총 # 외인소진율 # 52주최고 # 52주최저 # PER # EPS # BPS # 배당수익률 # 주당배당금 # ### 토론 # - https://m.stock.naver.com/item/main.nhn#/stocks/336370/discuss # ### 뉴스.공시 # #### 종목뉴스 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/news # #### 공시정보 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/notice # #### IR정보 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/ir # ### 시세.호가 # #### 일별시세 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/price # #### 5단계 호가 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/ask # ### 재무 # #### 연간실적 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/annual # #### 분기실적 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/quarter # #### 비재무정보 # https://m.stock.naver.com/item/main.nhn#/stocks/336370/nonfinance # ## 홈 # ### 관심종목 # ### 트렌드 랭킹 # ## 시장지표 # ### 주요 # ### 환율 # ### 에너지 # ### 금속 # ### 금리 # ### 농축산물 # ## 국내 # ### 시가총액 # ### 업종 # ### 테마 # ### 그룹 # ### 인기검색 # ### 배당 # ### 거래상위 # ### 상한가 # ### # 컨센서스 # 컨센서스 # 업종 # 테마 # 그룹 # 거래상위 # https://m.stock.naver.com/sise/siseList.nhn?menu=quant&sosok=0 # 상한가 # 상승 # 하락 # 관리
| 2.473153
| 2
|
data/train/python/16b9d2b0b3e3ea014120236fc6a289879ecb9b64repositorymap.py
|
harshp8l/deep-learning-lang-detection
| 84
|
6628944
|
<gh_stars>10-100
#
# Copyright 2012 SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Read configuration files for mapping Git repositories to CVS locations
# for synchronization, including branch mappings. Format is ini-style:
# basename of git repositories must be unique
import config
import os
import shlex
class RepositoryConfig(config.Config):
def __init__(self, configFileName):
config.Config.__init__(self, configFileName)
# enforce uniqueness
self.repos = {}
for r in self.getRepositories():
name = self.getRepositoryName(r)
if name in self.repos:
raise KeyError('Duplicate repository name %s: %s and %s'
%(name, self.repos[name], r))
self.repos[name] = r
self.requireAbsolutePaths('skeleton')
def getRepositories(self):
return set(self.sections()) - set(('GLOBAL',))
@staticmethod
def getRepositoryName(repository):
return os.path.basename(repository)
def getRepositoryByName(self, repositoryName):
if self.has_section(repositoryName):
return repositoryName
return self.repos[repositoryName]
def getCVSRoot(self, repository):
return self.getGlobalFallback(repository, 'cvsroot')
def getGitRef(self, repository):
gitroot = self.getGlobalFallback(repository, 'gitroot')
# this test needs to handle more cases
if gitroot.startswith('/') or '://' in gitroot:
# directory paths, http/s
return '/'.join((gitroot, repository))
# username@host:path
return ':'.join((gitroot, repository))
def getCVSPath(self, repository):
return self.get(repository, 'cvspath')
def getSkeleton(self, repository):
return self.getGlobalFallback(repository, 'skeleton', error=False)
def getBranchFrom(self, repository):
return self.getOptional(repository, 'branchfrom')
def getBranchPrefix(self, repository, branch):
optname = 'prefix.'+branch
return self.getOptional(repository, optname)
def getGitLogOptions(self, repository, branch):
optname = 'gitlog.'+branch
return self.getOptional(repository, optname)
def getImportBranchMaps(self, repository):
'return: [(cvsbranch, gitbranch), ...]'
return [(x[4:], 'cvs-' + self.get(repository, x))
for x in sorted(self.options(repository))
if x.startswith('cvs.')]
def getCVSVariables(self, repository):
'return: ["VARIABLE=value", ...]'
return ['='.join((x[7:], '' + self.getGlobalFallback(repository, x)))
for x in sorted(set(self.options(repository) +
self.options('GLOBAL')))
if x.startswith('cvsvar.')]
def getExportBranchMaps(self, repository):
'return: [(gitbranch, cvsbranch, exportbranch), ...]'
return [(x[4:], self.get(repository, x), 'export-' + x[4:])
for x in sorted(self.options(repository))
if x.startswith('git.')]
def getMergeBranchMaps(self, repository):
'return: {sourcebranch, set(targetbranch, targetbranch, ...), ...}'
return dict((x[6:], set(self.get(repository, x).strip().split()))
for x in sorted(self.options(repository))
if x.startswith('merge.'))
def getHook(self, type, when, repository):
return self.getGlobalFallback(repository, when+'hook.'+type, error=False)
def getHookDir(self, direction, type, when, repository):
if direction:
return self.getGlobalFallback(repository, when+'hook.'+direction+'.'+type,
error=False)
return None
def getHookBranch(self, type, when, repository, branch):
return self.getGlobalFallback(repository, when+'hook.'+type+'.'+branch,
error=False)
def getHookDirBranch(self, direction, type, when, repository, branch):
if direction:
return self.getGlobalFallback(repository, when+'hook.'+direction+'.'+type+'.'+branch,
error=False)
return None
def getHooksBranch(self, type, direction, when, repository, branch):
return [shlex.split(x) for x in
(self.getHook(type, when, repository),
self.getHookDir(direction, type, when, repository),
self.getHookBranch(type, when, repository, branch),
self.getHookDirBranch(direction, type, when, repository, branch))
if x]
def getGitImpPreHooks(self, repository, branch):
return self.getHooksBranch('git', 'imp', 'pre', repository, branch)
def getGitImpPostHooks(self, repository, branch):
return self.getHooksBranch('git', 'imp', 'post', repository, branch)
def getGitExpPreHooks(self, repository, branch):
return self.getHooksBranch('git', 'exp', 'pre', repository, branch)
def getGitExpPostHooks(self, repository, branch):
return self.getHooksBranch('git', 'exp', 'post', repository, branch)
def getCVSPreHooks(self, repository, branch):
return self.getHooksBranch('cvs', None, 'pre', repository, branch)
def getCVSPostHooks(self, repository, branch):
return self.getHooksBranch('cvs', None, 'post', repository, branch)
def getEmail(self, repository):
email = self.getGlobalFallback(repository, 'email', error=False)
if email:
return email.split()
return None
def addEmail(self, repository, addresses):
if not addresses:
return
email = self.getGlobalFallback(repository, 'email', error=False)
if email:
email = email.split()
email.extend(addresses)
self.set(repository, 'email', ' '.join(email))
else:
self.set(repository, 'email', ' '.join(addresses))
|
#
# Copyright 2012 SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Read configuration files for mapping Git repositories to CVS locations
# for synchronization, including branch mappings. Format is ini-style:
# basename of git repositories must be unique
import config
import os
import shlex
class RepositoryConfig(config.Config):
def __init__(self, configFileName):
config.Config.__init__(self, configFileName)
# enforce uniqueness
self.repos = {}
for r in self.getRepositories():
name = self.getRepositoryName(r)
if name in self.repos:
raise KeyError('Duplicate repository name %s: %s and %s'
%(name, self.repos[name], r))
self.repos[name] = r
self.requireAbsolutePaths('skeleton')
def getRepositories(self):
return set(self.sections()) - set(('GLOBAL',))
@staticmethod
def getRepositoryName(repository):
return os.path.basename(repository)
def getRepositoryByName(self, repositoryName):
if self.has_section(repositoryName):
return repositoryName
return self.repos[repositoryName]
def getCVSRoot(self, repository):
return self.getGlobalFallback(repository, 'cvsroot')
def getGitRef(self, repository):
gitroot = self.getGlobalFallback(repository, 'gitroot')
# this test needs to handle more cases
if gitroot.startswith('/') or '://' in gitroot:
# directory paths, http/s
return '/'.join((gitroot, repository))
# username@host:path
return ':'.join((gitroot, repository))
def getCVSPath(self, repository):
return self.get(repository, 'cvspath')
def getSkeleton(self, repository):
return self.getGlobalFallback(repository, 'skeleton', error=False)
def getBranchFrom(self, repository):
return self.getOptional(repository, 'branchfrom')
def getBranchPrefix(self, repository, branch):
optname = 'prefix.'+branch
return self.getOptional(repository, optname)
def getGitLogOptions(self, repository, branch):
optname = 'gitlog.'+branch
return self.getOptional(repository, optname)
def getImportBranchMaps(self, repository):
'return: [(cvsbranch, gitbranch), ...]'
return [(x[4:], 'cvs-' + self.get(repository, x))
for x in sorted(self.options(repository))
if x.startswith('cvs.')]
def getCVSVariables(self, repository):
'return: ["VARIABLE=value", ...]'
return ['='.join((x[7:], '' + self.getGlobalFallback(repository, x)))
for x in sorted(set(self.options(repository) +
self.options('GLOBAL')))
if x.startswith('cvsvar.')]
def getExportBranchMaps(self, repository):
'return: [(gitbranch, cvsbranch, exportbranch), ...]'
return [(x[4:], self.get(repository, x), 'export-' + x[4:])
for x in sorted(self.options(repository))
if x.startswith('git.')]
def getMergeBranchMaps(self, repository):
'return: {sourcebranch, set(targetbranch, targetbranch, ...), ...}'
return dict((x[6:], set(self.get(repository, x).strip().split()))
for x in sorted(self.options(repository))
if x.startswith('merge.'))
def getHook(self, type, when, repository):
return self.getGlobalFallback(repository, when+'hook.'+type, error=False)
def getHookDir(self, direction, type, when, repository):
if direction:
return self.getGlobalFallback(repository, when+'hook.'+direction+'.'+type,
error=False)
return None
def getHookBranch(self, type, when, repository, branch):
return self.getGlobalFallback(repository, when+'hook.'+type+'.'+branch,
error=False)
def getHookDirBranch(self, direction, type, when, repository, branch):
if direction:
return self.getGlobalFallback(repository, when+'hook.'+direction+'.'+type+'.'+branch,
error=False)
return None
def getHooksBranch(self, type, direction, when, repository, branch):
return [shlex.split(x) for x in
(self.getHook(type, when, repository),
self.getHookDir(direction, type, when, repository),
self.getHookBranch(type, when, repository, branch),
self.getHookDirBranch(direction, type, when, repository, branch))
if x]
def getGitImpPreHooks(self, repository, branch):
return self.getHooksBranch('git', 'imp', 'pre', repository, branch)
def getGitImpPostHooks(self, repository, branch):
return self.getHooksBranch('git', 'imp', 'post', repository, branch)
def getGitExpPreHooks(self, repository, branch):
return self.getHooksBranch('git', 'exp', 'pre', repository, branch)
def getGitExpPostHooks(self, repository, branch):
return self.getHooksBranch('git', 'exp', 'post', repository, branch)
def getCVSPreHooks(self, repository, branch):
return self.getHooksBranch('cvs', None, 'pre', repository, branch)
def getCVSPostHooks(self, repository, branch):
return self.getHooksBranch('cvs', None, 'post', repository, branch)
def getEmail(self, repository):
email = self.getGlobalFallback(repository, 'email', error=False)
if email:
return email.split()
return None
def addEmail(self, repository, addresses):
if not addresses:
return
email = self.getGlobalFallback(repository, 'email', error=False)
if email:
email = email.split()
email.extend(addresses)
self.set(repository, 'email', ' '.join(email))
else:
self.set(repository, 'email', ' '.join(addresses))
|
en
| 0.819848
|
# # Copyright 2012 SAS Institute # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # Read configuration files for mapping Git repositories to CVS locations # for synchronization, including branch mappings. Format is ini-style: # basename of git repositories must be unique # enforce uniqueness # this test needs to handle more cases # directory paths, http/s # username@host:path
| 2.065977
| 2
|
qiime2/metadata/io.py
|
lizgehret/qiime2
| 0
|
6628945
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import csv
import itertools
import os.path
import re
import numpy as np
import pandas as pd
from qiime2.core.util import find_duplicates
from .base import SUPPORTED_COLUMN_TYPES, FORMATTED_ID_HEADERS, is_id_header
from .metadata import Metadata, MetadataColumn
class MetadataFileError(Exception):
_suffix = (
"There may be more errors present in the metadata file. To get a full "
"report, sample/feature metadata files can be validated with Keemei: "
"https://keemei.qiime2.org\n\nFind details on QIIME 2 metadata "
"requirements here: https://docs.qiime2.org/%s/tutorials/metadata/")
def __init__(self, message, include_suffix=True):
# Lazy import because `qiime2.__release__` is available at runtime but
# not at import time (otherwise the release value could be interpolated
# into `_suffix` in the class definition above).
import qiime2
if include_suffix:
message = message + '\n\n' + self._suffix % qiime2.__release__
super().__init__(message)
class MetadataReader:
    """Parses a QIIME 2 TSV metadata file into rich metadata objects.

    Responsible for locating the header row, consuming optional ``#q2:``
    directives that immediately follow it, skipping comments and empty rows,
    and casting each column to a numeric or categorical representation.
    """
    def __init__(self, filepath):
        """Record `filepath` after verifying it points to a regular file.

        Raises
        ------
        MetadataFileError
            If `filepath` does not exist or is not a regular file.
        """
        if not os.path.isfile(filepath):
            raise MetadataFileError(
                "Metadata file path doesn't exist, or the path points to "
                "something other than a file. Please check that the path "
                "exists, has read permissions, and points to a regular file "
                "(not a directory): %s" % filepath)
        self._filepath = filepath
        # Used by `read()` to store an iterator yielding rows with
        # leading/trailing whitespace stripped from their cells (this is a
        # preprocessing step that should happen with *every* row). The iterator
        # protocol is the only guaranteed API on this object.
        self._reader = None
    def read(self, into, column_types=None):
        """Parse the metadata file and return an instance of `into`.

        Parameters
        ----------
        into : callable
            Constructor invoked with the fully parsed and type-cast
            ``pd.DataFrame`` (e.g. ``Metadata``).
        column_types : dict, optional
            Mapping of column name to column type (a member of
            ``SUPPORTED_COLUMN_TYPES``). Overrides any types declared via a
            ``#q2:types`` directive in the file.

        Raises
        ------
        MetadataFileError
            If the file cannot be decoded, parsed, validated, or loaded
            into `into`.
        """
        if column_types is None:
            column_types = {}
        try:
            # Newline settings based on recommendation from csv docs:
            # https://docs.python.org/3/library/csv.html#id3
            # Ignore BOM on read (but do not write BOM)
            with open(self._filepath,
                      'r', newline='', encoding='utf-8-sig') as fh:
                tsv_reader = csv.reader(fh, dialect='excel-tab', strict=True)
                self._reader = (self._strip_cell_whitespace(row)
                                for row in tsv_reader)
                header = self._read_header()
                directives = self._read_directives(header)
                ids, data = self._read_data(header)
        except UnicodeDecodeError as e:
            # 0xff/0xfe at position 0 is a UTF-16 byte-order mark; give a
            # targeted message for the common "saved from Excel" case.
            if ('0xff in position 0' in str(e)
                    or '0xfe in position 0' in str(e)):
                raise MetadataFileError(
                    "Metadata file must be encoded as UTF-8 or ASCII, found "
                    "UTF-16. If this file is from Microsoft Excel, save "
                    "as a plain text file, not 'UTF-16 Unicode'")
            raise MetadataFileError(
                "Metadata file must be encoded as UTF-8 or ASCII. The "
                "following error occurred when decoding the file:\n\n%s" % e)
        finally:
            self._reader = None
        index = pd.Index(ids, name=header[0], dtype=object)
        df = pd.DataFrame(data, columns=header[1:], index=index, dtype=object)
        # Validate caller-supplied column types before applying them.
        for name, type in column_types.items():
            if name not in df.columns:
                raise MetadataFileError(
                    "Column name %r specified in `column_types` is not a "
                    "column in the metadata file." % name)
            if type not in SUPPORTED_COLUMN_TYPES:
                fmt_column_types = ', '.join(
                    repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
                raise MetadataFileError(
                    "Column name %r specified in `column_types` has an "
                    "unrecognized column type %r. Supported column types: %s" %
                    (name, type, fmt_column_types))
        # Directive-declared types form the base; caller-supplied
        # `column_types` take precedence on conflict.
        resolved_column_types = directives.get('types', {})
        resolved_column_types.update(column_types)
        try:
            # Cast each column to the appropriate dtype based on column type.
            df = df.apply(self._cast_column, axis='index',
                          column_types=resolved_column_types)
        except MetadataFileError as e:
            # HACK: If an exception is raised within `DataFrame.apply`, pandas
            # adds an extra tuple element to `e.args`, making the original
            # error message difficult to read because a tuple is repr'd instead
            # of a string. To work around this, we catch and reraise a
            # MetadataFileError with the original error message. We use
            # `include_suffix=False` to avoid adding another suffix to the
            # error message we're reraising.
            msg = e.args[0]
            raise MetadataFileError(msg, include_suffix=False)
        try:
            return into(df)
        except Exception as e:
            raise MetadataFileError(
                "There was an issue with loading the metadata file:\n\n%s" % e)
    def _read_header(self):
        """Consume rows from `self._reader` until the header row is found.

        Comments and empty rows before the header are skipped. Directives or
        unrecognized ID column names before the header raise. The returned
        header has trailing empty cells trimmed and is validated for
        uniqueness, non-empty names, and absence of reserved ID headers in
        non-ID positions.
        """
        header = None
        for row in self._reader:
            if self._is_header(row):
                header = row
                break
            elif self._is_comment(row):
                continue
            elif self._is_empty(row):
                continue
            elif self._is_directive(row):
                raise MetadataFileError(
                    "Found directive %r while searching for header. "
                    "Directives may only appear immediately after the header."
                    % row[0])
            else:
                raise MetadataFileError(
                    "Found unrecognized ID column name %r while searching for "
                    "header. The first column name in the header defines the "
                    "ID column, and must be one of these values:\n\n%s\n\n"
                    "NOTE: Metadata files must contain tab-separated values." %
                    (row[0], FORMATTED_ID_HEADERS))
        if header is None:
            raise MetadataFileError(
                "Failed to locate header. The metadata file may be empty, or "
                "consists only of comments or empty rows.")
        # Trim trailing empty cells from header.
        data_extent = None
        for idx, cell in enumerate(header):
            if cell != '':
                data_extent = idx
        header = header[:data_extent+1]
        # Basic validation to 1) fail early before processing entire file; and
        # 2) make some basic guarantees about the header for things in this
        # class that use the header as part of reading the file.
        column_names = set(header)
        if '' in column_names:
            raise MetadataFileError(
                "Found at least one column without a name in the header. Each "
                "column must be named.")
        elif len(header) != len(column_names):
            duplicates = find_duplicates(header)
            raise MetadataFileError(
                "Column names must be unique. The following column names are "
                "duplicated: %s" %
                (', '.join(repr(e) for e in sorted(duplicates))))
        # Skip the first element of the header because we know it is a valid ID
        # header. The other column names are validated to ensure they *aren't*
        # valid ID headers.
        for column_name in header[1:]:
            if is_id_header(column_name):
                raise MetadataFileError(
                    "Metadata column name %r conflicts with a name reserved "
                    "for the ID column header. Reserved ID column headers:"
                    "\n\n%s" % (column_name, FORMATTED_ID_HEADERS))
        return header
    def _read_directives(self, header):
        """Consume directive rows immediately following the header.

        Returns a dict which may contain a 'types' key mapping column names
        to their declared (lower-cased) column types. The first non-directive
        row encountered is pushed back onto `self._reader` so `_read_data`
        can consume it.
        """
        directives = {}
        for row in self._reader:
            if not self._is_directive(row):
                # Push the row back: it belongs to the data section.
                self._reader = itertools.chain([row], self._reader)
                break
            if not self._is_column_types_directive(row):
                raise MetadataFileError(
                    "Unrecognized directive %r. Only the #q2:types "
                    "directive is supported at this time." % row[0])
            if 'types' in directives:
                raise MetadataFileError(
                    "Found duplicate directive %r. Each directive may "
                    "only be specified a single time." % row[0])
            row = self._match_header_len(row, header)
            column_types = {}
            for column_name, column_type in zip(header[1:], row[1:]):
                if column_type:
                    # Column types are matched case-insensitively.
                    type_nocase = column_type.lower()
                    if type_nocase in SUPPORTED_COLUMN_TYPES:
                        column_types[column_name] = type_nocase
                    else:
                        fmt_column_types = ', '.join(
                            repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
                        raise MetadataFileError(
                            "Column %r has an unrecognized column type %r "
                            "specified in its #q2:types directive. "
                            "Supported column types (case-insensitive): %s"
                            % (column_name, column_type, fmt_column_types))
            directives['types'] = column_types
        return directives
    def _read_data(self, header):
        """Consume the remaining rows and return (ids, data).

        `ids` is a list of ID cells (first column); `data` is a list of
        row-value lists padded/validated to match the header length.
        Comments and empty rows are skipped; directives or reserved-header
        IDs in the data section raise.
        """
        ids = []
        data = []
        for row in self._reader:
            if self._is_comment(row):
                continue
            elif self._is_empty(row):
                continue
            elif self._is_directive(row):
                raise MetadataFileError(
                    "Found directive %r outside of the directives section of "
                    "the file. Directives may only appear immediately after "
                    "the header." % row[0])
            elif self._is_header(row):
                raise MetadataFileError(
                    "Metadata ID %r conflicts with a name reserved for the ID "
                    "column header. Reserved ID column headers:\n\n%s" %
                    (row[0], FORMATTED_ID_HEADERS))
            row = self._match_header_len(row, header)
            ids.append(row[0])
            data.append(row[1:])
        return ids, data
    def _strip_cell_whitespace(self, row):
        """Return `row` with leading/trailing whitespace stripped per cell."""
        return [cell.strip() for cell in row]
    def _match_header_len(self, row, header):
        """Return `row` padded or trimmed to exactly the header's length.

        Short rows are padded with empty cells; longer rows are only
        acceptable if every extra trailing cell is empty.
        """
        row_len = len(row)
        header_len = len(header)
        if row_len < header_len:
            # Pad row with empty cells to match header length.
            row = row + [''] * (header_len - row_len)
        elif row_len > header_len:
            trailing_row = row[header_len:]
            if not self._is_empty(trailing_row):
                raise MetadataFileError(
                    "Metadata row contains more cells than are declared by "
                    "the header. The row has %d cells, while the header "
                    "declares %d cells." % (row_len, header_len))
            row = row[:header_len]
        return row
    def _is_empty(self, row):
        """Return True if every cell in `row` is the empty string."""
        # `all` returns True for an empty iterable, so this check works for a
        # row of zero elements (corresponds to a blank line in the file).
        return all((cell == '' for cell in row))
    def _is_comment(self, row):
        """Return True if `row` is a comment (starts with '#' but is
        neither a directive nor a header)."""
        return (
            len(row) > 0 and
            row[0].startswith('#') and
            not self._is_directive(row) and
            not self._is_header(row)
        )
    def _is_header(self, row):
        """Return True if the first cell of `row` is a recognized ID header."""
        if len(row) == 0:
            return False
        return is_id_header(row[0])
    def _is_directive(self, row):
        """Return True if `row` starts with the '#q2:' directive prefix."""
        return len(row) > 0 and row[0].startswith('#q2:')
    def _is_column_types_directive(self, row):
        """Return True if `row` is specifically the '#q2:types' directive."""
        return len(row) > 0 and row[0] == '#q2:types'
    def _cast_column(self, series, column_types):
        """Cast `series` per `column_types`, inferring numeric-else-categorical
        when the column has no declared type."""
        if series.name in column_types:
            if column_types[series.name] == 'numeric':
                return self._to_numeric(series)
            else:  # 'categorical'
                return self._to_categorical(series)
        else:
            # Infer type
            try:
                return self._to_numeric(series)
            except MetadataFileError:
                return self._to_categorical(series)
    def _to_categorical(self, series):
        """Return `series` as categorical data, with '' mapped to None."""
        # Replace empty strings with `None` to force the series to remain
        # dtype=object (this only matters if the series consists solely of
        # missing data). Replacing with np.nan and casting to dtype=object
        # won't retain the correct dtype in the resulting dataframe
        # (`DataFrame.apply` seems to force series consisting solely of np.nan
        # to dtype=float64, even if dtype=object is specified.
        #
        # To replace a value with `None`, the following invocation of
        # `Series.replace` must be used because `None` is a sentinel:
        # https://stackoverflow.com/a/17097397/3776794
        return series.replace([''], [None])
    def _to_numeric(self, series):
        """Return `series` cast to numeric, with '' mapped to NaN.

        Raises
        ------
        MetadataFileError
            If any non-empty value cannot be interpreted as numeric.
        """
        series = series.replace('', np.nan)
        is_numeric = series.apply(self._is_numeric)
        if is_numeric.all():
            return pd.to_numeric(series, errors='raise')
        else:
            non_numerics = series[~is_numeric].unique()
            raise MetadataFileError(
                "Cannot convert metadata column %r to numeric. The following "
                "values could not be interpreted as numeric: %s" %
                (series.name,
                 ', '.join(repr(e) for e in sorted(non_numerics))))
    def _is_numeric(self, value):
        """Return True if `value` is a float (e.g. NaN) or a string matching
        the module-level numeric regex."""
        return (isinstance(value, float) or
                len(_numeric_regex.findall(value)) == 1)
class MetadataWriter:
    """Serializes a ``Metadata`` or ``MetadataColumn`` object to a QIIME 2
    TSV metadata file, including the ``#q2:types`` directive row.
    """

    def __init__(self, metadata):
        self._metadata = metadata

    def write(self, filepath):
        """Write the metadata to `filepath` in TSV format.

        The output contains the ID header row, a ``#q2:types`` directive row,
        and one row per ID. Missing values are written as empty cells.
        """
        # Newline settings based on recommendation from csv docs:
        # https://docs.python.org/3/library/csv.html#id3
        # Do NOT write a BOM, hence utf-8 not utf-8-sig
        with open(filepath, 'w', newline='', encoding='utf-8') as fh:
            writer = csv.writer(fh, dialect='excel-tab', strict=True)
            md = self._metadata

            header_row = [md.id_header]
            types_row = ['#q2:types']
            if isinstance(md, Metadata):
                for column_name, column_props in md.columns.items():
                    header_row.append(column_name)
                    types_row.append(column_props.type)
            elif isinstance(md, MetadataColumn):
                header_row.append(md.name)
                types_row.append(md.type)
            else:
                raise NotImplementedError

            writer.writerow(header_row)
            writer.writerow(types_row)

            # Normalize every cell to its string representation (missing
            # values become empty cells) before serializing the rows.
            df = md.to_dataframe()
            df.fillna('', inplace=True)
            df = df.applymap(self._format)
            writer.writerows(df.itertuples(index=True))

    def _format(self, value):
        """Return the TSV cell representation of a single metadata value."""
        if isinstance(value, str):
            return value
        if isinstance(value, float):
            # Use fixed precision or scientific notation as necessary (both
            # are roundtrippable in the metadata file format), with up to 15
            # digits *total* precision (i.e. before and after the decimal
            # point), rounding if necessary. Trailing zeros or decimal points
            # are not included (e.g. 42.0 is formatted as "42"). 15 digits is
            # used because that is within the 64-bit floating point spec
            # (things get weird after that).
            #
            # repr() and str() each have their own predefined precision which
            # varies across Python versions, and the %g/%f presentation types
            # default to only 6 digits past the decimal point — hence the
            # explicit precision here.
            #
            # References:
            #
            # - https://stackoverflow.com/a/2440786/3776794
            # - https://stackoverflow.com/a/2440708/3776794
            # - https://docs.python.org/3/library/string.html#
            #     format-specification-mini-language
            # - https://stackoverflow.com/a/20586479/3776794
            # - https://drj11.wordpress.com/2007/07/03/python-poor-printing-
            #     of-floating-point/
            return f'{value:.15g}'
        raise NotImplementedError
# Credit: https://stackoverflow.com/a/4703508/3776794
_numeric_pattern = r"""
^[-+]? # optional sign
(?:
(?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
|
(?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
)
# followed by optional exponent part if desired
(?: [Ee] [+-]? \d+ ) ?$
"""
_numeric_regex = re.compile(_numeric_pattern, re.VERBOSE)
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import csv
import itertools
import os.path
import re
import numpy as np
import pandas as pd
from qiime2.core.util import find_duplicates
from .base import SUPPORTED_COLUMN_TYPES, FORMATTED_ID_HEADERS, is_id_header
from .metadata import Metadata, MetadataColumn
class MetadataFileError(Exception):
_suffix = (
"There may be more errors present in the metadata file. To get a full "
"report, sample/feature metadata files can be validated with Keemei: "
"https://keemei.qiime2.org\n\nFind details on QIIME 2 metadata "
"requirements here: https://docs.qiime2.org/%s/tutorials/metadata/")
def __init__(self, message, include_suffix=True):
# Lazy import because `qiime2.__release__` is available at runtime but
# not at import time (otherwise the release value could be interpolated
# into `_suffix` in the class definition above).
import qiime2
if include_suffix:
message = message + '\n\n' + self._suffix % qiime2.__release__
super().__init__(message)
class MetadataReader:
def __init__(self, filepath):
if not os.path.isfile(filepath):
raise MetadataFileError(
"Metadata file path doesn't exist, or the path points to "
"something other than a file. Please check that the path "
"exists, has read permissions, and points to a regular file "
"(not a directory): %s" % filepath)
self._filepath = filepath
# Used by `read()` to store an iterator yielding rows with
# leading/trailing whitespace stripped from their cells (this is a
# preprocessing step that should happen with *every* row). The iterator
# protocol is the only guaranteed API on this object.
self._reader = None
def read(self, into, column_types=None):
if column_types is None:
column_types = {}
try:
# Newline settings based on recommendation from csv docs:
# https://docs.python.org/3/library/csv.html#id3
# Ignore BOM on read (but do not write BOM)
with open(self._filepath,
'r', newline='', encoding='utf-8-sig') as fh:
tsv_reader = csv.reader(fh, dialect='excel-tab', strict=True)
self._reader = (self._strip_cell_whitespace(row)
for row in tsv_reader)
header = self._read_header()
directives = self._read_directives(header)
ids, data = self._read_data(header)
except UnicodeDecodeError as e:
if ('0xff in position 0' in str(e)
or '0xfe in position 0' in str(e)):
raise MetadataFileError(
"Metadata file must be encoded as UTF-8 or ASCII, found "
"UTF-16. If this file is from Microsoft Excel, save "
"as a plain text file, not 'UTF-16 Unicode'")
raise MetadataFileError(
"Metadata file must be encoded as UTF-8 or ASCII. The "
"following error occurred when decoding the file:\n\n%s" % e)
finally:
self._reader = None
index = pd.Index(ids, name=header[0], dtype=object)
df = pd.DataFrame(data, columns=header[1:], index=index, dtype=object)
for name, type in column_types.items():
if name not in df.columns:
raise MetadataFileError(
"Column name %r specified in `column_types` is not a "
"column in the metadata file." % name)
if type not in SUPPORTED_COLUMN_TYPES:
fmt_column_types = ', '.join(
repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
raise MetadataFileError(
"Column name %r specified in `column_types` has an "
"unrecognized column type %r. Supported column types: %s" %
(name, type, fmt_column_types))
resolved_column_types = directives.get('types', {})
resolved_column_types.update(column_types)
try:
# Cast each column to the appropriate dtype based on column type.
df = df.apply(self._cast_column, axis='index',
column_types=resolved_column_types)
except MetadataFileError as e:
# HACK: If an exception is raised within `DataFrame.apply`, pandas
# adds an extra tuple element to `e.args`, making the original
# error message difficult to read because a tuple is repr'd instead
# of a string. To work around this, we catch and reraise a
# MetadataFileError with the original error message. We use
# `include_suffix=False` to avoid adding another suffix to the
# error message we're reraising.
msg = e.args[0]
raise MetadataFileError(msg, include_suffix=False)
try:
return into(df)
except Exception as e:
raise MetadataFileError(
"There was an issue with loading the metadata file:\n\n%s" % e)
def _read_header(self):
header = None
for row in self._reader:
if self._is_header(row):
header = row
break
elif self._is_comment(row):
continue
elif self._is_empty(row):
continue
elif self._is_directive(row):
raise MetadataFileError(
"Found directive %r while searching for header. "
"Directives may only appear immediately after the header."
% row[0])
else:
raise MetadataFileError(
"Found unrecognized ID column name %r while searching for "
"header. The first column name in the header defines the "
"ID column, and must be one of these values:\n\n%s\n\n"
"NOTE: Metadata files must contain tab-separated values." %
(row[0], FORMATTED_ID_HEADERS))
if header is None:
raise MetadataFileError(
"Failed to locate header. The metadata file may be empty, or "
"consists only of comments or empty rows.")
# Trim trailing empty cells from header.
data_extent = None
for idx, cell in enumerate(header):
if cell != '':
data_extent = idx
header = header[:data_extent+1]
# Basic validation to 1) fail early before processing entire file; and
# 2) make some basic guarantees about the header for things in this
# class that use the header as part of reading the file.
column_names = set(header)
if '' in column_names:
raise MetadataFileError(
"Found at least one column without a name in the header. Each "
"column must be named.")
elif len(header) != len(column_names):
duplicates = find_duplicates(header)
raise MetadataFileError(
"Column names must be unique. The following column names are "
"duplicated: %s" %
(', '.join(repr(e) for e in sorted(duplicates))))
# Skip the first element of the header because we know it is a valid ID
# header. The other column names are validated to ensure they *aren't*
# valid ID headers.
for column_name in header[1:]:
if is_id_header(column_name):
raise MetadataFileError(
"Metadata column name %r conflicts with a name reserved "
"for the ID column header. Reserved ID column headers:"
"\n\n%s" % (column_name, FORMATTED_ID_HEADERS))
return header
def _read_directives(self, header):
directives = {}
for row in self._reader:
if not self._is_directive(row):
self._reader = itertools.chain([row], self._reader)
break
if not self._is_column_types_directive(row):
raise MetadataFileError(
"Unrecognized directive %r. Only the #q2:types "
"directive is supported at this time." % row[0])
if 'types' in directives:
raise MetadataFileError(
"Found duplicate directive %r. Each directive may "
"only be specified a single time." % row[0])
row = self._match_header_len(row, header)
column_types = {}
for column_name, column_type in zip(header[1:], row[1:]):
if column_type:
type_nocase = column_type.lower()
if type_nocase in SUPPORTED_COLUMN_TYPES:
column_types[column_name] = type_nocase
else:
fmt_column_types = ', '.join(
repr(e) for e in sorted(SUPPORTED_COLUMN_TYPES))
raise MetadataFileError(
"Column %r has an unrecognized column type %r "
"specified in its #q2:types directive. "
"Supported column types (case-insensitive): %s"
% (column_name, column_type, fmt_column_types))
directives['types'] = column_types
return directives
def _read_data(self, header):
ids = []
data = []
for row in self._reader:
if self._is_comment(row):
continue
elif self._is_empty(row):
continue
elif self._is_directive(row):
raise MetadataFileError(
"Found directive %r outside of the directives section of "
"the file. Directives may only appear immediately after "
"the header." % row[0])
elif self._is_header(row):
raise MetadataFileError(
"Metadata ID %r conflicts with a name reserved for the ID "
"column header. Reserved ID column headers:\n\n%s" %
(row[0], FORMATTED_ID_HEADERS))
row = self._match_header_len(row, header)
ids.append(row[0])
data.append(row[1:])
return ids, data
def _strip_cell_whitespace(self, row):
return [cell.strip() for cell in row]
def _match_header_len(self, row, header):
row_len = len(row)
header_len = len(header)
if row_len < header_len:
# Pad row with empty cells to match header length.
row = row + [''] * (header_len - row_len)
elif row_len > header_len:
trailing_row = row[header_len:]
if not self._is_empty(trailing_row):
raise MetadataFileError(
"Metadata row contains more cells than are declared by "
"the header. The row has %d cells, while the header "
"declares %d cells." % (row_len, header_len))
row = row[:header_len]
return row
def _is_empty(self, row):
# `all` returns True for an empty iterable, so this check works for a
# row of zero elements (corresponds to a blank line in the file).
return all((cell == '' for cell in row))
def _is_comment(self, row):
return (
len(row) > 0 and
row[0].startswith('#') and
not self._is_directive(row) and
not self._is_header(row)
)
def _is_header(self, row):
if len(row) == 0:
return False
return is_id_header(row[0])
def _is_directive(self, row):
return len(row) > 0 and row[0].startswith('#q2:')
def _is_column_types_directive(self, row):
return len(row) > 0 and row[0] == '#q2:types'
def _cast_column(self, series, column_types):
if series.name in column_types:
if column_types[series.name] == 'numeric':
return self._to_numeric(series)
else: # 'categorical'
return self._to_categorical(series)
else:
# Infer type
try:
return self._to_numeric(series)
except MetadataFileError:
return self._to_categorical(series)
def _to_categorical(self, series):
# Replace empty strings with `None` to force the series to remain
# dtype=object (this only matters if the series consists solely of
# missing data). Replacing with np.nan and casting to dtype=object
# won't retain the correct dtype in the resulting dataframe
# (`DataFrame.apply` seems to force series consisting solely of np.nan
# to dtype=float64, even if dtype=object is specified.
#
# To replace a value with `None`, the following invocation of
# `Series.replace` must be used because `None` is a sentinel:
# https://stackoverflow.com/a/17097397/3776794
return series.replace([''], [None])
def _to_numeric(self, series):
series = series.replace('', np.nan)
is_numeric = series.apply(self._is_numeric)
if is_numeric.all():
return pd.to_numeric(series, errors='raise')
else:
non_numerics = series[~is_numeric].unique()
raise MetadataFileError(
"Cannot convert metadata column %r to numeric. The following "
"values could not be interpreted as numeric: %s" %
(series.name,
', '.join(repr(e) for e in sorted(non_numerics))))
def _is_numeric(self, value):
return (isinstance(value, float) or
len(_numeric_regex.findall(value)) == 1)
class MetadataWriter:
def __init__(self, metadata):
self._metadata = metadata
def write(self, filepath):
# Newline settings based on recommendation from csv docs:
# https://docs.python.org/3/library/csv.html#id3
# Do NOT write a BOM, hence utf-8 not utf-8-sig
with open(filepath, 'w', newline='', encoding='utf-8') as fh:
tsv_writer = csv.writer(fh, dialect='excel-tab', strict=True)
md = self._metadata
header = [md.id_header]
types_directive = ['#q2:types']
if isinstance(md, Metadata):
for name, props in md.columns.items():
header.append(name)
types_directive.append(props.type)
elif isinstance(md, MetadataColumn):
header.append(md.name)
types_directive.append(md.type)
else:
raise NotImplementedError
tsv_writer.writerow(header)
tsv_writer.writerow(types_directive)
df = md.to_dataframe()
df.fillna('', inplace=True)
df = df.applymap(self._format)
tsv_writer.writerows(df.itertuples(index=True))
def _format(self, value):
if isinstance(value, str):
return value
elif isinstance(value, float):
# Use fixed precision or scientific notation as necessary (both are
# roundtrippable in the metadata file format), with up to 15 digits
# *total* precision (i.e. before and after the decimal point),
# rounding if necessary. Trailing zeros or decimal points will not
# be included in the formatted string (e.g. 42.0 will be formatted
# as "42"). A precision of 15 digits is used because that is within
# the 64-bit floating point spec (things get weird after that).
#
# Using repr() and str() each have their own predefined precision
# which varies across Python versions. Using the string formatting
# presentation types (e.g. %g, %f) without specifying a precision
# will usually default to 6 digits past the decimal point, which
# seems a little low.
#
# References:
#
# - https://stackoverflow.com/a/2440786/3776794
# - https://stackoverflow.com/a/2440708/3776794
# - https://docs.python.org/3/library/string.html#
# format-specification-mini-language
# - https://stackoverflow.com/a/20586479/3776794
# - https://drj11.wordpress.com/2007/07/03/python-poor-printing-
# of-floating-point/
return '{0:.15g}'.format(value)
else:
raise NotImplementedError
# Credit: https://stackoverflow.com/a/4703508/3776794
_numeric_pattern = r"""
^[-+]? # optional sign
(?:
(?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
|
(?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
)
# followed by optional exponent part if desired
(?: [Ee] [+-]? \d+ ) ?$
"""
_numeric_regex = re.compile(_numeric_pattern, re.VERBOSE)
|
en
| 0.764603
|
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2022, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- # Lazy import because `qiime2.__release__` is available at runtime but # not at import time (otherwise the release value could be interpolated # into `_suffix` in the class definition above). # Used by `read()` to store an iterator yielding rows with # leading/trailing whitespace stripped from their cells (this is a # preprocessing step that should happen with *every* row). The iterator # protocol is the only guaranteed API on this object. # Newline settings based on recommendation from csv docs: # https://docs.python.org/3/library/csv.html#id3 # Ignore BOM on read (but do not write BOM) # Cast each column to the appropriate dtype based on column type. # HACK: If an exception is raised within `DataFrame.apply`, pandas # adds an extra tuple element to `e.args`, making the original # error message difficult to read because a tuple is repr'd instead # of a string. To work around this, we catch and reraise a # MetadataFileError with the original error message. We use # `include_suffix=False` to avoid adding another suffix to the # error message we're reraising. # Trim trailing empty cells from header. # Basic validation to 1) fail early before processing entire file; and # 2) make some basic guarantees about the header for things in this # class that use the header as part of reading the file. # Skip the first element of the header because we know it is a valid ID # header. The other column names are validated to ensure they *aren't* # valid ID headers. #q2:types " #q2:types directive. " # Pad row with empty cells to match header length. 
# `all` returns True for an empty iterable, so this check works for a # row of zero elements (corresponds to a blank line in the file). # 'categorical' # Infer type # Replace empty strings with `None` to force the series to remain # dtype=object (this only matters if the series consists solely of # missing data). Replacing with np.nan and casting to dtype=object # won't retain the correct dtype in the resulting dataframe # (`DataFrame.apply` seems to force series consisting solely of np.nan # to dtype=float64, even if dtype=object is specified. # # To replace a value with `None`, the following invocation of # `Series.replace` must be used because `None` is a sentinel: # https://stackoverflow.com/a/17097397/3776794 # Newline settings based on recommendation from csv docs: # https://docs.python.org/3/library/csv.html#id3 # Do NOT write a BOM, hence utf-8 not utf-8-sig # Use fixed precision or scientific notation as necessary (both are # roundtrippable in the metadata file format), with up to 15 digits # *total* precision (i.e. before and after the decimal point), # rounding if necessary. Trailing zeros or decimal points will not # be included in the formatted string (e.g. 42.0 will be formatted # as "42"). A precision of 15 digits is used because that is within # the 64-bit floating point spec (things get weird after that). # # Using repr() and str() each have their own predefined precision # which varies across Python versions. Using the string formatting # presentation types (e.g. %g, %f) without specifying a precision # will usually default to 6 digits past the decimal point, which # seems a little low. 
# # References: # # - https://stackoverflow.com/a/2440786/3776794 # - https://stackoverflow.com/a/2440708/3776794 # - https://docs.python.org/3/library/string.html# # format-specification-mini-language # - https://stackoverflow.com/a/20586479/3776794 # - https://drj11.wordpress.com/2007/07/03/python-poor-printing- # of-floating-point/ # Credit: https://stackoverflow.com/a/4703508/3776794 ^[-+]? # optional sign (?: (?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc | (?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc ) # followed by optional exponent part if desired (?: [Ee] [+-]? \d+ ) ?$
| 1.872986
| 2
|
examples/sync.py
|
DocRaptor/docraptor-python
| 16
|
6628946
|
<filename>examples/sync.py
# This example demonstrates creating a PDF using common options and saving it
# to a place on the filesystem.
#
# It is created synchronously, which means DocRaptor will render it for up to
# 60 seconds. It is slightly simpler than making documents using the async
# interface but making many documents in parallel or very large documents with
# lots of assets will require the async api.
#
# DocRaptor supports many options for output customization, the full list is
# https://docraptor.com/documentation/api#api_general
#
# You can run this example with: python sync.py
import docraptor

doc_api = docraptor.DocApi()
doc_api.api_client.configuration.username = 'YOUR_API_KEY_HERE'  # this key works for test documents
# doc_api.api_client.configuration.debug = True

try:
    # create_doc blocks until the document is rendered (synchronous API) and
    # returns the raw PDF bytes.
    create_response = doc_api.create_doc({
        "test": True,  # test documents are free but watermarked
        "document_content": "<html><body>Hello World</body></html>",  # supply content directly
        # "document_url": "http://docraptor.com/examples/invoice.html",  # or use a url
        "name": "docraptor-python.pdf",  # help you find a document later
        "document_type": "pdf",  # pdf or xls or xlsx
        # "javascript": True,  # enable JavaScript processing
        # "prince_options": {
        #     "media": "screen",  # use screen styles instead of print styles
        #     "baseurl": "http://hello.com",  # pretend URL when using document_content
        # },
    })
    with open("/tmp/docraptor-python.pdf", "wb") as f:
        f.write(create_response)
    print("Wrote PDF to /tmp/docraptor-python.pdf")
except docraptor.rest.ApiException as error:
    # Surface the HTTP status, reason, and response body of the failure.
    print(error.status)
    print(error.reason)
    print(error.body)
|
<filename>examples/sync.py
# This example demonstrates creating a PDF using common options and saving it
# to a place on the filesystem.
#
# It is created synchronously, which means DocRaptor will render it for up to
# 60 seconds. It is slightly simpler than making documents using the async
# interface but making many documents in parallel or very large documents with
# lots of assets will require the async api.
#
# DocRaptor supports many options for output customization, the full list is
# https://docraptor.com/documentation/api#api_general
#
# You can run this example with: python sync.rb
import docraptor
import shutil  # NOTE(review): unused in this example — candidate for removal

doc_api = docraptor.DocApi()
# This demo key only works for test (watermarked) documents.
doc_api.api_client.configuration.username = 'YOUR_API_KEY_HERE' # this key works for test documents
# doc_api.api_client.configuration.debug = True

try:
    # Synchronous render: DocRaptor blocks for up to 60 s and returns the bytes.
    create_response = doc_api.create_doc({
        "test": True, # test documents are free but watermarked
        "document_content": "<html><body>Hello World</body></html>", # supply content directly
        # "document_url": "http://docraptor.com/examples/invoice.html", # or use a url
        "name": "docraptor-python.pdf", # help you find a document later
        "document_type": "pdf", # pdf or xls or xlsx
        # "javascript": True, # enable JavaScript processing
        # "prince_options": {
        #     "media": "screen", # use screen styles instead of print styles
        #     "baseurl": "http://hello.com", # pretend URL when using document_content
        # },
    })
    # create_doc returned the raw PDF bytes; write them out in binary mode.
    with open("/tmp/docraptor-python.pdf", "wb") as f:
        f.write(create_response)
    print("Wrote PDF to /tmp/docraptor-python.pdf")
except docraptor.rest.ApiException as error:
    # Surface the HTTP status plus the server-provided reason/body for debugging.
    print(error.status)
    print(error.reason)
    print(error.body)
|
en
| 0.730424
|
# This example demonstrates creating a PDF using common options and saving it # to a place on the filesystem. # # It is created synchronously, which means DocRaptor will render it for up to # 60 seconds. It is slightly simpler than making documents using the async # interface but making many documents in parallel or very large documents with # lots of assets will require the async api. # # DocRaptor supports many options for output customization, the full list is # https://docraptor.com/documentation/api#api_general # # You can run this example with: python sync.rb # this key works for test documents # doc_api.api_client.configuration.debug = True # test documents are free but watermarked # supply content directly # "document_url": "http://docraptor.com/examples/invoice.html", # or use a url # help you find a document later # pdf or xls or xlsx # "javascript": True, # enable JavaScript processing # "prince_options": { # "media": "screen", # use screen styles instead of print styles # "baseurl": "http://hello.com", # pretend URL when using document_content # },
| 3.16819
| 3
|
cell-painting/1.optimize/scripts/get_best_hyperparametersLEVEL5.py
|
gwaygenomics/cell-painting-vae
| 3
|
6628947
|
#!/usr/bin/env python
# coding: utf-8
"""Select the best VAE hyperparameters from a keras-tuner parameter sweep.

Walks ``parameter_sweepLEVEL5/<layer>`` for each model depth, loads every
``trial.json`` that recorded a loss metric, picks the trial with the lowest
validation loss per depth, prints its hyperparameters, and renders a HiPlot
view of the trials.
"""

# In[2]:
import os
import json

# One list of trial records per model depth.
vis_datas = []
layers = ['onelayer', 'twolayer', 'threelayer']
for layer in layers:
    vis_data = []
    rootdir = 'parameter_sweepLEVEL5/' + layer
    for subdirs, dirs, files in os.walk(rootdir):
        for file in files:
            if file.endswith("trial.json"):
                with open(subdirs + '/' + file, 'r') as json_file:
                    data = json_file.read()
                # Keep only trials that actually recorded a loss metric.
                if 'loss' in json.loads(data)['metrics']['metrics']:
                    vis_data.append(json.loads(data))
    vis_datas.append(vis_data)

# In[6]:
# Seed with the first trial of each depth, then keep whichever trial has the
# lowest validation loss (assumes each depth produced at least one trial).
optimal_hyperparameters = [vis_datas[0][0], vis_datas[1][0], vis_datas[2][0]]
for i in range(len(vis_datas)):
    for j in range(len(vis_datas[i])):
        if vis_datas[i][j]['metrics']['metrics']['val_loss']['observations'][0]['value'] < optimal_hyperparameters[i]['metrics']['metrics']['val_loss']['observations'][0]['value']:
            optimal_hyperparameters[i] = vis_datas[i][j]

# In[5]:
for layer in optimal_hyperparameters:
    print('latent_dim:', layer['hyperparameters']['values']['latent_dim'])
    print('learning_rate:', layer['hyperparameters']['values']['learning_rate'])
    print('beta:', layer['hyperparameters']['values']['beta'])
    print('encoder_batch_norm:', layer['hyperparameters']['values']['encoder_batch_norm'])
    print('batch_size:', layer['hyperparameters']['values']['batch_size'])
    print('epochs:', layer['hyperparameters']['values']['epochs'])
    print('loss:', layer['metrics']['metrics']['loss']['observations'][0]['value'])
    # Fixed label: was misspelled 'sval_loss'.
    print('val_loss:', layer['metrics']['metrics']['val_loss']['observations'][0]['value'])
    print()

# In[6]:
import hiplot as hip

# NOTE(review): `vis_data` here is whatever the LAST loop iteration left
# behind (the 'threelayer' sweep only) — confirm whether all depths were
# intended to be plotted.
data = [{'latent_dim': vis_data[idx]['hyperparameters']['values']['latent_dim'],
         'learning_rate': vis_data[idx]['hyperparameters']['values']['learning_rate'],
         'beta': vis_data[idx]['hyperparameters']['values']['beta'],
         'encoder_batch_norm': vis_data[idx]['hyperparameters']['values']['encoder_batch_norm'],
         'batch_size': vis_data[idx]['hyperparameters']['values']['batch_size'],
         'epochs': vis_data[idx]['hyperparameters']['values']['epochs'],
         'loss': vis_data[idx]['metrics']['metrics']['loss']['observations'][0]['value'],
         'val_loss': vis_data[idx]['metrics']['metrics']['val_loss']['observations'][0]['value'], } for idx in range(len(vis_data))]
hip.Experiment.from_iterable(data).display()

# In[ ]:
|
#!/usr/bin/env python
# coding: utf-8
"""Select the best VAE hyperparameters from a keras-tuner parameter sweep.

Walks ``parameter_sweepLEVEL5/<layer>`` for each model depth, loads every
``trial.json`` that recorded a loss metric, picks the trial with the lowest
validation loss per depth, prints its hyperparameters, and renders a HiPlot
view of the trials.
"""

# In[2]:
import os
import json

# One list of trial records per model depth.
vis_datas = []
layers = ['onelayer', 'twolayer', 'threelayer']
for layer in layers:
    vis_data = []
    rootdir = 'parameter_sweepLEVEL5/' + layer
    for subdirs, dirs, files in os.walk(rootdir):
        for file in files:
            if file.endswith("trial.json"):
                with open(subdirs + '/' + file, 'r') as json_file:
                    data = json_file.read()
                # Keep only trials that actually recorded a loss metric.
                if 'loss' in json.loads(data)['metrics']['metrics']:
                    vis_data.append(json.loads(data))
    vis_datas.append(vis_data)

# In[6]:
# Seed with the first trial of each depth, then keep whichever trial has the
# lowest validation loss (assumes each depth produced at least one trial).
optimal_hyperparameters = [vis_datas[0][0], vis_datas[1][0], vis_datas[2][0]]
for i in range(len(vis_datas)):
    for j in range(len(vis_datas[i])):
        if vis_datas[i][j]['metrics']['metrics']['val_loss']['observations'][0]['value'] < optimal_hyperparameters[i]['metrics']['metrics']['val_loss']['observations'][0]['value']:
            optimal_hyperparameters[i] = vis_datas[i][j]

# In[5]:
for layer in optimal_hyperparameters:
    print('latent_dim:', layer['hyperparameters']['values']['latent_dim'])
    print('learning_rate:', layer['hyperparameters']['values']['learning_rate'])
    print('beta:', layer['hyperparameters']['values']['beta'])
    print('encoder_batch_norm:', layer['hyperparameters']['values']['encoder_batch_norm'])
    print('batch_size:', layer['hyperparameters']['values']['batch_size'])
    print('epochs:', layer['hyperparameters']['values']['epochs'])
    print('loss:', layer['metrics']['metrics']['loss']['observations'][0]['value'])
    # Fixed label: was misspelled 'sval_loss'.
    print('val_loss:', layer['metrics']['metrics']['val_loss']['observations'][0]['value'])
    print()

# In[6]:
import hiplot as hip

# NOTE(review): `vis_data` here is whatever the LAST loop iteration left
# behind (the 'threelayer' sweep only) — confirm whether all depths were
# intended to be plotted.
data = [{'latent_dim': vis_data[idx]['hyperparameters']['values']['latent_dim'],
         'learning_rate': vis_data[idx]['hyperparameters']['values']['learning_rate'],
         'beta': vis_data[idx]['hyperparameters']['values']['beta'],
         'encoder_batch_norm': vis_data[idx]['hyperparameters']['values']['encoder_batch_norm'],
         'batch_size': vis_data[idx]['hyperparameters']['values']['batch_size'],
         'epochs': vis_data[idx]['hyperparameters']['values']['epochs'],
         'loss': vis_data[idx]['metrics']['metrics']['loss']['observations'][0]['value'],
         'val_loss': vis_data[idx]['metrics']['metrics']['val_loss']['observations'][0]['value'], } for idx in range(len(vis_data))]
hip.Experiment.from_iterable(data).display()

# In[ ]:
|
en
| 0.187926
|
#!/usr/bin/env python # coding: utf-8 # In[2]: # In[6]: # In[5]: # In[6]: # In[ ]:
| 2.202685
| 2
|
4/tests.py
|
offbyoni/adventOfCode2017
| 0
|
6628948
|
import unittest
import passphraseChecker
class advent4Test(unittest.TestCase):
    """Tests for the day-4 passphrase validity checker."""

    def test_checkInValidphrase(self):
        """A phrase containing a repeated word is rejected."""
        phrase = "<PASSWORD>"
        result = passphraseChecker.checkPhraseValid(phrase)
        self.assertFalse(result)

    def test_checkValidPassphrase(self):
        """A phrase whose words are all unique is accepted."""
        phrase = "<PASSWORD>"
        result = passphraseChecker.checkPhraseValid(phrase)
        self.assertTrue(result)

    def test_checkAnotherValidPassphrase(self):
        """A second unique-word phrase is also accepted."""
        phrase = "<PASSWORD>"
        result = passphraseChecker.checkPhraseValid(phrase)
        self.assertTrue(result)

    def test_countValidPassphrases(self):
        """Only phrases without duplicate words contribute to the count."""
        batch = "aa bb cc dd aa\naa bb cc dd ee\naa bb cc dd aaa"
        valid = passphraseChecker.countValidPassphrases(batch)
        self.assertEqual(valid, 2)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
import unittest
import passphraseChecker
class advent4Test(unittest.TestCase):
    """Tests for the day-4 passphrase validity checker."""

    def test_checkInValidphrase(self):
        """A phrase containing a repeated word is rejected."""
        phrase = "<PASSWORD>"
        result = passphraseChecker.checkPhraseValid(phrase)
        self.assertFalse(result)

    def test_checkValidPassphrase(self):
        """A phrase whose words are all unique is accepted."""
        phrase = "<PASSWORD>"
        result = passphraseChecker.checkPhraseValid(phrase)
        self.assertTrue(result)

    def test_checkAnotherValidPassphrase(self):
        """A second unique-word phrase is also accepted."""
        phrase = "<PASSWORD>"
        result = passphraseChecker.checkPhraseValid(phrase)
        self.assertTrue(result)

    def test_countValidPassphrases(self):
        """Only phrases without duplicate words contribute to the count."""
        batch = "aa bb cc dd aa\naa bb cc dd ee\naa bb cc dd aaa"
        valid = passphraseChecker.countValidPassphrases(batch)
        self.assertEqual(valid, 2)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
none
| 1
| 3.513349
| 4
|
|
direct/launch.py
|
NKI-AI/direct
| 57
|
6628949
|
<gh_stars>10-100
# coding=utf-8
# Copyright (c) DIRECT Contributors
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Taken from Detectron 2, licensed under Apache 2.0.
# https://github.com/facebookresearch/detectron2/blob/903d28b63c02dffc81935a38a85ab5a16450a445/detectron2/engine/launch.py
# Changes:
# - Docstring to match the rest of the library.
# - Calls to other subroutines which do not exist in DIRECT.
# - Stylistic changes.
import logging
import sys
from datetime import timedelta
from typing import Callable, Tuple
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from direct.utils import communication
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__all__ = ["launch", "launch_distributed", "DEFAULT_TIMEOUT"]
DEFAULT_TIMEOUT = timedelta(minutes=30)
def _find_free_port():
"""Finds ans returns a free port."""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def launch_distributed(
    main_func: Callable,
    num_gpus_per_machine: int,
    num_machines: int = 1,
    machine_rank: int = 0,
    dist_url: str = "auto",
    args: Tuple = (),
    timeout: timedelta = DEFAULT_TIMEOUT,
) -> None:
    """Launch multi-gpu or distributed training.

    This function must be called on all machines involved in the training and it will spawn
    child processes (defined by `num_gpus_per_machine`) on each machine.

    Parameters
    ----------
    main_func: Callable
        A function that will be called by `main_func(*args)`.
    num_gpus_per_machine: int
        The number of GPUs per machine.
    num_machines : int
        The number of machines.
    machine_rank: int
        The rank of this machine (one per machine).
    dist_url: str
        URL to connect to for distributed training, including protocol e.g. "tcp://127.0.0.1:8686".
        Can be set to auto to automatically select a free port on localhost.
    args: Tuple
        Arguments passed to main_func.
    timeout: timedelta
        Timeout of the distributed workers.
    """
    world_size = num_machines * num_gpus_per_machine
    if world_size > 1:
        # https://github.com/pytorch/pytorch/pull/14391
        # TODO prctl in spawned processes
        if dist_url == "auto":
            if num_machines != 1:
                raise ValueError("dist_url=auto not supported in multi-machine jobs.")
            # Single machine: pick a free local port for the rendezvous URL.
            port = _find_free_port()
            dist_url = f"tcp://127.0.0.1:{port}"
        if num_machines > 1 and dist_url.startswith("file://"):
            logger.warning("file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://")

        # Spawn one child process per local GPU; each runs _distributed_worker,
        # which sets up the process group before calling main_func.
        mp.spawn(
            _distributed_worker,
            nprocs=num_gpus_per_machine,
            args=(
                main_func,
                world_size,
                num_gpus_per_machine,
                machine_rank,
                dist_url,
                args,
                timeout,
            ),
            daemon=False,
        )
    else:
        # Single process in total: no process group needed, call directly.
        main_func(*args)
def _distributed_worker(
local_rank: int,
main_func: Callable,
world_size: int,
num_gpus_per_machine: int,
machine_rank: int,
dist_url: str,
args: Tuple,
timeout: timedelta = DEFAULT_TIMEOUT,
) -> None:
"""Sets up `init_process_group`.
Parameters
----------
local_rank: int
Local rank.
main_func: Callable
A function that will be called by `main_func(*args)`.
world_size: int
World size equal to `num_machines * num_gpus_per_machine`.
machine_rank: int
The rank of this machine (one per machine).
num_gpus_per_machine: int
The number of GPUs per machine.
dist_url: str
URL to connect to for distributed training, including protocol e.g. "tcp://127.0.0.1:8686".
Can be set to auto to automatically select a free port on localhost
args: Tuple
arguments passed to main_func.
timeout: timedelta
Timeout of the distributed workers.
"""
if not torch.cuda.is_available():
raise RuntimeError("CUDA is not available. Please check your installation.")
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger.error(f"Process group URL: {dist_url}")
raise e
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
communication.synchronize()
logger.info(f"Global rank {global_rank}.")
logger.info("Synchronized GPUs.")
if num_gpus_per_machine > torch.cuda.device_count():
raise RuntimeError
torch.cuda.set_device(local_rank)
# Setup the local process group (which contains ranks within the same machine)
if communication._LOCAL_PROCESS_GROUP is not None:
raise RuntimeError
num_machines = world_size // num_gpus_per_machine
for idx in range(num_machines):
ranks_on_i = list(range(idx * num_gpus_per_machine, (idx + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if idx == machine_rank:
communication._LOCAL_PROCESS_GROUP = pg
main_func(*args)
def launch(
    func: Callable,
    num_machines: int,
    num_gpus: int,
    machine_rank: int,
    dist_url: str,
    *args: Tuple,
) -> None:
    """Launch the training; with at most one GPU the function is called directly.

    Parameters
    ----------
    func: Callable
        Function to launch.
    num_machines : int
        The number of machines.
    num_gpus: int
        The number of GPUs.
    machine_rank: int
        The machine rank.
    dist_url: str
        URL to connect to for distributed training, including protocol.
    args: Tuple
        Arguments to pass to func.
    """
    # There is no need for the launch script within one node and at most one GPU.
    if num_machines == 1 and num_gpus <= 1:
        if torch.cuda.device_count() > 1:
            logger.warning(
                f"Device count is {torch.cuda.device_count()}, "
                f"but num_machines is set to {num_machines} and num_gpus is {num_gpus}."
            )
        func(*args)
    elif torch.cuda.device_count() > 1 and num_gpus <= 1:
        print(
            f"Device count is {torch.cuda.device_count()}, yet number of GPUs is {num_gpus}. "
            f"Unexpected behavior will occur. Consider exposing less GPUs (e.g. through docker). Exiting."
        )
        # Fixed: exit with a non-zero status — this is an error condition,
        # and `sys.exit()` would report success (0) to the shell.
        sys.exit(1)
    else:
        launch_distributed(
            func,
            num_gpus,
            num_machines=num_machines,
            machine_rank=machine_rank,
            dist_url=dist_url,
            args=args,
        )
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Taken from Detectron 2, licensed under Apache 2.0.
# https://github.com/facebookresearch/detectron2/blob/903d28b63c02dffc81935a38a85ab5a16450a445/detectron2/engine/launch.py
# Changes:
# - Docstring to match the rest of the library.
# - Calls to other subroutines which do not exist in DIRECT.
# - Stylistic changes.
import logging
import sys
from datetime import timedelta
from typing import Callable, Tuple
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from direct.utils import communication
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__all__ = ["launch", "launch_distributed", "DEFAULT_TIMEOUT"]
DEFAULT_TIMEOUT = timedelta(minutes=30)
def _find_free_port():
"""Finds ans returns a free port."""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def launch_distributed(
    main_func: Callable,
    num_gpus_per_machine: int,
    num_machines: int = 1,
    machine_rank: int = 0,
    dist_url: str = "auto",
    args: Tuple = (),
    timeout: timedelta = DEFAULT_TIMEOUT,
) -> None:
    """Launch multi-gpu or distributed training.

    This function must be called on all machines involved in the training and it will spawn
    child processes (defined by `num_gpus_per_machine`) on each machine.

    Parameters
    ----------
    main_func: Callable
        A function that will be called by `main_func(*args)`.
    num_gpus_per_machine: int
        The number of GPUs per machine.
    num_machines : int
        The number of machines.
    machine_rank: int
        The rank of this machine (one per machine).
    dist_url: str
        URL to connect to for distributed training, including protocol e.g. "tcp://127.0.0.1:8686".
        Can be set to auto to automatically select a free port on localhost.
    args: Tuple
        Arguments passed to main_func.
    timeout: timedelta
        Timeout of the distributed workers.
    """
    world_size = num_machines * num_gpus_per_machine
    if world_size > 1:
        # https://github.com/pytorch/pytorch/pull/14391
        # TODO prctl in spawned processes
        if dist_url == "auto":
            if num_machines != 1:
                raise ValueError("dist_url=auto not supported in multi-machine jobs.")
            # Single machine: pick a free local port for the rendezvous URL.
            port = _find_free_port()
            dist_url = f"tcp://127.0.0.1:{port}"
        if num_machines > 1 and dist_url.startswith("file://"):
            logger.warning("file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://")

        # Spawn one child process per local GPU; each runs _distributed_worker,
        # which sets up the process group before calling main_func.
        mp.spawn(
            _distributed_worker,
            nprocs=num_gpus_per_machine,
            args=(
                main_func,
                world_size,
                num_gpus_per_machine,
                machine_rank,
                dist_url,
                args,
                timeout,
            ),
            daemon=False,
        )
    else:
        # Single process in total: no process group needed, call directly.
        main_func(*args)
def _distributed_worker(
local_rank: int,
main_func: Callable,
world_size: int,
num_gpus_per_machine: int,
machine_rank: int,
dist_url: str,
args: Tuple,
timeout: timedelta = DEFAULT_TIMEOUT,
) -> None:
"""Sets up `init_process_group`.
Parameters
----------
local_rank: int
Local rank.
main_func: Callable
A function that will be called by `main_func(*args)`.
world_size: int
World size equal to `num_machines * num_gpus_per_machine`.
machine_rank: int
The rank of this machine (one per machine).
num_gpus_per_machine: int
The number of GPUs per machine.
dist_url: str
URL to connect to for distributed training, including protocol e.g. "tcp://127.0.0.1:8686".
Can be set to auto to automatically select a free port on localhost
args: Tuple
arguments passed to main_func.
timeout: timedelta
Timeout of the distributed workers.
"""
if not torch.cuda.is_available():
raise RuntimeError("CUDA is not available. Please check your installation.")
global_rank = machine_rank * num_gpus_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
timeout=timeout,
)
except Exception as e:
logger.error(f"Process group URL: {dist_url}")
raise e
# synchronize is needed here to prevent a possible timeout after calling init_process_group
# See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
communication.synchronize()
logger.info(f"Global rank {global_rank}.")
logger.info("Synchronized GPUs.")
if num_gpus_per_machine > torch.cuda.device_count():
raise RuntimeError
torch.cuda.set_device(local_rank)
# Setup the local process group (which contains ranks within the same machine)
if communication._LOCAL_PROCESS_GROUP is not None:
raise RuntimeError
num_machines = world_size // num_gpus_per_machine
for idx in range(num_machines):
ranks_on_i = list(range(idx * num_gpus_per_machine, (idx + 1) * num_gpus_per_machine))
pg = dist.new_group(ranks_on_i)
if idx == machine_rank:
communication._LOCAL_PROCESS_GROUP = pg
main_func(*args)
def launch(
    func: Callable,
    num_machines: int,
    num_gpus: int,
    machine_rank: int,
    dist_url: str,
    *args: Tuple,
) -> None:
    """Launch the training; with at most one GPU the function is called directly.

    Parameters
    ----------
    func: Callable
        Function to launch.
    num_machines : int
        The number of machines.
    num_gpus: int
        The number of GPUs.
    machine_rank: int
        The machine rank.
    dist_url: str
        URL to connect to for distributed training, including protocol.
    args: Tuple
        Arguments to pass to func.
    """
    # There is no need for the launch script within one node and at most one GPU.
    if num_machines == 1 and num_gpus <= 1:
        if torch.cuda.device_count() > 1:
            logger.warning(
                f"Device count is {torch.cuda.device_count()}, "
                f"but num_machines is set to {num_machines} and num_gpus is {num_gpus}."
            )
        func(*args)
    elif torch.cuda.device_count() > 1 and num_gpus <= 1:
        print(
            f"Device count is {torch.cuda.device_count()}, yet number of GPUs is {num_gpus}. "
            f"Unexpected behavior will occur. Consider exposing less GPUs (e.g. through docker). Exiting."
        )
        # Fixed: exit with a non-zero status — this is an error condition,
        # and `sys.exit()` would report success (0) to the shell.
        sys.exit(1)
    else:
        launch_distributed(
            func,
            num_gpus,
            num_machines=num_machines,
            machine_rank=machine_rank,
            dist_url=dist_url,
            args=args,
        )
|
en
| 0.735305
|
# coding=utf-8 # Copyright (c) DIRECT Contributors # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # Taken from Detectron 2, licensed under Apache 2.0. # https://github.com/facebookresearch/detectron2/blob/903d28b63c02dffc81935a38a85ab5a16450a445/detectron2/engine/launch.py # Changes: # - Docstring to match the rest of the library. # - Calls to other subroutines which do not exist in DIRECT. # - Stylistic changes. Finds ans returns a free port. # Binding to port 0 will cause the OS to find an available port for us # NOTE: there is still a chance the port could be taken by other processes. Launch multi-gpu or distributed training. This function must be called on all machines involved in the training and it will spawn child processes (defined by `num_gpus_per_machine`) on each machine. Parameters ---------- main_func: Callable A function that will be called by `main_func(*args)`. num_gpus_per_machine: int The number of GPUs per machine. num_machines : int The number of machines. machine_rank: int The rank of this machine (one per machine). dist_url: str URL to connect to for distributed training, including protocol e.g. "tcp://127.0.0.1:8686". Can be set to auto to automatically select a free port on localhost args: Tuple arguments passed to main_func. timeout: timedelta Timeout of the distributed workers. # https://github.com/pytorch/pytorch/pull/14391 # TODO prctl in spawned processes Sets up `init_process_group`. Parameters ---------- local_rank: int Local rank. main_func: Callable A function that will be called by `main_func(*args)`. world_size: int World size equal to `num_machines * num_gpus_per_machine`. machine_rank: int The rank of this machine (one per machine). num_gpus_per_machine: int The number of GPUs per machine. dist_url: str URL to connect to for distributed training, including protocol e.g. "tcp://127.0.0.1:8686". Can be set to auto to automatically select a free port on localhost args: Tuple arguments passed to main_func. 
timeout: timedelta Timeout of the distributed workers. # synchronize is needed here to prevent a possible timeout after calling init_process_group # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 # Setup the local process group (which contains ranks within the same machine) Launch the training, in case there is only one GPU available the function can be called directly. Parameters ---------- func: Callable Function to launch. num_machines : int The number of machines. num_gpus: int The number of GPUs. machine_rank: int The machine rank. dist_url: str URL to connect to for distributed training, including protocol. args: Tuple Arguments to pass to func. # There is no need for the launch script within one node and at most one GPU.
| 2.07075
| 2
|
db_manage/mysql_config.py
|
JX-Wang/obtaining_domain_valid_dns
| 1
|
6628950
|
<gh_stars>1-10
# encoding:utf-8
"""
Database configuration file.
"""
# Local database connection settings (MySQL).
SOURCE_CONFIG_LOCAL = {
    'host': '10.245.146.39',
    'port': 3306,
    'user': 'root',
    'passwd': '<PASSWORD>',
    'db': 'domain_valid_dns',
    'charset': 'utf8',
    'use_unicode': True
}
|
# encoding:utf-8
"""
Database configuration file.
"""
# Local database connection settings (MySQL).
SOURCE_CONFIG_LOCAL = {
    'host': '10.245.146.39',
    'port': 3306,
    'user': 'root',
    'passwd': '<PASSWORD>',
    'db': 'domain_valid_dns',
    'charset': 'utf8',
    'use_unicode': True
}
|
zh
| 0.974329
|
# encoding:utf-8 数据库配置文件 # 本地数据库
| 1.303624
| 1
|