hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b58d49311f4a35fb47154342912fdc6e20123b51 | 2,143 | py | Python | plans/fixed_ensemble_resnet_dikmeans_4.py | dbis-uibk/MediaEval2021 | 14d754d9cea36415090aaa115db81f5ace465964 | [
"BSD-2-Clause"
] | 1 | 2022-03-31T07:28:12.000Z | 2022-03-31T07:28:12.000Z | plans/fixed_ensemble_resnet_dikmeans_4.py | dbis-uibk/MediaEval2021 | 14d754d9cea36415090aaa115db81f5ace465964 | [
"BSD-2-Clause"
] | null | null | null | plans/fixed_ensemble_resnet_dikmeans_4.py | dbis-uibk/MediaEval2021 | 14d754d9cea36415090aaa115db81f5ace465964 | [
"BSD-2-Clause"
] | null | null | null | """Ensemble plan manually split by type moode/theme."""
import json
from dbispipeline.evaluators import FixedSplitEvaluator
from dbispipeline.evaluators import ModelCallbackWrapper
from sklearn.pipeline import Pipeline
from mediaeval2021 import common
from mediaeval2021.dataloaders.melspectrograms import MelSpectPickleLoader
from mediaeval2021.dataloaders.melspectrograms import labels_to_indices
from mediaeval2021.models.ensemble import Ensemble
from mediaeval2021.models.wrapper import TorchWrapper
# Input features: pickled mel-spectrograms for the MediaEval dataset.
dataloader = MelSpectPickleLoader('data/mediaeval2020/melspect_1366.pickle')

# Four hand-made clusters of mood/theme labels; each list is converted to the
# matching label indices so each ensemble member is trained on one cluster.
label_splits = [
    labels_to_indices(
        dataloader=dataloader,
        label_list=[  # cluster 0
            'film', 'heavy', 'holiday', 'drama', 'summer', 'upbeat', 'relaxing', 'groovy', 'fun', 'inspiring', 'space', 'game', 'motivational', 'dream'
        ],
    ),
    labels_to_indices(
        dataloader=dataloader,
        label_list=[  # cluster 1
            'meditative', 'party', 'christmas', 'nature', 'energetic', 'retro', 'sad', 'emotional', 'commercial', 'movie', 'happy', 'background', 'trailer', 'advertising'
        ],
    ),
    labels_to_indices(
        dataloader=dataloader,
        label_list=[  # cluster 2
            'calm', 'powerful', 'soft', 'sexy', 'action', 'children', 'fast', 'soundscape', 'hopeful', 'corporate', 'cool', 'dramatic', 'melodic', 'travel'
        ],
    ),
    labels_to_indices(
        dataloader=dataloader,
        label_list=[  # cluster 3
            'love', 'deep', 'ballad', 'sport', 'dark', 'melancholic', 'positive', 'funny', 'romantic', 'epic', 'uplifting', 'documentary', 'slow', 'adventure'
        ],
    )
]

# A ResNet-18 base estimator wrapped in an Ensemble, one model per label split.
pipeline = Pipeline([
    ('model',
     Ensemble(
         base_estimator=TorchWrapper(model_name="ResNet-18", dataloader=dataloader, batch_size=64, early_stopping=True),
         label_splits=label_splits,
         epochs=100,
     )),
])

# Evaluate on the project's fixed split and persist the model's predictions.
evaluator = ModelCallbackWrapper(
    FixedSplitEvaluator(**common.fixed_split_params()),
    lambda model: common.store_prediction(model, dataloader),
)

# Pretty-print the evaluation results as JSON.
result_handlers = [
    lambda results: print(json.dumps(results, indent=4)),
]
| 35.716667 | 170 | 0.673822 |
bd0487b3dab2f262b26a707b98d663a7809a56a3 | 2,078 | py | Python | demo/demo_my_pi.py | claremacrae/raspi_code | ee089b8eae29dd4ddf8d45e101affa1e8c5efca4 | [
"MIT"
] | null | null | null | demo/demo_my_pi.py | claremacrae/raspi_code | ee089b8eae29dd4ddf8d45e101affa1e8c5efca4 | [
"MIT"
] | null | null | null | demo/demo_my_pi.py | claremacrae/raspi_code | ee089b8eae29dd4ddf8d45e101affa1e8c5efca4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
A script to show off certain Raspberry Pi features automatically,
without needing to interact with the Pi to start commands.
I'm intending this to be run at startup, perhaps if no keyboard
is connected?
To run this automatically, see
http://www.raspberrypi-spy.co.uk/2013/07/running-a-python-script-at-boot-using-cron/
Add this line to the root contab file
@reboot python /home/pi/develop/raspi_code/demo/demo_my_pi.py --keyboard_check &
"""
import os.path
import sys
import subprocess
# Device-tree entry populated by the firmware when a HAT with an EEPROM is attached.
hat_product_file = "/proc/device-tree/hat/product"


def get_hat_name():
    """Return the product name of the attached HAT, or None when no HAT is present.

    The raw file content is returned as-is (it may include a trailing NUL);
    callers only use str.startswith() on it.
    """
    if not os.path.exists(hat_product_file):
        return None
    # `file()` is a Python-2-only builtin; `open()` is equivalent and portable.
    with open(hat_product_file) as f:
        line = f.read()
    return line
def is_keyboard_attached():
    """Return True if `lsusb` lists a device describing itself as a keyboard."""
    # From http://stackoverflow.com/a/8265634
    # The redundant local `import subprocess` was removed (already imported at
    # module level). Decode the output so the substring test works on both
    # Python 2 and Python 3, where check_output returns bytes.
    output = subprocess.check_output("lsusb", shell=True).decode("utf-8", "replace")
    return 'keyboard' in output.lower()
# With --keyboard_check, skip the demo when a keyboard is plugged in so the
# Pi stays usable for interactive work (intended for running at boot).
if '--keyboard_check' in sys.argv and is_keyboard_attached():
    print "Keyboard attached - skipping demo"
    sys.exit()

hat_name = get_hat_name()
if not hat_name:
    print "No HAT connected"
    sys.exit()
print hat_name

# Pick the demo script that matches the detected HAT product string.
script_name = None
if hat_name.startswith("Unicorn HAT"):
    script_name = "/home/pi/Pimoroni/unicornhat/rainbow.py"
elif hat_name.startswith("Piano HAT"):
    script_name = "/home/pi/Pimoroni/pianohat/simple-piano.py"
elif hat_name.startswith("Sense HAT"):
    script_name = "/home/pi/develop/raspi_code/hardware/sense_hat/marble_maze.py"
elif hat_name.startswith("Display-o-Tron HAT"):
    # This gives an error when run from this script:
    # Traceback (most recent call last):
    #   File "/home/pi/Pimoroni/dot3k/dothat/advanced/menu.py", line 18, in <module>
    #     from plugins.utils import Backlight, Contrast
    # ImportError: No module named plugins.utils
    script_name = "/home/pi/Pimoroni/dot3k/dothat/advanced/menu.py"
else:
    # Unknown HAT: fall back to the generic demo.
    script_name = "/home/pi/develop/snowpirgb-python-claremacrae/demo.py"

print script_name
# Run the chosen demo as root (GPIO/SPI access typically requires it).
if os.path.exists(script_name):
    subprocess.call(['sudo', 'python', script_name])
| 31.014925 | 84 | 0.729548 |
6b0a4461114e99ce0c674f2bd7dee77ba8ff99e1 | 6,802 | py | Python | apps/users/views.py | jakejie/StayOrder | 3e1e0011550dc3b98bc4cfe51723bc0fc3ef0727 | [
"Apache-2.0"
] | 1 | 2019-11-08T11:41:52.000Z | 2019-11-08T11:41:52.000Z | apps/users/views.py | jakejie/StayOrder | 3e1e0011550dc3b98bc4cfe51723bc0fc3ef0727 | [
"Apache-2.0"
] | 6 | 2020-06-05T19:55:16.000Z | 2021-12-13T19:56:00.000Z | apps/users/views.py | jakejie/StayOrder | 3e1e0011550dc3b98bc4cfe51723bc0fc3ef0727 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import datetime

from django.shortcuts import render, reverse
from django.http import HttpResponseRedirect
from django.views.generic import View

from .models import CrawlTaskModel, UserProfile
# ModelBackend is subclassed below to allow login by email as well as username.
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import authenticate, login, logout
# Q objects enable OR queries (username OR email).
from django.db.models import Q
# Paginate the rows fetched from the database.
from django.core.paginator import Paginator

# log_tool lives at a different relative depth depending on how the project is
# launched; try each known location before giving up with a diagnostic print.
try:
    from .. import log_tool
except Exception as im_err:
    try:
        import log_tool
    except Exception as im_err:
        try:
            from order import log_tool
        except Exception as im_err:
            print("users.views 包导入错误:{}".format(im_err))

# Number of rows per page in the history view.
PAGE_SETTING = 10
# Home page view: a single page that also serves as the task-submission form.
class IndexView(View):
    @staticmethod
    def get(request):
        # Authenticated users see the form; everyone else the login page.
        if request.user.is_authenticated:
            return render(request, 'index.html', {})
        else:
            return render(request, 'login.html', {})

    # Create a reservation (crawl task) from the submitted form.
    @staticmethod
    def post(request):
        if request.user.is_authenticated:
            name = request.POST.get('name', '')  # family name
            username = request.POST.get('username', '')  # given name
            birthday_date = request.POST.get('birthday_date', '')  # date of birth
            stay_num = request.POST.get('stay_num', '')  # residence permit number
            stay_end_date = request.POST.get('stay_end_date', '')  # residence permit expiry date
            town = request.POST.get('town', '')  # town
            start_date = request.POST.get('start_date', '')  # start date
            end_date = request.POST.get('end_date', '')  # end date
            access_email = request.POST.get('email', '')  # email receiving the booking result

            # access_email is not validated here, so it is effectively optional.
            if all([name, username, birthday_date,
                    stay_num, stay_end_date, town,
                    start_date, end_date]):
                log_tool.product_log("接收到爬虫任务",
                                     "用户:{}".format(request.user.username),
                                     "",
                                     "姓氏:{},名字:{},出生日期:{},\
居留许可证号码:{},居留许可到期时间:{},镇:{},\
开始时间:{},结束时间:{},接收预约结果的邮箱:{},"
                                     .format(name, username, birthday_date,
                                             stay_num, stay_end_date, town,
                                             start_date, end_date, access_email))
                # All date fields are expected as ISO "YYYY-MM-DD" strings.
                user = CrawlTaskModel.objects.create(
                    user_id=request.user.id,  # owner
                    name=name,  # family name
                    username=username,  # given name
                    # birthday_date=time.strftime("%Y-%m-%d".format(birthday_date)),
                    birthday_date=datetime.datetime.strptime("{}".format(birthday_date), "%Y-%m-%d"),  # date of birth
                    stay_num=stay_num,  # residence permit number
                    # stay_end_date=time.strftime("%Y-%m-%d".format(stay_end_date)),
                    stay_end_date=datetime.datetime.strptime("{}".format(stay_end_date), "%Y-%m-%d"),  # permit expiry date
                    town=town,  # town
                    # start_date=time.strftime("%Y-%m-%d".format(start_date)),
                    start_date=datetime.datetime.strptime("{}".format(start_date), "%Y-%m-%d"),  # start date
                    # end_date=time.strftime("%Y-%m-%d".format(end_date)),
                    end_date=datetime.datetime.strptime("{}".format(end_date), "%Y-%m-%d"),  # end date
                    order_status=0,  # booking status
                    access_email=access_email,
                )
                user.save()
                return render(request, 'succeed.html',
                              {"msg": "任务提交成功", })
            else:
                return render(request, 'index.html',
                              {"msg": "数据不全"})
        else:
            return render(request, 'login.html', {})
# History of previously submitted tasks.
class CommitHistoryView(View):
    @staticmethod
    def get(request):
        # Logged-in users get their own tasks, newest first, paginated;
        # anonymous users are redirected to the login page.
        if request.user.is_authenticated:
            content = CrawlTaskModel.objects.filter(
                user_id=request.user.id).all().order_by('-add_time')
            paginator = Paginator(content, PAGE_SETTING)
            page = request.GET.get('page')
            # Default to the first page when no ?page= parameter was given.
            page = page if page else 1
            contacts = paginator.get_page(page)
            return render(request, 'history.html',
                          {"contacts": contacts,
                           "count": len(content)
                           })
        else:
            return HttpResponseRedirect(reverse('login'))
# Custom authentication backend: allow login with either username or email.
class CustomBackend(ModelBackend):
    def authenticate(self, username=None, password=None, **kwargs):
        # NOTE(review): the broad `except Exception` also hides
        # MultipleObjectsReturned (username of one account equal to the email
        # of another) and reports via print instead of logging — confirm this
        # is intentional.
        try:
            user = UserProfile.objects.get(Q(username=username) | Q(email=username))
            if user.check_password(password):
                return user
        except Exception as e:
            print("用户登录验证异常:{}".format(e))
        # Django's backend contract: return None on any failed authentication.
        return None
# Login view.
class LoginView(View):
    @staticmethod
    def get(request):
        return render(request, 'login.html', {})

    @staticmethod
    def post(request):
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        if all([username, password]):
            # CustomBackend lets `username` be either the account name or email.
            user = authenticate(username=username, password=password)
            if user:
                # NOTE(review): this lookup assumes `username` is the account
                # name; a user who authenticated with an email address may not
                # match — confirm.
                if UserProfile.objects.get(username=username).is_active:
                    login(request, user)
                    log_tool.test_log("用户登录",
                                      "用户名:{}".format(request.user.username),
                                      "登陆成功",
                                      "正常登录")
                    return HttpResponseRedirect(reverse('index'))
                msg = "用户未激活 请联系管理员"
                log_tool.test_log("用户登录",
                                  "用户名:{}".format("null"),
                                  "登陆失败",
                                  "用户未激活")
            else:
                msg = "用户名或密码不对"
                log_tool.test_log("用户登录",
                                  "用户名:{}".format("null"),
                                  "登陆失败",
                                  "用户名或密码不对")
        else:
            msg = "用户名或密码不能为空"
            log_tool.test_log("用户登录",
                              "用户名:{}".format("null"),
                              "登陆失败",
                              "用户名或密码为空")
        # Any failure path falls through to re-rendering the login form.
        return render(request, "login.html",
                      {"msg": msg})
# Log out and redirect to the login page.
class LogoutView(View):
    @staticmethod
    def get(request):
        # Only terminate the session when one exists.
        if request.user.is_authenticated:
            # Already logged in: log out.
            logout(request)
        return HttpResponseRedirect(reverse('login'))
b90adff0b462d7e1c54a8d5ff340091dcc61a928 | 3,886 | py | Python | Individual1.py | dibovdmitry/laba2.18 | b314081f976ff2aefa1e69913d8508f49bb3c44e | [
"MIT"
] | null | null | null | Individual1.py | dibovdmitry/laba2.18 | b314081f976ff2aefa1e69913d8508f49bb3c44e | [
"MIT"
] | null | null | null | Individual1.py | dibovdmitry/laba2.18 | b314081f976ff2aefa1e69913d8508f49bb3c44e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
import sys
def add_airplane(race, path, number, model):
    """Append a new flight record to ``race`` and return the same list.

    Each record is a dict with the keys ``path`` (destination),
    ``number`` (flight number) and ``model`` (aircraft type).
    """
    entry = {"path": path, "number": number, "model": model}
    race.append(entry)
    return race
def display_airplanes(race):
    """Print the flight records as an ASCII table, or a placeholder message."""
    if not race:
        print("Список работников пуст.")
        return

    border = '+-{}-+-{}-+-{}-+-{}-+'.format('-' * 4, '-' * 30, '-' * 20, '-' * 20)
    header = '| {:^4} | {:^30} | {:^20} | {:^20} |'.format(
        "№",
        "Пункт назначения",
        "Номер рейса",
        "Тип самолёта"
    )
    print(border)
    print(header)
    print(border)
    # One row per record, numbered from 1.
    for idx, airplane in enumerate(race, 1):
        row = '| {:>4} | {:<30} | {:<20} | {:>20} |'.format(
            idx,
            airplane.get('path', ''),
            airplane.get('number', ''),
            airplane.get('model', 0)
        )
        print(row)
    print(border)
def select_airplanes(race, sel):
    """Return the records whose integer ``model`` field does not exceed ``sel``.

    BUG fix: the original compared the string ``path`` field against the
    integer threshold parsed from ``--result`` (``type=int``), which raises
    TypeError on Python 3; ``model`` is the record's only integer field.
    """
    result = []
    for airplane in race:
        if airplane.get('model', 0) <= sel:
            result.append(airplane)
    return result
def save_airplanes(file_name, race):
    """Write the flight records to ``file_name`` as pretty-printed UTF-8 JSON."""
    payload = json.dumps(race, ensure_ascii=False, indent=4)
    with open(file_name, "w", encoding="utf-8") as fout:
        fout.write(payload)
def load_airplanes(file_name):
    """Read and return the flight records stored as JSON in ``file_name``."""
    with open(file_name, "r", encoding="utf-8") as fin:
        raw = fin.read()
    return json.loads(raw)
def main(command_line=None):
    """Command-line entry point.

    Sub-commands: ``add`` a record, ``display`` all records, ``select``
    matching records. The data file comes from ``--data`` or, failing that,
    the ``RACES_DATA`` environment variable.
    """
    # Shared parent parser providing --data to every sub-command.
    file_parser = argparse.ArgumentParser(add_help=False)
    file_parser.add_argument(
        "-d",
        "--data",
        action="store",
        required=False,
        help="The data file name"
    )

    parser = argparse.ArgumentParser("airplanes")
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s 0.1.0"
    )

    subparsers = parser.add_subparsers(dest="command")

    add = subparsers.add_parser(
        "add",
        parents=[file_parser],
        help="Add a new airplane"
    )
    add.add_argument(
        "-p",
        "--path",
        action="store",
        required=True,
        help="The airplane's path"
    )
    add.add_argument(
        "-n",
        "--number",
        action="store",
        help="The airplane's number"
    )
    add.add_argument(
        "-m",
        "--model",
        action="store",
        type=int,
        required=True,
        help="The airplane's model"
    )

    _ = subparsers.add_parser(
        "display",
        parents=[file_parser],
        help="Display all airplanes"
    )

    select = subparsers.add_parser(
        "select",
        parents=[file_parser],
        help="Select the airplanes"
    )
    select.add_argument(
        "-r",
        "--result",
        action="store",
        type=int,
        required=True,
        help="The required result"
    )

    args = parser.parse_args(command_line)

    # Resolve the data file: CLI flag first, then environment variable.
    data_file = args.data
    if not data_file:
        data_file = os.environ.get("RACES_DATA")
    if not data_file:
        print("The data file name is absent", file=sys.stderr)
        sys.exit(1)

    is_dirty = False
    if os.path.exists(data_file):
        airplanes = load_airplanes(data_file)
    else:
        airplanes = []

    if args.command == "add":
        airplanes = add_airplane(
            airplanes,
            args.path,
            args.number,
            args.model
        )
        is_dirty = True
    elif args.command == "display":
        display_airplanes(airplanes)
    elif args.command == "select":
        # BUG fix: the select subparser stores its value as ``result`` (see
        # --result above); the original read the nonexistent ``args.period``,
        # raising AttributeError on every `select` invocation.
        selected = select_airplanes(airplanes, args.result)
        display_airplanes(selected)

    # Persist only when the in-memory list actually changed.
    if is_dirty:
        save_airplanes(data_file, airplanes)
if __name__ == "__main__":
main()
| 22.079545 | 62 | 0.50386 |
2566ec23254be60067998ccce7ebf64d2fb9f43c | 3,919 | py | Python | plugins/lib/scope_data/__init__.py | thom1729-forks/PackageDev | bca17d6982983f13bd6588bacdb0245a15e30884 | [
"MIT"
] | 288 | 2016-01-25T10:05:22.000Z | 2022-03-29T01:46:18.000Z | plugins/lib/scope_data/__init__.py | thom1729-forks/PackageDev | bca17d6982983f13bd6588bacdb0245a15e30884 | [
"MIT"
] | 243 | 2016-01-24T22:12:58.000Z | 2022-03-23T20:51:13.000Z | plugins/lib/scope_data/__init__.py | thom1729-forks/PackageDev | bca17d6982983f13bd6588bacdb0245a15e30884 | [
"MIT"
] | 76 | 2016-01-24T23:04:17.000Z | 2022-03-14T03:04:39.000Z | import logging
import sublime
from .data import DATA
__all__ = ["COMPILED_NODES", "COMPILED_HEADS", "completions_from_prefix"]
logger = logging.getLogger(__name__)
SCOPE_KIND = (sublime.KIND_ID_NAMESPACE, "s", "Scope")
class NodeSet(set):
    """A set of scope nodes with name-based lookup helpers.

    Methods:
        * find(name)
        * find_all(name)
        * to_completion()
    """

    def find(self, name):
        """Return the first member comparing equal to *name*, or None."""
        return next((member for member in self if member == name), None)

    def find_all(self, name):
        """Return a NodeSet of every member comparing equal to *name*."""
        return NodeSet(member for member in self if member == name)

    def to_completion(self):
        """Build Sublime Text completion items for all members."""
        items = []
        for member in self:
            items.append(sublime.CompletionItem(member.name,
                                                annotation="convention",
                                                kind=SCOPE_KIND))
        return items
class ScopeNode(object):
    """One node of the scope-name hierarchy parsed from ``DATA``.

    Attributes:
        * name - last segment of the scope (e.g. ``"string"``)
        * parent - enclosing ScopeNode, or None for a top-level head
        * children - NodeSet of nested ScopeNodes
        * level | unused - 1-based depth in the tree

    Methods:
        * add_child(child)
        * tree()
    """

    def __init__(self, name, parent=None, children=None):
        self.name = name
        self.parent = parent
        self.children = children or NodeSet()
        # Roots sit at level 1; each nesting step adds one.
        self.level = parent.level + 1 if parent else 1

    def __hash__(self):
        # Hash by name (str(self)) so equal-by-name set lookups work.
        return hash(str(self))

    def add_child(self, child):
        self.children.add(child)

    def tree(self):
        """Return the dotted path, leaf first (e.g. ``"string.source"``)."""
        if self.parent:
            return self.name + '.' + self.parent.tree()
        else:
            return self.name

    def __eq__(self, other):
        # Compare against plain strings by name, or structurally against nodes.
        if isinstance(other, str):
            return str(self) == other
        elif isinstance(other, ScopeNode):
            return (self.name == other.name
                    and self.parent == other.parent
                    and self.children == other.children)
        # BUG fix: previously fell through and returned None for any other
        # type; return NotImplemented so Python falls back to identity.
        return NotImplemented

    def __str__(self):
        return self.name

    def __repr__(self):
        ret = self.name
        if self.children:
            ret += " {%s}" % ' '.join(map(repr, self.children))
        return ret
#######################################
# output values
# All nodes of the hierarchy, flat.
COMPILED_NODES = NodeSet()
# Only the top-level (level-1) nodes.
COMPILED_HEADS = NodeSet()

# parse the DATA string
lines = DATA.split("\n")

# some variables
indent = " " * 4
indent_level = 0
# Maps indent level -> most recently seen node at that level.
indents = {}

# process lines
# Note: expects sane indentation (such as only indent by 1 `indent` at a time)
for line in lines:
    if line.isspace() or not len(line):
        # skip blank lines
        continue

    if line.startswith(indent * (indent_level + 1)):
        # indent increased
        indent_level += 1
    if not line.startswith(indent * indent_level):
        # indent decreased; find the deepest level that still matches
        for level in range(indent_level - 1, 0, -1):
            if line.startswith(indent * level):
                indent_level = level
                break

    # The parent is the last node seen one level up (None for heads).
    parent = indents[indent_level - 1] if indent_level - 1 in indents else None
    node = ScopeNode(line.strip(), parent)
    indents[indent_level] = node

    if parent:
        parent.add_child(node)
    else:
        COMPILED_HEADS.add(node)
    COMPILED_NODES.add(node)
# Walk the convention tree along the typed prefix and offer completions.
def completions_from_prefix(prefix):
    """Build completions from a given scope prefix (including dots)."""
    tokens = prefix.split(".")
    if len(tokens) <= 1:
        # Nothing typed past the first segment: offer the top-level heads.
        return COMPILED_HEADS.to_completion()

    # Descend one level per completed token (all tokens but the last).
    current = COMPILED_HEADS
    for depth, token in enumerate(tokens[:-1]):
        found = current.find(token)
        if not found:
            logger.info("`%s` not found in scope naming conventions",
                        '.'.join(tokens[:depth + 1]))
            return []
        current = found.children
        if not current:
            logger.info("No nodes available in scope naming conventions after `%s`",
                        '.'.join(tokens[:-1]))
            return []

    # Offer to complete from the conventions at the reached level.
    return current.to_completion()
| 25.121795 | 95 | 0.573871 |
c19c604ead8a3a9c0a17cb4a3087dde6aed14cab | 27,655 | py | Python | apps/beeswax/src/beeswax/server/hive_server2_lib.py | vsosrc/hue | d8bc236d8d622759fa5988ff32246e4c750e7503 | [
"Apache-2.0"
] | null | null | null | apps/beeswax/src/beeswax/server/hive_server2_lib.py | vsosrc/hue | d8bc236d8d622759fa5988ff32246e4c750e7503 | [
"Apache-2.0"
] | null | null | null | apps/beeswax/src/beeswax/server/hive_server2_lib.py | vsosrc/hue | d8bc236d8d622759fa5988ff32246e4c750e7503 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from operator import itemgetter
from desktop.lib import thrift_util
from desktop.conf import LDAP_PASSWORD
from hadoop import cluster
from TCLIService import TCLIService
from TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TTypeId,\
TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation,\
TCloseSessionReq, TGetSchemasReq, TGetLogReq, TCancelOperationReq,\
TCloseOperationReq, TFetchResultsResp, TRowSet
from beeswax import conf as beeswax_conf
from beeswax import hive_site
from beeswax.models import Session, HiveServerQueryHandle, HiveServerQueryHistory
from beeswax.server.dbms import Table, NoSuchObjectException, DataTable,\
QueryServerException
from impala import conf as impala_conf
LOG = logging.getLogger(__name__)
IMPALA_RESULTSET_CACHE_SIZE = 'impala.resultset.cache.size'
class HiveServerTable(Table):
  """
  We are parsing DESCRIBE EXTENDED text as the metastore API like GetColumns() misses most of the information.
  Impala only supports a simple DESCRIBE.
  """

  def __init__(self, table_results, table_schema, desc_results, desc_schema):
    # table_results/table_schema come from GetTables(); desc_results/desc_schema
    # from a DESCRIBE [EXTENDED] statement.
    if not table_results.rows:
      raise NoSuchObjectException()
    self.table = table_results.rows and table_results.rows[0] or ''
    self.table_schema = table_schema
    self.desc_results = desc_results
    self.desc_schema = desc_schema

  @property
  def name(self):
    return HiveServerTRow(self.table, self.table_schema).col('TABLE_NAME')

  @property
  def is_view(self):
    return HiveServerTRow(self.table, self.table_schema).col('TABLE_TYPE') == 'VIEW'  # Used to be VIRTUAL_VIEW

  @property
  def partition_keys(self):
    """Partition columns parsed out of the DESCRIBE EXTENDED text."""
    describe = self.extended_describe
    # Parses a list of: partitionKeys:[FieldSchema(name:baz, type:string, comment:null), FieldSchema(name:boom, type:string, comment:null)]
    match = re.search('partitionKeys:\[([^\]]+)\]', describe)
    if match is not None:
      match = match.group(1)
      return [PartitionKeyCompatible(partition)
              for partition in re.findall('FieldSchema\((.+?)\)', match)]
    else:
      return []

  @property
  def path_location(self):
    """HDFS location of the table, or None when not present in the describe text."""
    describe = self.extended_describe
    match = re.search('location:([^,]+)', describe)
    if match is not None:
      match = match.group(1)
    return match

  @property
  def parameters(self):
    """Table parameters as a dict, e.g. {'numPartitions': '2', 'EXTERNAL': 'TRUE'}."""
    # Parses a list of: parameters:{serialization.format=1}),... parameters:{numPartitions=2, EXTERNAL=TRUE}
    describe = self.extended_describe
    params = re.findall('parameters:\{([^\}]+?)\}', describe)
    if params:
      params_list = ', '.join(params).split(', ')
      return dict([param.split('=')for param in params_list])
    else:
      return {}

  @property
  def cols(self):
    """Column rows only, truncated before the extended-describe section."""
    cols = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
    try:
      # Python 2: map() returns a list, so .index('') works here.
      end_cols_index = map(itemgetter('col_name'), cols).index('')  # Truncate below extended describe
      return cols[0:end_cols_index]
    except:
      # Impala use non extended describe and 'col' instead of 'col_name'
      return cols

  @property
  def comment(self):
    return HiveServerTRow(self.table, self.table_schema).col('REMARKS')

  @property
  def extended_describe(self):
    """Raw 'Detailed Table Information' text from DESCRIBE EXTENDED."""
    # Just keep rows after 'Detailed Table Information'
    rows = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
    detailed_row_index = map(itemgetter('col_name'), rows).index('Detailed Table Information')
    # Hack because of bad delimiter escaping in LazySimpleSerDe in HS2: parameters:{serialization.format=})
    describe_text = rows[detailed_row_index]['data_type']
    try:
      # LazySimpleSerDe case
      return describe_text + rows[detailed_row_index + 1]['col_name']
    except:
      return describe_text

  @property
  def properties(self):
    """Key/value pairs parsed from the extended describe text (best effort)."""
    # Ugly but would need a recursive parsing to be clean
    no_table = re.sub('\)$', '', re.sub('^Table\(', '', self.extended_describe))
    properties = re.sub(', sd:StorageDescriptor\(cols.+?\]', '', no_table).split(', ')
    props = []
    for prop in properties:
      key_val = prop.rsplit(':', 1)
      if len(key_val) == 1:
        # No ':' separator; fall back to '='.
        key_val = key_val[0].rsplit('=', 1)
      if len(key_val) == 2:
        props.append(key_val)
    return props
class HiveServerTRowSet:
  """Wrapper around a thrift TRowSet.

  Iterating yields HiveServerTRow objects and consumes self.rows
  (next() pops from the front of the list).
  """

  def __init__(self, row_set, schema):
    self.row_set = row_set
    self.rows = row_set.rows
    self.schema = schema
    self.startRowOffset = row_set.startRowOffset

  def is_empty(self):
    # True when the server returned no rows.
    return len(self.rows) == 0

  def cols(self, col_names):
    """Return one dict per row, restricted to the requested column names."""
    cols_rows = []
    for row in self.rows:
      row = HiveServerTRow(row, self.schema)
      cols = {}
      for col_name in col_names:
        cols[col_name] = row.col(col_name)
      cols_rows.append(cols)
    return cols_rows

  def __iter__(self):
    return self

  # Python 2 iterator protocol (this module predates __next__).
  def next(self):
    if self.rows:
      return HiveServerTRow(self.rows.pop(0), self.schema)
    else:
      raise StopIteration
class HiveServerDataTable(DataTable):
  """Result table returned to the beeswax layer: schema plus a row iterator."""

  def __init__(self, results, schema, operation_handle):
    self.schema = schema and schema.schema
    self.row_set = HiveServerTRowSet(results.results, schema)
    self.operation_handle = operation_handle
    self.has_more = not self.row_set.is_empty()  # Should be results.hasMoreRows but always True in HS2
    self.startRowOffset = self.row_set.startRowOffset  # Always 0 in HS2

  @property
  def ready(self):
    # HS2 results are always fully materialized by the time we get here.
    return True

  def cols(self):
    """Column descriptors, or [] when the server sent no schema."""
    if self.schema:
      return [HiveServerTColumnDesc(col) for col in self.schema.columns]
    else:
      return []

  def rows(self):
    """Generator over the row values; consumes the underlying row set."""
    for row in self.row_set:
      yield row.fields()
class HiveServerTTableSchema:
  """Accessor for DESCRIBE result columns, normalizing Hive vs Impala naming."""

  def __init__(self, columns, schema):
    self.columns = columns
    self.schema = schema

  def cols(self):
    """Return one dict per describe row with Hive-style keys."""
    try:
      return HiveServerTRowSet(self.columns, self.schema).cols(('col_name', 'data_type', 'comment'))
    except:
      # Impala API is different
      cols = HiveServerTRowSet(self.columns, self.schema).cols(('name', 'type', 'comment'))
      for col in cols:
        # Rename Impala's keys to the Hive ones callers expect.
        col['col_name'] = col.pop('name')
        col['col_type'] = col.pop('type')
      return cols

  def col(self, colName):
    """Return the value wrapper for the column named `colName`."""
    pos = self._get_col_position(colName)
    return HiveServerTColumnDesc(self.columns[pos]).val

  # Python 2 only: tuple-unpacking lambda and list-returning filter().
  def _get_col_position(self, column_name):
    return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
class HiveServerTRow:
  """Wrapper around a thrift TRow giving access to values by column name."""

  def __init__(self, row, schema):
    self.row = row
    self.schema = schema

  def col(self, colName):
    """Return the scalar value of the column named `colName`."""
    pos = self._get_col_position(colName)
    return HiveServerTColumnValue(self.row.colVals[pos]).val

  # Python 2 only: tuple-unpacking lambda and list-returning filter().
  def _get_col_position(self, column_name):
    return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]

  def fields(self):
    """Return all column values of the row, in schema order."""
    return [HiveServerTColumnValue(field).val for field in self.row.colVals]
class HiveServerTColumnValue:
  """Wrapper around a thrift TColumnValue union exposing the populated scalar."""

  # Union members probed in the same order as the original elif chain.
  _UNION_FIELDS = ('boolVal', 'byteVal', 'i16Val', 'i32Val', 'i64Val', 'doubleVal', 'stringVal')

  def __init__(self, tcolumn_value):
    self.column_value = tcolumn_value

  @property
  def val(self):
    """Value of whichever union member is set; None when none are."""
    # TODO get index from schema
    for field_name in self._UNION_FIELDS:
      wrapper = getattr(self.column_value, field_name)
      if wrapper is not None:
        return wrapper.value
class HiveServerTColumnDesc:
  """Wrapper around a thrift TColumnDesc: column name, comment and type."""

  def __init__(self, column):
    self.column = column

  @property
  def name(self):
    return self.column.columnName

  @property
  def comment(self):
    return self.column.comment

  @property
  def type(self):
    return self.get_type(self.column.typeDesc)

  @classmethod
  def get_type(cls, typeDesc):
    """Return the printable primitive type name, or the raw complex-type entry."""
    for ttype in typeDesc.types:
      if ttype.primitiveEntry is not None:
        return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
      # Complex types: hand back the first populated entry object as-is.
      for entry_name in ('mapEntry', 'unionEntry', 'arrayEntry',
                         'structEntry', 'userDefinedTypeEntry'):
        entry = getattr(ttype, entry_name)
        if entry is not None:
          return entry
class HiveServerClient:
HS2_MECHANISMS = {'KERBEROS': 'GSSAPI', 'NONE': 'PLAIN', 'NOSASL': 'NOSASL'}
  def __init__(self, query_server, user):
    """Build a thrift client for the HiveServer2/Impala server in `query_server`.

    query_server -- dict of connection settings ('server_name', 'server_host',
                    'server_port', 'principal', ...).
    user -- the Hue user the calls are made on behalf of.
    """
    self.query_server = query_server
    self.user = user

    use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled = self.get_security()
    LOG.info('use_sasl=%s, mechanism=%s, kerberos_principal_short_name=%s, impersonation_enabled=%s' % (
             use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled))

    self.use_sasl = use_sasl
    self.kerberos_principal_short_name = kerberos_principal_short_name
    self.impersonation_enabled = impersonation_enabled

    # Impala has its own timeout setting; SSL is only configured for Hive here.
    if self.query_server['server_name'] == 'impala':
      ssl_enabled = False
      timeout = impala_conf.SERVER_CONN_TIMEOUT.get()
    else:
      ssl_enabled = beeswax_conf.SSL.ENABLED.get()
      timeout = beeswax_conf.SERVER_CONN_TIMEOUT.get()

    self._client = thrift_util.get_client(TCLIService.Client,
                                          query_server['server_host'],
                                          query_server['server_port'],
                                          service_name=query_server['server_name'],
                                          kerberos_principal=kerberos_principal_short_name,
                                          use_sasl=use_sasl,
                                          mechanism=mechanism,
                                          username=user.username,
                                          timeout_seconds=timeout,
                                          use_ssl=ssl_enabled,
                                          ca_certs=beeswax_conf.SSL.CACERTS.get(),
                                          keyfile=beeswax_conf.SSL.KEY.get(),
                                          certfile=beeswax_conf.SSL.CERT.get(),
                                          validate=beeswax_conf.SSL.VALIDATE.get())
  def get_security(self):
    """Derive the SASL/Kerberos settings for this server.

    Returns (use_sasl, mechanism, kerberos_principal_short_name,
    impersonation_enabled).
    """
    principal = self.query_server['principal']
    impersonation_enabled = False

    # Short name is the part before '/<host>' in 'service/host@REALM'.
    if principal:
      kerberos_principal_short_name = principal.split('/', 1)[0]
    else:
      kerberos_principal_short_name = None

    if self.query_server['server_name'] == 'impala':
      # Impala: security follows the Hadoop cluster configuration.
      cluster_conf = cluster.get_cluster_conf_for_job_submission()
      use_sasl = cluster_conf is not None and cluster_conf.SECURITY_ENABLED.get()
      mechanism = HiveServerClient.HS2_MECHANISMS['KERBEROS']
      impersonation_enabled = self.query_server['impersonation_enabled']
    else:
      # Hive: security comes from hive-site.xml.
      hive_mechanism = hive_site.get_hiveserver2_authentication()
      if hive_mechanism not in HiveServerClient.HS2_MECHANISMS:
        # NOTE(review): `_` (gettext) is not imported in the visible part of
        # this module — confirm it is available at runtime.
        raise Exception(_('%s server authentication not supported. Valid are %s.' % (hive_mechanism, HiveServerClient.HS2_MECHANISMS.keys())))
      use_sasl = hive_mechanism in ('KERBEROS', 'NONE')
      mechanism = HiveServerClient.HS2_MECHANISMS[hive_mechanism]
      impersonation_enabled = hive_site.hiveserver2_impersonation_enabled()

    return use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled
  def open_session(self, user):
    """Open a new thrift session for `user` and persist it as a Session row.

    Raises QueryServerException when the server rejects the request.
    """
    kwargs = {
        'username': user.username,  # If SASL, it gets the username from the authentication mechanism" since it dependents on it.
        'configuration': {},
    }

    if self.impersonation_enabled:
      # Connect as the service user and ask the server to act as `user`.
      kwargs.update({'username': 'hue'})

      if self.query_server['server_name'] == 'impala':  # Only when Impala accepts it
        kwargs['configuration'].update({'impala.doas.user': user.username})

      if self.query_server['server_name'] == 'beeswax':  # All the time
        kwargs['configuration'].update({'hive.server2.proxy.user': user.username})

    if LDAP_PASSWORD.get():  # HiveServer2 supports pass-through LDAP authentication.
      kwargs['username'] = 'hue'
      kwargs['password'] = LDAP_PASSWORD.get()

    req = TOpenSessionReq(**kwargs)
    res = self._client.OpenSession(req)

    if res.status is not None and res.status.statusCode not in (TStatusCode.SUCCESS_STATUS,):
      if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
        message = res.status.errorMessage
      else:
        message = ''
      raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)

    sessionId = res.sessionHandle.sessionId
    LOG.info('Opening session %s' % sessionId)

    # Store the session handle (secret/guid) encoded in the Session model.
    encoded_status, encoded_guid = HiveServerQueryHandle(secret=sessionId.secret, guid=sessionId.guid).get()

    return Session.objects.create(owner=user,
                                  application=self.query_server['server_name'],
                                  status_code=res.status.statusCode,
                                  secret=encoded_status,
                                  guid=encoded_guid,
                                  server_protocol_version=res.serverProtocolVersion)
  def call(self, fn, req, status=TStatusCode.SUCCESS_STATUS):
    """Invoke thrift call `fn(req)` with session handling and one retry.

    Reuses the stored session (opening one if needed), retries once with a
    fresh session when the server reports an invalid session handle, and
    raises QueryServerException on any non-success status.
    """
    session = Session.objects.get_session(self.user, self.query_server['server_name'])
    if session is None:
      session = self.open_session(self.user)

    if hasattr(req, 'sessionHandle') and req.sessionHandle is None:
      req.sessionHandle = session.get_handle()

    res = fn(req)

    # Not supported currently in HS2 and Impala: TStatusCode.INVALID_HANDLE_STATUS
    if res.status.statusCode == TStatusCode.ERROR_STATUS and \
        re.search('Invalid SessionHandle|Invalid session', res.status.errorMessage or '', re.I):
      LOG.info('Retrying with a new session because for %s of %s' % (self.user, res))
      session = self.open_session(self.user)
      req.sessionHandle = session.get_handle()
      # Get back the name of the function to call
      res = getattr(self._client, fn.attr)(req)

    # `status=None` disables status checking entirely.
    if status is not None and res.status.statusCode not in (
      TStatusCode.SUCCESS_STATUS, TStatusCode.SUCCESS_WITH_INFO_STATUS, TStatusCode.STILL_EXECUTING_STATUS):
      if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
        message = res.status.errorMessage
      else:
        message = ''
      raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
    else:
      return res
def close_session(self, sessionHandle):
req = TCloseSessionReq(sessionHandle=sessionHandle)
return self._client.CloseSession(req)
def get_databases(self):
# GetCatalogs() is not implemented in HS2
req = TGetSchemasReq()
res = self.call(self._client.GetSchemas, req)
results, schema = self.fetch_result(res.operationHandle)
self.close_operation(res.operationHandle)
col = 'TABLE_SCHEM'
return HiveServerTRowSet(results.results, schema.schema).cols((col,))
def get_tables(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, max_rows=5000)
self.close_operation(res.operationHandle)
return HiveServerTRowSet(results.results, schema.schema).cols(('TABLE_NAME',))
def get_table(self, database, table_name):
req = TGetTablesReq(schemaName=database, tableName=table_name)
res = self.call(self._client.GetTables, req)
table_results, table_schema = self.fetch_result(res.operationHandle)
self.close_operation(res.operationHandle)
if self.query_server['server_name'] == 'impala':
# Impala does not supported extended
query = 'DESCRIBE %s' % table_name
else:
query = 'DESCRIBE EXTENDED %s' % table_name
(desc_results, desc_schema), operation_handle = self.execute_statement(query)
self.close_operation(operation_handle)
return HiveServerTable(table_results.results, table_schema.schema, desc_results.results, desc_schema.schema)
def execute_query(self, query, max_rows=1000):
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement=query.query['query'], max_rows=max_rows, configuration=configuration)
def execute_query_statement(self, statement, max_rows=1000, configuration={}):
(results, schema), operation_handle = self.execute_statement(statement=statement, max_rows=max_rows, configuration=configuration)
return HiveServerDataTable(results, schema, operation_handle)
    def execute_async_query(self, query, statement=0):
        """Submit statement number `statement` of `query` asynchronously.

        Before the first statement, the query's configuration statements are
        executed synchronously on Hive ('beeswax'); Impala only takes settings.
        Returns the handle produced by execute_async_statement().
        """
        if statement == 0:
            # Impala just has settings currently
            if self.query_server['server_name'] == 'beeswax':
                for resource in query.get_configuration_statements():
                    self.execute_statement(resource.strip())
        configuration = {}
        # Enable Impala's server-side result-set cache when configured.
        if self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] > 0:
            configuration[IMPALA_RESULTSET_CACHE_SIZE] = str(self.query_server['querycache_rows'])
        # The query can override the default configuration
        configuration.update(self._get_query_configuration(query))
        query_statement = query.get_query_statement(statement)
        return self.execute_async_statement(statement=query_statement, confOverlay=configuration)
def execute_statement(self, statement, max_rows=1000, configuration={}):
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=configuration)
res = self.call(self._client.ExecuteStatement, req)
return self.fetch_result(res.operationHandle, max_rows=max_rows), res.operationHandle
def execute_async_statement(self, statement, confOverlay):
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=confOverlay, runAsync=True)
res = self.call(self._client.ExecuteStatement, req)
return HiveServerQueryHandle(secret=res.operationHandle.operationId.secret,
guid=res.operationHandle.operationId.guid,
operation_type=res.operationHandle.operationType,
has_result_set=res.operationHandle.hasResultSet,
modified_row_count=res.operationHandle.modifiedRowCount)
def fetch_data(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
# The client should check for hasMoreRows and fetch until the result is empty dues to a HS2 bug
results, schema = self.fetch_result(operation_handle, orientation, max_rows)
return HiveServerDataTable(results, schema, operation_handle)
def cancel_operation(self, operation_handle):
req = TCancelOperationReq(operationHandle=operation_handle)
return self.call(self._client.CancelOperation, req)
def close_operation(self, operation_handle):
req = TCloseOperationReq(operationHandle=operation_handle)
return self.call(self._client.CloseOperation, req)
def get_columns(self, database, table):
req = TGetColumnsReq(schemaName=database, tableName=table)
res = self.call(self._client.GetColumns, req)
res, schema = self.fetch_result(res.operationHandle)
self.close_operation(res.operationHandle)
return res, schema
def fetch_result(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
if operation_handle.hasResultSet:
fetch_req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows)
res = self.call(self._client.FetchResults, fetch_req)
else:
res = TFetchResultsResp(results=TRowSet(startRowOffset=0, rows=[], columns=[]))
if operation_handle.hasResultSet:
meta_req = TGetResultSetMetadataReq(operationHandle=operation_handle)
schema = self.call(self._client.GetResultSetMetadata, meta_req)
else:
schema = None
return res, schema
def get_operation_status(self, operation_handle):
req = TGetOperationStatusReq(operationHandle=operation_handle)
return self.call(self._client.GetOperationStatus, req)
def explain(self, query):
query_statement = query.get_query_statement(0)
return self.execute_query_statement('EXPLAIN %s' % query_statement)
def get_log(self, operation_handle):
try:
req = TGetLogReq(operationHandle=operation_handle)
res = self.call(self._client.GetLog, req)
return res.log
except:
return 'Server does not support GetLog()'
    def get_partitions(self, database, table_name, max_parts):
        """Return the last `max_parts` partitions of a table as PartitionValueCompatible objects."""
        table = self.get_table(database, table_name)
        # TODO: do a 'use DB' ?
        partitionTable = self.execute_query_statement('SHOW PARTITIONS %s' % table_name) # DB prefix not supported
        # NOTE(review): when max_parts == 0 the slice [-0:] keeps ALL partitions,
        # not zero -- confirm callers never pass 0.
        return [PartitionValueCompatible(partition, table) for partition in partitionTable.rows()][-max_parts:]
def _get_query_configuration(self, query):
return dict([(setting['key'], setting['value']) for setting in query.settings])
class HiveServerTableCompatible(HiveServerTable):
    """Same API as Beeswax"""

    def __init__(self, hive_table):
        self.table = hive_table.table
        self.table_schema = hive_table.table_schema
        self.desc_results = hive_table.desc_results
        self.desc_schema = hive_table.desc_schema

    @property
    def cols(self):
        compatible = []
        for raw in HiveServerTable.cols.fget(self):
            attrs = {
                'name': raw.get('col_name', '').strip(),
                # Impala reports the type under 'col_type' instead of 'data_type'.
                'type': raw.get('data_type', raw.get('col_type', '')).strip(),
                'comment': raw.get('comment', '').strip(),
            }
            compatible.append(type('Col', (object,), attrs))
        return compatible
class ResultCompatible:
    """Expose a HiveServer data table through the legacy (Beeswax-style) result API."""

    def __init__(self, data_table):
        self.data_table = data_table
        self.rows = data_table.rows
        self.has_more = data_table.has_more
        self.start_row = data_table.startRowOffset
        self.ready = True

    @property
    def columns(self):
        return self.cols()

    def cols(self):
        names = []
        for column in self.data_table.cols():
            names.append(column.name)
        return names
class PartitionKeyCompatible:
    """Parse a partition-key description string.

    Input looks like: 'name:datehour, type:int, comment:null'.
    """

    def __init__(self, partition):
        # Idiom fix: the original bound the middle field to a local named
        # `type`, shadowing the builtin; use distinct names instead.
        name_part, type_part, comment_part = partition.split(', ')
        self.name = name_part.split(':')[1]
        self.type = type_part.split(':')[1]
        self.comment = comment_part.split(':')[1]
class PartitionValueCompatible:
    """Adapt a partition spec row to the Beeswax-style partition object."""

    def __init__(self, partition, table):
        # Parses: ['datehour=2013022516'] or ['month=2011-07/dt=2011-07-01/hr=12']
        values = []
        for part in partition:
            for pair in part.split('/'):
                values.append(pair.split('=')[1])
        self.values = values
        location = '%s/%s' % (table.path_location, ','.join(partition))
        self.sd = type('Sd', (object,), {'location': location,})
class ExplainCompatible:
    """Flatten an EXPLAIN result table into a single textual plan."""

    def __init__(self, data_table):
        # Each row holds one line of the plan in its first column.
        plan_lines = [row[0] for row in data_table.rows()]
        self.textual = '\n'.join(plan_lines)
class ResultMetaCompatible:
    """Minimal stand-in for the Beeswax result-metadata object."""

    def __init__(self):
        # Only this flag is consumed by callers of get_results_metadata().
        self.in_tablename = True
class HiveServerClientCompatible(object):
    """Same API as Beeswax: delegate every call to a HiveServer2 client."""

    def __init__(self, client):
        self._client = client
        self.user = client.user
        self.query_server = client.query_server

    def query(self, query, statement=0):
        return self._client.execute_async_query(query, statement)

    def get_state(self, handle):
        rpc_handle = handle.get_rpc_handle()
        status = self._client.get_operation_status(rpc_handle)
        # Translate the HS2 operation state to the Beeswax state enum.
        return HiveServerQueryHistory.STATE_MAP[status.operationState]

    def get_operation_status(self, handle):
        return self._client.get_operation_status(handle.get_rpc_handle())

    def use(self, query):
        result = self._client.execute_query(query)
        self._client.close_operation(result.operation_handle)
        return result

    def explain(self, query):
        plan_table = self._client.explain(query)
        explanation = ExplainCompatible(plan_table)
        self._client.close_operation(plan_table.operation_handle)
        return explanation

    def fetch(self, handle, start_over=False, max_rows=None):
        rpc_handle = handle.get_rpc_handle()
        if max_rows is None:
            max_rows = 1000
        # Backward compatibility for impala: restarting from the first row is
        # only honoured when the result-set cache is enabled.
        if start_over and not (self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] == 0):
            orientation = TFetchOrientation.FETCH_FIRST
        else:
            orientation = TFetchOrientation.FETCH_NEXT
        data_table = self._client.fetch_data(rpc_handle, orientation=orientation, max_rows=max_rows)
        return ResultCompatible(data_table)

    def cancel_operation(self, handle):
        return self._client.cancel_operation(handle.get_rpc_handle())

    def close(self, handle):
        return self.close_operation(handle)

    def close_operation(self, handle):
        return self._client.close_operation(handle.get_rpc_handle())

    def close_session(self, session):
        return self._client.close_session(session.get_handle())

    def dump_config(self):
        return 'Does not exist in HS2'

    def get_log(self, handle):
        return self._client.get_log(handle.get_rpc_handle())

    def get_databases(self):
        return [row['TABLE_SCHEM'] for row in self._client.get_databases()]

    def get_tables(self, database, table_names):
        return [row['TABLE_NAME'] for row in self._client.get_tables(database, table_names)]

    def get_table(self, database, table_name):
        return HiveServerTableCompatible(self._client.get_table(database, table_name))

    def get_columns(self, database, table):
        return self._client.get_columns(database, table)

    def get_default_configuration(self, *args, **kwargs):
        return {}

    def get_results_metadata(self, handle):
        # We just need to mock
        return ResultMetaCompatible()

    def create_database(self, name, description):
        raise NotImplementedError()

    def get_database(self, *args, **kwargs):
        raise NotImplementedError()

    def alter_table(self, dbname, tbl_name, new_tbl):
        raise NotImplementedError()

    def open_session(self, user):
        return self._client.open_session(user)

    def add_partition(self, new_part):
        raise NotImplementedError()

    def get_partition(self, *args, **kwargs):
        raise NotImplementedError()

    def get_partitions(self, database, table_name, max_parts):
        return self._client.get_partitions(database, table_name, max_parts)

    def alter_partition(self, db_name, tbl_name, new_part):
        raise NotImplementedError()
| 34.962073 | 155 | 0.705008 |
6e3d13c1c1d2052fbebff7e8e94edea006545767 | 377 | py | Python | tests/test_beats.py | Ghayyas/amen | eaa1a1186d430d6cb342a7f79278c136439a24a1 | [
"BSD-2-Clause"
] | 1 | 2021-08-22T14:17:03.000Z | 2021-08-22T14:17:03.000Z | tests/test_beats.py | Ghayyas/amen | eaa1a1186d430d6cb342a7f79278c136439a24a1 | [
"BSD-2-Clause"
] | null | null | null | tests/test_beats.py | Ghayyas/amen | eaa1a1186d430d6cb342a7f79278c136439a24a1 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from amen.audio import Audio
from amen.utils import example_audio_file
from amen.timing import TimingList
from nose.tools import eq_
EXAMPLE_FILE = example_audio_file()
AUDIO = Audio(EXAMPLE_FILE)
def test_beats():
beats = AUDIO.timings['beats']
assert isinstance(beats, TimingList), type(beats)
eq_(len(beats), 11)
| 23.5625 | 53 | 0.737401 |
69d0ed09bf58f07a46817c86a75d60b055e9311f | 1,509 | py | Python | src/meltano/cli/__init__.py | Mu-L/meltano | 7bf8f370608ee9a8833b33ea94112c6e219c8161 | [
"MIT"
] | null | null | null | src/meltano/cli/__init__.py | Mu-L/meltano | 7bf8f370608ee9a8833b33ea94112c6e219c8161 | [
"MIT"
] | null | null | null | src/meltano/cli/__init__.py | Mu-L/meltano | 7bf8f370608ee9a8833b33ea94112c6e219c8161 | [
"MIT"
] | null | null | null | """Main entry point for the meltano CLI."""
import logging
import os
import sys
from meltano.core.logging import setup_logging
from meltano.core.project import ProjectReadonly
from .utils import CliError
# TODO: Importing the cli.cli module breaks other cli module imports
# This suggests a cyclic dependency or a poorly structured interface.
# This should be investigated and resolved to avoid implicit behavior
# based solely on import order.
from .cli import cli # isort:skip
from . import ( # isort:skip # noqa: F401, WPS235
add,
config,
discovery,
dragon,
elt,
environment,
initialize,
install,
invoke,
model,
remove,
repl,
schedule,
schema,
select,
state,
ui,
upgrade,
user,
run,
validate,
job,
)
setup_logging()
logger = logging.getLogger(__name__)
def main():
"""Entry point for the meltano cli."""
# mark the current process as executed via the `cli`
os.environ["MELTANO_JOB_TRIGGER"] = os.getenv("MELTANO_JOB_TRIGGER", "cli")
try:
try: # noqa: WPS505
cli(obj={"project": None})
except ProjectReadonly as err:
raise CliError(
f"The requested action could not be completed: {err}"
) from err
except KeyboardInterrupt: # noqa: WPS329
raise
except Exception as err:
raise CliError(str(err)) from err
except CliError as cli_error:
cli_error.print()
sys.exit(1)
| 23.578125 | 79 | 0.644135 |
965e41b9ece46908dd7220d012a878c7498d6d1b | 7,340 | py | Python | openprocurement/ocds/export/tests/test_export.py | myroslav/openprocurement.ocds.export | 127c438f43d766f9519265cd64ee7a2f20cea6a6 | [
"Apache-2.0"
] | null | null | null | openprocurement/ocds/export/tests/test_export.py | myroslav/openprocurement.ocds.export | 127c438f43d766f9519265cd64ee7a2f20cea6a6 | [
"Apache-2.0"
] | null | null | null | openprocurement/ocds/export/tests/test_export.py | myroslav/openprocurement.ocds.export | 127c438f43d766f9519265cd64ee7a2f20cea6a6 | [
"Apache-2.0"
] | null | null | null | from openprocurement.ocds.export.models import (
Award,
Contract,
Tender,
release_tender,
release_tenders,
package_tenders,
record_tenders,
modelsMap,
callbacks
)
from openprocurement.ocds.export.ext.models import (
TenderExt,
AwardExt,
ContractExt,
update_models_map,
update_callbacks,
release_tender_ext,
release_tenders_ext,
record_tenders_ext,
package_tenders_ext
)
from .utils import (
award,
contract,
tender,
config
)
class TestModels(object):
def test_award_model(self):
new = Award(award, modelsMap, callbacks).__export__()
assert 'lotID' not in new
assert 'bidID' not in new
def test_contract_model(self):
new = Contract(contract, modelsMap, callbacks).__export__()
assert 'suppliers' not in new
assert 'contractID' not in new
assert 'contractNumber' not in new
def test_tender_model(self):
new = Tender(tender, modelsMap, callbacks).__export__()
assert 'bids' not in new
assert 'lots' not in new
assert 'tenderID' not in new
class TestModelsExt(object):
def test_award_model(self):
new = AwardExt(award, update_models_map(), update_callbacks()).__export__()
assert 'lotID' in new
def test_tender_model(self):
new = TenderExt(tender, update_models_map(), update_callbacks()).__export__()
assert 'lots' in new
assert 'tenderID' in new
def test_contract_model(self):
new = ContractExt(contract, update_models_map(), update_callbacks()).__export__()
assert 'contractNumber' in new
assert 'contractID' in new
class TestExport(object):
def test_release_tender(self):
ten = tender.copy()
ten['awards'] = [award.copy()]
ten['contracts'] = [contract.copy()]
release = release_tender(ten, modelsMap, callbacks, 'test')
assert 'ocid' in release
assert release['ocid'] == 'test-{}'.format(ten['tenderID'])
assert release['date'] == ten['dateModified']
assert release['tag'] == ['tender', 'award', 'contract']
assert 'bids' not in release
assert 'bid' not in release['tag']
def test_release_package(self):
pack = package_tenders([tender for _ in xrange(3)], modelsMap, callbacks, config)
assert len(pack['releases']) == 3
for field in ['license', 'publicationPolicy']:
assert field in pack
assert pack[field] == 'test'
assert 'name' in pack['publisher']
assert pack['publisher']['name'] == 'test'
def test_release_tenders(self):
patch1 = [
{"op": "add",
"path": "/test",
"value": "test"}
]
ten = tender.copy()
ten['patches'] = [patch1]
releases = release_tenders(ten, modelsMap, callbacks, 'test')
assert len(releases) == 2
assert 'tenderUpdate' not in releases[1]
patch2 = [
{"op": "replace",
"path": "/description",
"value": "test"
}
]
ten['patches'] = [patch2]
releases = release_tenders(ten, modelsMap, callbacks, 'test')
assert 'tenderUpdate' in releases[1]['tag']
assert releases[0]['tender']['description'] != 'test'
assert releases[1]['tender']['description'] == 'test'
ten['awards'] = [award]
patch3 = [
{"op": "replace",
"path": "/awards/0/status",
"value": "test"
}
]
ten['patches'] = [patch3]
releases = release_tenders(ten, modelsMap, callbacks, 'test')
assert 'awardUpdate' in releases[1]['tag']
assert releases[0]['awards'][0]['status'] != 'test'
assert releases[1]['awards'][0]['status'] == 'test'
patch4 = [
{"op": "replace",
"path": "/contracts/0/status",
"value": "test"
}
]
ten['contracts'] = [contract]
ten['patches'] = [patch3, patch4]
releases = release_tenders(ten, modelsMap, callbacks, 'test')
assert 'awardUpdate' in releases[1]['tag']
assert 'contractUpdate' in releases[2]['tag']
assert releases[1]['awards'][0]['status'] == 'test'
assert releases[2]['contracts'][0]['status'] == 'test'
patch5 = [{'op': 'add', 'path': '/contracts',
'value': [{'status': 'test', 'description': 'Some test contract'
}]}]
ten = tender.copy()
ten['patches'] = [patch5]
releases = release_tenders(ten, modelsMap, callbacks, 'test')
def test_record(self):
ten = tender.copy()
patch = [
{"op": "replace",
"path": "/description",
"value": "test"
}
]
ten['patches'] = [patch]
record = record_tenders(ten, modelsMap, callbacks, 'test')
assert len(record['releases']) == 2
assert record['ocid'] == record['releases'][0]['ocid']
class TestExportExt(object):
def test_models_map_update(self):
assert "bids" in update_models_map()
def test_callbacks_update(self):
assert 'bids' in update_callbacks()
def test_release_tender(self):
release = release_tender_ext(tender, update_models_map(), update_callbacks(), 'test')
assert 'bid' in release['tag']
def test_release_tenders(self):
ten = tender.copy()
patch = [
{"op": "replace",
"path": "/bids/0/status",
"value": "test"
}
]
ten['patches'] = [patch]
releases = release_tenders_ext(ten, update_models_map(), update_callbacks(), 'test')
assert len(releases) == 2
assert 'bidUpdate' in releases[1]['tag']
patch1 = [
{"op": "replace",
"path": "/description",
"value": "test"
}
]
ten['patches'] = [patch1]
releases = release_tenders_ext(ten, update_models_map(), update_callbacks(), 'test')
assert 'tenderUpdate' in releases[1]['tag']
patch2 = [{'op': 'add', 'path': '/bids/1',
'value': {'status': 'test', 'description': 'Some test bid',
}}]
ten = tender.copy()
ten['patches'] = [patch2]
releases = release_tenders_ext(ten, update_models_map(), update_callbacks(), 'test')
assert 'bid' in releases[1]['tag']
def test_release_package(self):
pack = package_tenders_ext([tender for _ in xrange(3)], update_models_map(), update_callbacks(), config)
assert len(pack['releases']) == 3
for field in ['license', 'publicationPolicy']:
assert field in pack
assert pack[field] == 'test'
assert 'name' in pack['publisher']
assert pack['publisher']['name'] == 'test'
def test_record(self):
ten = tender.copy()
patch = [
{"op": "replace",
"path": "/description",
"value": "test"
}
]
ten['patches'] = [patch]
record = record_tenders_ext(ten, update_models_map(), update_callbacks(), 'test')
assert len(record['releases']) == 2
assert record['ocid'] == record['releases'][0]['ocid']
| 33.063063 | 112 | 0.562534 |
2a0dcd9b6636733ef382c27d4c4b0a000f08894a | 9,579 | py | Python | pytext/config/component.py | hudeven/pytext | 6e5ab16803be33bcb784b7fd79aa99935cfd12ec | [
"BSD-3-Clause"
] | 2 | 2022-01-21T17:04:19.000Z | 2022-01-21T17:04:25.000Z | pytext/config/component.py | hudeven/pytext | 6e5ab16803be33bcb784b7fd79aa99935cfd12ec | [
"BSD-3-Clause"
] | null | null | null | pytext/config/component.py | hudeven/pytext | 6e5ab16803be33bcb784b7fd79aa99935cfd12ec | [
"BSD-3-Clause"
] | 1 | 2021-11-21T06:17:30.000Z | 2021-11-21T06:17:30.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import enum
from typing import Any, Dict, List, Tuple, Type, Union
import torch
from .pytext_config import ConfigBase, PyTextConfig
class ComponentType(enum.Enum):
    """Enumerates the kinds of components tracked by the Registry.

    The string values are used as the component-type identifiers.
    """

    TASK = "task"
    COLUMN = "column"
    DATA_TYPE = "data_type"
    DATA_HANDLER = "data_handler"
    DATA_SOURCE = "data_source"
    TOKENIZER = "tokenizer"
    TENSORIZER = "tensorizer"
    BATCHER = "batcher"
    BATCH_SAMPLER = "batch_sampler"
    FEATURIZER = "featurizer"
    TRAINER = "trainer"
    LOSS = "loss"
    OPTIMIZER = "optimizer"
    SCHEDULER = "scheduler"
    MODEL = "model"
    MODEL2 = "model2"
    MODULE = "module"
    PREDICTOR = "predictor"
    EXPORTER = "exporter"
    METRIC_REPORTER = "metric_reporter"
    SPARSIFIER = "sparsifier"
    MASKING_FUNCTION = "masking_function"
    PRIVACY_ENGINE = "privacy_engine"
class RegistryError(Exception):
    """Raised by Registry on duplicate registration or failed lookup."""
    pass
class Registry:
    """Process-wide mapping from (component type, config class) to component class."""

    _registered_components: Dict[ComponentType, Dict[Type, Type]] = (
        collections.defaultdict(dict)
    )

    @classmethod
    def add(cls, component_type: ComponentType, cls_to_add: Type, config_cls: Type):
        """Register `cls_to_add` under its config class; reject duplicates."""
        registered = cls._registered_components[component_type]
        if config_cls in registered:
            raise RegistryError(
                f"Cannot add {cls_to_add} to {component_type} "
                f"for task_config type {config_cls}; "
                f"it's already registered for {registered[config_cls]}"
            )
        registered[config_cls] = cls_to_add

    @classmethod
    def get(cls, component_type: ComponentType, config_cls: Type) -> Type:
        """Return the component class registered for `config_cls`."""
        if component_type not in cls._registered_components:
            raise RegistryError(f"type {component_type} doesn't exist")
        registered = cls._registered_components[component_type]
        if config_cls not in registered:
            raise RegistryError(
                f"unregistered config class {config_cls.__name__} for {component_type}"
            )
        return registered[config_cls]

    @classmethod
    def values(cls, component_type: ComponentType) -> Tuple[Type, ...]:
        """All component classes registered under `component_type`."""
        if component_type not in cls._registered_components:
            raise RegistryError(f"type {component_type} doesn't exist")
        return tuple(cls._registered_components[component_type].values())

    @classmethod
    def configs(cls, component_type: ComponentType) -> Tuple[Type, ...]:
        """All config classes registered under `component_type`."""
        if component_type not in cls._registered_components:
            raise RegistryError(f"type {component_type} doesn't exist")
        return tuple(cls._registered_components[component_type].keys())

    @classmethod
    def subconfigs(cls, config_cls: Type) -> Tuple[Type, ...]:
        """Config classes whose component subclasses `config_cls`'s component."""
        return tuple(
            candidate
            for candidate in cls.configs(config_cls.__COMPONENT_TYPE__)
            if issubclass(candidate.__COMPONENT__, config_cls.__COMPONENT__)
        )
class ComponentMeta(type):
    # Metaclass for Component subclasses: synthesizes a per-class `Config`
    # and registers the class in the Registry keyed by that config class.
    def __new__(metacls, typename, bases, namespace):
        if "Config" not in namespace:
            # We need to dynamically create a new Config class per
            # instance rather than inheriting a single empty config class
            # because components are registered uniquely by config class.
            # If a parent class specifies a config class, inherit from it.
            parent_config = next(
                (base.Config for base in bases if hasattr(base, "Config")), None
            )
            if parent_config is not None:

                class Config(parent_config):
                    pass

            else:

                class Config(ConfigBase):
                    pass

            namespace["Config"] = Config

        # Inherit the component type from the first base that declares one,
        # falling back to this class's own __COMPONENT_TYPE__ (may be None).
        component_type = next(
            (
                base.__COMPONENT_TYPE__
                for base in bases
                if hasattr(base, "__COMPONENT_TYPE__")
            ),
            namespace.get("__COMPONENT_TYPE__"),
        )

        new_cls = super().__new__(metacls, typename, bases, namespace)
        # Link the Config class back to its component so registry lookups and
        # get_component_name() can resolve it.
        new_cls.Config.__COMPONENT_TYPE__ = component_type
        new_cls.Config.__name__ = f"{typename}.Config"
        new_cls.Config.__COMPONENT__ = new_cls
        new_cls.Config.__EXPANSIBLE__ = namespace.get("__EXPANSIBLE__")
        if component_type:
            Registry.add(component_type, new_cls, new_cls.Config)
        return new_cls

    def __dir__(cls):
        """Jit doesnt allow scripting of attributes whose classname includes "."
        Example Repro:
        class OldModule(Module):
            class Config(ConfigBase):
                a: int = 5
            @classmethod
            def from_config(cls, config: Config):
                return cls(config.a)
            def __init__(self, a):
                super().__init__()
                self.a = a
            def forward(self, b: int) -> int:
                return b + self.a
        m = OldModule.from_config(OldModule.Config())
        jit.script(m)
        > RuntimeError: Could not get qualified name for class 'OldModule.Config':
        'OldModule.Config' is not a valid identifier
        print(m.Config.__name__)
        > OldModule.Config
        At the sametime, we dont need to script the config classes because they
        are not needed during inference time. Hence in this workaround we skip
        the config classes.
        Ideal solution is that when building models they should be inheriting
        from nn.Module only and not Component. This requires significant changes
        to the way models are created in PyText.
        """
        result = super().__dir__()
        # Hide any attribute that is itself a ConfigBase subclass.
        return [
            r
            for r in result
            if not (
                isinstance(getattr(cls, r, None), type)
                and issubclass(getattr(cls, r, None), ConfigBase)
            )
        ]
class Component(metaclass=ComponentMeta):
    """Base class for configurable components.

    The ComponentMeta metaclass gives every subclass its own `Config` class
    and registers it in the Registry keyed by that config class.
    """

    class Config(ConfigBase):
        # Explicit empty config: stops the metaclass from synthesizing one.
        pass

    @classmethod
    def from_config(cls, config, *args, **kwargs):
        """Default factory: pass the config straight to the constructor."""
        return cls(config, *args, **kwargs)

    def __init__(self, config=None, *args, **kwargs):
        self.config = config
def register_tasks(task_cls: Union[Type, List[Type]]):
"""
Task classes are already added to registry during declaration, pass them
as parameters here just to make sure they're imported
"""
vars(PyTextConfig)["__annotations__"]["task"].__args__ = Registry.configs(
ComponentType.TASK
)
def create_component(component_type: ComponentType, config: Any, *args, **kwargs):
    """Instantiate the component registered for type(config) via its from_config()."""
    config_cls = type(config)
    cls = Registry.get(component_type, config_cls)
    try:
        return cls.from_config(config, *args, **kwargs)
    except TypeError as e:
        # NOTE(review): this also masks TypeErrors raised inside from_config
        # itself, not just signature mismatches.
        raise Exception(f"Can't create component {cls}: {str(e)}")
def create_data_handler(data_handler_config, *args, **kwargs):
return create_component(
ComponentType.DATA_HANDLER, data_handler_config, *args, **kwargs
)
def create_featurizer(featurizer_config, *args, **kwargs):
return create_component(
ComponentType.FEATURIZER, featurizer_config, *args, **kwargs
)
def create_trainer(trainer_config, model: torch.nn.Module, *args, **kwargs):
return create_component(
ComponentType.TRAINER, trainer_config, model, *args, **kwargs
)
def create_model(model_config, *args, **kwargs):
return create_component(ComponentType.MODEL, model_config, *args, **kwargs)
def create_optimizer(optimizer_config, model: torch.nn.Module, *args, **kwargs):
return create_component(
ComponentType.OPTIMIZER, optimizer_config, model, *args, **kwargs
)
def create_scheduler(scheduler_config, optimizer, *args, **kwargs):
if hasattr(optimizer, "fp32_optimizer"):
optimizer = optimizer.fp32_optimizer
return create_component(
ComponentType.SCHEDULER, scheduler_config, optimizer, *args, **kwargs
)
def create_sparsifier(sparsifier_config, *args, **kwargs):
return create_component(
ComponentType.SPARSIFIER, sparsifier_config, *args, **kwargs
)
def create_privacy_engine(privacy_engine_config, *args, **kwargs):
return create_component(
ComponentType.PRIVACY_ENGINE, privacy_engine_config, *args, **kwargs
)
def create_predictor(predictor_config, *args, **kwargs):
return create_component(ComponentType.PREDICTOR, predictor_config, *args, **kwargs)
def create_exporter(exporter_config, *args, **kwargs):
return create_component(ComponentType.EXPORTER, exporter_config, *args, **kwargs)
def create_loss(loss_config, *args, **kwargs):
return create_component(ComponentType.LOSS, loss_config, *args, **kwargs)
def create_metric_reporter(module_config, *args, **kwargs):
return create_component(
ComponentType.METRIC_REPORTER, module_config, *args, **kwargs
)
def get_component_name(obj):
    """
    Return the human-readable name of the class of `obj`.
    Document the type of a config field and can be used as a Union value
    in a json config.
    """
    if obj is type(None):
        return None
    if not hasattr(obj, "__module__"):  # builtins have no __module__
        return obj.__class__.__name__
    if obj.__module__ == "typing":
        # Strip the leading "typing." from e.g. "typing.List[int]".
        return str(obj)[7:]
    # Prefer the qualified name (unaltered by the metaclass), then __name__,
    # finally the class name of the instance.
    for attr in ("__qualname__", "__name__"):
        if hasattr(obj, attr):
            name = getattr(obj, attr)
            break
    else:
        name = obj.__class__.__name__
    # Config classes are reported under their owning component's name.
    return obj.__COMPONENT__.__name__ if name.endswith(".Config") else name
| 32.361486 | 87 | 0.650485 |
cc4c78d884448ca2c469cc3010c6bda56c1d186a | 7,024 | py | Python | scripts/fixup_spanner_admin_database_v1_keywords.py | asthamohta/python-spanner | 321bc7faf364ad423da08ae4e2c0d6f76834dc09 | [
"Apache-2.0"
] | 49 | 2020-02-06T17:36:32.000Z | 2022-03-31T05:32:29.000Z | scripts/fixup_spanner_admin_database_v1_keywords.py | asthamohta/python-spanner | 321bc7faf364ad423da08ae4e2c0d6f76834dc09 | [
"Apache-2.0"
] | 417 | 2020-01-31T23:12:28.000Z | 2022-03-30T22:42:11.000Z | scripts/fixup_spanner_admin_database_v1_keywords.py | asthamohta/python-spanner | 321bc7faf364ad423da08ae4e2c0d6f76834dc09 | [
"Apache-2.0"
] | 46 | 2020-01-31T22:54:25.000Z | 2022-03-29T12:04:55.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching); each list preserves input order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        if predicate(item):
            matching.append(item)
        else:
            non_matching.append(item)
    return matching, non_matching
class spanner_admin_databaseCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'create_backup': ('parent', 'backup_id', 'backup', 'encryption_config', ),
'create_database': ('parent', 'create_statement', 'extra_statements', 'encryption_config', ),
'delete_backup': ('name', ),
'drop_database': ('database', ),
'get_backup': ('name', ),
'get_database': ('name', ),
'get_database_ddl': ('database', ),
'get_iam_policy': ('resource', 'options', ),
'list_backup_operations': ('parent', 'filter', 'page_size', 'page_token', ),
'list_backups': ('parent', 'filter', 'page_size', 'page_token', ),
'list_database_operations': ('parent', 'filter', 'page_size', 'page_token', ),
'list_databases': ('parent', 'page_size', 'page_token', ),
'restore_database': ('parent', 'database_id', 'backup', 'encryption_config', ),
'set_iam_policy': ('resource', 'policy', ),
'test_iam_permissions': ('resource', 'permissions', ),
'update_backup': ('backup', 'update_mask', ),
'update_database_ddl': ('database', 'statements', 'operation_id', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=spanner_admin_databaseCallTransformer(),
):
    """Mirror *in_dir* into *out_dir*, rewriting client method calls on the way.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    for root, _, files in os.walk(in_dir):
        for fname in files:
            # Only Python sources are rewritten; everything else is ignored.
            if os.path.splitext(fname)[1] != ".py":
                continue
            fpath = pathlib.Path(os.path.join(root, fname))
            src = fpath.read_text()
            # Parse the module and apply the keyword-argument fixes.
            updated_code = cst.parse_module(src).visit(transformer).code
            # Recreate the relative directory structure under out_dir,
            # then emit the updated source at the corresponding path.
            dest = out_dir.joinpath(fpath.relative_to(in_dir))
            dest.parent.mkdir(parents=True, exist_ok=True)
            dest.write_text(updated_code)
if __name__ == '__main__':
    # Command-line entry point: parse input/output directories, validate the
    # preconditions required by fix_files, then run the fixer.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the spanner_admin_database client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # The input must be an existing directory.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    # The output must also exist as a directory...
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    # ...and must be empty, so no existing files can be overwritten.
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
| 36.393782 | 101 | 0.626851 |
87fd7efa7685def2542bbeb325037ab9d8ac1b65 | 5,419 | py | Python | train.py | camall3n/pytorch_sac | 36cfaae3fdca91f1c80a41dfd7e805051f9f4c9f | [
"MIT"
] | null | null | null | train.py | camall3n/pytorch_sac | 36cfaae3fdca91f1c80a41dfd7e805051f9f4c9f | [
"MIT"
] | null | null | null | train.py | camall3n/pytorch_sac | 36cfaae3fdca91f1c80a41dfd7e805051f9f4c9f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import os
import sys
import time
from tqdm import tqdm
import pickle as pkl
from video import VideoRecorder
from logger import Logger
from replay_buffer import ReplayBuffer
import utils
import dmc2gym
import hydra
def make_env(cfg):
    """Build the dm_control environment described by ``cfg.env``."""
    # Environment names follow '<domain>_<task>'; 'ball_in_cup_catch' is the
    # one name whose domain itself contains underscores, so special-case it.
    if cfg.env == 'ball_in_cup_catch':
        domain, task = 'ball_in_cup', 'catch'
    else:
        parts = cfg.env.split('_')
        domain, task = parts[0], '_'.join(parts[1:])
    env = dmc2gym.make(domain_name=domain,
                       task_name=task,
                       seed=cfg.seed,
                       visualize_reward=True)
    env.seed(cfg.seed)
    # The agent emits actions in [-1, 1]; make sure the env agrees.
    assert env.action_space.low.min() >= -1
    assert env.action_space.high.max() <= 1
    return env
class Workspace(object):
    """Wires together environment, agent, replay buffer, logger and video
    recorder, and drives the SAC training/evaluation loop."""
    def __init__(self, cfg):
        """Build every training component from the hydra config ``cfg``."""
        self.work_dir = os.getcwd()
        print(f'workspace: {self.work_dir}')
        self.cfg = cfg
        self.logger = Logger(self.work_dir,
                             save_tb=cfg.log_save_tb,
                             log_frequency=cfg.log_frequency,
                             agent=cfg.agent.name)
        utils.set_seed_everywhere(cfg.seed)
        self.device = torch.device(cfg.device)
        # NOTE(review): this calls utils.make_env, not the module-level
        # make_env defined above (which is therefore unused) — confirm
        # which one is intended.
        self.env = utils.make_env(cfg)
        # Propagate the env dimensions into the agent config before
        # instantiating the agent through hydra.
        cfg.agent.params.obs_dim = self.env.observation_space.shape[0]
        cfg.agent.params.action_dim = self.env.action_space.shape[0]
        cfg.agent.params.action_range = [
            float(self.env.action_space.low.min()),
            float(self.env.action_space.high.max())
        ]
        self.agent = hydra.utils.instantiate(cfg.agent)
        self.replay_buffer = ReplayBuffer(self.env.observation_space.shape,
                                          self.env.action_space.shape,
                                          int(cfg.replay_buffer_capacity),
                                          self.device)
        # A None directory disables video recording entirely.
        self.video_recorder = VideoRecorder(
            self.work_dir if cfg.save_video else None)
        # Global environment-step counter.
        self.step = 0
    def evaluate(self):
        """Run ``num_eval_episodes`` deterministic episodes and log the
        average episode reward; only the first episode is recorded."""
        average_episode_reward = 0
        for episode in range(self.cfg.num_eval_episodes):
            obs = self.env.reset()
            self.agent.reset()
            self.video_recorder.init(enabled=(episode == 0))
            done = False
            episode_reward = 0
            while not done:
                with utils.eval_mode(self.agent):
                    # sample=False -> act deterministically for evaluation.
                    action = self.agent.act(obs, sample=False)
                obs, reward, done, _ = self.env.step(action)
                self.video_recorder.record(self.env)
                episode_reward += reward
            average_episode_reward += episode_reward
            self.video_recorder.save(f'{self.step}.mp4')
        average_episode_reward /= self.cfg.num_eval_episodes
        self.logger.log('eval/episode_reward', average_episode_reward,
                        self.step)
        self.logger.dump(self.step)
    def run(self):
        """Main training loop: seed the buffer with random actions, then
        alternate acting, storing transitions and updating the agent until
        ``num_train_steps`` environment steps have been taken."""
        episode, episode_reward, done = 0, 0, True
        start_time = time.time()
        pbar = tqdm(total=self.cfg.num_train_steps)
        while self.step < self.cfg.num_train_steps:
            # done is initialized True, so the first iteration performs the
            # episode bookkeeping and resets the environment.
            if done:
                if self.step > 0:
                    self.logger.log('train/duration',
                                    time.time() - start_time, self.step)
                    start_time = time.time()
                    self.logger.dump(
                        self.step, save=(self.step > self.cfg.num_seed_steps))
                # evaluate agent periodically
                if self.step > 0 and self.step % self.cfg.eval_frequency == 0:
                    self.logger.log('eval/episode', episode, self.step)
                    self.evaluate()
                self.logger.log('train/episode_reward', episode_reward,
                                self.step)
                obs = self.env.reset()
                self.agent.reset()
                done = False
                episode_reward = 0
                episode_step = 0
                episode += 1
                self.logger.log('train/episode', episode, self.step)
            # sample action for data collection
            if self.step < self.cfg.num_seed_steps:
                action = self.env.action_space.sample()
            else:
                with utils.eval_mode(self.agent):
                    action = self.agent.act(obs, sample=True)
            # run training update
            if self.step >= self.cfg.num_seed_steps:
                self.agent.update(self.replay_buffer, self.logger, self.step)
            next_obs, reward, done, _ = self.env.step(action)
            # allow infinite bootstrap
            done = float(done)
            # Mask the terminal flag when the episode ended only because the
            # time limit was hit, so the critic can still bootstrap.
            done_no_max = 0 if episode_step + 1 == self.env._max_episode_steps else done
            episode_reward += reward
            self.replay_buffer.add(obs, action, reward, next_obs, done,
                                   done_no_max)
            obs = next_obs
            episode_step += 1
            self.step += 1
            pbar.update(1)
        pbar.close()
@hydra.main(config_path='config/train.yaml', strict=True)
def main(cfg):
    """Hydra entry point: build a Workspace from the config and train."""
    Workspace(cfg).run()

if __name__ == '__main__':
    main()
| 32.842424 | 88 | 0.557667 |
d18a3430c062ac4bdb0a37ce7345445fbc6f654e | 223 | py | Python | MillisecondCounter/__init__.py | stuartornum/python-MillisecondCounter | 64f31cdd9fcae18ae5dabe4f0036d32f445b4e1a | [
"Apache-2.0"
] | null | null | null | MillisecondCounter/__init__.py | stuartornum/python-MillisecondCounter | 64f31cdd9fcae18ae5dabe4f0036d32f445b4e1a | [
"Apache-2.0"
] | null | null | null | MillisecondCounter/__init__.py | stuartornum/python-MillisecondCounter | 64f31cdd9fcae18ae5dabe4f0036d32f445b4e1a | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
class ExecutionCounter:
    """Measure elapsed wall-clock time in whole milliseconds.

    The timer starts when the instance is created; :meth:`finish` reports
    how many milliseconds have passed since then.

    NOTE(review): this relies on the wall clock (``datetime.now``), so a
    system clock adjustment between start and finish skews the result; a
    monotonic clock would be more robust if only durations are needed.
    """

    def __init__(self):
        # Start timestamp, captured at construction time.
        self.st = datetime.now()

    def finish(self):
        """Return the elapsed time since construction, truncated to an
        integer number of milliseconds."""
        elapsed = datetime.now() - self.st
        return int(elapsed.total_seconds() * 1000)
| 18.583333 | 45 | 0.623318 |
2fa35859a5bf93c7668ca8ca12beaee01e297ca1 | 14,702 | py | Python | tests/test_ucb.py | jaywonchung/mabwiser | 805326a99213f94a6e813530cebf0c8a0f96a2d1 | [
"Apache-2.0"
] | 60 | 2020-06-10T11:20:52.000Z | 2022-03-25T02:16:47.000Z | tests/test_ucb.py | jaywonchung/mabwiser | 805326a99213f94a6e813530cebf0c8a0f96a2d1 | [
"Apache-2.0"
] | 24 | 2020-06-04T18:40:21.000Z | 2022-03-24T16:49:51.000Z | tests/test_ucb.py | jaywonchung/mabwiser | 805326a99213f94a6e813530cebf0c8a0f96a2d1 | [
"Apache-2.0"
] | 12 | 2020-11-30T10:37:05.000Z | 2022-03-25T02:16:41.000Z | # -*- coding: utf-8 -*-
import datetime
import math
import numpy as np
import pandas as pd
from mabwiser.mab import LearningPolicy
from mabwiser.ucb import _UCB1
from tests.test_base import BaseTest
class UCBTest(BaseTest):
    """Unit tests for the UCB1 learning policy.

    The expected arms and expectation values are regression baselines
    captured from runs with fixed seeds, so they pin the exact numeric
    behavior of the implementation.

    Fix: three assertions used ``assertTrue(len(...), 4)``, where the second
    argument is interpreted as the failure *message* — such an assertion
    passes for any non-empty mapping. They now use ``assertEqual`` so the
    arm count is actually checked.
    """

    def test_alpha0(self):
        # alpha=0 removes the exploration bonus: prediction is purely greedy.
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
                                rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=0),
                                seed=123456,
                                num_run=3,
                                is_predict=True)
        self.assertEqual(len(arm), 3)
        self.assertEqual(arm, [3, 3, 3])

    def test_alpha0_expectations(self):
        # With alpha=0 the expectations reduce to the empirical means.
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
                                rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=0),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        self.assertDictEqual(arm, {1: 0.0, 2: 0.0, 3: 1.0})

    def test_alpha1(self):
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=3,
                                is_predict=True)
        self.assertEqual(len(arm), 3)
        self.assertEqual(arm, [1, 1, 1])

    def test_alpha1_expectations(self):
        # Expectations include the confidence bonus: mean + alpha * interval.
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=1,
                                is_predict=False)
        self.assertDictEqual(arm, {1: 1.5723073962832794, 2: 1.5174271293851465, 3: 1.5597051824376162})

    def test_np(self):
        # Numpy arrays must be accepted as decision/reward history.
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=np.asarray([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
                                rewards=np.asarray([0, 0, 1, 0, 0, 0, 0, 1, 1, 1]),
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=3,
                                is_predict=True)
        self.assertEqual(len(arm), 3)
        self.assertEqual(arm, [1, 1, 1])

    def test_df(self):
        # Pandas series must be accepted as decision/reward history.
        df = pd.DataFrame({'decisions': [1, 1, 1, 2, 2, 3, 3, 3, 3, 3], 'rewards': [0, 0, 1, 0, 0, 0, 0, 1, 1, 1]})
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=df['decisions'],
                                rewards=df['rewards'],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=3,
                                is_predict=True)
        self.assertEqual(len(arm), 3)
        self.assertEqual(arm, [1, 1, 1])

    def test_df_list(self):
        # Mixed input types (series + list) must also work.
        df = pd.DataFrame({'decisions': [1, 1, 1, 2, 2, 3, 3, 3, 3, 3], 'rewards': [0, 0, 1, 0, 0, 0, 0, 1, 1, 1]})
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=df['decisions'],
                                rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=3,
                                is_predict=True)
        self.assertEqual(len(arm), 3)
        self.assertEqual(arm, [1, 1, 1])

    # Regression tests over several alpha values, seeds and arm types
    # (ints, strings, datetimes).
    def test_ucb_t1(self):
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3],
                                rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=0.24),
                                seed=123456,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, [1, 1, 1, 1])

    def test_ucb_t2(self):
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3],
                                rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=1.5),
                                seed=71,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, [2, 2, 2, 2])

    def test_ucb_t3(self):
        arm, mab = self.predict(arms=[1, 2, 4],
                                decisions=[1, 1, 4, 4, 2, 2, 1, 1, 4, 2, 1, 4, 1, 2, 4],
                                rewards=[7, 9, 10, 20, 2, 5, 8, 15, 17, 11, 0, 5, 2, 9, 3],
                                learning_policy=LearningPolicy.UCB1(alpha=1.25),
                                seed=123456,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, [4, 4, 4, 4])

    def test_ucb_t4(self):
        arm, mab = self.predict(arms=[1, 2, 4],
                                decisions=[1, 1, 4, 4, 2, 2, 1, 1, 4, 2, 1, 4, 1, 2, 4],
                                rewards=[7, 9, 10, 20, 2, 5, 8, 15, 17, 11, 0, 5, 2, 9, 3],
                                learning_policy=LearningPolicy.UCB1(alpha=2),
                                seed=23,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, [4, 4, 4, 4])

    def test_ucb_t5(self):
        arm, mab = self.predict(arms=['one', 'two', 'three'],
                                decisions=['one', 'one', 'one', 'three', 'two', 'two', 'three', 'one', 'three', 'two'],
                                rewards=[1, 0, 1, 0, 1, 0, 1, 1, 1, 0],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=23,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, ['three', 'three', 'three', 'three'])

    def test_ucb_t6(self):
        arm, mab = self.predict(arms=['one', 'two', 'three'],
                                decisions=['one', 'one', 'one', 'three', 'two', 'two', 'three', 'one', 'three', 'two'],
                                rewards=[2, 7, 7, 9, 1, 3, 1, 2, 6, 4],
                                learning_policy=LearningPolicy.UCB1(alpha=1.25),
                                seed=17,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, ['three', 'three', 'three', 'three'])

    def test_ucb_t7(self):
        arm, mab = self.predict(arms=['a', 'b', 'c'],
                                decisions=['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a'],
                                rewards=[-1.25, 12, 0.7, 10, 12, 9.2, -1, -10, 4, 0],
                                learning_policy=LearningPolicy.UCB1(alpha=1.25),
                                seed=123456,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, ['b', 'b', 'b', 'b'])

    def test_ucb_t8(self):
        arm, mab = self.predict(arms=['a', 'b', 'c'],
                                decisions=['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a'],
                                rewards=[-1.25, 0.7, 12, 10, 12, 9.2, -1, -10, 4, 0],
                                learning_policy=LearningPolicy.UCB1(alpha=0.5),
                                seed=9,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, ['c', 'c', 'c', 'c'])

    def test_ucb_t9(self):
        # Dates to test
        a = datetime.datetime(2018, 1, 1)
        b = datetime.datetime(2017, 7, 31)
        c = datetime.datetime(2018, 9, 15)
        arm, mab = self.predict(arms=[a, b, c],
                                decisions=[a, b, c, a, b, c, a, b, c, a],
                                rewards=[1.25, 0.7, 12, 10, 1.43, 0.2, -1, -10, 4, 0],
                                learning_policy=LearningPolicy.UCB1(alpha=0.25),
                                seed=123456,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, [c, c, c, c])

    def test_ucb_t10(self):
        # Dates to test
        a = datetime.datetime(2018, 1, 1)
        b = datetime.datetime(2017, 7, 31)
        c = datetime.datetime(2018, 9, 15)
        arm, mab = self.predict(arms=[a, b, c],
                                decisions=[a, b, c, a, b, c, a, b, c, a, b, b],
                                rewards=[7, 12, 1, -10, 5, 1, 2, 9, 3, 3, 6, 7],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=7,
                                num_run=4,
                                is_predict=True)
        self.assertEqual(len(arm), 4)
        self.assertEqual(arm, [b, b, b, b])

    def test_unused_arm(self):
        # An arm with no training data must still get an expectation entry.
        arm, mab = self.predict(arms=[1, 2, 3, 4],
                                decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=1,
                                is_predict=True)
        # Was assertTrue(len(...), 4), which can never fail.
        self.assertEqual(len(mab._imp.arm_to_expectation), 4)

    def test_fit_twice(self):
        # A second fit() call must reset the statistics rather than add to them.
        arm, mab = self.predict(arms=[1, 2, 3, 4],
                                decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=1,
                                is_predict=True)
        # Was assertTrue(len(...), 4), which can never fail.
        self.assertEqual(len(mab._imp.arm_to_expectation), 4)
        mean = mab._imp.arm_to_mean[1]
        ci = mab._imp.arm_to_expectation[1]
        self.assertAlmostEqual(0.3333333333333333, mean)
        self.assertAlmostEqual(1.5723073962832794, ci)
        mean1 = mab._imp.arm_to_mean[4]
        ci1 = mab._imp.arm_to_expectation[4]
        self.assertEqual(mean1, 0)
        self.assertEqual(ci1, 0)
        # Fit again
        decisions2 = [1, 3, 4]
        rewards2 = [0, 1, 1]
        mab.fit(decisions2, rewards2)
        mean2 = mab._imp.arm_to_mean[1]
        ci2 = mab._imp.arm_to_expectation[1]
        mean3 = mab._imp.arm_to_mean[4]
        ci3 = mab._imp.arm_to_expectation[4]
        self.assertEqual(mean2, 0)
        self.assertAlmostEqual(0, mean2)
        self.assertAlmostEqual(1.4823038073675112, ci2)
        self.assertEqual(mean3, 1)
        self.assertAlmostEqual(2.4823038073675114, ci3)

    def test_partial_fit(self):
        # partial_fit() must accumulate on top of the earlier statistics.
        arm, mab = self.predict(arms=[1, 2, 3, 4],
                                decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
                                rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],
                                learning_policy=LearningPolicy.UCB1(alpha=1),
                                seed=123456,
                                num_run=1,
                                is_predict=True)
        # Was assertTrue(len(...), 4), which can never fail.
        self.assertEqual(len(mab._imp.arm_to_expectation), 4)
        mean = mab._imp.arm_to_mean[1]
        ci = mab._imp.arm_to_expectation[1]
        mean1 = mab._imp.arm_to_mean[2]
        ci1 = mab._imp.arm_to_expectation[2]
        self.assertAlmostEqual(0.3333333333333333, mean)
        self.assertAlmostEqual(1.5723073962832794, ci)
        self.assertEqual(mean1, 0)
        self.assertAlmostEqual(ci1, 1.5174271293851465)
        mean2 = mab._imp.arm_to_mean[4]
        ci2 = mab._imp.arm_to_expectation[4]
        self.assertEqual(mean2, 0)
        self.assertEqual(ci2, 0)
        # Fit again
        decisions2 = [1, 3, 4]
        rewards2 = [0, 1, 1]
        mab.partial_fit(decisions2, rewards2)
        mean3 = mab._imp.arm_to_mean[1]
        ci3 = mab._imp.arm_to_expectation[1]
        mean4 = mab._imp.arm_to_mean[4]
        ci4 = mab._imp.arm_to_expectation[4]
        mean5 = mab._imp.arm_to_mean[2]
        ci5 = mab._imp.arm_to_expectation[2]
        self.assertEqual(mean3, 0.25)
        self.assertAlmostEqual(1.3824639856219572, ci3)
        self.assertEqual(mean4, 1)
        self.assertAlmostEqual(3.2649279712439143, ci4)
        self.assertEqual(mean5, 0)
        self.assertAlmostEqual(ci5, 1.6015459273656616)

    def test_add_arm(self):
        # A newly added arm starts with zeroed statistics.
        arm, mab = self.predict(arms=[1, 2, 3],
                                decisions=[1, 2, 1, 1, 2],
                                rewards=[10, 4, 3, 5, 6],
                                learning_policy=LearningPolicy.UCB1(1.0),
                                seed=123456,
                                num_run=1,
                                is_predict=True)
        mab.add_arm(4)
        self.assertTrue(4 in mab.arms)
        self.assertTrue(4 in mab._imp.arms)
        self.assertTrue(mab._imp.arm_to_expectation[4] == 0)
        self.assertTrue(mab._imp.arm_to_mean[4] == 0)

    def test_confidence(self):
        # Check the UCB formula directly for several alpha values.
        # parameters
        mean = 20
        arm_count = 150
        total_count = 500
        alpha = 1
        cb = _UCB1._get_ucb(mean, alpha, total_count, arm_count)
        self.assertAlmostEqual(cb, 20.287856633260894)
        alpha = 0.25
        cb = _UCB1._get_ucb(mean, alpha, total_count, arm_count)
        self.assertAlmostEqual(cb, 20.07196415831522)
        alpha = 3.33
        cb = _UCB1._get_ucb(mean, alpha, total_count, arm_count)
        self.assertAlmostEqual(cb, 20.95856258875877)
| 38.997347 | 119 | 0.435859 |
3710414b2496a8c7b66bb65415facd2cc7827920 | 1,804 | py | Python | porE/results/results_PSD/test_psd_adaptive_stepsize/convergence_MCsteps/results/eval.py | kaitrepte/porE | 1d11f7aca6aa0859f9363c42d8a2c4b3de0f2eec | [
"Apache-2.0"
] | 2 | 2020-08-06T09:35:50.000Z | 2021-04-12T14:45:45.000Z | porE/results/results_PSD/test_psd_adaptive_stepsize/convergence_MCsteps/results/eval.py | kaitrepte/porE | 1d11f7aca6aa0859f9363c42d8a2c4b3de0f2eec | [
"Apache-2.0"
] | 9 | 2020-08-04T16:14:21.000Z | 2020-12-19T10:16:37.000Z | porE/results/results_PSD/test_psd_adaptive_stepsize/convergence_MCsteps/results/eval.py | kaitrepte/porE | 1d11f7aca6aa0859f9363c42d8a2c4b3de0f2eec | [
"Apache-2.0"
] | 2 | 2019-09-27T21:53:49.000Z | 2021-05-07T09:17:39.000Z | import os
# Post-processing script for porE PSD convergence tests: for every structure
# and every number of Monte Carlo steps, parse the output file
# '<structure>_<steps>.out', collect the pore sizes with their percentage
# distribution plus the total runtime, and print one convergence table per
# structure. Missing output files are silently skipped.
structures = ['do','vo','dc','vc','u6','u7','u8','m5','ir','h1','ho']
mc_steps = [250,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000,3500,4000,4500,5000,5500,6000,7000,8000,9000,10000,20000]
for i in range(len(structures)):
    # Per-structure accumulators: one entry per successfully parsed file.
    pores = []
    distr = []
    time = []
    for j in range(len(mc_steps)):
        try:
            ffile = open(structures[i]+'_'+str(mc_steps[j])+'.out','r')
            lines = ffile.readlines()
            ffile.close()
            tmp_pores = []
            tmp_distr = []
            for b in range(len(lines)):
                splitt = lines[b].split()
                if len(splitt) > 1:
                    # A line ending in '(fractional)' marks the header of the
                    # pore table; rows follow until the first empty line, with
                    # pore size in column 1 and distribution in column 2.
                    if splitt[-1] == '(fractional)':
                        k = 0
                        l = 0
                        while k == 0:
                            l = l + 1
                            splitt2 = lines[b+l].split()
                            if len(splitt2) == 0:
                                k = 1
                            else:
                                tmp_pores.append(float(splitt2[0]))
                                tmp_distr.append(float(splitt2[1]))
                    # NOTE(review): assumes the runtime line starts with
                    # 'Total' and carries the value in the 4th column; if that
                    # line is absent, time and pores fall out of sync below.
                    if splitt[0] == 'Total':
                        time.append(float(splitt[3]))
            pores.append(tmp_pores)
            distr.append(tmp_distr)
        except FileNotFoundError:
            continue
    # if any pores were found
    print('Structure '+structures[i])
    if pores != []:
        for t in range(len(pores)):
            # One '<size>(<percent>)' token per pore, then one table row per
            # MC-step count.
            super_string = ''
            for ff in range(len(pores[t])):
                super_string = super_string+' '+str('%10.4f' % pores[t][ff])+'('+str('%4.1f' % distr[t][ff])+')'
            print(str('%5.0i' % mc_steps[t])+' MC steps\t time = '+str('%10.4f' % time[t])+'\t'+super_string)
| 34.037736 | 128 | 0.432927 |
2ecfcaa3b229fa46474c864f7a29ee3707f88941 | 32,351 | py | Python | parameters_input.py | gianmarco-lorenti/RECOpt | c7916861db033f9d917d05094102194202e3bb09 | [
"MIT"
] | 8 | 2021-03-08T09:30:16.000Z | 2022-02-18T19:40:41.000Z | parameters_input.py | gianmarco-lorenti/RECOpt | c7916861db033f9d917d05094102194202e3bb09 | [
"MIT"
] | null | null | null | parameters_input.py | gianmarco-lorenti/RECOpt | c7916861db033f9d917d05094102194202e3bb09 | [
"MIT"
] | 1 | 2021-05-24T13:21:36.000Z | 2021-05-24T13:21:36.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 16:09:48 2020
@author: giamm
"""
import numpy as np
import csv
from pathlib import Path
from tabulate import tabulate
import datareader
from levenshtein_distance import Leven_dist_comparison
##############################################################################
# This file is used to let the user enter the input parameters from keyboard.
# When a value is not given by the user, a default value is assigned.
##############################################################################
# The base path is saved in the variable basepath, it is used to move among
# directories to find the files that need to be read.
basepath = Path(__file__).parent

# A /Parameters folder is created in order to store the parameters as .csv files
dirname = 'Parameters'
# exist_ok=True only tolerates the "directory already exists" case; the
# previous bare try/except Exception silently swallowed every error (e.g.
# permission problems) and hid real failures.
(basepath / dirname).mkdir(exist_ok=True)
## Parameters
# Simulation parameters that can be changed
# n_hh #number of households (-)
# n_people_avg #average number of members for each household (-)
# ftg_avg #average footage of each household (m2)
# location #geographical location: 'north' | 'centre' | 'south'
# power_max #maximum power available from the grid (contractual power) (W)
# en_class #energetic class of the appiances: 'A+++' | 'A++' | 'A+' | 'A' | 'B' | 'C' | 'D'
# toll #tolerance on the displacement of the appliance's daily time-on, i.e. duration (%)
# devsta #standard deviation of the appliance's daily time-on, i.e. duration (min)
# dt_aggr #aggregated data timestep (min) 15 | 30 | 60
# q_max #quantile for the maximum instantaneous load profile (%)
# q_med #quantile for the medium instantaneous load profile (%)
# q_min #quantile for the minimum instantaneous load profile (%)
# time_scale #time-scale for plotting: 'min' | 'h'
# power_scale #power-scale for plotting: 'W' | 'kW' | 'MW'
# energy_scale #energy-scale for plotting: 'kWh' | 'MWh'
## Simulation setup
# Parameters for the simulation setup that can be changed (both for PV and battery)
# sim_type #type of simulation: 'fixed' size | 'parametric'
# size #fixed size of the system (active if sim_type == 'fixed') (kW)
# size_min #minimum size of the system (active if sim_type == 'parametric') (kW)
# size_max #maximum size of the system (active if sim_type == 'parametric') (kW)
################################################################################################################################################################################
## Creating a method to change the parameters entering their values from keyboard
# All the parameters that can be changed are declared as keys of param_dict, while
# the value for each key contains the type (int, float, str) of the parameter, its
# defualt value and its boundaries/possible values and its unit of measure (the latter
# is just to be written in the .csv file where the parameters will be saved).
# The current values of the parameters are read from a .csv file that has been
# created previously and given as values in a dictionary. If the file does not exist yet,
# default values are applied.
# The user is then asked if there is any change desired in the parameters' values.
def parameters_input():
    '''
    The method creates a dictionary where the parameters (updated to the user's input)
    for the simulation are stored, prompting for changes interactively on the
    command line. The updated parameters will be saved in a .csv file.
    Output:
        params - dict, parameter name -> current value
    '''
    # Creating a dictionary that contains all the parameters, their type, default values, etc.
    param_dict = {
        'n_hh': {'type': int, 'default_val': 2, 'min_val': 1, 'max_val': 10000, 'uom': '(units)'},
        # 'toll': {'type': int, 'default_val': 15., 'min_val': 0., 'max_val': 100, 'uom': '(min)'},
        # 'devsta': {'type': int, 'default_val': 2, 'min_val': 1, 'max_val': 100, 'uom': '(min)'},
        # 'q_max': {'type': int, 'default_val': 85, 'min_val': 1, 'max_val': 100, 'uom': '(%)'},
        # 'q_med': {'type': int, 'default_val': 50, 'min_val': 1, 'max_val': 100, 'uom': '(%)'},
        # 'q_min': {'type': int, 'default_val': 15, 'min_val': 1, 'max_val': 100, 'uom': '(%)'},
        # 'n_people_avg': {'type': float, 'default_val': 2.7, 'min_val': 1., 'max_val': 10., 'uom': '(people/household)'},
        'location': {'type': str, 'default_val': 'north', 'possible_values': ['north', 'south', 'centre'], 'uom': '(/)'},
        'power_max': {'type': float, 'default_val': 3., 'min_val': 1., 'max_val': 10., 'uom': '(kW)'},
        'en_class': {'type': str, 'default_val': 'A+', 'possible_values': ['A+++', 'A++', 'A+', 'A', 'B', 'C', 'D'], 'uom': '(/)'},
        'ftg_avg': {'type': float, 'default_val': 100., 'min_val': 10., 'max_val': 1000., 'uom': '(m2)'},
        'dt_aggr':{'type': int, 'default_val': 60, 'possible_values': [15, 30, 60], 'uom': '(min)'},
        # 'time_scale': {'type': str, 'default_val': 'h', 'possible_values': ['min', 'h'], 'uom': '(/)'},
        # 'power_scale': {'type': str, 'default_val': 'kW', 'possible_values': ['W', 'kW', 'MW'], 'uom': '(/)'},
        # 'energy_scale': {'type': str, 'default_val': 'MWh', 'possible_values': ['kWh', 'MWh'], 'uom': '(/)'},
        }
    # Providing parameters' description
    param_dict['n_hh']['description'] = 'number of households'
    # param_dict['toll']['description'] = 'tollerance on appliances\' duration'
    # param_dict['devsta']['description'] = 'standard deviation on appliances\' duration'
    # param_dict['q_max']['description'] = 'quantile for the maximum instantaneous load profile'
    # param_dict['q_med']['description'] = 'quantile for the medium instantaneous load profile'
    # param_dict['q_min']['description'] = 'quantile for the minimum instantaneous load profile'
    param_dict['dt_aggr']['description'] = 'time-step for the aggregation'
    # param_dict['n_people_avg']['description'] = 'average number of people per household'
    param_dict['ftg_avg']['description'] = 'average footage of the households'
    param_dict['power_max']['description'] = 'maximum (contractual) power'
    param_dict['location']['description'] = 'location (north - centre - south)'
    param_dict['en_class']['description'] = 'energy class of the appliances (A+++ - D)'
    # param_dict['time_scale']['description'] = 'time-scale for plotting'
    # param_dict['power_scale']['description'] = 'power-scale for plotting'
    # param_dict['energy_scale']['description'] = 'energy-scale for plotting'
    # Creating a list that contains all the parameters names (useful for the inputs from keyboard)
    param_list = list(param_dict.keys())
    # Creating a list of possible commands that will stop the execution of the code
    stop_commands = ['', 'stop', 'done', 'no', 'none']
    # The current values for the parameters are read from the file parameters.csv. If it does not exist yet
    # default values are assigned to the parameters
    params = datareader.read_param('parameters', ';', dirname)
    if not bool(params):
        for param in param_dict: params[param] = param_dict[param]['default_val']
    # Printing the current values for the parameters
    message = '\nThe parameters for the simulation are currently set as follows\n'
    print(message)
    tab = []
    for param in params:
        row = [param, params[param], param_dict[param]['uom'].strip('() '), param_dict[param]['description']]
        tab.append(row)
    print(tabulate(tab, headers=['Parameter', 'Value', 'Unit of measure', 'Description']))
    # Starting the input of new values
    message = '\nWould you like to change any parameter?\nPress \'enter\' to avoid or\nEnter \'ok\' to start changing: '
    start_flag = input(message).strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
    if start_flag in stop_commands: message = '\nNo parameter will be changed.\n'
    else: message = '\nUpper/lower cases, underscores, quotation marks can be disregarded.\nPress \'enter\' to stop at any time.'
    print(message)
    # Starting the procedure for updating the values of the parameters
    while start_flag not in stop_commands:
        # Asking for a command-line input in order to change a parameter's value
        param_change = input('\nTo change a parameter write the whole expression (ex. n hh = 100): ') \
            .strip("\"',. ").lower().replace(' ', '_').replace('-', '_')
        # Exiting the loop if a "stop-command" is given
        if param_change in stop_commands: break
        # Finding the equality sign in the expression entered by the user, in order to
        # divide the parameter's name from the value
        index = param_change.find('=')
        param_name = param_change[:index].strip("=\"',. _").lower().replace(' ', '_').replace('-', '_')
        param_val = param_change[index + 1:].strip("=\"',. _ ").lower().replace(' ', '_').replace('-', '_')
        # Assessing if the parameter's name entered by the user is in the parameter's list;
        # otherwise the Leven_dist_comparison method is used to suggest the closest match
        if param_name not in param_list:
            options = Leven_dist_comparison(param_name, param_list)
            if len(options) == 1: message = 'Maybe you mean {} (rewrite the name to confirm): '.format(options[0])
            else : message = 'No match found, please rewrite the parameter\'s name: '
            param_name = input(message).strip("=\"',. ").lower().replace(' ', '_').replace('-', '_')
            if param_name in stop_commands: break
        if param_name in stop_commands: break
        elif param_name not in param_list: print('Sorry no quick fix, try again please.'); continue
        # After identifying the parameter that is going to be changed, the value entered by the user
        # is checked to be consistent with the possible values the parameter can assume.
        # Each branch loops until a valid value (or a stop-command, which
        # restores the default) is entered.
        if param_dict[param_name]['type'] == int:
            while True:
                if param_val in stop_commands: param_val = param_dict[param_name]['default_val']; break
                try: param_val = int(param_val)
                except: param_val = input('Please, enter an integer value for {}: '.format(param_name)) \
                    .strip("=\"',. ").lower().replace(' ', '_').replace('-', '_'); continue
                if 'possible_values' not in param_dict[param_name]:
                    low_lim = param_dict[param_name]['min_val']
                    up_lim = param_dict[param_name]['max_val']
                    if param_val >= low_lim and param_val <= up_lim: break
                    else: param_val = input('Please, enter an integer between {} and {}: '.format(low_lim, up_lim)) \
                        .strip("=\"',. ").lower().replace(' ', '_').replace('-', '_'); continue
                else:
                    possible_values = param_dict[param_name]['possible_values']
                    if param_val in possible_values: break
                    else: param_val = input('Please, enter an integer in {}: '.format(possible_values)) \
                        .strip("=\"',. ").lower().replace(' ', '_').replace('-', '_'); continue
        elif param_dict[param_name]['type'] == float:
            low_lim = param_dict[param_name]['min_val']
            up_lim = param_dict[param_name]['max_val']
            while True:
                if param_val in stop_commands: param_val = param_dict[param_name]['default_val']; break
                try: param_val = float(param_val)
                except: param_val = input('Please, enter a number: ') \
                    .strip("=\"',. ").lower().replace(' ', '_').replace('-', '_'); continue
                if param_val >= low_lim and param_val <= up_lim: break
                else: param_val = input('Please, enter a number between {} and {}: '.format(low_lim, up_lim)) \
                    .strip("=\"',. ").lower().replace(' ', '_').replace('-', '_'); continue
        elif param_dict[param_name]['type'] == str:
            # String values are matched case-insensitively against the allowed
            # options, then mapped back to their canonical spelling.
            possible_values = param_dict[param_name]['possible_values']
            possible_values_low = []
            for value in possible_values: possible_values_low.append(value.lower())
            while True:
                if param_val in stop_commands: param_val = param_dict[param_name]['default_val']; break
                if param_val in possible_values_low: param_val = possible_values[possible_values_low.index(param_val)]; break
                else: param_val = input('Please, choose between {}: '.format(possible_values)) \
                    .strip("=\"',. ").lower().replace(' ', '_').replace('-', '_'); continue
        # Updating the parameter's value
        params[param_name] = param_val
        print('Done: {} changed to {} {}.'.format(param_name, param_val, param_dict[param_name]['uom'].strip('()')))
    # Storing the parameters (updated) in a .csv file
    filename = 'parameters.csv'
    fpath = basepath / dirname
    with open(fpath / filename , mode='w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=';', quotechar="'", quoting=csv.QUOTE_NONNUMERIC)
        csv_writer.writerow(['Name', 'Value', 'Unit of measure'])
        for param in params:
            csv_writer.writerow([param , params[param], param_dict[param]['uom']])
    # Returning a dictionary with the updated values for the parameters
    return(params)
################################################################################################################################################################################
## Creating a method to change the parameters entering their values from keyboard
# All the parameters that can be changed are declared as keys of param_dict, while
# the value for each key contains the type (int, float, str) of the parameter, its
# defualt value and its boundaries/possible values and its unit of measure (the latter
# is just to be written in the .csv file where the parameters will be saved).
# The current values of the parameters are read from a .csv file that has been
# created previously and given as values in a dictionary. If the file does not exist yet,
# default values are applied.
# The user is then asked if there is any change desired in the parameters' values.
def simulation_setup(tech):
    '''
    The method creates a dictionary where the simulation setup (updated to user's input) is stored.
    The updated parameters will be saved in a .csv file.
    Input:
        tech - str, the technology about which the simulation setup can be changed
    Output:
        save_params - dict, containing the updated parameters for the simulation setup
        size_range - list, containing the range of sizes to be explored
    NOTE(review): relies on module-level names (datareader, basepath, dirname,
    csv, np, tabulate, Leven_dist_comparison, input) defined elsewhere in the file.
    '''
    ## Parameters that can be changed
    # Creating a dictionary that contains all the parameters, their type, default values, etc.
    param_dict = {
        'sim_type': {'type': str, 'default_val': 'fixed', 'possible_values': ['fixed', 'parametric'], 'uom': '(/)'},
        'size': {'type': float, 'default_val': 2, 'min_val': 0.5, 'max_val': 10000, 'uom': '(kW)'},
        'size_min': {'type': float, 'default_val': 2, 'min_val': 0.5, 'max_val': 10000, 'uom': '(kW)'},
        'size_max': {'type': float, 'default_val': 2., 'min_val': 0.5, 'max_val': 10000, 'uom': '(kW)'},
        'n_sizes': {'type': int, 'default_val': 1, 'min_val': 1, 'max_val': 5, 'uom': '(/)'}
        }
    # Batteries get different defaults (parametric sweep by default) and sizes in kWh
    if tech.strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_') == 'battery':
        param_dict = {
            'sim_type': {'type': str, 'default_val': 'parametric', 'possible_values': ['fixed', 'parametric'], 'uom': '(/)'},
            'size': {'type': float, 'default_val': 2, 'min_val': 0, 'max_val': 10000, 'uom': '(kWh)'},
            'size_min': {'type': float, 'default_val': 1, 'min_val': 0, 'max_val': 10000, 'uom': '(kWh)'},
            'size_max': {'type': float, 'default_val': 5., 'min_val': 0, 'max_val': 10000, 'uom': '(kWh)'},
            'n_sizes': {'type': int, 'default_val': 5, 'min_val': 1, 'max_val': 5, 'uom': '(/)'}
            }
    # Adding a description to each parameter
    param_dict['sim_type']['description'] = 'Type of simulation for {}: \'fixed\' size or \'parametric\''.format(tech)
    param_dict['size']['description'] = 'Fixed size for {}'.format(tech)
    param_dict['size_min']['description'] = 'Minimum size of the {}'.format(tech)
    param_dict['size_max']['description'] = 'Maximum size of the {}'.format(tech)
    param_dict['n_sizes']['description'] = 'Number of sizes of the {} to be evaluated'.format(tech)
    # The current values for the parameters are read from the file parameters.csv. If it does not exist yet
    # default values are assigned to the parameters
    params = datareader.read_param('{}_simulation_setup'.format(tech.strip("',.=\"_ ").lower().replace(' ', '_')), ';', dirname)
    if not bool(params):
        for param in param_dict: params[param] = param_dict[param]['default_val']
    # Printing the current values for the parameters
    message = '\nThe simulation for the {} is currently set as follows\n'.format(tech)
    print(message)
    # The parameters that are printed depend on the type of simulation
    tab = []
    if params['sim_type'] == 'fixed':
        param = 'sim_type'
        tab.append([param, params[param], param_dict[param]['uom'].strip('() '), param_dict[param]['description']])
        param = 'size'
        tab.append([param, params[param], param_dict[param]['uom'].strip('() '), param_dict[param]['description']])
    elif params['sim_type'] == 'parametric':
        param = 'sim_type'
        tab.append([param, params[param], param_dict[param]['uom'].strip('() '), param_dict[param]['description']])
        param = 'size_min'
        tab.append([param, params[param], param_dict[param]['uom'].strip('() '), param_dict[param]['description']])
        param = 'size_max'
        tab.append([param, params[param], param_dict[param]['uom'].strip('() '), param_dict[param]['description']])
        param = 'n_sizes'
        tab.append([param, params[param], param_dict[param]['uom'].strip('() '), param_dict[param]['description']])
    print(tabulate(tab, headers=['Parameter', 'Value', 'Unit of measure', 'Description']))
    ## Parameter's update from keyboard
    # Creating a list of possible commands that will stop the execution of the code
    stop_commands = ['', 'stop', 'done', 'no', 'none']
    # Creating a list of the parameters contained in params
    param_list = list(params.keys())
    # Starting the input of new values
    message = '\nWould you like to change any parameter?\nPress \'enter\' to avoid or\nEnter \'ok\' to start changing: '
    start_flag = input(message).strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
    if start_flag in stop_commands: message = '\nNo parameter will be changed.\n'
    else: message = '\nUpper/lower cases, underscores, quotation marks can be disregarded.\nPress \'enter\' to stop at any time.'
    print(message)
    # Starting the procedure for updating the values of the parameters
    while start_flag not in stop_commands:
        # Asking for a command-line input in order to change a parameter's value.
        # Spaces are replaced by underscores, so "sim type = fixed" becomes
        # "sim_type_=_fixed"; the inner '=' survives the (edge-only) strip.
        param_change = input('\nWrite the whole expression (ex. sim type = fixed): ').strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
        # Exiting the loop if a "stop-command" is given
        if param_change in stop_commands: break
        # Finding the equality sign in the expression entered by the user, in order to
        # divide the parameter's name from the value
        index = param_change.find('=')
        param_name = param_change[:index].strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
        param_val = param_change[index + 1:].strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
        # Assessing if the parameter's name entered by the user is in the parameter's list;
        # otherwise the Leven_dist_comparison method is used to suggest the closest match
        if param_name not in param_list:
            options = Leven_dist_comparison(param_name, param_list)
            if len(options) == 1: message = 'Maybe you mean {} (rewrite the name to confirm): '.format(options[0])
            else : message = 'No match found, please rewrite the parameter\'s name: '
            param_name = input(message).strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
            if param_name in stop_commands: break
        # If a break command is given, the outer loop still has not been broken
        if param_name in stop_commands: break
        # If the parameter name is still not in the list, a new iteration is started
        elif param_name not in param_list: print('Sorry no quick fix, try again please.'); continue
        ## Type of simulation
        # If the the parameter that is going to be changed is the simulation type, also the fixed size/ size range boundaries
        # must be updated, therefore a proper implementation is needed
        if param_name == 'sim_type':
            possible_values = param_dict[param_name]['possible_values']
            possible_values = [value.strip("=\"',. ").lower().replace(' ', '_').replace('-', '_') for value in possible_values]
            default_value = param_dict[param_name]['default_val']
            # if param_val in stop_commands: param_val = default_value
            # If the updated value is not in the possible values, the user is asked to re-write it, after giving a
            # suggestion. If the value is still not in the possible values, the default value is applied
            if param_val not in possible_values:
                options = Leven_dist_comparison(param_val, possible_values)
                if len(options) == 1: message = 'Maybe you mean \'{}\' (rewrite the name to confirm): '.format(options[0])
                else : message = 'Please, rewrite the type of simulation you want to perform: '
                param_val = input(message).strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
                if param_val in stop_commands: break
                elif param_val not in possible_values: print('Sorry no quick fix, default value will be assigned.'); param_val = default_value
            # Updating the value for the type of simulation
            params[param_name] = param_val
            print('Done: {} changed to {}.'.format(param_name, param_val))
            # As already mentioned, if one wants to change the simulation type, also the size/range boundaries must be changed
            message = '\nIf you want to change the simulation type, you also need to change the size/size-range boundaries'
            print(message)
            ## Sizes range boundaries
            sim_type = param_val
            # For fixed size type, minimum and maximum size are the same
            if sim_type == 'fixed':
                message = 'Enter the size of the {} {}: '.format(tech, param_dict['size']['uom'])
                size = input(message)
                try: default_value = params['size']
                except: default_value = param_dict['size']['default_val']
                low_lim = param_dict['size']['min_val']
                up_lim = param_dict['size']['max_val']
                # Re-prompt until a number inside [low_lim, up_lim] (or a stop command) is given
                while True:
                    size = size.strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
                    if size in stop_commands: size = default_value; break
                    try: size = float(size)
                    except: size = input('Please, enter a number: '); continue
                    if size >= low_lim and size <= up_lim: break
                    else: size = input('Please, enter a number between {} and {}: '.format(low_lim, up_lim)); continue
                params['size'] = size
            # For parametric type, both minimum and maximum size are to be specified
            if sim_type == 'parametric':
                # Minimum size
                message = 'Enter the minimum size of the {} {}: '.format(tech, param_dict['size_min']['uom'])
                size_min = input(message)
                try: default_value = params['size_min']
                except: default_value = param_dict['size_min']['default_val']
                low_lim = param_dict['size_min']['min_val']
                up_lim = param_dict['size_min']['max_val']
                while True:
                    size_min = size_min.strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
                    if size_min in stop_commands: size_min = default_value; break
                    try: size_min = float(size_min)
                    except: size_min = input('Please, enter a number: '); continue
                    if size_min >= low_lim and size_min <= up_lim: break
                    else: size_min = input('Please, enter a number between {} and {}: '.format(low_lim, up_lim)); continue
                # Maximum size
                message = 'Enter the maximum size of the {} {}: '.format(tech, param_dict['size_max']['uom'])
                size_max = input(message)
                try: default_value = params['size_max']
                except: default_value = param_dict['size_max']['default_val']
                low_lim = param_dict['size_max']['min_val']
                up_lim = param_dict['size_max']['max_val']
                while True:
                    size_max = size_max.strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
                    if size_max in stop_commands: size_max = default_value; break
                    try: size_max = float(size_max)
                    except: size_max = input('Please, enter a number: '); continue
                    if size_max >= low_lim and size_max <= up_lim: break
                    else: size_max = input('Please, enter a number between {} and {}: '.format(low_lim, up_lim)); continue
                # Making sure size_min is smaller than size_max
                if size_min > size_max: size_min, size_max = size_max, size_min
                # Number of sizes
                message = 'Enter the number of sizes of the {} to be evaluated: '.format(tech)
                n_sizes = input(message).strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_')
                try: default_value = params['n_sizes']
                except: default_value = param_dict['n_sizes']['default_val']
                low_lim = param_dict['n_sizes']['min_val']
                up_lim = param_dict['n_sizes']['max_val']
                while True:
                    if n_sizes in stop_commands: n_sizes = default_value; break
                    try: n_sizes = int(n_sizes)
                    except: n_sizes = input('Please, enter an integer: ') \
                        .strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_'); continue
                    if n_sizes >= low_lim and n_sizes <= up_lim: break
                    else: n_sizes = input('I am good but I can be slow! Please enter an integer between {} and {}: '.format(low_lim, up_lim)) \
                        .strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_'); continue
                # Storing the updated parameters
                params['size_min'] = size_min
                params['size_max'] = size_max
                params['n_sizes'] = n_sizes
            # If the simulation type has been changed and the size/range boundaries have been changed as well there is
            # nothing left to change, so the loop can be broken
            break
        # If the parameter to be changed was not the simulation type, the usual procedure can be followed
        # to check if the specified value is consistent with the type of the parameter and the minimum/maximum values
        else:
            low_lim = param_dict[param_name]['min_val']
            up_lim = param_dict[param_name]['max_val']
            while True:
                param_val = param_val  # no-op, kept from the original implementation
                if param_val in stop_commands: param_val = param_dict[param_name]['default_val']; break
                try: param_val = float(param_val)
                except: param_val = input('Please, enter a number: ') \
                    .strip("',.=\"_ ").lower().replace(' ', '_').replace('-', '_'); continue
                if param_val >= low_lim and param_val <= up_lim: break
                else: param_val = input('Please, enter a number between {} and {}: '.format(low_lim, up_lim)) \
                    .strip("',.=\"_ ").lower().replace('-', '_'); continue
            params[param_name] = param_val
            print('Done: {} changed to {} {}.'.format(param_name, param_val, param_dict[param_name]['uom'].strip('()')))
    ## Storing the updated parameters
    # Only the ones of interest are stored
    if params['sim_type'] == 'parametric':
        # A degenerate sweep (single size) is collapsed into a fixed-size setup
        if params['size_min'] == params['size_max'] or params['n_sizes'] == 1:
            save_params = {
                'sim_type': 'fixed',
                'size': params['size_min']
                }
        else:
            save_params = {
                'sim_type': params['sim_type'],
                'size_min': params['size_min'],
                'size_max': params['size_max'],
                'n_sizes': int(params['n_sizes']),
                }
    elif params['sim_type'] == 'fixed':
        save_params = {
            'sim_type': params['sim_type'],
            'size': params['size'],
            }
    # Storing the parameters (updated) in a .csv file
    filename = '{}_simulation_setup.csv'.format(tech.strip("',.=\"_ ").lower().replace(' ', '_'))
    fpath = basepath / dirname
    with open(fpath / filename , mode='w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=';', quotechar="'", quoting=csv.QUOTE_NONNUMERIC)
        csv_writer.writerow(['Name', 'Value'])
        for param in save_params:
            csv_writer.writerow([param , save_params[param]])
    ## Creating the sizes range
    # Once that the simulation setup has been defined, the range for the size can be built and returned to main
    # Updating the simulation type to the user's input
    sim_type = save_params['sim_type']
    # Updating the range's boundaries to the user's input
    if sim_type == 'fixed': size_min, size_max, n_sizes = save_params['size'], save_params['size'], 1
    if sim_type == 'parametric': size_min, size_max, n_sizes = save_params['size_min'], save_params['size_max'], save_params['n_sizes']
    # The boundaries are rounded up to .5 precision (int(x*2)/2 truncates toward zero)
    size_min = int(size_min*2)/2
    size_max = int(size_max*2)/2
    n_sizes = int(n_sizes)
    # The length of the range is evaluated
    size_range_length = max(size_max - size_min, 0.5)
    # Making sure that the difference in size is at least of 0.5 (in case this does not happen, the number of sizes is decreased)
    n_sizes = min(n_sizes, int(2*size_range_length + 1))
    # Creating the size range and making sure that all sizes are rounded up to .5
    size_range = np.linspace(size_min, size_max, n_sizes)
    size_range = [int(size*2)/2 for size in size_range]
    # # The length of the range is evaluated
    # size_range_length = size_max - size_min
    # # The step for the size is chosen depending on the length of the range
    # if size_range_length <= 2.5: d_size = 0.5
    # elif size_range_length > 2.5 and size_range_length <=5: d_size = 1
    # elif size_range_length > 5 and size_range_length <= 10: d_size = 2
    # else: d_size = int(size_range_length/5)
    # # The range is created
    # size_range = np.arange(size_min, size_max + d_size, d_size)
    # if size_range[-1] != size_max: size_range[-1] = size_max
    return(save_params, list(size_range))
| 50.786499 | 176 | 0.593583 |
bb57c52f8356f0aa48f610dadb03e8ba80d66016 | 12,430 | py | Python | test/python/transpiler/test_optimize_1q_gates.py | romainfd/qiskit-terra | b5285ccc5cb1d17b7c73402833f2750b93652426 | [
"Apache-2.0"
] | 2 | 2020-12-26T21:12:30.000Z | 2021-05-18T12:53:42.000Z | test/python/transpiler/test_optimize_1q_gates.py | romainfd/qiskit-terra | b5285ccc5cb1d17b7c73402833f2750b93652426 | [
"Apache-2.0"
] | 1 | 2020-03-29T19:57:14.000Z | 2020-03-29T21:49:25.000Z | test/python/transpiler/test_optimize_1q_gates.py | romainfd/qiskit-terra | b5285ccc5cb1d17b7c73402833f2750b93652426 | [
"Apache-2.0"
] | 1 | 2020-07-13T17:56:46.000Z | 2020-07-13T17:56:46.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the optimize-1q-gate pass"""
import unittest
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Optimize1qGates, Unroller
from qiskit.converters import circuit_to_dag
from qiskit.test import QiskitTestCase
from qiskit.circuit import Parameter
from qiskit.transpiler.exceptions import TranspilerError
class TestOptimize1qGates(QiskitTestCase):
    """Test for 1q gate optimizations. """

    def test_dont_optimize_id(self):
        """Identity gates are like 'wait' commands.
        They should never be optimized (even without barriers).
        See: https://github.com/Qiskit/qiskit-terra/issues/2373
        """
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.i(qr)
        circuit.i(qr)
        dag = circuit_to_dag(circuit)
        pass_ = Optimize1qGates()
        after = pass_.run(dag)
        # The pass must leave the DAG completely untouched.
        self.assertEqual(dag, after)

    def test_optimize_h_gates_pass_manager(self):
        """Transpile: qr:--[H]-[H]-[H]-- == qr:--[u2]-- """
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.h(qr[0])
        circuit.h(qr[0])
        circuit.h(qr[0])
        # Three Hadamards equal a single H, i.e. u2(0, pi), after merging.
        expected = QuantumCircuit(qr)
        expected.u2(0, np.pi, qr[0])
        passmanager = PassManager()
        passmanager.append(Unroller(['u2']))
        passmanager.append(Optimize1qGates())
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_1q_gates_collapse_identity_equivalent(self):
        """test optimize_1q_gates removes u1(2*pi) rotations.
        See: https://github.com/Qiskit/qiskit-terra/issues/159
        """
        qr = QuantumRegister(2, 'qr')
        cr = ClassicalRegister(2, 'cr')
        qc = QuantumCircuit(qr, cr)
        qc.h(qr[0])
        qc.cx(qr[1], qr[0])
        qc.u1(2 * np.pi, qr[0])
        qc.cx(qr[1], qr[0])
        qc.u1(np.pi / 2, qr[0])  # these three should combine
        qc.u1(np.pi, qr[0])  # to identity then
        qc.u1(np.pi / 2, qr[0])  # optimized away.
        qc.cx(qr[1], qr[0])
        qc.u1(np.pi, qr[1])
        qc.u1(np.pi, qr[1])
        qc.measure(qr[0], cr[0])
        qc.measure(qr[1], cr[1])
        dag = circuit_to_dag(qc)
        simplified_dag = Optimize1qGates().run(dag)
        # Every u1 in the circuit is identity-equivalent, so none must remain.
        num_u1_gates_remaining = len(simplified_dag.named_nodes('u1'))
        self.assertEqual(num_u1_gates_remaining, 0)

    def test_ignores_conditional_rotations(self):
        """Conditional rotations should not be considered in the chain.

        qr0:--[U1]-[U1]-[U1]-[U1]-    qr0:--[U1]-[U1]-
               ||   ||                       ||   ||
        cr0:===.================== == cr0:===.====.===
                    ||                            ||
        cr1:========.=============    cr1:========.===
        """
        qr = QuantumRegister(1, 'qr')
        cr = ClassicalRegister(2, 'cr')
        circuit = QuantumCircuit(qr, cr)
        circuit.u1(0.1, qr).c_if(cr, 1)
        circuit.u1(0.2, qr).c_if(cr, 3)
        circuit.u1(0.3, qr)
        circuit.u1(0.4, qr)
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr, cr)
        expected.u1(0.1, qr).c_if(cr, 1)
        expected.u1(0.2, qr).c_if(cr, 3)
        # Only the two unconditional rotations are merged (0.3 + 0.4).
        expected.u1(0.7, qr)
        pass_ = Optimize1qGates()
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_in_the_back(self):
        """Optimizations can be in the back of the circuit.
        See https://github.com/Qiskit/qiskit-terra/issues/2004.

        qr0:--[U1]-[U1]-[H]--    qr0:--[U1]-[H]--
        """
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u1(0.3, qr)
        circuit.u1(0.4, qr)
        circuit.h(qr)
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr)
        expected.u1(0.7, qr)
        expected.h(qr)
        pass_ = Optimize1qGates()
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_single_parameterized_circuit(self):
        """Parameters should be treated as opaque gates."""
        qr = QuantumRegister(1)
        qc = QuantumCircuit(qr)
        theta = Parameter('theta')
        qc.u1(0.3, qr)
        qc.u1(0.4, qr)
        qc.u1(theta, qr)
        qc.u1(0.1, qr)
        qc.u1(0.2, qr)
        dag = circuit_to_dag(qc)
        # Numeric runs on each side of the parameter are merged separately.
        expected = QuantumCircuit(qr)
        expected.u1(0.7, qr)
        expected.u1(theta, qr)
        expected.u1(0.3, qr)
        after = Optimize1qGates().run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_parameterized_circuits(self):
        """Parameters should be treated as opaque gates."""
        qr = QuantumRegister(1)
        qc = QuantumCircuit(qr)
        theta = Parameter('theta')
        qc.u1(0.3, qr)
        qc.u1(0.4, qr)
        qc.u1(theta, qr)
        qc.u1(0.1, qr)
        qc.u1(0.2, qr)
        qc.u1(theta, qr)
        qc.u1(0.3, qr)
        qc.u1(0.2, qr)
        dag = circuit_to_dag(qc)
        expected = QuantumCircuit(qr)
        expected.u1(0.7, qr)
        expected.u1(theta, qr)
        expected.u1(0.3, qr)
        expected.u1(theta, qr)
        expected.u1(0.5, qr)
        after = Optimize1qGates().run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_parameterized_expressions_in_circuits(self):
        """Expressions of Parameters should be treated as opaque gates."""
        qr = QuantumRegister(1)
        qc = QuantumCircuit(qr)
        theta = Parameter('theta')
        phi = Parameter('phi')
        sum_ = theta + phi
        product_ = theta * phi
        qc.u1(0.3, qr)
        qc.u1(0.4, qr)
        qc.u1(theta, qr)
        qc.u1(phi, qr)
        qc.u1(sum_, qr)
        qc.u1(product_, qr)
        qc.u1(0.3, qr)
        qc.u1(0.2, qr)
        dag = circuit_to_dag(qc)
        expected = QuantumCircuit(qr)
        expected.u1(0.7, qr)
        expected.u1(theta, qr)
        expected.u1(phi, qr)
        expected.u1(sum_, qr)
        expected.u1(product_, qr)
        expected.u1(0.5, qr)
        after = Optimize1qGates().run(dag)
        self.assertEqual(circuit_to_dag(expected), after)
class TestOptimize1qGatesParamReduction(QiskitTestCase):
    """Test for 1q gate optimizations parameter reduction, reduce n in Un """

    def test_optimize_u3_to_u2(self):
        """U3(pi/2, pi/3, pi/4) -> U2(pi/3, pi/4)"""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(np.pi / 2, np.pi / 3, np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u2(np.pi / 3, np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates())
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_u3_to_u2_round(self):
        """U3(1.5707963267948961, 1.0471975511965971, 0.7853981633974489) -> U2(pi/3, pi/4)"""
        # Angles are off by ~1 ulp from pi/2, pi/3, pi/4: the pass must round.
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(1.5707963267948961, 1.0471975511965971, 0.7853981633974489, qr[0])
        expected = QuantumCircuit(qr)
        expected.u2(np.pi / 3, np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates())
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_u3_to_u1(self):
        """U3(0, 0, pi/4) -> U1(pi/4)"""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(0, 0, np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u1(np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates())
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_u3_to_u1_round(self):
        """U3(1e-16, 1e-16, pi/4) -> U1(pi/4)"""
        # Near-zero theta/phi must be rounded down to an u1 rotation.
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(1e-16, 1e-16, np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u1(np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates())
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)
class TestOptimize1qGatesBasis(QiskitTestCase):
    """Test for 1q gate optimizations parameter reduction with basis """

    def test_optimize_u3_basis_u3(self):
        """U3(pi/2, pi/3, pi/4) (basis[u3]) -> U3(pi/2, pi/3, pi/4)"""
        # With only u3 in the basis, the gate must be kept as-is.
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(np.pi / 2, np.pi / 3, np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates(['u3']))
        result = passmanager.run(circuit)
        self.assertEqual(circuit, result)

    def test_optimize_u3_basis_u2(self):
        """U3(pi/2, 0, pi/4) -> U2(0, pi/4)"""
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(np.pi / 2, 0, np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u2(0, np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates(['u2']))
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_u3_basis_u2_cx(self):
        """U3(pi/2, 0, pi/4) -> U2(0, pi/4). Basis [u2, cx]."""
        qr = QuantumRegister(2, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(np.pi / 2, 0, np.pi / 4, qr[0])
        circuit.cx(qr[0], qr[1])
        expected = QuantumCircuit(qr)
        expected.u2(0, np.pi / 4, qr[0])
        expected.cx(qr[0], qr[1])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates(['u2', 'cx']))
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_u1_basis_u2_u3(self):
        """U1(pi/4) -> U3(0, 0, pi/4). Basis [u2, u3]."""
        # u1 is not in the basis, so the rotation is lifted to u3.
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u1(np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u3(0, 0, np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates(['u2', 'u3']))
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_u1_basis_u2(self):
        """U1(pi/4) -> Raises. Basis [u2]"""
        # A pure phase rotation cannot be expressed with u2 alone.
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u1(np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u3(0, 0, np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates(['u2']))
        with self.assertRaises(TranspilerError):
            _ = passmanager.run(circuit)

    def test_optimize_u3_basis_u2_u1(self):
        """U3(pi/2, 0, pi/4) -> U2(0, pi/4). Basis [u2, u1]."""
        qr = QuantumRegister(2, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(np.pi / 2, 0, np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u2(0, np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates(['u2', 'u1']))
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)

    def test_optimize_u3_basis_u1(self):
        """U3(0, 0, pi/4) -> U1(pi/4). Basis [u1]."""
        qr = QuantumRegister(2, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.u3(0, 0, np.pi / 4, qr[0])
        expected = QuantumCircuit(qr)
        expected.u1(np.pi / 4, qr[0])
        passmanager = PassManager()
        passmanager.append(Optimize1qGates(['u1']))
        result = passmanager.run(circuit)
        self.assertEqual(expected, result)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 31.075 | 95 | 0.582623 |
776901d2ff6579e160c48bdd174c3f080f28f876 | 3,351 | py | Python | trdg/computer_text_generator.py | Cospel/TextRecognitionDataGenerator | 2339383f61a926945c06206ba5bb6358c0cb3568 | [
"MIT"
] | null | null | null | trdg/computer_text_generator.py | Cospel/TextRecognitionDataGenerator | 2339383f61a926945c06206ba5bb6358c0cb3568 | [
"MIT"
] | null | null | null | trdg/computer_text_generator.py | Cospel/TextRecognitionDataGenerator | 2339383f61a926945c06206ba5bb6358c0cb3568 | [
"MIT"
] | null | null | null | import random as rnd
from PIL import Image, ImageColor, ImageFont, ImageDraw, ImageFilter
def generate(text, font, text_color, font_size, orientation, space_width, fit, random_opacity, random_character_spacing):
    """Render *text* onto a transparent RGBA image.

    Dispatches on *orientation*: 0 draws the text horizontally,
    1 draws it vertically. Any other value raises ValueError.
    """
    if orientation == 0:
        return _generate_horizontal_text(
            text, font, text_color, font_size, space_width, fit, random_opacity, random_character_spacing
        )
    if orientation == 1:
        return _generate_vertical_text(text, font, text_color, font_size, space_width, fit)
    raise ValueError("Unknown orientation " + str(orientation))
def _generate_horizontal_text(text, font, text_color, font_size, space_width, fit, random_opacity, random_character_spacing):
    """Draw *text* left-to-right on a transparent RGBA image.

    *space_width* scales the width of the space character; *random_opacity*
    randomizes the text alpha in [160, 255]; *random_character_spacing*
    jitters every glyph after the first slightly to the left (up to 15% of
    its width); *fit* crops the result to the text's bounding box.
    """
    try:
        image_font = ImageFont.truetype(font=font, size=font_size)
    except Exception as exc:
        # Chain the original error so font-loading failures stay diagnosable
        # (the previous bare `except:` also swallowed SystemExit/KeyboardInterrupt).
        raise Exception("Unable to read font!") from exc
    words = text.split(" ")
    space_width = image_font.getsize(" ")[0] * space_width
    words_width = [image_font.getsize(w)[0] for w in words]
    # Total width ignores the random per-glyph jitter, which only shifts left.
    text_width = sum(words_width) + int(space_width) * (len(words) - 1)
    text_height = max([image_font.getsize(w)[1] for w in words])
    txt_img = Image.new("RGBA", (text_width, text_height), (0, 0, 0, 0))
    txt_draw = ImageDraw.Draw(txt_img)
    # One random color per image, each channel drawn between the two
    # extremes of the (comma-separated) text_color specification.
    colors = [ImageColor.getrgb(c) for c in text_color.split(",")]
    c1, c2 = colors[0], colors[-1]
    alpha = rnd.randint(160, 255) if random_opacity else 255
    fill = (
        rnd.randint(min(c1[0], c2[0]), max(c1[0], c2[0])),
        rnd.randint(min(c1[1], c2[1]), max(c1[1], c2[1])),
        rnd.randint(min(c1[2], c2[2]), max(c1[2], c2[2])),
        alpha,
    )
    for i, w in enumerate(words):
        # Horizontal offset of this word: previous words plus inter-word spaces.
        word_x = sum(words_width[0:i]) + i * int(space_width)
        characters_width = 0
        for k, c in enumerate(w):
            if random_character_spacing:
                # Shift every glyph after the first up to 15% of its own width
                # to the left, slightly overlapping the previous one.
                random_size = rnd.randint(0, int((image_font.getsize(c)[0] / 100) * 15)) if k != 0 else 0
            else:
                random_size = 0
            txt_draw.text(
                (word_x + (characters_width - random_size), 0),
                c,
                fill=fill,
                font=image_font,
            )
            characters_width += image_font.getsize(c)[0]
    if fit:
        return txt_img.crop(txt_img.getbbox())
    return txt_img
def _generate_vertical_text(text, font, text_color, font_size, space_width, fit):
    """Draw *text* top-to-bottom, one character per row, on a transparent RGBA image.

    *space_width* scales the height reserved for space characters;
    *fit* crops the result to the text's bounding box.
    """
    image_font = ImageFont.truetype(font=font, size=font_size)
    space_height = int(image_font.getsize(" ")[1] * space_width)
    char_heights = [
        image_font.getsize(c)[1] if c != " " else space_height for c in text
    ]
    text_width = max([image_font.getsize(c)[0] for c in text])
    text_height = sum(char_heights)
    txt_img = Image.new("RGBA", (text_width, text_height), (0, 0, 0, 0))
    txt_draw = ImageDraw.Draw(txt_img)
    colors = [ImageColor.getrgb(c) for c in text_color.split(",")]
    c1, c2 = colors[0], colors[-1]
    # Sample each channel between the two extreme colors. min/max guards are
    # required because random.randint(a, b) raises ValueError when a > b
    # (e.g. text_color="#ffffff,#000000"); this matches the guarded sampling
    # already done in _generate_horizontal_text.
    fill = (
        rnd.randint(min(c1[0], c2[0]), max(c1[0], c2[0])),
        rnd.randint(min(c1[1], c2[1]), max(c1[1], c2[1])),
        rnd.randint(min(c1[2], c2[2]), max(c1[2], c2[2])),
    )
    for i, c in enumerate(text):
        txt_draw.text((0, sum(char_heights[0:i])), c, fill=fill, font=image_font)
    if fit:
        return txt_img.crop(txt_img.getbbox())
    return txt_img
| 32.852941 | 125 | 0.614742 |
5d981dc23b4586235bffcb186f3a7b63ddeadc49 | 1,417 | py | Python | homeassistant/components/netatmo/const.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/components/netatmo/const.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/netatmo/const.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 6 | 2019-12-01T19:06:52.000Z | 2020-09-17T00:57:06.000Z | """Constants used by the Netatmo component."""
from datetime import timedelta
# hass.data key under which the shared API object is stored.
API = "api"
# Integration identifiers.
DOMAIN = "netatmo"
MANUFACTURER = "Netatmo"
AUTH = "netatmo_auth"
CONF_PUBLIC = "public_sensor_config"
# hass.data keys for the camera and home data handlers.
CAMERA_DATA = "netatmo_camera"
HOME_DATA = "netatmo_home_data"
# Netatmo OAuth2 endpoints.
OAUTH2_AUTHORIZE = "https://api.netatmo.com/oauth2/authorize"
OAUTH2_TOKEN = "https://api.netatmo.com/oauth2/token"
DATA_PERSONS = "netatmo_persons"
# NOTE(review): None presumably means "use the default webhook URL" — confirm in usage.
NETATMO_WEBHOOK_URL = None
# Fallback values used when optional configuration is missing.
DEFAULT_PERSON = "Unknown"
DEFAULT_DISCOVERY = True
DEFAULT_WEBHOOKS = False
# Event type identifiers (presumably values received in Netatmo webhook payloads — confirm).
EVENT_PERSON = "person"
EVENT_MOVEMENT = "movement"
EVENT_HUMAN = "human"
EVENT_ANIMAL = "animal"
EVENT_VEHICLE = "vehicle"
# Event names fired on the Home Assistant event bus for each event type.
EVENT_BUS_PERSON = "netatmo_person"
EVENT_BUS_MOVEMENT = "netatmo_movement"
EVENT_BUS_HUMAN = "netatmo_human"
EVENT_BUS_ANIMAL = "netatmo_animal"
EVENT_BUS_VEHICLE = "netatmo_vehicle"
EVENT_BUS_OTHER = "netatmo_other"
# Attribute keys used in event payloads and service calls.
ATTR_ID = "id"
ATTR_PSEUDO = "pseudo"
ATTR_NAME = "name"
ATTR_EVENT_TYPE = "event_type"
ATTR_MESSAGE = "message"
ATTR_CAMERA_ID = "camera_id"
ATTR_HOME_ID = "home_id"
ATTR_HOME_NAME = "home_name"
ATTR_PERSONS = "persons"
ATTR_IS_KNOWN = "is_known"
ATTR_FACE_URL = "face_url"
ATTR_SNAPSHOT_URL = "snapshot_url"
ATTR_VIGNETTE_URL = "vignette_url"
ATTR_SCHEDULE_ID = "schedule_id"
ATTR_SCHEDULE_NAME = "schedule_name"
# Throttling intervals between consecutive data/event refreshes.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
MIN_TIME_BETWEEN_EVENT_UPDATES = timedelta(seconds=5)
# Name of the service registered by this integration.
SERVICE_SETSCHEDULE = "set_schedule"
| 24.431034 | 61 | 0.790402 |
3187420f9565c71fc050b58ee88873ea7a1cff0c | 3,613 | py | Python | tests/integration/test_integration_user.py | UtrechtUniversity/SWORDS-UU | d9b45706566054541625ec363e41bdf97f58c6b1 | [
"MIT"
] | 1 | 2022-02-09T14:53:45.000Z | 2022-02-09T14:53:45.000Z | tests/integration/test_integration_user.py | UtrechtUniversity/SWORDS-UU | d9b45706566054541625ec363e41bdf97f58c6b1 | [
"MIT"
] | 28 | 2021-11-30T14:37:17.000Z | 2022-03-22T12:46:53.000Z | tests/integration/test_integration_user.py | UtrechtUniversity/SWORDS-UU | d9b45706566054541625ec363e41bdf97f58c6b1 | [
"MIT"
] | 1 | 2022-01-17T10:53:26.000Z | 2022-01-17T10:53:26.000Z | """
Tests for retrieval methods in user collection
"""
import pytest
from ghapi.all import GhApi
import os
import pandas as pd
import time
from collect_users.methods.github_search.github_search import get_complete_query_result, get_users_from_repos, get_users_from_users
from collect_users.scripts.enrich_users import read_input_file, get_userdata, update_users, Service
from collect_users.scripts.prepare_filtering import is_student
"""
Tests for github_search.py
"""
@pytest.fixture
def service():
    """GitHub API service wrapper configured with a 10-second sleep between calls."""
    github_api = GhApi()
    return Service(api=github_api, sleep=10)
def test_search_topics(service):
    """Searching repos by topic must yield user-id triples whose first element is 'github.com'."""
    repos = get_complete_query_result(
        "topic:utrecht-university", "SEARCH_REPOS", service)
    user_ids = get_users_from_repos(repos, service)
    time.sleep(5)  # pause to stay within the GitHub API rate limit
    first = user_ids[0]
    assert len(first) == 3 and first[0] == "github.com"
def test_search_users(service):
    """Searching users by keyword must yield user-id triples whose first element is 'github.com'."""
    found = get_complete_query_result(
        "utrecht university", "SEARCH_USERS", service)
    user_ids = get_users_from_users(found, service)
    time.sleep(5)  # pause to stay within the GitHub API rate limit
    first = user_ids[0]
    assert len(first) == 3 and first[0] == "github.com"
"""
Tests for user collection scripts
"""
@pytest.fixture
def path():
    """Directory containing this test module; base path for fixture files."""
    return os.path.dirname(__file__)
@pytest.fixture
def users_merged(path):
    """Merged users loaded from the CSV test fixture."""
    fixture_file = os.path.join(path, "test_data/users_merged.csv")
    return read_input_file(fixture_file)
@pytest.fixture
def users_enriched(path):
    """Enriched users loaded from the Excel test fixture."""
    fixture_file = os.path.join(path, "test_data/users_enriched.xlsx")
    return read_input_file(fixture_file)
@pytest.fixture
def users_enriched_old(path):
    """Summer-2021 snapshot of enriched users, used by the update tests."""
    fixture_file = os.path.join(path, "test_data/users_enriched_summer2021.xlsx")
    return read_input_file(fixture_file)
def test_enrich_new_users(users_merged, service):
    """Enriching a sample of users via the GitHub user API must resolve data.

    The API results are merged back onto the user frame; the test only
    requires *some* ids to resolve, because accounts may be deleted over time.
    """
    results_github_user_api = get_userdata(
        users_merged.head(3)["user_id"], service)
    # The merge key is lowercase, so normalise the API's login column as well.
    results_github_user_api["login"] = results_github_user_api["login"].str.lower()
    df_users_enriched = users_merged.merge(results_github_user_api,
                                           left_on="user_id",
                                           right_on="login",
                                           how="left")
    df_users_enriched.drop(["login"], axis=1, inplace=True)
    # At least one id must be non-NaN for the enrichment to have worked.
    # Fixed the E712 anti-pattern `... == False` (compares a numpy bool to
    # the False singleton); `not ...` is the idiomatic, equivalent check.
    assert not df_users_enriched["id"].isnull().all()
def test_update_new_users(users_enriched_old, users_merged, service):
    """Updating an old annotated snapshot with fresh API data must never shrink it."""
    df_users_annotated = users_enriched_old
    df_users = users_merged
    df_users = df_users.drop_duplicates("user_id").reset_index(drop=True)
    # Flag users that do not appear in the old annotated snapshot.
    df_users["new_user"] = False
    df_users.loc[~df_users["user_id"].isin(
        df_users_annotated["user_id"].str.lower()), "new_user"] = True
    # Left-join the new users against the annotated data (unmatched rows -> NaN).
    df_users_update = pd.merge(df_users[df_users["new_user"]],
                               df_users_annotated,
                               left_on="user_id",
                               right_on="user_id",
                               how="left")
    # Fetch live GitHub data for a small sample of the new users only.
    results_github_user_api = get_userdata(
        df_users_update.head(2)["user_id"], service)
    df_users_enriched = update_users(df_users_annotated,
                                     results_github_user_api)
    # The update may add rows but must not drop existing annotated users.
    assert len(df_users_enriched) >= len(df_users_annotated)
def test_filter_users(users_enriched):
    """The bio-based student heuristic must flag exactly 117 users in the fixture."""
    df = users_enriched
    df["is_student"] = df['bio'].apply(
        is_student)
    assert df['is_student'].value_counts()[1] == 117
| 32.845455 | 131 | 0.687517 |
2a2980a8871f7e464ee083545cdbea51a6194b24 | 424 | py | Python | scripts/Hotword/assistant_direct.py | iamDyeus/KnickAI | c17d808c949cb3467031498e7252bd2095c04699 | [
"MIT"
] | 31 | 2021-11-08T18:42:17.000Z | 2022-03-25T07:45:46.000Z | scripts/Hotword/awake.py | iamDyeus/KnickAI | c17d808c949cb3467031498e7252bd2095c04699 | [
"MIT"
] | 6 | 2021-12-20T14:15:44.000Z | 2022-03-28T16:19:12.000Z | scripts/Hotword/awake.py | iamDyeus/KnickAI | c17d808c949cb3467031498e7252bd2095c04699 | [
"MIT"
] | 3 | 2021-11-13T09:38:12.000Z | 2022-03-25T07:44:17.000Z | import sys
sys.path.append('scripts/')
from console import intro_header
import time
# Starts the assistant immediately, skipping the introduction sequence.
def start_from_hibernation():
    """Boot the assistant directly: print the banner, greet, then run features."""
    print(intro_header)
    # Imported inside the function — presumably to defer these modules'
    # import-time side effects until the assistant actually starts; confirm.
    from standardfunctions import wishMe
    wishMe()
    time.sleep(1)
    from FeatureExecution import showmagic
    showmagic()
if __name__ == '__main__':
    start_from_hibernation()
| 20.190476 | 73 | 0.716981 |
c40f505338c7dc96d483f09fbfb98c5dc3e260d5 | 1,184 | py | Python | Download_subs/Spinner.py | miroslavvidovic/python-scripts | 22c925bc2bd9b657ac3183c3c73af0061cb61b3b | [
"MIT"
] | null | null | null | Download_subs/Spinner.py | miroslavvidovic/python-scripts | 22c925bc2bd9b657ac3183c3c73af0061cb61b3b | [
"MIT"
] | null | null | null | Download_subs/Spinner.py | miroslavvidovic/python-scripts | 22c925bc2bd9b657ac3183c3c73af0061cb61b3b | [
"MIT"
] | null | null | null | """
Spinner.py
Spinner animation
"""
import sys
import time
import threading
class Spinner:
    """Console busy-indicator that animates a rotating cursor on stdout.

    The animation runs on a background thread between start() and stop().
    """
    # Class-level defaults; ``delay`` may be overridden per instance.
    busy = False
    delay = 0.1

    @staticmethod
    def spinning_cursor():
        """Yield the cursor frames '|', '/', '-', '\\' forever, in order."""
        while True:
            yield from '|/-\\'

    def __init__(self, delay=None):
        """Create a spinner; a truthy numeric ``delay`` overrides the frame interval."""
        self.spinner_generator = self.spinning_cursor()
        if delay and float(delay):
            self.delay = delay

    def spinner_task(self):
        """Draw frames until ``busy`` is cleared (runs on the worker thread)."""
        write = sys.stdout.write
        flush = sys.stdout.flush
        while self.busy:
            write(next(self.spinner_generator))
            flush()
            time.sleep(self.delay)
            write('\b')  # rub out the frame before drawing the next one
            flush()

    def start(self):
        """Begin animating on a freshly spawned worker thread."""
        self.busy = True
        threading.Thread(target=self.spinner_task).start()

    def stop(self):
        """Request the animation to stop and wait one frame so it can exit."""
        self.busy = False
        time.sleep(self.delay)
| 18.793651 | 82 | 0.538007 |
2c5461fd0279f909bf96daf41413b6dc0701cc07 | 3,932 | py | Python | hashicorp_vault_client/hashicorp_vault_client/models/body111.py | drewmullen/HAC | fb185804fd244366f8f8d01df22835b3d96e7512 | [
"Apache-2.0"
] | null | null | null | hashicorp_vault_client/hashicorp_vault_client/models/body111.py | drewmullen/HAC | fb185804fd244366f8f8d01df22835b3d96e7512 | [
"Apache-2.0"
] | 2 | 2019-09-30T20:56:41.000Z | 2019-10-02T00:22:07.000Z | hashicorp_vault_client/hashicorp_vault_client/models/body111.py | drewmullen/HAC | fb185804fd244366f8f8d01df22835b3d96e7512 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
HashiCorp Vault API
HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`. # noqa: E501
OpenAPI spec version: 1.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Body111(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the definition.
    """

    swagger_types = {
        'lease_id': 'str',
        'sync': 'bool'
    }

    attribute_map = {
        'lease_id': 'lease_id',
        'sync': 'sync'
    }

    def __init__(self, lease_id=None, sync=True):  # noqa: E501
        """Body111 - a model defined in Swagger"""  # noqa: E501
        self._lease_id = None
        self._sync = None
        self.discriminator = None
        # Route non-None constructor values through the property setters.
        for attr_name, attr_value in (('lease_id', lease_id), ('sync', sync)):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def lease_id(self):
        """Gets the lease_id of this Body111.  # noqa: E501

        The lease identifier to renew. This is included with a lease.  # noqa: E501

        :return: The lease_id of this Body111.  # noqa: E501
        :rtype: str
        """
        return self._lease_id

    @lease_id.setter
    def lease_id(self, lease_id):
        """Sets the lease_id of this Body111.

        The lease identifier to renew. This is included with a lease.  # noqa: E501

        :param lease_id: The lease_id of this Body111.  # noqa: E501
        :type: str
        """
        self._lease_id = lease_id

    @property
    def sync(self):
        """Gets the sync of this Body111.  # noqa: E501

        Whether or not to perform the revocation synchronously  # noqa: E501

        :return: The sync of this Body111.  # noqa: E501
        :rtype: bool
        """
        return self._sync

    @sync.setter
    def sync(self, sync):
        """Sets the sync of this Body111.

        Whether or not to perform the revocation synchronously  # noqa: E501

        :param sync: The sync of this Body111.  # noqa: E501
        :type: bool
        """
        self._sync = sync

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                value = [item.to_dict() if hasattr(item, "to_dict") else item
                         for item in value]
            elif hasattr(value, "to_dict"):
                value = value.to_dict()
            elif isinstance(value, dict):
                value = {key: val.to_dict() if hasattr(val, "to_dict") else val
                         for key, val in value.items()}
            result[attr] = value
        # Generated boilerplate: merge own items when the model subclasses dict.
        if issubclass(Body111, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Body111) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.690141 | 104 | 0.553917 |
a8fe79e60649aee330fe8da105214a3e788b0563 | 1,636 | py | Python | t2s.py | zngrn/py-text-speech | ed6a21fde224d14e7b7897fdcb82066732d3f0a2 | [
"MIT"
] | null | null | null | t2s.py | zngrn/py-text-speech | ed6a21fde224d14e7b7897fdcb82066732d3f0a2 | [
"MIT"
] | null | null | null | t2s.py | zngrn/py-text-speech | ed6a21fde224d14e7b7897fdcb82066732d3f0a2 | [
"MIT"
] | null | null | null | import pyttsx3
import speech_recognition
from decouple import config
from datetime import datetime
from random import choice
from utils import working_on_it, apologies
# Names read from the environment / .env file via python-decouple.
USER = config('USER')
BOT = config('BOT')
# Text-to-speech engine setup. 'nsss' selects the NSSpeechSynthesizer driver
# (presumably macOS-only — confirm before running on other platforms).
engine = pyttsx3.init('nsss')
engine.setProperty('rate', 187)  # speaking rate (pyttsx3 units; default ~200)
engine.setProperty('volume', 1.0)  # maximum volume
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)  # use the first installed voice
def bot_speak(content):
    """Speak ``content`` aloud via the module-level engine (blocks until done)."""
    engine.say(content)
    engine.runAndWait()
    engine.stop()
def bot_greet():
    """Greet the user according to the local time of day, then offer assistance."""
    hour = datetime.now().hour
    if 6 <= hour < 12:
        bot_speak(f"Good Morning! {USER}")
    elif 12 <= hour < 16:
        bot_speak(f"Good afternoon! {USER}")
    elif 16 <= hour < 21:
        bot_speak(f"Good evening! {USER}")
    # No time-of-day greeting between 21:00 and 06:00; go straight to the offer.
    bot_speak(f"I'm {BOT}. How can I be of assistance?")
def bot_listen():
    """Capture one utterance from the microphone and return the recognised text.

    Speaks a canned "working on it" phrase for ordinary queries.  If the user
    says "exit" or "stop", says goodbye and terminates the process.
    Returns the string 'None' when recognition fails.
    """
    recognizer = speech_recognition.Recognizer()
    with speech_recognition.Microphone() as source:
        print('Listening...')
        recognizer.pause_threshold = 1  # seconds of silence that end the phrase
        audio = recognizer.listen(source)
    try:
        print('Analysing and recognizing audio input...')
        query = recognizer.recognize_google(audio, language='en-in')
        # Bug fix: the original condition `not 'exit' in query or 'stop' in
        # query` parsed as `(not 'exit' in query) or ('stop' in query)`, so a
        # "stop" command kept the assistant running instead of exiting.
        if 'exit' not in query and 'stop' not in query:
            bot_speak(choice(working_on_it))
        else:
            bot_speak('If that\'s all, I\'ll take a bow...')
            exit()
    except Exception as e:
        # Recognition/network failures: apologise and return a sentinel value.
        print(e)
        bot_speak(choice(apologies))
        query = 'None'
    return query
if __name__ == "__main__":
    bot_speak('Hello!')
    bot_greet()
    # Listen/respond loop; bot_listen() terminates the process itself on an
    # "exit" command, so the loop needs no break condition.
    while True:
        query = bot_listen().lower()
        print(query)
f919607268956928296e6ee4bd7db5767d5ae62f | 4,964 | py | Python | tests/unit/utils/test_measurables.py | Trendometrics/pyowm | ba1581c37a8c6a2e113a77670cc68fe2b4adeca6 | [
"MIT"
] | 799 | 2015-01-03T12:07:57.000Z | 2022-03-31T03:59:53.000Z | tests/unit/utils/test_measurables.py | Trendometrics/pyowm | ba1581c37a8c6a2e113a77670cc68fe2b4adeca6 | [
"MIT"
] | 279 | 2015-02-12T16:11:43.000Z | 2022-02-14T21:49:03.000Z | tests/unit/utils/test_measurables.py | Trendometrics/pyowm | ba1581c37a8c6a2e113a77670cc68fe2b4adeca6 | [
"MIT"
] | 215 | 2015-01-06T19:07:11.000Z | 2022-02-14T21:39:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from pyowm.utils import measurables
class TestMeasurablesUtils(unittest.TestCase):
    """Unit tests for the pyowm.utils.measurables conversion helpers."""
    def test_kelvin_dict_to(self):
        """Kelvin dicts convert to celsius/fahrenheit; 'kelvin' returns them unchanged."""
        kelvin_dict = {'a': 301.0, 'b': 280}
        celsius_dict = {'a': 27.85, 'b': 6.85}
        fahrenheit_dict = {'a': 82.13, 'b': 44.33}
        self.assertEqual(celsius_dict,
                         measurables.kelvin_dict_to(
                             kelvin_dict,
                             "celsius")
                         )
        self.assertEqual(fahrenheit_dict,
                         measurables.kelvin_dict_to(
                             kelvin_dict,
                             "fahrenheit")
                         )
        self.assertEqual(kelvin_dict,
                         measurables.kelvin_dict_to(
                             kelvin_dict,
                             "kelvin")
                         )
    def test_kelvin_dict_to_fails_with_unknown_temperature_units(self):
        """An unknown target unit name raises ValueError."""
        self.assertRaises(ValueError, measurables.kelvin_dict_to, {}, "xyz")
    def test_kelvin_to_celsius(self):
        """301.0 K converts to 27.85 degrees Celsius."""
        kelvin = 301.0
        expected = 27.85
        result = measurables.kelvin_to_celsius(kelvin)
        self.assertEqual(expected, result)
    def test_kelvin_to_celsius_fails_with_negative_values(self):
        """Negative Kelvin input (physically impossible) raises ValueError."""
        self.assertRaises(ValueError, measurables.kelvin_to_celsius, -137.0)
    def test_kelvin_to_fahrenheit(self):
        """301.0 K converts to 82.13 degrees Fahrenheit."""
        kelvin = 301.0
        expected = 82.13
        result = measurables.kelvin_to_fahrenheit(kelvin)
        self.assertEqual(expected, result)
    def test_kelvin_to_fahrenheit_fails_with_negative_values(self):
        """Negative Kelvin input (physically impossible) raises ValueError."""
        self.assertRaises(ValueError, measurables.kelvin_to_fahrenheit, -137.0)
    def test_metric_wind_dict_to_imperial(self):
        """Wind speeds convert from m/s to mph; 'deg' (direction) passes through."""
        input = {
            'speed': 2,
            'gust': 3,
            'deg': 7.89
        }
        expected = {
            'speed': 4.47388,
            'gust': 6.71082,
            'deg': 7.89
        }
        result = measurables.metric_wind_dict_to_imperial(input)
        self.assertEqual(expected, result)
    def test_metric_wind_dict_to_km_h(self):
        """Wind speeds convert from m/s to km/h (x3.6); 'deg' passes through."""
        input = {
            'speed': 2,
            'gust': 3,
            'deg': 7.89
        }
        expected = {
            'speed': 7.2,
            'gust': 10.8,
            'deg': 7.89
        }
        result = measurables.metric_wind_dict_to_km_h(input)
        self.assertEqual(expected, result)
    def test_metric_wind_dict_to_knots(self):
        """Wind speeds convert from m/s to knots; 'deg' passes through."""
        input = {
            'speed': 2,
            'gust': 3,
            'deg': 7.89
        }
        expected = {'speed': 3.88768, 'gust': 5.83152, 'deg': 7.89}
        result = measurables.metric_wind_dict_to_knots(input)
        self.assertEqual(expected, result)
    def test_metric_wind_dict_to_beaufort(self):
        """Speeds at and around the scale boundaries map onto Beaufort 0-12.

        'deg' (direction) passes through unchanged.
        """
        # Speeds chosen at the upper edge of each Beaufort band.
        corner_values = {
            'lower': 0.01,
            'a': 0.2,
            'b': 1.5,
            'c': 3.3,
            'd': 5.4,
            'e': 7.9,
            'f': 10.7,
            'g': 13.8,
            'h': 17.1,
            'i': 20.7,
            'j': 24.4,
            'k': 28.4,
            'l': 32.6,
            'upper': 345,
            'deg': 7.89
        }
        expected_corner_values_beaufort = {
            'lower': 0,
            'a': 0,
            'b': 1,
            'c': 2,
            'd': 3,
            'e': 4,
            'f': 5,
            'g': 6,
            'h': 7,
            'i': 8,
            'j': 9,
            'k': 10,
            'l': 11,
            'upper': 12,
            'deg': 7.89
        }
        result_corner_values = measurables.metric_wind_dict_to_beaufort(corner_values)
        self.assertEqual(result_corner_values, expected_corner_values_beaufort)
        # A non-boundary sample: 17.9 m/s is Beaufort 8, 2.89 m/s is Beaufort 2.
        input = {
            'speed': 17.9,
            'gust': 2.89,
            'deg': 7.89
        }
        expected = {'speed': 8, 'gust': 2, 'deg': 7.89}
        result = measurables.metric_wind_dict_to_beaufort(input)
        self.assertEqual(expected, result)
def test_metric_pressure_dict_to_inhg(self):
input = {'press': 1000, 'sea_level': 1, 'grnd_level': None}
expected = {'press': 29.53, 'sea_level': .03}
result = measurables.metric_pressure_dict_to_inhg(input)
print(result)
self.assertEqual(expected, result)
def test_visibility_distance_to(self):
distances = (100, 200, None)
cmp_kms = (.1, .2, None)
cmp_miles = (.06, .12, None)
case_one, case_two = list(), list()
for distance in distances:
case_one.append(measurables.visibility_distance_to(distance))
case_two.append(measurables.visibility_distance_to(distance, 'miles'))
self.assertTrue(tuple(case_one) == cmp_kms and tuple(case_two) == cmp_miles)
def test_visibility_distance_to_fails_with_invalid_unit(self):
self.assertRaises(ValueError, measurables.visibility_distance_to, 10, 'xyz') | 32.233766 | 86 | 0.524577 |
9138cba6ab9695becdf68d9bcc983603a92613cf | 2,482 | py | Python | rdflib/plugins/sparql/results/csvresults.py | gtfierro/rdflib | be3d026e9065c8f60f59ac79a70da9f3199f5f43 | [
"BSD-3-Clause"
] | 1 | 2022-02-02T23:04:51.000Z | 2022-02-02T23:04:51.000Z | rdflib/plugins/sparql/results/csvresults.py | gtfierro/rdflib | be3d026e9065c8f60f59ac79a70da9f3199f5f43 | [
"BSD-3-Clause"
] | 6 | 2021-11-22T19:10:32.000Z | 2022-01-31T19:16:37.000Z | rdflib/plugins/sparql/results/csvresults.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | null | null | null | """
This module implements a parser and serializer for the CSV SPARQL result
formats
http://www.w3.org/TR/sparql11-results-csv-tsv/
"""
import codecs
import csv
from typing import IO
from rdflib import BNode, Literal, URIRef, Variable
from rdflib.query import Result, ResultParser, ResultSerializer
class CSVResultParser(ResultParser):
def __init__(self):
self.delim = ","
def parse(self, source, content_type=None):
r = Result("SELECT")
if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
source = codecs.getreader("utf-8")(source)
reader = csv.reader(source, delimiter=self.delim)
r.vars = [Variable(x) for x in next(reader)]
r.bindings = []
for row in reader:
r.bindings.append(self.parseRow(row, r.vars))
return r
def parseRow(self, row, v):
return dict(
(var, val)
for var, val in zip(v, [self.convertTerm(t) for t in row])
if val is not None
)
def convertTerm(self, t):
if t == "":
return None
if t.startswith("_:"):
return BNode(t) # or generate new IDs?
if t.startswith("http://") or t.startswith("https://"): # TODO: more?
return URIRef(t)
return Literal(t)
class CSVResultSerializer(ResultSerializer):
def __init__(self, result):
ResultSerializer.__init__(self, result)
self.delim = ","
if result.type != "SELECT":
raise Exception("CSVSerializer can only serialize select query results")
def serialize(self, stream: IO, encoding: str = "utf-8", **kwargs):
# the serialiser writes bytes in the given encoding
# in py3 csv.writer is unicode aware and writes STRINGS,
# so we encode afterwards
import codecs
stream = codecs.getwriter(encoding)(stream) # type: ignore[assignment]
out = csv.writer(stream, delimiter=self.delim)
vs = [self.serializeTerm(v, encoding) for v in self.result.vars] # type: ignore[union-attr]
out.writerow(vs)
for row in self.result.bindings:
out.writerow(
[self.serializeTerm(row.get(v), encoding) for v in self.result.vars] # type: ignore[union-attr]
)
def serializeTerm(self, term, encoding):
if term is None:
return ""
else:
return term
| 28.204545 | 112 | 0.605157 |
0394509844fae3051822cae0605003ec3af98f7b | 103 | py | Python | mapserver/django/django_project/fba/apps.py | dimasciput/geocris-inasafe-fba | e8bc982f7328740235133605049634c6dca93279 | [
"MIT"
] | 1 | 2020-12-01T14:42:12.000Z | 2020-12-01T14:42:12.000Z | mapserver/django/django_project/fba/apps.py | dimasciput/geocris-inasafe-fba | e8bc982f7328740235133605049634c6dca93279 | [
"MIT"
] | 27 | 2020-05-13T10:05:47.000Z | 2020-11-25T10:53:29.000Z | mapserver/django/django_project/fba/apps.py | dimasciput/geocris-inasafe-fba | e8bc982f7328740235133605049634c6dca93279 | [
"MIT"
] | 1 | 2020-09-21T03:25:02.000Z | 2020-09-21T03:25:02.000Z | from django.apps import AppConfig
class Config(AppConfig):
name = 'fba'
verbose_name = "FbA"
| 14.714286 | 33 | 0.68932 |
a3b66dd28c7aef2c010775d5d59c5640306798db | 8,001 | py | Python | tests/test_invoke.py | miguelgrinberg/slam | a8c4dabe18093711b63577efb59d7febce8b9de1 | [
"MIT"
] | 79 | 2017-01-02T21:40:22.000Z | 2022-02-22T11:07:51.000Z | tests/test_invoke.py | miguelgrinberg/slam | a8c4dabe18093711b63577efb59d7febce8b9de1 | [
"MIT"
] | 17 | 2017-01-02T23:04:42.000Z | 2020-10-09T19:08:39.000Z | tests/test_invoke.py | miguelgrinberg/slam | a8c4dabe18093711b63577efb59d7febce8b9de1 | [
"MIT"
] | 13 | 2017-02-09T22:52:30.000Z | 2020-09-21T01:48:18.000Z | from io import BytesIO
import json
import mock
import sys
import unittest
import botocore
from slam import cli
from .test_deploy import config, describe_stacks_response
BUILTIN = '__builtin__'
if sys.version_info >= (3, 0):
BUILTIN = 'builtins'
class InvokeTests(unittest.TestCase):
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_with_args(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {'StatusCode': 200,
'Payload': BytesIO(b'{"foo":"bar"}')}
client.side_effect = [mock_cfn, mock_lmb]
cli.main(['invoke', 'arg=string', 'arg2:=true', 'arg3:=123',
'arg4:={"foo":"bar"}'])
mock_cfn.describe_stacks.assert_called_once_with(StackName='foo')
mock_lmb.invoke.assert_called_once_with(
FunctionName='arn:lambda:foo', InvocationType='RequestResponse',
Payload='{"kwargs": {"arg": "string", "arg2": true, "arg3": 123, '
'"arg4": {"foo": "bar"}}}', Qualifier='dev')
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_with_stage(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {'StatusCode': 200,
'Payload': BytesIO(b'{"foo":"bar"}')}
client.side_effect = [mock_cfn, mock_lmb]
cli.main(['invoke', '--stage', 'prod'])
mock_cfn.describe_stacks.assert_called_once_with(StackName='foo')
mock_lmb.invoke.assert_called_once_with(
FunctionName='arn:lambda:foo', InvocationType='RequestResponse',
Payload='{"kwargs": {}}', Qualifier='prod')
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_no_args(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {'StatusCode': 200,
'Payload': BytesIO(b'{"foo":"bar"}')}
client.side_effect = [mock_cfn, mock_lmb]
cli.main(['invoke'])
mock_cfn.describe_stacks.assert_called_once_with(StackName='foo')
mock_lmb.invoke.assert_called_once_with(
FunctionName='arn:lambda:foo', InvocationType='RequestResponse',
Payload='{"kwargs": {}}', Qualifier='dev')
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_not_deployed(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.side_effect = \
botocore.exceptions.ClientError({'Error': {}}, 'operation')
client.side_effect = [mock_cfn, mock_lmb]
self.assertRaises(RuntimeError, cli.main, ['invoke'])
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_dry_run(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {'StatusCode': 200,
'Payload': BytesIO(b'{"foo":"bar"}')}
client.side_effect = [mock_cfn, mock_lmb]
cli.main(['invoke', '--dry-run'])
mock_cfn.describe_stacks.assert_called_once_with(StackName='foo')
mock_lmb.invoke.assert_called_once_with(
FunctionName='arn:lambda:foo', InvocationType='DryRun',
Payload='{"kwargs": {}}', Qualifier='dev')
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_nowait(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {'StatusCode': 202,
'Payload': BytesIO(b'')}
client.side_effect = [mock_cfn, mock_lmb]
cli.main(['invoke', '--nowait'])
mock_cfn.describe_stacks.assert_called_once_with(StackName='foo')
mock_lmb.invoke.assert_called_once_with(
FunctionName='arn:lambda:foo', InvocationType='Event',
Payload='{"kwargs": {}}', Qualifier='dev')
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_invalid_arg(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {'StatusCode': 202,
'Payload': BytesIO(b'')}
client.side_effect = [mock_cfn, mock_lmb]
self.assertRaises(ValueError, cli.main, ['invoke', 'invalid-argument'])
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_unexpected_error(self, _load_config, client):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {'StatusCode': 500,
'Payload': BytesIO(b'')}
client.side_effect = [mock_cfn, mock_lmb]
self.assertRaises(RuntimeError, cli.main, ['invoke'])
@mock.patch(BUILTIN + '.print')
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_error(self, _load_config, client, mock_print):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {
'StatusCode': 200,
'FunctionError': 'Unhandled',
'Payload': BytesIO(json.dumps({
'stackTrace': [
['file.py', 123, 'module', 'code'],
['file2.py', 456, 'module2', 'code2']
],
'errorMessage': 'foo-error',
'errorType': 'FooError'
}).encode('utf-8'))
}
client.side_effect = [mock_cfn, mock_lmb]
cli.main(['invoke'])
output = ''.join([c[0][0] + '\n' for c in mock_print.call_args_list])
self.assertEqual(output, 'Traceback (most recent call last):\n'
' File "file.py", line 123, in module\n'
' code\n'
' File "file2.py", line 456, in module2\n'
' code2\n'
'FooError: foo-error\n')
@mock.patch(BUILTIN + '.print')
@mock.patch('slam.cli.boto3.client')
@mock.patch('slam.cli._load_config', return_value=config)
def test_invoke_error_no_stack_trace(self, _load_config, client,
mock_print):
mock_cfn = mock.MagicMock()
mock_lmb = mock.MagicMock()
mock_cfn.describe_stacks.return_value = describe_stacks_response
mock_lmb.invoke.return_value = {
'StatusCode': 200,
'FunctionError': 'Unhandled',
'Payload': BytesIO(json.dumps({}).encode('utf-8'))
}
client.side_effect = [mock_cfn, mock_lmb]
self.assertRaises(RuntimeError, cli.main, ['invoke'])
| 44.20442 | 79 | 0.611799 |
4560fab6fd3431da4e1f72bfdca11537ba9b1908 | 76,124 | py | Python | great_expectations/dataset/sqlalchemy_dataset.py | lcorneliussen/great_expectations | 00a94d9dd7397b726e951baf290f4b5d18101b4d | [
"Apache-2.0"
] | 1 | 2020-09-29T18:19:35.000Z | 2020-09-29T18:19:35.000Z | great_expectations/dataset/sqlalchemy_dataset.py | lcorneliussen/great_expectations | 00a94d9dd7397b726e951baf290f4b5d18101b4d | [
"Apache-2.0"
] | null | null | null | great_expectations/dataset/sqlalchemy_dataset.py | lcorneliussen/great_expectations | 00a94d9dd7397b726e951baf290f4b5d18101b4d | [
"Apache-2.0"
] | null | null | null | import inspect
import logging
import traceback
import uuid
import warnings
from datetime import datetime
from functools import wraps
from typing import Dict, Iterable, List
import numpy as np
import pandas as pd
from dateutil.parser import parse
from great_expectations.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import (
check_sql_engine_dialect,
get_approximate_percentile_disc_sql,
get_sql_dialect_floating_point_infinity_value,
)
from great_expectations.util import import_library_module
from ..core import convert_to_json_serializable
from .dataset import Dataset
from .pandas_dataset import PandasDataset
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sa
from sqlalchemy.dialects import registry
from sqlalchemy.engine import reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.engine.result import RowProxy
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.sql.elements import Label, TextClause, WithinGroup
from sqlalchemy.sql.expression import BinaryExpression, literal
from sqlalchemy.sql.operators import custom_op
from sqlalchemy.sql.selectable import CTE, Select
except ImportError:
logger.debug(
"Unable to load SqlAlchemy context; install optional sqlalchemy dependency for support"
)
sa = None
registry = None
reflection = None
BinaryExpression = None
literal = None
Select = None
CTE = None
custom_op = None
Label = None
WithinGroup = None
TextClause = None
RowProxy = None
DefaultDialect = None
ProgrammingError = None
try:
import psycopg2
import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
sqlalchemy_psycopg2 = None
try:
import sqlalchemy_redshift.dialect
except ImportError:
sqlalchemy_redshift = None
try:
import snowflake.sqlalchemy.snowdialect
# Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError):
snowflake = None
try:
import pybigquery.sqlalchemy_bigquery
# Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
registry.register("bigquery", "pybigquery.sqlalchemy_bigquery", "BigQueryDialect")
try:
getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
bigquery_types_tuple = None
except AttributeError:
# In older versions of the pybigquery driver, types were not exported, so we use a hack
logger.warning(
"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
)
from collections import namedtuple
BigQueryTypes = namedtuple(
"BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
)
bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except ImportError:
bigquery_types_tuple = None
pybigquery = None
try:
# SQLAlchemy does not export the "INT" type for the MS SQL Server dialect; however "INT" is supported by the engine.
# Since SQLAlchemy exports the "INTEGER" type for the MS SQL Server dialect, alias "INT" to the "INTEGER" type.
import sqlalchemy.dialects.mssql as mssqltypes
try:
getattr(mssqltypes, "INT")
except AttributeError:
mssqltypes.INT = mssqltypes.INTEGER
except ImportError:
pass
class SqlAlchemyBatchReference:
    """Lightweight handle for a SQL batch: an engine plus a table name, a
    custom SQL query, or both (BigQuery needs a temp table *and* the sql)."""

    def __init__(self, engine, table_name=None, schema=None, query=None):
        """Store the engine and batch coordinates.

        Raises ValueError when neither a table name nor a query is given.
        """
        self._engine = engine
        if table_name is None and query is None:
            raise ValueError("Table_name or query must be specified")
        self._table_name = table_name
        self._schema = schema
        self._query = query

    def get_init_kwargs(self):
        """Build the kwargs needed to (re)construct a dataset from this reference."""
        kwargs = {"engine": self._engine}
        if self._table_name and self._query:
            # BigQuery case: a temporary table name must accompany the custom
            # sql that will be executed.
            kwargs["table_name"] = self._table_name
            kwargs["custom_sql"] = self._query
        elif self._table_name:
            kwargs["table_name"] = self._table_name
        else:
            kwargs["custom_sql"] = self._query
        if self._schema:
            kwargs["schema"] = self._schema
        return kwargs
class MetaSqlAlchemyDataset(Dataset):
    """Dataset mixin that implements the ``column_map_expectation`` decorator
    for SQLAlchemy-backed datasets: per-row expectations are expressed as SQL
    filter conditions and evaluated database-side via count queries.
    """

    def __init__(self, *args, **kwargs):
        # Forward all arguments to the Dataset base class.
        super().__init__(*args, **kwargs)
    @classmethod
    def column_map_expectation(cls, func):
        """For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter
        that describes the expected condition on their data.
        The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted
        object.
        """
        # Argument names of the wrapped expectation, excluding "self"; these
        # become the recognized kwargs of the generated expectation.
        argspec = inspect.getfullargspec(func)[0][1:]
        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(
            self, column, mostly=None, result_format=None, *args, **kwargs
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]
            result_format = parse_result_format(result_format)
            # COMPLETE means no LIMIT on the unexpected-values query.
            if result_format["result_format"] == "COMPLETE":
                warnings.warn(
                    "Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results."
                )
                unexpected_count_limit = None
            else:
                unexpected_count_limit = result_format["partial_unexpected_count"]
            # The wrapped expectation returns the SQL condition that rows are
            # expected to satisfy.
            expected_condition: BinaryExpression = func(self, column, *args, **kwargs)
            # Added to prepare for when an ignore_values argument is added to the expectation
            ignore_values: list = [None]
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                ignore_values = []
                # Counting the number of unexpected values can be expensive when there is a large
                # number of np.nan values.
                # This only happens on expect_column_values_to_not_be_null expectations.
                # Since there is no reason to look for most common unexpected values in this case,
                # we will instruct the result formatting method to skip this step.
                result_format["partial_unexpected_count"] = 0
            # Translate ignore_values into a SQL condition; None must be
            # handled with IS NULL rather than IN (...).
            ignore_values_conditions: List[BinaryExpression] = []
            if (
                len(ignore_values) > 0
                and None not in ignore_values
                or len(ignore_values) > 1
                and None in ignore_values
            ):
                ignore_values_conditions += [
                    sa.column(column).in_(
                        [val for val in ignore_values if val is not None]
                    )
                ]
            if None in ignore_values:
                ignore_values_conditions += [sa.column(column).is_(None)]
            ignore_values_condition: BinaryExpression
            if len(ignore_values_conditions) > 1:
                ignore_values_condition = sa.or_(*ignore_values_conditions)
            elif len(ignore_values_conditions) == 1:
                ignore_values_condition = ignore_values_conditions[0]
            else:
                # Nothing to ignore: build an always-false condition
                # ("FALSE = TRUE") so the filter is a no-op.
                ignore_values_condition = BinaryExpression(
                    sa.literal(False), sa.literal(True), custom_op("=")
                )
            # mssql uses a temp-table based counting strategy (see
            # _get_count_query_mssql); all other dialects use one aggregate query.
            count_query: Select
            if self.sql_engine_dialect.name.lower() == "mssql":
                count_query = self._get_count_query_mssql(
                    expected_condition=expected_condition,
                    ignore_values_condition=ignore_values_condition,
                )
            else:
                count_query = self._get_count_query_generic_sqlalchemy(
                    expected_condition=expected_condition,
                    ignore_values_condition=ignore_values_condition,
                )
            count_results: dict = dict(self.engine.execute(count_query).fetchone())
            # Handle case of empty table gracefully:
            if (
                "element_count" not in count_results
                or count_results["element_count"] is None
            ):
                count_results["element_count"] = 0
            if "null_count" not in count_results or count_results["null_count"] is None:
                count_results["null_count"] = 0
            if (
                "unexpected_count" not in count_results
                or count_results["unexpected_count"] is None
            ):
                count_results["unexpected_count"] = 0
            # Some engines may return Decimal from count queries (lookin' at you MSSQL)
            # Convert to integers
            count_results["element_count"] = int(count_results["element_count"])
            count_results["null_count"] = int(count_results["null_count"])
            count_results["unexpected_count"] = int(count_results["unexpected_count"])
            # Retrieve unexpected values
            unexpected_query_results = self.engine.execute(
                sa.select([sa.column(column)])
                .select_from(self._table)
                .where(
                    sa.and_(
                        sa.not_(expected_condition), sa.not_(ignore_values_condition)
                    )
                )
                .limit(unexpected_count_limit)
            )
            nonnull_count: int = count_results["element_count"] - count_results[
                "null_count"
            ]
            if "output_strftime_format" in kwargs:
                # Render datetime values (parsing strings first) with the
                # caller-supplied strftime format.
                output_strftime_format = kwargs["output_strftime_format"]
                maybe_limited_unexpected_list = []
                for x in unexpected_query_results.fetchall():
                    if isinstance(x[column], str):
                        col = parse(x[column])
                    else:
                        col = x[column]
                    maybe_limited_unexpected_list.append(
                        datetime.strftime(col, output_strftime_format)
                    )
            else:
                maybe_limited_unexpected_list = [
                    x[column] for x in unexpected_query_results.fetchall()
                ]
            success_count = nonnull_count - count_results["unexpected_count"]
            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )
            return_obj = self._format_map_output(
                result_format,
                success,
                count_results["element_count"],
                nonnull_count,
                count_results["unexpected_count"],
                maybe_limited_unexpected_list,
                None,
            )
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                # These results are unnecessary for the above expectations
                del return_obj["result"]["unexpected_percent_nonmissing"]
                del return_obj["result"]["missing_count"]
                del return_obj["result"]["missing_percent"]
                try:
                    del return_obj["result"]["partial_unexpected_counts"]
                    del return_obj["result"]["partial_unexpected_list"]
                except KeyError:
                    pass
            return return_obj
        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper
    def _get_count_query_mssql(
        self,
        expected_condition: BinaryExpression,
        ignore_values_condition: BinaryExpression,
    ) -> Select:
        """Build the element/null/unexpected count query for MS SQL Server.

        Materializes the per-row unexpected flag into a temporary table first,
        then combines the aggregate subqueries into a single SELECT.
        """
        # mssql expects all temporary table names to have a prefix '#'
        temp_table_name: str = f"#ge_tmp_{str(uuid.uuid4())[:8]}"
        with self.engine.begin():
            metadata: sa.MetaData = sa.MetaData(self.engine)
            temp_table_obj: sa.Table = sa.Table(
                temp_table_name,
                metadata,
                sa.Column("condition", sa.Integer, primary_key=False, nullable=False),
            )
            temp_table_obj.create(self.engine, checkfirst=True)
            # 1 when the row is unexpected (fails the condition and is not an
            # ignored value), else 0.
            count_case_statement: List[sa.sql.elements.Label] = [
                sa.case(
                    [
                        (
                            sa.and_(
                                sa.not_(expected_condition),
                                sa.not_(ignore_values_condition),
                            ),
                            1,
                        )
                    ],
                    else_=0,
                ).label("condition")
            ]
            inner_case_query: sa.sql.dml.Insert = temp_table_obj.insert().from_select(
                count_case_statement,
                sa.select(count_case_statement).select_from(self._table),
            )
            self.engine.execute(inner_case_query)
            element_count_query: Select = sa.select(
                [
                    sa.func.count().label("element_count"),
                    sa.func.sum(sa.case([(ignore_values_condition, 1)], else_=0)).label(
                        "null_count"
                    ),
                ]
            ).select_from(self._table).alias("ElementAndNullCountsSubquery")
            unexpected_count_query: Select = sa.select(
                [sa.func.sum(sa.column("condition")).label("unexpected_count"),]
            ).select_from(temp_table_obj).alias("UnexpectedCountSubquery")
            count_query: Select = sa.select(
                [
                    element_count_query.c.element_count,
                    element_count_query.c.null_count,
                    unexpected_count_query.c.unexpected_count,
                ]
            )
        return count_query
    def _get_count_query_generic_sqlalchemy(
        self,
        expected_condition: BinaryExpression,
        ignore_values_condition: BinaryExpression,
    ) -> Select:
        """Build a single aggregate query returning element_count, null_count,
        and unexpected_count for dialects other than mssql."""
        return sa.select(
            [
                sa.func.count().label("element_count"),
                sa.func.sum(sa.case([(ignore_values_condition, 1)], else_=0)).label(
                    "null_count"
                ),
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    sa.not_(expected_condition),
                                    sa.not_(ignore_values_condition),
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("unexpected_count"),
            ]
        ).select_from(self._table)
class SqlAlchemyDataset(MetaSqlAlchemyDataset):
"""
--ge-feature-maturity-info--
id: validation_engine_sqlalchemy
title: Validation Engine - SQLAlchemy
icon:
short_description: Use SQLAlchemy to validate data in a database
description: Use SQLAlchemy to validate data in a database
how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/creating_batches/how_to_load_a_database_table_or_a_query_result_as_a_batch.html
maturity: Production
maturity_details:
api_stability: High
implementation_completeness: Moderate (temp table handling/permissions not universal)
unit_test_coverage: High
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal (none)
bug_risk: Low
--ge-feature-maturity-info--
"""
@classmethod
def from_dataset(cls, dataset=None):
if isinstance(dataset, SqlAlchemyDataset):
return cls(table_name=str(dataset._table.name), engine=dataset.engine)
else:
raise ValueError("from_dataset requires a SqlAlchemy dataset")
def __init__(
self,
table_name=None,
engine=None,
connection_string=None,
custom_sql=None,
schema=None,
*args,
**kwargs,
):
if custom_sql and not table_name:
# NOTE: Eugene 2020-01-31: @James, this is a not a proper fix, but without it the "public" schema
# was used for a temp table and raising an error
schema = None
table_name = f"ge_tmp_{str(uuid.uuid4())[:8]}"
# mssql expects all temporary table names to have a prefix '#'
if engine.dialect.name.lower() == "mssql":
table_name = f"#{table_name}"
generated_table_name = table_name
else:
generated_table_name = None
if table_name is None:
raise ValueError("No table_name provided.")
if engine is None and connection_string is None:
raise ValueError("Engine or connection_string must be provided.")
if engine is not None:
self.engine = engine
else:
try:
self.engine = sa.create_engine(connection_string)
except Exception as err:
# Currently we do no error handling if the engine doesn't work out of the box.
raise err
if self.engine.dialect.name.lower() == "bigquery":
# In BigQuery the table name is already qualified with its schema name
self._table = sa.Table(table_name, sa.MetaData(), schema=None)
else:
self._table = sa.Table(table_name, sa.MetaData(), schema=schema)
# Get the dialect **for purposes of identifying types**
if self.engine.dialect.name.lower() in [
"postgresql",
"mysql",
"sqlite",
"oracle",
"mssql",
"oracle",
]:
# These are the officially included and supported dialects by sqlalchemy
self.dialect = import_library_module(
module_name="sqlalchemy.dialects." + self.engine.dialect.name
)
if engine and engine.dialect.name.lower() in ["sqlite", "mssql"]:
# sqlite/mssql temp tables only persist within a connection so override the engine
self.engine = engine.connect()
elif self.engine.dialect.name.lower() == "snowflake":
self.dialect = import_library_module(
module_name="snowflake.sqlalchemy.snowdialect"
)
elif self.engine.dialect.name.lower() == "redshift":
self.dialect = import_library_module(
module_name="sqlalchemy_redshift.dialect"
)
elif self.engine.dialect.name.lower() == "bigquery":
self.dialect = import_library_module(
module_name="pybigquery.sqlalchemy_bigquery"
)
else:
self.dialect = None
if schema is not None and custom_sql is not None:
# temporary table will be written to temp schema, so don't allow
# a user-defined schema
# NOTE: 20200306 - JPC - Previously, this would disallow both custom_sql (a query) and a schema, but
# that is overly restrictive -- snowflake could have had a schema specified, for example, in which to create
# a temporary table.
# raise ValueError("Cannot specify both schema and custom_sql.")
pass
if custom_sql is not None and self.engine.dialect.name.lower() == "bigquery":
if (
generated_table_name is not None
and self.engine.dialect.dataset_id is None
):
raise ValueError(
"No BigQuery dataset specified. Use bigquery_temp_table batch_kwarg or a specify a "
"default dataset in engine url"
)
if (
custom_sql is not None
and self.engine.dialect.name.lower() == "snowflake"
and generated_table_name is not None
):
raise ValueError(
"No snowflake_transient_table specified. Snowflake with a query batch_kwarg will create "
"a transient table, so you must provide a user-selected name."
)
if custom_sql:
self.create_temporary_table(table_name, custom_sql, schema_name=schema)
if (
generated_table_name is not None
and self.engine.dialect.name.lower() == "bigquery"
):
logger.warning(
"Created permanent table {table_name}".format(table_name=table_name)
)
try:
insp = reflection.Inspector.from_engine(self.engine)
self.columns = insp.get_columns(table_name, schema=schema)
except KeyError:
# we will get a KeyError for temporary tables, since
# reflection will not find the temporary schema
self.columns = self.column_reflection_fallback()
# Use fallback because for mssql reflection doesn't throw an error but returns an empty list
if len(self.columns) == 0:
self.columns = self.column_reflection_fallback()
# Only call super once connection is established and table_name and columns known to allow autoinspection
super().__init__(*args, **kwargs)
@property
def sql_engine_dialect(self) -> DefaultDialect:
return self.engine.dialect
def attempt_allowing_relative_error(self):
detected_redshift: bool = (
sqlalchemy_redshift is not None
and check_sql_engine_dialect(
actual_sql_engine_dialect=self.sql_engine_dialect,
candidate_sql_engine_dialect=sqlalchemy_redshift.dialect.RedshiftDialect,
)
)
# noinspection PyTypeChecker
detected_psycopg2: bool = (
sqlalchemy_psycopg2 is not None
and check_sql_engine_dialect(
actual_sql_engine_dialect=self.sql_engine_dialect,
candidate_sql_engine_dialect=sqlalchemy_psycopg2.PGDialect_psycopg2,
)
)
return detected_redshift or detected_psycopg2
    def head(self, n=5):
        """Returns a *PandasDataset* with the first *n* rows of the given Dataset"""
        try:
            # Preferred path: let pandas reflect the table and stream one chunk.
            df = next(
                pd.read_sql_table(
                    table_name=self._table.name,
                    schema=self._table.schema,
                    con=self.engine,
                    chunksize=n,
                )
            )
        except (ValueError, NotImplementedError):
            # it looks like MetaData that is used by pd.read_sql_table
            # cannot work on a temp table.
            # If it fails, we are trying to get the data using read_sql
            head_sql_str = "select * from "
            if self._table.schema and self.engine.dialect.name.lower() != "bigquery":
                head_sql_str += self._table.schema + "." + self._table.name
            elif self.engine.dialect.name.lower() == "bigquery":
                # BigQuery table names are backtick-quoted.
                head_sql_str += "`" + self._table.name + "`"
            else:
                head_sql_str += self._table.name
            head_sql_str += " limit {:d}".format(n)
            # Limit is unknown in mssql! Use top instead!
            if self.engine.dialect.name.lower() == "mssql":
                head_sql_str = "select top({n}) * from {table}".format(
                    n=n, table=self._table.name
                )
            df = pd.read_sql(head_sql_str, con=self.engine)
        except StopIteration:
            # Empty table: next() on the chunk iterator raised immediately.
            df = pd.DataFrame(columns=self.get_table_columns())
        return PandasDataset(
            df,
            expectation_suite=self.get_expectation_suite(
                discard_failed_expectations=False,
                discard_result_format_kwargs=False,
                discard_catch_exceptions_kwargs=False,
                discard_include_config_kwargs=False,
            ),
        )
def get_row_count(self, table_name=None):
if table_name is None:
table_name = self._table
else:
table_name = sa.table(table_name)
count_query = sa.select([sa.func.count()]).select_from(table_name)
return int(self.engine.execute(count_query).scalar())
def get_column_count(self):
return len(self.columns)
def get_table_columns(self) -> List[str]:
return [col["name"] for col in self.columns]
    def get_column_nonnull_count(self, column):
        """Return the number of non-null values in *column*, computed as
        total row count minus the null count, in a single aggregate query."""
        ignore_values = [None]
        count_query = sa.select(
            [
                sa.func.count().label("element_count"),
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.or_(
                                    sa.column(column).in_(ignore_values),
                                    # Below is necessary b/c sa.in_() uses `==` but None != None
                                    # But we only consider this if None is actually in the list of ignore values
                                    sa.column(column).is_(None)
                                    if None in ignore_values
                                    else False,
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("null_count"),
            ]
        ).select_from(self._table)
        count_results = dict(self.engine.execute(count_query).fetchone())
        # SUM over an empty table yields NULL; coerce missing/None to 0.
        element_count = int(count_results.get("element_count") or 0)
        null_count = int(count_results.get("null_count") or 0)
        return element_count - null_count
def get_column_sum(self, column):
return self.engine.execute(
sa.select([sa.func.sum(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_max(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return self.engine.execute(
sa.select([sa.func.max(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_min(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return self.engine.execute(
sa.select([sa.func.min(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_value_counts(self, column, sort="value", collate=None):
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'")
query = (
sa.select(
[
sa.column(column).label("value"),
sa.func.count(sa.column(column)).label("count"),
]
)
.where(sa.column(column) != None)
.group_by(sa.column(column))
)
if sort == "value":
# NOTE: depending on the way the underlying database collates columns,
# ordering can vary. postgresql collate "C" matches default sort
# for python and most other systems, but is not universally supported,
# so we use the default sort for the system, unless specifically overridden
if collate is not None:
query = query.order_by(sa.column(column).collate(collate))
else:
query = query.order_by(sa.column(column))
elif sort == "count":
query = query.order_by(sa.column("count").desc())
results = self.engine.execute(query.select_from(self._table)).fetchall()
series = pd.Series(
[row[1] for row in results],
index=pd.Index(data=[row[0] for row in results], name="value"),
name="count",
)
return series
def get_column_mean(self, column):
return self.engine.execute(
sa.select([sa.func.avg(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_unique_count(self, column):
return self.engine.execute(
sa.select([sa.func.count(sa.func.distinct(sa.column(column)))]).select_from(
self._table
)
).scalar()
def get_column_median(self, column):
nonnull_count = self.get_column_nonnull_count(column)
element_values = self.engine.execute(
sa.select([sa.column(column)])
.order_by(sa.column(column))
.where(sa.column(column) != None)
.offset(max(nonnull_count // 2 - 1, 0))
.limit(2)
.select_from(self._table)
)
column_values = list(element_values.fetchall())
if len(column_values) == 0:
column_median = None
elif nonnull_count % 2 == 0:
# An even number of column values: take the average of the two center values
column_median = (
float(
column_values[0][0]
+ column_values[1][0] # left center value # right center value
)
/ 2.0
) # Average center values
else:
# An odd number of column values, we can just take the center value
column_median = column_values[1][0] # True center value
return column_median
def get_column_quantiles(
self, column: str, quantiles: Iterable, allow_relative_error: bool = False
) -> list:
if self.sql_engine_dialect.name.lower() == "mssql":
return self._get_column_quantiles_mssql(column=column, quantiles=quantiles)
elif self.sql_engine_dialect.name.lower() == "bigquery":
return self._get_column_quantiles_bigquery(
column=column, quantiles=quantiles
)
elif self.sql_engine_dialect.name.lower() == "mysql":
return self._get_column_quantiles_mysql(column=column, quantiles=quantiles)
else:
return self._get_column_quantiles_generic_sqlalchemy(
column=column,
quantiles=quantiles,
allow_relative_error=allow_relative_error,
)
def _get_column_quantiles_mssql(self, column: str, quantiles: Iterable) -> list:
# mssql requires over(), so we add an empty over() clause
selects: List[WithinGroup] = [
sa.func.percentile_disc(quantile)
.within_group(sa.column(column).asc())
.over()
for quantile in quantiles
]
quantiles_query: Select = sa.select(selects).select_from(self._table)
try:
quantiles_results: RowProxy = self.engine.execute(
quantiles_query
).fetchone()
return list(quantiles_results)
except ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise pe
def _get_column_quantiles_bigquery(self, column: str, quantiles: Iterable) -> list:
# BigQuery does not support "WITHIN", so we need a special case for it
selects: List[WithinGroup] = [
sa.func.percentile_disc(sa.column(column), quantile).over()
for quantile in quantiles
]
quantiles_query: Select = sa.select(selects).select_from(self._table)
try:
quantiles_results: RowProxy = self.engine.execute(
quantiles_query
).fetchone()
return list(quantiles_results)
except ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise pe
    def _get_column_quantiles_mysql(self, column: str, quantiles: Iterable) -> list:
        # MySQL does not support "percentile_disc", so we implement it as a compound query.
        # Please see https://stackoverflow.com/questions/19770026/calculate-percentile-value-using-mysql for reference.
        # CTE "t": each row paired with its cumulative percent rank over the column.
        percent_rank_query: CTE = sa.select(
            [
                sa.column(column),
                sa.cast(
                    sa.func.percent_rank().over(order_by=sa.column(column).asc()),
                    sa.dialects.mysql.DECIMAL(18, 15),
                ).label("p"),
            ]
        ).order_by(sa.column("p").asc()).select_from(self._table).cte("t")
        selects: List[WithinGroup] = []
        for idx, quantile in enumerate(quantiles):
            # pymysql cannot handle conversion of numpy float64 to float; convert just in case
            if np.issubdtype(type(quantile), np.float_):
                quantile = float(quantile)
            # For each requested quantile, pick the value whose percent rank is
            # the largest one not exceeding the quantile (percentile_disc semantics).
            quantile_column: Label = sa.func.first_value(sa.column(column)).over(
                order_by=sa.case(
                    [
                        (
                            percent_rank_query.c.p
                            <= sa.cast(quantile, sa.dialects.mysql.DECIMAL(18, 15)),
                            percent_rank_query.c.p,
                        )
                    ],
                    else_=None,
                ).desc()
            ).label(f"q_{idx}")
            selects.append(quantile_column)
        quantiles_query: Select = sa.select(selects).distinct().order_by(
            percent_rank_query.c.p.desc()
        )
        try:
            quantiles_results: RowProxy = self.engine.execute(
                quantiles_query
            ).fetchone()
            return list(quantiles_results)
        except ProgrammingError as pe:
            exception_message: str = "An SQL syntax Exception occurred."
            exception_traceback: str = traceback.format_exc()
            exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
            logger.error(exception_message)
            raise pe
    # Support for computing the quantiles column for PostgreSQL and Redshift is included in the same method as that for
    # the generic sqlalchemy-compatible DBMS engine, because users often use the postgresql driver to connect to Redshift.
    # The key functional difference is that Redshift does not support the aggregate function
    # "percentile_disc", but does support the approximate percentile_disc or percentile_cont function version instead.
    def _get_column_quantiles_generic_sqlalchemy(
        self, column: str, quantiles: Iterable, allow_relative_error: bool
    ) -> list:
        """Compute quantiles with PERCENTILE_DISC ... WITHIN GROUP; on dialects
        that reject it (e.g. Redshift), optionally fall back to the approximate
        variant when *allow_relative_error* permits."""
        selects: List[WithinGroup] = [
            sa.func.percentile_disc(quantile).within_group(sa.column(column).asc())
            for quantile in quantiles
        ]
        quantiles_query: Select = sa.select(selects).select_from(self._table)
        try:
            quantiles_results: RowProxy = self.engine.execute(
                quantiles_query
            ).fetchone()
            return list(quantiles_results)
        except ProgrammingError:
            # ProgrammingError: (psycopg2.errors.SyntaxError) Aggregate function "percentile_disc" is not supported;
            # use approximate percentile_disc or percentile_cont instead.
            if self.attempt_allowing_relative_error():
                # Redshift does not have a percentile_disc method, but does support an approximate version.
                sql_approx: str = get_approximate_percentile_disc_sql(
                    selects=selects, sql_engine_dialect=self.sql_engine_dialect
                )
                selects_approx: List[TextClause] = [sa.text(sql_approx)]
                quantiles_query_approx: Select = sa.select(selects_approx).select_from(
                    self._table
                )
                if allow_relative_error:
                    try:
                        quantiles_results: RowProxy = self.engine.execute(
                            quantiles_query_approx
                        ).fetchone()
                        return list(quantiles_results)
                    except ProgrammingError as pe:
                        exception_message: str = "An SQL syntax Exception occurred."
                        exception_traceback: str = traceback.format_exc()
                        exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
                        logger.error(exception_message)
                        raise pe
                else:
                    raise ValueError(
                        f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles '
                        "without approximation error; set allow_relative_error to True to allow approximate quantiles."
                    )
            else:
                raise ValueError(
                    f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles with '
                    "approximation error; set allow_relative_error to False to disable approximate quantiles."
                )
def get_column_stdev(self, column):
if self.sql_engine_dialect.name.lower() == "mssql":
# Note: "stdev_samp" is not a recognized built-in function name (but "stdev" does exist for "mssql").
# This function is used to compute statistical standard deviation from sample data (per the reference in
# https://sqlserverrider.wordpress.com/2013/03/06/standard-deviation-functions-stdev-and-stdevp-sql-server).
res = self.engine.execute(
sa.select([sa.func.stdev(sa.column(column))])
.select_from(self._table)
.where(sa.column(column) is not None)
).fetchone()
else:
res = self.engine.execute(
sa.select([sa.func.stddev_samp(sa.column(column))])
.select_from(self._table)
.where(sa.column(column) is not None)
).fetchone()
return float(res[0])
    def get_column_hist(self, column, bins):
        """return a list of counts corresponding to bins
        Args:
            column: the name of the column for which to get the histogram
            bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
        """
        case_conditions = []
        idx = 0
        bins = list(bins)
        # If we have an infinite lower bound, don't express that in sql
        if (
            bins[0]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            bins[0]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            # First bin is (-inf, bins[1]): only an upper bound is needed.
            case_conditions.append(
                sa.func.sum(
                    sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)
                ).label("bin_" + str(idx))
            )
            idx += 1
        # Interior bins are half-open: [bins[idx], bins[idx + 1]).
        for idx in range(idx, len(bins) - 2):
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    bins[idx] <= sa.column(column),
                                    sa.column(column) < bins[idx + 1],
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("bin_" + str(idx))
            )
        if (
            bins[-1]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            bins[-1]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            # Last bin is [bins[-2], +inf): only a lower bound is needed.
            case_conditions.append(
                sa.func.sum(
                    sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)
                ).label("bin_" + str(len(bins) - 1))
            )
        else:
            # Last finite bin is closed on both ends: [bins[-2], bins[-1]].
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    bins[-2] <= sa.column(column),
                                    sa.column(column) <= bins[-1],
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("bin_" + str(len(bins) - 1))
            )
        query = (
            sa.select(case_conditions)
            .where(sa.column(column) != None,)
            .select_from(self._table)
        )
        # Run the data through convert_to_json_serializable to ensure we do not have Decimal types
        hist = convert_to_json_serializable(list(self.engine.execute(query).fetchone()))
        return hist
    def get_column_count_in_range(
        self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
    ):
        """Return the count of non-null values of *column* within
        [min_val, max_val], with bound strictness controlled by strict_min /
        strict_max. At least one bound must be given.
        """
        if min_val is None and max_val is None:
            raise ValueError("Must specify either min or max value")
        if min_val is not None and max_val is not None and min_val > max_val:
            raise ValueError("Min value must be <= to max value")
        # Translate API-level +/-infinity sentinels into the dialect-specific
        # representation before building the SQL conditions.
        if (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            min_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=True
            )
        if (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            min_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=False
            )
        if (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            max_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=True
            )
        if (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            max_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=False
            )
        min_condition = None
        max_condition = None
        if min_val is not None:
            if strict_min:
                min_condition = sa.column(column) > min_val
            else:
                min_condition = sa.column(column) >= min_val
        if max_val is not None:
            if strict_max:
                max_condition = sa.column(column) < max_val
            else:
                max_condition = sa.column(column) <= max_val
        if min_condition is not None and max_condition is not None:
            condition = sa.and_(min_condition, max_condition)
        elif min_condition is not None:
            condition = min_condition
        else:
            condition = max_condition
        # NULL values are excluded explicitly in addition to the range condition.
        query = (
            sa.select([sa.func.count(sa.column(column))])
            .where(sa.and_(sa.column(column) != None, condition))
            .select_from(self._table)
        )
        return self.engine.execute(query).scalar()
    def create_temporary_table(self, table_name, custom_sql, schema_name=None):
        """
        Create Temporary table based on sql query. This will be used as a basis for executing expectations.
        WARNING: this feature is new in v0.4.
        It hasn't been tested in all SQL dialects, and may change based on community feedback.
        :param table_name: name of the temporary table to create
        :param custom_sql: query whose result populates the temporary table
        :param schema_name: optional schema in which to create the table
        """
        ###
        # NOTE: 20200310 - The update to support snowflake transient table creation revealed several
        # import cases that are not fully handled.
        # The snowflake-related change updated behavior to allow both custom_sql and schema to be specified. But
        # the underlying incomplete handling of schema remains.
        #
        # Several cases we need to consider:
        #
        # 1. Distributed backends (e.g. Snowflake and BigQuery) often use a `<database>.<schema>.<table>`
        # syntax, but currently we are biased towards only allowing schema.table
        #
        # 2. In the wild, we see people using several ways to declare the schema they want to use:
        # a. In the connection string, the original RFC only specifies database, but schema is supported by some
        # backends (Snowflake) as a query parameter.
        # b. As a default for a user (the equivalent of USE SCHEMA being provided at the beginning of a session)
        # c. As part of individual queries.
        #
        # 3. We currently don't make it possible to select from a table in one query, but create a temporary table in
        # another schema, except for with BigQuery and (now) snowflake, where you can specify the table name (and
        # potentially triple of database, schema, table) in the batch_kwargs.
        #
        # The SqlAlchemyDataset interface essentially predates the batch_kwargs concept and so part of what's going
        # on, I think, is a mismatch between those. I think we should rename custom_sql -> "temp_table_query" or
        # similar, for example.
        ###
        if self.sql_engine_dialect.name.lower() == "bigquery":
            # BigQuery has no temporary tables here; a permanent table is
            # created (the caller logs a warning about this).
            stmt = "CREATE OR REPLACE TABLE `{table_name}` AS {custom_sql}".format(
                table_name=table_name, custom_sql=custom_sql
            )
        elif self.sql_engine_dialect.name.lower() == "snowflake":
            logger.info("Creating transient table %s" % table_name)
            if schema_name is not None:
                table_name = schema_name + "." + table_name
            stmt = "CREATE OR REPLACE TRANSIENT TABLE {table_name} AS {custom_sql}".format(
                table_name=table_name, custom_sql=custom_sql
            )
        elif self.sql_engine_dialect.name == "mysql":
            # Note: We can keep the "MySQL" clause separate for clarity, even though it is the same as the generic case.
            stmt = "CREATE TEMPORARY TABLE {table_name} AS {custom_sql}".format(
                table_name=table_name, custom_sql=custom_sql
            )
        elif self.sql_engine_dialect.name == "mssql":
            # Insert "into #{table_name}" in the custom sql query right before the "from" clause
            # Split is case sensitive so detect case.
            # Note: transforming custom_sql to uppercase/lowercase has unintended consequences (i.e., changing column names), so this is not an option!
            # NOTE(review): mixed-case "From"/"fRom" is not detected, and a
            # lowercase "from" occurring earlier in the query (e.g. inside a
            # string) would be split instead of the clause keyword — confirm.
            if "from" in custom_sql:
                strsep = "from"
            else:
                strsep = "FROM"
            custom_sqlmod = custom_sql.split(strsep, maxsplit=1)
            stmt = (
                custom_sqlmod[0] + "into {table_name} from" + custom_sqlmod[1]
            ).format(table_name=table_name)
        else:
            stmt = 'CREATE TEMPORARY TABLE "{table_name}" AS {custom_sql}'.format(
                table_name=table_name, custom_sql=custom_sql
            )
        self.engine.execute(stmt)
    def column_reflection_fallback(self):
        """If we can't reflect the table, use a query to at least get column names."""
        col_info_dict_list: List[Dict]
        if self.sql_engine_dialect.name.lower() == "mssql":
            type_module = self._get_dialect_type_module()
            # Get column names and types from the database
            # StackOverflow to the rescue: https://stackoverflow.com/a/38634368
            # NOTE(review): queries tempdb, so self._table is presumably a
            # temporary table on mssql -- confirm for permanent tables.
            col_info_query: TextClause = sa.text(
                f"""
                SELECT
                    cols.NAME, ty.NAME
                FROM
                    tempdb.sys.columns AS cols
                JOIN
                    sys.types AS ty
                ON
                    cols.user_type_id = ty.user_type_id
                WHERE
                    object_id = OBJECT_ID('tempdb..{self._table}')
                """
            )
            col_info_tuples_list = self.engine.execute(col_info_query).fetchall()
            # Instantiate a dialect type object for each (name, type-name)
            # pair, e.g. "VARCHAR" -> type_module.VARCHAR().
            col_info_dict_list = [
                {"name": col_name, "type": getattr(type_module, col_type.upper())()}
                for col_name, col_type in col_info_tuples_list
            ]
        else:
            # Generic fallback: select a single row and read the result
            # proxy's keys to discover column names (type info unavailable).
            query: Select = sa.select([sa.text("*")]).select_from(self._table).limit(1)
            col_names: list = self.engine.execute(query).keys()
            col_info_dict_list = [{"name": col_name} for col_name in col_names]
        return col_info_dict_list
###
###
###
#
# Table Expectation Implementations
#
###
###
###
# noinspection PyUnusedLocal
@DocInherit
@MetaSqlAlchemyDataset.expectation(["other_table_name"])
def expect_table_row_count_to_equal_other_table(
self,
other_table_name,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows in this table to equal the number of rows in a different table.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
other_table_name (str): \
The name of the other table to which to compare.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
row_count = self.get_row_count()
other_table_row_count = self.get_row_count(table_name=other_table_name)
return {
"success": row_count == other_table_row_count,
"result": {
"observed_value": {"self": row_count, "other": other_table_row_count,}
},
}
###
###
###
#
# Column Map Expectation Implementations
#
###
###
###
    @DocInherit
    @MetaSqlAlchemyDataset.column_map_expectation
    def expect_column_values_to_be_null(
        self,
        column,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        # Return the per-row condition; the decorator evaluates it as SQL.
        # NOTE: ``== None`` is deliberate -- SQLAlchemy overloads the operator
        # to emit ``IS NULL``. Do not "fix" it to ``is None``.
        return sa.column(column) == None
    @DocInherit
    @MetaSqlAlchemyDataset.column_map_expectation
    def expect_column_values_to_not_be_null(
        self,
        column,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        # Return the per-row condition; the decorator evaluates it as SQL.
        # NOTE: ``!= None`` is deliberate -- SQLAlchemy overloads the operator
        # to emit ``IS NOT NULL``. Do not "fix" it to ``is not None``.
        return sa.column(column) != None
    def _get_dialect_type_module(self):
        """Return the module/namespace holding the type classes for the current dialect.

        Falls back to top-level ``sa`` types when no dialect is available and
        special-cases Redshift and BigQuery quirks.
        """
        if self.dialect is None:
            logger.warning(
                "No sqlalchemy dialect found; relying in top-level sqlalchemy types."
            )
            return sa
        try:
            # Redshift does not (yet) export types to top level; only recognize base SA types
            if isinstance(
                self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
            ):
                return self.dialect.sa
        except (TypeError, AttributeError):
            # TypeError can occur when the optional driver is absent (None).
            pass
        # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
        try:
            if (
                isinstance(
                    self.sql_engine_dialect,
                    pybigquery.sqlalchemy_bigquery.BigQueryDialect,
                )
                and bigquery_types_tuple is not None
            ):
                return bigquery_types_tuple
        except (TypeError, AttributeError):
            pass
        return self.dialect
    @DocInherit
    @DataAsset.expectation(["column", "type_", "mostly"])
    def expect_column_values_to_be_of_type(
        self,
        column,
        type_,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        # Column type is table-level metadata here, not a row-by-row check,
        # so "mostly" semantics cannot apply.
        if mostly is not None:
            raise ValueError(
                "SqlAlchemyDataset does not support column map semantics for column types"
            )
        try:
            # self.columns holds reflected metadata dicts; locate this column's.
            col_data = [col for col in self.columns if col["name"] == column][0]
            col_type = type(col_data["type"])
        except IndexError:
            raise ValueError("Unrecognized column: %s" % column)
        except KeyError:
            raise ValueError("No database type data available for column: %s" % column)
        try:
            # Our goal is to be as explicit as possible. We will match the dialect
            # if that is possible. If there is no dialect available, we *will*
            # match against a top-level SqlAlchemy type if that's possible.
            #
            # This is intended to be a conservative approach.
            #
            # In particular, we *exclude* types that would be valid under an ORM
            # such as "float" for postgresql with this approach
            if type_ is None:
                # vacuously true
                success = True
            else:
                type_module = self._get_dialect_type_module()
                success = issubclass(col_type, getattr(type_module, type_))
            return {"success": success, "result": {"observed_value": col_type.__name__}}
        except AttributeError:
            # getattr failed: the requested type name is unknown to the driver.
            raise ValueError("Type not recognized by current driver: %s" % type_)
    @DocInherit
    @DataAsset.expectation(["column", "type_list", "mostly"])
    def expect_column_values_to_be_in_type_list(
        self,
        column,
        type_list,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        # Column type is table-level metadata here, not a row-by-row check,
        # so "mostly" semantics cannot apply.
        if mostly is not None:
            raise ValueError(
                "SqlAlchemyDataset does not support column map semantics for column types"
            )
        try:
            # self.columns holds reflected metadata dicts; locate this column's.
            col_data = [col for col in self.columns if col["name"] == column][0]
            col_type = type(col_data["type"])
        except IndexError:
            raise ValueError("Unrecognized column: %s" % column)
        except KeyError:
            raise ValueError("No database type data available for column: %s" % column)
        # Our goal is to be as explicit as possible. We will match the dialect
        # if that is possible. If there is no dialect available, we *will*
        # match against a top-level SqlAlchemy type.
        #
        # This is intended to be a conservative approach.
        #
        # In particular, we *exclude* types that would be valid under an ORM
        # such as "float" for postgresql with this approach
        if type_list is None:
            # A null type_list places no constraint at all: vacuously true.
            success = True
        else:
            types = []
            type_module = self._get_dialect_type_module()
            for type_ in type_list:
                try:
                    type_class = getattr(type_module, type_)
                    types.append(type_class)
                except AttributeError:
                    # Skip names the current dialect/driver does not define.
                    logger.debug("Unrecognized type: %s" % type_)
            if len(types) == 0:
                logger.warning(
                    "No recognized sqlalchemy types in type_list for current dialect."
                )
            # issubclass accepts a tuple of candidate classes.
            types = tuple(types)
            success = issubclass(col_type, types)
        return {"success": success, "result": {"observed_value": col_type.__name__}}
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if value_set is None:
# vacuously true
return True
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return sa.column(column).in_(tuple(parsed_value_set))
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return sa.column(column).notin_(tuple(parsed_value_set))
    @DocInherit
    @MetaSqlAlchemyDataset.column_map_expectation
    def expect_column_values_to_be_between(
        self,
        column,
        min_value=None,
        max_value=None,
        strict_min=False,
        strict_max=False,
        allow_cross_type_comparisons=None,
        parse_strings_as_datetimes=None,
        output_strftime_format=None,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        if parse_strings_as_datetimes:
            # Bounds arrive as strings; convert to datetimes for comparison.
            if min_value:
                min_value = parse(min_value)
            if max_value:
                max_value = parse(max_value)
        if min_value is not None and max_value is not None and min_value > max_value:
            raise ValueError("min_value cannot be greater than max_value")
        if min_value is None and max_value is None:
            raise ValueError("min_value and max_value cannot both be None")
        # One-sided bound: only the provided side is constrained; strict_*
        # selects exclusive (<, >) versus inclusive (<=, >=) comparison.
        if min_value is None:
            if strict_max:
                return sa.column(column) < max_value
            else:
                return sa.column(column) <= max_value
        elif max_value is None:
            if strict_min:
                return min_value < sa.column(column)
            else:
                return min_value <= sa.column(column)
        else:
            # Two-sided bound: combine both comparisons with AND.
            if strict_min and strict_max:
                return sa.and_(
                    min_value < sa.column(column), sa.column(column) < max_value
                )
            elif strict_min:
                return sa.and_(
                    min_value < sa.column(column), sa.column(column) <= max_value
                )
            elif strict_max:
                return sa.and_(
                    min_value <= sa.column(column), sa.column(column) < max_value
                )
            else:
                return sa.and_(
                    min_value <= sa.column(column), sa.column(column) <= max_value
                )
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_value_lengths_to_equal(
self,
column,
value,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return sa.func.length(sa.column(column)) == value
    @DocInherit
    @MetaSqlAlchemyDataset.column_map_expectation
    def expect_column_value_lengths_to_be_between(
        self,
        column,
        min_value=None,
        max_value=None,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        if min_value is None and max_value is None:
            raise ValueError("min_value and max_value cannot both be None")
        # Assert that min_value and max_value are integers
        try:
            if min_value is not None and not float(min_value).is_integer():
                raise ValueError("min_value and max_value must be integers")
            if max_value is not None and not float(max_value).is_integer():
                raise ValueError("min_value and max_value must be integers")
        except ValueError:
            # float() itself raised on non-numeric input; normalise the message.
            raise ValueError("min_value and max_value must be integers")
        # Exactly one of the three branches below applies, given the guard
        # above that rules out both bounds being None.
        if min_value is not None and max_value is not None:
            return sa.and_(
                sa.func.length(sa.column(column)) >= min_value,
                sa.func.length(sa.column(column)) <= max_value,
            )
        elif min_value is None and max_value is not None:
            return sa.func.length(sa.column(column)) <= max_value
        elif min_value is not None and max_value is None:
            return sa.func.length(sa.column(column)) >= min_value
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_unique(
self,
column,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# Duplicates are found by filtering a group by query
dup_query = (
sa.select([sa.column(column)])
.select_from(self._table)
.group_by(sa.column(column))
.having(sa.func.count(sa.column(column)) > 1)
)
return sa.column(column).notin_(dup_query)
    def _get_dialect_regex_expression(self, column, regex, positive=True):
        """Return a dialect-specific SQL regex condition for ``column``.

        ``positive`` selects match vs. non-match.  Returns ``None`` when the
        current dialect has no regex operator supported here; each dialect
        probe is wrapped in try/except because optional drivers may be
        missing (their module attribute is then absent or ``None``).
        """
        try:
            # postgres
            if isinstance(self.sql_engine_dialect, sa.dialects.postgresql.dialect):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("~")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("!~")
                    )
        except AttributeError:
            pass
        try:
            # redshift
            if isinstance(
                self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
            ):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("~")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("!~")
                    )
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        try:
            # MySQL
            if isinstance(self.sql_engine_dialect, sa.dialects.mysql.dialect):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("REGEXP")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("NOT REGEXP")
                    )
        except AttributeError:
            pass
        try:
            # Snowflake
            if isinstance(
                self.sql_engine_dialect,
                snowflake.sqlalchemy.snowdialect.SnowflakeDialect,
            ):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("RLIKE")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("NOT RLIKE")
                    )
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        try:
            # Bigquery
            if isinstance(
                self.sql_engine_dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect
            ):
                if positive:
                    return sa.func.REGEXP_CONTAINS(sa.column(column), literal(regex))
                else:
                    return sa.not_(
                        sa.func.REGEXP_CONTAINS(sa.column(column), literal(regex))
                    )
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        # No dialect matched: signal "unsupported" to the caller.
        return None
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_expression = self._get_dialect_regex_expression(column, regex)
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
return regex_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_expression = self._get_dialect_regex_expression(
column, regex, positive=False
)
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
return regex_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_regex_list(
self,
column,
regex_list,
match_on="any",
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if match_on not in ["any", "all"]:
raise ValueError("match_on must be any or all")
if len(regex_list) == 0:
raise ValueError("At least one regex must be supplied in the regex_list.")
regex_expression = self._get_dialect_regex_expression(column, regex_list[0])
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
if match_on == "any":
condition = sa.or_(
*[
self._get_dialect_regex_expression(column, regex)
for regex in regex_list
]
)
else:
condition = sa.and_(
*[
self._get_dialect_regex_expression(column, regex)
for regex in regex_list
]
)
return condition
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_regex_list(
self,
column,
regex_list,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if len(regex_list) == 0:
raise ValueError("At least one regex must be supplied in the regex_list.")
regex_expression = self._get_dialect_regex_expression(
column, regex_list[0], positive=False
)
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
return sa.and_(
*[
self._get_dialect_regex_expression(column, regex, positive=False)
for regex in regex_list
]
)
    def _get_dialect_like_pattern_expression(self, column, like_pattern, positive=True):
        """Return a SQL LIKE (or NOT LIKE) condition for supported dialects.

        Returns ``None`` when the current dialect is not known to support
        LIKE via this code path; optional-driver probes are wrapped in
        try/except because their modules may be missing or ``None``.
        """
        dialect_supported: bool = False
        try:
            # Bigquery
            if isinstance(
                self.sql_engine_dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect
            ):
                dialect_supported = True
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        # Core dialects that ship with SQLAlchemy itself.
        if isinstance(
            self.sql_engine_dialect,
            (
                sa.dialects.sqlite.dialect,
                sa.dialects.postgresql.dialect,
                sa.dialects.mysql.dialect,
                sa.dialects.mssql.dialect,
            ),
        ):
            dialect_supported = True
        try:
            if isinstance(
                self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
            ):
                dialect_supported = True
        except (AttributeError, TypeError):
            pass
        if dialect_supported:
            try:
                if positive:
                    return sa.column(column).like(literal(like_pattern))
                else:
                    return sa.not_(sa.column(column).like(literal(like_pattern)))
            except AttributeError:
                pass
        # Unsupported dialect: caller translates None into NotImplementedError.
        return None
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_like_pattern(
self,
column,
like_pattern,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
return like_pattern_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_like_pattern(
self,
column,
like_pattern,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern, positive=False
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
return like_pattern_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_like_pattern_list(
self,
column,
like_pattern_list,
match_on="any",
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if match_on not in ["any", "all"]:
raise ValueError("match_on must be any or all")
if len(like_pattern_list) == 0:
raise ValueError(
"At least one like_pattern must be supplied in the like_pattern_list."
)
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern_list[0]
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
if match_on == "any":
condition = sa.or_(
*[
self._get_dialect_like_pattern_expression(column, like_pattern)
for like_pattern in like_pattern_list
]
)
else:
condition = sa.and_(
*[
self._get_dialect_like_pattern_expression(column, like_pattern)
for like_pattern in like_pattern_list
]
)
return condition
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_like_pattern_list(
self,
column,
like_pattern_list,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if len(like_pattern_list) == 0:
raise ValueError(
"At least one like_pattern must be supplied in the like_pattern_list."
)
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern_list[0], positive=False
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
return sa.and_(
*[
self._get_dialect_like_pattern_expression(
column, like_pattern, positive=False
)
for like_pattern in like_pattern_list
]
)
| 37.610672 | 159 | 0.574602 |
898a60d341d4f9dc069a14dcba03ec21de0c113f | 2,050 | py | Python | datawarehouse/edw_migrations/versions/74dd3c263cb8_dimprogramoffices.py | bcgov/foi-reporting | 25856ce87b668df964ddd16ac7459fae4aa6a7c5 | [
"Apache-2.0"
] | null | null | null | datawarehouse/edw_migrations/versions/74dd3c263cb8_dimprogramoffices.py | bcgov/foi-reporting | 25856ce87b668df964ddd16ac7459fae4aa6a7c5 | [
"Apache-2.0"
] | 3 | 2022-01-05T18:01:41.000Z | 2022-02-08T21:51:32.000Z | datawarehouse/edw_migrations/versions/74dd3c263cb8_dimprogramoffices.py | bcgov/foi-reporting | 25856ce87b668df964ddd16ac7459fae4aa6a7c5 | [
"Apache-2.0"
] | null | null | null | """dimProgramOffices
Revision ID: 74dd3c263cb8
Revises: 6500637aab43
Create Date: 2022-01-26 23:52:23.698157
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '74dd3c263cb8'
down_revision = '6500637aab43'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``dimProgramOffices`` dimension table.

    Column groups (by prefix): identity fields, correspondence address
    ("corr*"), contact details, status flags, and remittance address
    ("rem*").  NOTE(review): the wide VARCHAR(3000)/VARCHAR(4000) fields
    presumably mirror the source system -- confirm before narrowing.
    """
    op.create_table('dimProgramOffices',
    sa.Column('programofficeid', sa.Integer(), nullable=False),
    sa.Column('programofficename', sa.VARCHAR(length=3000)),
    sa.Column('corraddress1', sa.VARCHAR(length=150)),
    sa.Column('corraddress2', sa.VARCHAR(length=80)),
    sa.Column('corrcity', sa.VARCHAR(length=30)),
    sa.Column('corrstate', sa.VARCHAR(length=3000)),
    sa.Column('corrstatecode', sa.CHAR(length=5)),
    sa.Column('corrzipcode', sa.VARCHAR(length=10)),
    sa.Column('corrcountry', sa.VARCHAR(length=3000)),
    sa.Column('emailid', sa.VARCHAR(length=4000)),
    sa.Column('phone', sa.VARCHAR(length=25)),
    sa.Column('fax', sa.VARCHAR(length=25)),
    sa.Column('tiofficeid', sa.Integer()),
    sa.Column('ctype', sa.CHAR(length=1)),
    sa.Column('lastname', sa.VARCHAR(length=25)),
    sa.Column('firstname', sa.VARCHAR(length=50)),
    sa.Column('altphone', sa.VARCHAR(length=25)),
    sa.Column('englishtitle', sa.VARCHAR(length=255)),
    sa.Column('ccname', sa.VARCHAR(length=25)),
    sa.Column('ccemail', sa.VARCHAR(length=4000)),
    sa.Column('visibleid', sa.VARCHAR(length=50)),
    sa.Column('company', sa.VARCHAR(length=50)),
    sa.Column('programoffice', sa.CHAR(length=1)),
    sa.Column('consultationoffice', sa.CHAR(length=1)),
    sa.Column('cactive', sa.CHAR(length=1)),
    sa.Column('remaddress1', sa.VARCHAR(length=150)),
    sa.Column('remaddress2', sa.VARCHAR(length=80)),
    sa.Column('remcity', sa.VARCHAR(length=30)),
    sa.Column('remzipcode', sa.VARCHAR(length=10)),
    sa.Column('remstate', sa.VARCHAR(length=3000)),
    sa.Column('remstatecode', sa.CHAR(length=5)),
    sa.Column('remcountry', sa.VARCHAR(length=3000)),
    sa.PrimaryKeyConstraint('programofficeid')
    )
def downgrade():
    """Revert this migration by dropping the dimension table."""
    table_name = 'dimProgramOffices'
    op.drop_table(table_name)
| 34.745763 | 61 | 0.709756 |
f3b138dc4e34d7d9bf432dec5cc5f4d51d2386bb | 409 | py | Python | Python/069.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | Python/069.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | 1 | 2018-04-16T21:01:50.000Z | 2018-04-16T21:01:50.000Z | Python/069.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 69
Author: Jaime Liew
Date: May 2016
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
from EulerFunctions import primelist
def run():
    """Solve Project Euler 69: find n <= 1,000,000 maximising n/phi(n).

    n/phi(n) is maximised by the largest primorial (product of the first
    consecutive primes) that stays below the limit.
    """
    limit = 1000000
    primes = primelist(limit)
    primorial = 1
    for prime in primes:
        if primorial * prime >= limit:
            break
        primorial *= prime
    return primorial


if __name__ == "__main__":
    print(run())
| 16.36 | 53 | 0.630807 |
76995ef5dcca6f90d0a82c67ce458437754e7709 | 881 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iotcloudgateway/models/CloudstorageSpec.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iotcloudgateway/models/CloudstorageSpec.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iotcloudgateway/models/CloudstorageSpec.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class CloudstorageSpec(object):
    """Specification of a cloud-disk expansion for one replica set."""

    def __init__(self, rsname, size, ):
        """
        :param rsname: replica set name
        :param size: cloud disk expansion size
        """
        # Store the request parameters verbatim; serialisation happens
        # elsewhere in the SDK.
        self.size = size
        self.rsname = rsname
| 29.366667 | 75 | 0.703746 |
35d0b61b805672c6d8de07c8ca3c595cecb654b0 | 3,206 | py | Python | FW/edk2-ws/edk2/CryptoPkg/Library/OpensslLib/openssl/krb5/src/tests/t_keyrollover.py | daxx-linux/edk-Lab_Material_FW | 0c92b0b861e33d949a73e3be96929570e6c7ec61 | [
"BSD-2-Clause"
] | 1 | 2022-02-16T01:28:20.000Z | 2022-02-16T01:28:20.000Z | FW/edk2-ws/edk2/CryptoPkg/Library/OpensslLib/openssl/krb5/src/tests/t_keyrollover.py | daxx-linux/edk-Lab_Material_FW | 0c92b0b861e33d949a73e3be96929570e6c7ec61 | [
"BSD-2-Clause"
] | null | null | null | FW/edk2-ws/edk2/CryptoPkg/Library/OpensslLib/openssl/krb5/src/tests/t_keyrollover.py | daxx-linux/edk-Lab_Material_FW | 0c92b0b861e33d949a73e3be96929570e6c7ec61 | [
"BSD-2-Clause"
] | 2 | 2021-07-04T02:59:41.000Z | 2021-07-18T08:07:16.000Z | #!/usr/bin/python
from k5test import *
# Integration test: krbtgt key rollover.  Old TGTs must keep working while
# the old TGS key is retained, stop working once it is purged, and the
# cross-realm case must tolerate kvno-0 TGTs (Active Directory behaviour).
rollover_krb5_conf = {'libdefaults': {'allow_weak_crypto': 'true'}}
realm = K5Realm(krbtgt_keysalt='des-cbc-crc:normal',
                krb5_conf=rollover_krb5_conf)
princ1 = 'host/test1@%s' % (realm.realm,)
princ2 = 'host/test2@%s' % (realm.realm,)
realm.addprinc(princ1)
realm.addprinc(princ2)
realm.run([kvno, realm.host_princ])
# Change key for TGS, keeping old key.
realm.run([kadminl, 'cpw', '-randkey', '-e', 'aes256-cts', '-keepold',
           realm.krbtgt_princ])
# Ensure that kvno still works with an old TGT.
realm.run([kvno, princ1])
realm.run([kadminl, 'purgekeys', realm.krbtgt_princ])
# Make sure an old TGT fails after purging old TGS key.
realm.run([kvno, princ2], expected_code=1)
output = realm.run([klist, '-e'])
expected = 'krbtgt/%s@%s\n\tEtype (skey, tkt): des-cbc-crc, des-cbc-crc' % \
           (realm.realm, realm.realm)
if expected not in output:
    fail('keyrollover: expected TGS enctype not found')
# Check that new key actually works.
realm.kinit(realm.user_princ, password('user'))
realm.run([kvno, realm.host_princ])
output = realm.run([klist, '-e'])
expected = 'krbtgt/%s@%s\n\tEtype (skey, tkt): ' \
           'aes256-cts-hmac-sha1-96, aes256-cts-hmac-sha1-96' % \
           (realm.realm, realm.realm)
if expected not in output:
    fail('keyrollover: expected TGS enctype not found after change')
# Test that the KDC only accepts the first enctype for a kvno, for a
# local-realm TGS request.  To set this up, we abuse an edge-case
# behavior of modprinc -kvno.  First, set up a DES3 krbtgt entry at
# kvno 1 and cache a krbtgt ticket.
realm.run([kadminl, 'cpw', '-randkey', '-e', 'des3-cbc-sha1',
           realm.krbtgt_princ])
realm.run([kadminl, 'modprinc', '-kvno', '1', realm.krbtgt_princ])
realm.kinit(realm.user_princ, password('user'))
# Add an AES krbtgt entry at kvno 2, and then reset it to kvno 1
# (modprinc -kvno sets the kvno on all entries without deleting any).
realm.run([kadminl, 'cpw', '-randkey', '-keepold', '-e', 'aes256-cts',
           realm.krbtgt_princ])
realm.run([kadminl, 'modprinc', '-kvno', '1', realm.krbtgt_princ])
out = realm.run([kadminl, 'getprinc', realm.krbtgt_princ])
# Sanity check: both enctypes must now coexist at kvno 1.
if 'vno 1, aes256' not in out or 'vno 1, des3' not in out:
    fail('keyrollover: setup for TGS enctype test failed')
# Now present the DES3 ticket to the KDC and make sure it's rejected.
realm.run([kvno, realm.host_princ], expected_code=1)
realm.stop()
# Test a cross-realm TGT key rollover scenario where realm 1 mimics
# the Active Directory behavior of always using kvno 0 when issuing
# cross-realm TGTs.  The first kvno invocation caches a cross-realm
# TGT with the old key, and the second kvno invocation sends it to
# r2's KDC with no kvno to identify it, forcing the KDC to try
# multiple keys.
r1, r2 = cross_realms(2)
crosstgt_princ = 'krbtgt/%s@%s' % (r2.realm, r1.realm)
r1.run([kadminl, 'modprinc', '-kvno', '0', crosstgt_princ])
r1.run([kvno, r2.host_princ])
r2.run([kadminl, 'cpw', '-pw', 'newcross', '-keepold', crosstgt_princ])
r1.run([kadminl, 'cpw', '-pw', 'newcross', crosstgt_princ])
r1.run([kadminl, 'modprinc', '-kvno', '0', crosstgt_princ])
r1.run([kvno, r2.user_princ])
success('keyrollover')
| 38.626506 | 76 | 0.696195 |
1e7a441159ca922cc2fce1824a4d6d2ae3531d76 | 20,760 | py | Python | colanderalchemy/schema.py | davidjb/ColanderAlchemy | df7224a935a2cbb5f6a8dbb28c16938d35c6236d | [
"MIT"
] | null | null | null | colanderalchemy/schema.py | davidjb/ColanderAlchemy | df7224a935a2cbb5f6a8dbb28c16938d35c6236d | [
"MIT"
] | null | null | null | colanderalchemy/schema.py | davidjb/ColanderAlchemy | df7224a935a2cbb5f6a8dbb28c16938d35c6236d | [
"MIT"
] | null | null | null | # types.py
# Copyright (C) 2012 the ColanderAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of ColanderAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from colander import (Mapping,
null,
required,
SchemaNode,
Sequence)
from inspect import isfunction
from sqlalchemy import (Boolean,
Date,
DateTime,
Enum,
Float,
inspect,
Integer,
String,
Numeric,
Time)
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import object_mapper
import colander
import logging
__all__ = ['SQLAlchemySchemaNode']
log = logging.getLogger(__name__)
class SQLAlchemySchemaNode(colander.SchemaNode):
""" Build a Colander Schema based on the SQLAlchemy mapped class.
"""
sqla_info_key = 'colanderalchemy'
ca_class_key = '__colanderalchemy_config__'
    def __init__(self, class_, includes=None,
                 excludes=None, overrides=None, unknown='ignore', **kw):
        """ Initialise the given mapped schema according to options provided.

            Arguments/Keywords

            class\_
               An ``SQLAlchemy`` mapped class that you want a ``Colander`` schema
               to be generated for.

               To declaratively customise ``Colander`` ``SchemaNode`` options,
               add a ``__colanderalchemy_config__`` attribute to your initial
               class declaration like so::

                   class MyModel(Base):
                       __colanderalchemy_config__ = {'title': 'Custom title',
                                                     'description': 'Sample'}
                       ...

            includes
               Iterable of attributes to include from the resulting schema. Using
               this option will ensure *only* the explicitly mentioned attributes
               are included and *all others* are excluded.

               Incompatible with :attr:`excludes`. Default: None.
            excludes
               Iterable of attributes to exclude from the resulting schema. Using
               this option will ensure *only* the explicitly mentioned attributes
               are excluded and *all others* are included.

               Incompatible with :attr:`includes`. Default: None.
            overrides
               A mapping of attribute name to a dict of imperative per-node
               options (e.g. ``exclude``, ``typ``) applied when building that
               attribute's SchemaNode. Imperative overrides take precedence
               over declarative configuration. Default: None.
            unknown
               Represents the `unknown` argument passed to
               :class:`colander.Mapping`.

               From Colander:

               ``unknown`` controls the behavior of this type when an unknown
               key is encountered in the cstruct passed to the deserialize
               method of this instance.

               Default: 'ignore'
            \*\*kw
               Represents *all* other options able to be passed to a
               :class:`colander.SchemaNode`. Keywords passed will influence the
               resulting mapped schema accordingly (for instance, passing
               ``title='My Model'`` means the returned schema will have its
               ``title`` attribute set accordingly.

               See http://docs.pylonsproject.org/projects/colander/en/latest/basics.html for more information.
        """
        log.debug('SQLAlchemySchemaNode.__init__: %s', class_)
        self.inspector = inspect(class_)
        kwargs = kw.copy()
        # Obtain configuration specific from the mapped class
        # (class-level declarative config wins over the kw arguments).
        kwargs.update(getattr(self.inspector.class_, self.ca_class_key, {}))
        # The default type of this SchemaNode is Mapping.
        colander.SchemaNode.__init__(self, Mapping(unknown), **kwargs)
        self.class_ = class_
        self.includes = includes or {}
        self.excludes = excludes or {}
        self.overrides = overrides or {}
        self.unknown = unknown
        # Populated per-column by get_schema_from_column as nodes are built.
        self.declarative_overrides = {}
        self.kwargs = kwargs or {}
        self.add_nodes(self.includes, self.excludes, self.overrides)
def add_nodes(self, includes, excludes, overrides):
for prop in self.inspector.attrs:
name = prop.key
if name in excludes and name in includes:
msg = 'excludes and includes are mutually exclusive.'
raise ValueError(msg)
if name in excludes or (includes and name not in includes):
log.debug('Attribute %s skipped imperatively', name)
continue
try:
getattr(self.inspector.column_attrs, name)
factory = 'get_schema_from_column'
except AttributeError:
getattr(self.inspector.relationships, name)
factory = 'get_schema_from_relationship'
node = getattr(self, factory)(prop, overrides.get(name,{}).copy())
if node is None:
continue
self.add(node)
    def get_schema_from_column(self, prop, overrides):
        """ Build and return a :class:`colander.SchemaNode` for a given Column.

        This method uses information stored in the column within the ``info``
        that was passed to the Column on creation. This means that
        ``Colander`` options can be specified declaratively in
        ``SQLAlchemy`` models using the ``info`` argument that you can
        pass to :class:`sqlalchemy.Column`.

        Arguments/Keywords

        prop
           A given :class:`sqlalchemy.orm.properties.ColumnProperty`
           instance that represents the column being mapped.
        overrides
           A dict of imperative per-column overrides (e.g. ``typ``,
           ``exclude``); consumed destructively via ``pop``.
        """
        # The name of the SchemaNode is the ColumnProperty key.
        name = prop.key
        column = prop.columns[0]
        declarative_overrides = column.info.get(self.sqla_info_key, {}).copy()
        self.declarative_overrides[name] = declarative_overrides.copy()

        # Honour 'exclude' from either source before doing any work.
        key = 'exclude'

        if key not in overrides and declarative_overrides.pop(key, False):
            log.debug('Column %s skipped due to declarative overrides', name)
            return None

        if overrides.pop(key, False):
            log.debug('Column %s skipped due to imperative overrides', name)
            return None

        # 'name' and 'children' may never be overridden for columns.
        for key in ['name', 'children']:
            self.check_overrides(name, key, declarative_overrides, overrides)

        # The SchemaNode built using the ColumnProperty has no children.
        children = []

        # The SchemaNode has no validator by default.
        validator = None

        # The type of the SchemaNode will be evaluated using the Column type.
        # The user can override the default type via Column.info or
        # imperatively using the overrides arg in SQLAlchemySchemaNode.__init__.

        # Support sqlalchemy.types.TypeDecorator by unwrapping to the
        # underlying implementation type.
        column_type = getattr(column.type, 'impl', column.type)

        imperative_type = overrides.pop('typ', None)
        declarative_type = declarative_overrides.pop('typ', None)

        if not imperative_type is None:
            type_ = imperative_type()
            msg = 'Column %s: type overridden imperatively: %s.'
            log.debug(msg, name, type_)

        elif not declarative_type is None:
            type_ = declarative_type()
            msg = 'Column %s: type overridden via declarative: %s.'
            log.debug(msg, name, type_)

        elif isinstance(column_type, Boolean):
            type_ = colander.Boolean()

        elif isinstance(column_type, Date):
            type_ = colander.Date()

        elif isinstance(column_type, DateTime):
            type_ = colander.DateTime()

        elif isinstance(column_type, Enum):
            type_ = colander.String()
            validator = colander.OneOf(column.type.enums)

        elif isinstance(column_type, Float):
            type_ = colander.Float()

        elif isinstance(column_type, Integer):
            type_ = colander.Integer()

        elif isinstance(column_type, String):
            type_ = colander.String()
            validator = colander.Length(0, column.type.length)

        elif isinstance(column_type, Numeric):
            type_ = colander.Decimal()

        elif isinstance(column_type, Time):
            type_ = colander.Time()

        else:
            raise NotImplementedError('Unknown type: %s' % column_type)

        # Add default values for missing parameters.
        # Autoincrementing integer primary keys get no default: the
        # database assigns their value.
        if column.default is None or not hasattr(column.default, 'arg') or \
                (isinstance(column_type, Integer) and
                 column.primary_key and column.autoincrement):
            default = None
        elif column.default.is_callable:
            # Fix: SQLA wraps callables in lambda ctx: fn().
            default = column.default.arg(None)
        else:
            default = column.default.arg

        # 'missing' mirrors the default logic: NOT NULL columns without a
        # server-assignable value are required.
        if not column.nullable and \
                not (isinstance(column_type, Integer) and
                     column.primary_key and column.autoincrement):
            missing = required
        elif not column.default is None and column.default.is_callable and \
                not (isinstance(column_type, Integer) and
                     column.primary_key and column.autoincrement):
            # Fix: SQLA wraps default callables in lambda ctx: fn().
            missing = column.default.arg(None)
        elif not column.default is None and not column.default.is_callable and \
                not (isinstance(column_type, Integer) and
                     column.primary_key and column.autoincrement):
            missing = column.default.arg
        else:
            missing = None

        kwargs = dict(name=name,
                      title=name,
                      default=default,
                      missing=missing,
                      validator=validator)
        # Declarative settings win over computed ones; imperative settings
        # win over both.
        kwargs.update(declarative_overrides)
        kwargs.update(overrides)

        return colander.SchemaNode(type_, *children, **kwargs)
def check_overrides(self, name, arg, declarative_overrides, overrides):
msg = None
if arg in declarative_overrides:
msg = '%s: argument %s cannot be overridden via info kwarg.'
elif arg in overrides:
msg = '%s: argument %s cannot be overridden imperatively.'
if msg:
raise ValueError(msg % (name, arg))
    def get_schema_from_relationship(self, prop, overrides):
        """ Build and return a :class:`colander.SchemaNode` for a relationship.

        This method uses information stored in the relationship within
        the ``info`` that was passed to the relationship on creation.
        This means that ``Colander`` options can be specified
        declaratively in ``SQLAlchemy`` models using the ``info``
        argument that you can pass to
        :meth:`sqlalchemy.orm.relationship`.

        Arguments/Keywords

        prop
           A given :class:`sqlalchemy.orm.properties.RelationshipProperty`
           instance that represents the relationship being mapped.
        overrides
           A dict of imperative overrides for this relationship's node
           (``children``, ``includes``, ``excludes``, ``overrides``);
           consumed destructively via ``pop``.
        """
        # The name of the SchemaNode is the RelationshipProperty key.
        name = prop.key
        declarative_overrides = prop.info.get(self.sqla_info_key, {}).copy()
        self.declarative_overrides[name] = declarative_overrides.copy()

        # relationship() may receive a callable returning the target class.
        if isfunction(prop.argument):
            class_ = prop.argument()
        else:
            class_ = prop.argument

        if declarative_overrides.pop('exclude', False):
            log.debug('Relationship %s skipped due to declarative overrides',
                      name)
            return None

        # 'name' and 'typ' may never be overridden for relationships.
        for key in ['name', 'typ']:
            self.check_overrides(name, key, declarative_overrides, overrides)

        # For each of 'children', 'includes', 'excludes' and 'overrides',
        # imperative settings take precedence over declarative ones.
        key = 'children'
        imperative_children = overrides.pop(key, None)
        declarative_children = declarative_overrides.pop(key, None)
        if not imperative_children is None:
            children = imperative_children
            msg = 'Relationship %s: %s overridden imperatively.'
            log.debug(msg, name, key)
        elif not declarative_children is None:
            children = declarative_children
            msg = 'Relationship %s: %s overridden via declarative.'
            log.debug(msg, name, key)
        else:
            children = None

        key = 'includes'
        imperative_includes = overrides.pop(key, None)
        declarative_includes = declarative_overrides.pop(key, None)
        if not imperative_includes is None:
            includes = imperative_includes
            msg = 'Relationship %s: %s overridden imperatively.'
            log.debug(msg, name, key)
        elif not declarative_includes is None:
            includes = declarative_includes
            msg = 'Relationship %s: %s overridden via declarative.'
            log.debug(msg, name, key)
        else:
            includes = None

        key = 'excludes'
        imperative_excludes = overrides.pop(key, None)
        declarative_excludes = declarative_overrides.pop(key, None)
        if not imperative_excludes is None:
            excludes = imperative_excludes
            msg = 'Relationship %s: %s overridden imperatively.'
            log.debug(msg, name, key)
        elif not declarative_excludes is None:
            excludes = declarative_excludes
            msg = 'Relationship %s: %s overridden via declarative.'
            log.debug(msg, name, key)
        else:
            excludes = None

        # When nothing was specified, map only the related class's columns
        # (not its relationships) to avoid infinite recursion.
        if includes is None and excludes is None:
            includes = [p.key for p in inspect(class_).column_attrs]

        key = 'overrides'
        imperative_rel_overrides = overrides.pop(key, None)
        declarative_rel_overrides = declarative_overrides.pop(key, None)
        if not imperative_rel_overrides is None:
            rel_overrides = imperative_rel_overrides
            msg = 'Relationship %s: %s overridden imperatively.'
            log.debug(msg, name, key)
        elif not declarative_rel_overrides is None:
            rel_overrides = declarative_rel_overrides
            msg = 'Relationship %s: %s overridden via declarative.'
            log.debug(msg, name, key)
        else:
            rel_overrides = None

        # Add default values for missing parameters.
        if prop.innerjoin:
            # Inner joined relationships imply it is mandatory
            missing = required
        else:
            # Any other join is thus optional
            if prop.uselist:
                missing = []
            else:
                missing = None

        kwargs = dict(name=name,
                      missing=missing)
        kwargs.update(declarative_overrides)
        kwargs.update(overrides)

        # Explicit children take precedence over a recursively built schema.
        if not children is None and prop.uselist:
            # xToMany relationships.
            return SchemaNode(Sequence(), *children, **kwargs)
        if not children is None and not prop.uselist:
            # xToOne relationships.
            return SchemaNode(Mapping(), *children, **kwargs)

        node = SQLAlchemySchemaNode(class_,
                                    name=name,
                                    includes=includes,
                                    excludes=excludes,
                                    overrides=rel_overrides,
                                    missing=missing)
        if prop.uselist:
            # Wrap the mapped schema in a Sequence for xToMany.
            node = SchemaNode(Sequence(), node, **kwargs)

        node.name = name

        return node
    def dictify(self, obj):
        """ Return a dictified version of `obj` using schema information.

        The schema will be used to choose what attributes will be
        included in the returned dict.

        Thus, the return value of this function is suitable for consumption
        as a ``Deform`` ``appstruct`` and can be used to pre-populate
        forms in this specific use case.

        Arguments/Keywords

        obj
           An object instance to be converted to a ``dict`` structure.
           This object should conform to the given schema. For
           example, ``obj`` should be an instance of this schema's
           mapped class, an instance of a sub-class, or something that
           has the same attributes.
        """
        dict_ = {}
        for node in self:
            name = node.name
            try:
                # Plain column attribute: copy the value straight across.
                getattr(self.inspector.column_attrs, name)
                value = getattr(obj, name)
            except AttributeError:
                try:
                    prop = getattr(self.inspector.relationships, name)
                    if prop.uselist:
                        # xToMany: dictify each related object using the
                        # Sequence node's single child schema.
                        value = [self[name].children[0].dictify(o)
                                 for o in getattr(obj, name)]
                    else:
                        # xToOne: dictify the single related object, if any.
                        o = getattr(obj, name)
                        value = None if o is None else self[name].dictify(o)
                except AttributeError:
                    # The given node isn't part of the SQLAlchemy model
                    msg = 'SQLAlchemySchemaNode.dictify: %s not found on %s'
                    log.debug(msg, name, self)
                    continue
            dict_[name] = value

        return dict_
    def objectify(self, dict_, context=None):
        """ Return an object representing ``dict_`` using schema information.

        The schema will be used to choose how the data in the structure
        will be restored into SQLAlchemy model objects.  The incoming
        ``dict_`` structure corresponds with one that may be created from
        the :meth:`dictify` method on the same schema.  Relationships and
        backrefs will be restored in accordance with their specific
        configurations.

        The return value of this function will be suitable for
        adding into an SQLAlchemy session to be committed to a database.

        Arguments/Keywords

        dict\_
           A dictionary or similar data structure to be converted to an
           SQLAlchemy object, conforming to this schema (for example, a
           Deform appstruct or the result of :meth:`dictify`).

        context
           Optional keyword argument that, if supplied, becomes the base
           object, with attributes and objects being applied to it.
           Useful for updating a pre-existing (possibly session-bound)
           model instance in place.

           Default: ``None``.  Defaults to instantiating a new instance of
           the mapped class associated with this schema.
        """
        mapper = self.inspector
        context = mapper.class_() if context is None else context
        for attr in dict_:
            if mapper.has_property(attr):
                prop = mapper.get_property(attr)
                value = dict_[attr]
                # Convert value into objects if property has a mapper
                # (i.e. it is a relationship, not a plain column).
                if hasattr(prop, 'mapper'):
                    # NOTE(review): `cls` is unused -- candidate for removal.
                    cls = prop.mapper.class_

                    if prop.uselist:
                        # Sequence of objects
                        value = [self[attr].children[0].objectify(obj)
                                 for obj in value]
                    else:
                        # Single object
                        value = self[attr].objectify(value)
                setattr(context, attr, value)
            else:
                # Ignore attributes if they are not mapped
                msg = 'SQLAlchemySchemaNode.objectify: %s not found on %s. ' \
                      'This property has been ignored.'
                log.debug(msg, attr, self)
                continue

        return context
def clone(self):
cloned = self.__class__(self.class_,
self.includes,
self.excludes,
self.overrides,
self.unknown,
**self.kwargs)
cloned.__dict__.update(self.__dict__)
cloned.children = [node.clone() for node in self.children]
return cloned
| 37.137746 | 106 | 0.587283 |
b2fc8abaa7aa4fba5abb5aa57ad3b3b16fe43b74 | 270 | py | Python | app.py | vyahello/flask-deploy-template | ca785b8ecfcaa277755d9ec0336ab95ff82237e8 | [
"MIT"
] | null | null | null | app.py | vyahello/flask-deploy-template | ca785b8ecfcaa277755d9ec0336ab95ff82237e8 | [
"MIT"
] | null | null | null | app.py | vyahello/flask-deploy-template | ca785b8ecfcaa277755d9ec0336ab95ff82237e8 | [
"MIT"
] | null | null | null | import flask
__site = flask.Flask(__name__)
@__site.route('/')
def home() -> str:
    """Render the site's home page template."""
    return flask.render_template('home.html')
@__site.route('/about')
def about() -> str:
    """Render the site's about page template."""
    return flask.render_template('about.html')
if __name__ == '__main__':
__site.run()
| 15 | 46 | 0.662963 |
706f343368fdf2391bf3a4523bb14a2217a79393 | 3,605 | py | Python | filer/admin/forms.py | maykinmedia/django-filer | 1b7c6b16f7fb19d89f9bd9df624d2de666837b67 | [
"BSD-3-Clause"
] | 1 | 2019-04-15T10:28:46.000Z | 2019-04-15T10:28:46.000Z | filer/admin/forms.py | techdragon/django-filer | 085b880aa6bb738fe582a3ce40c25bd97f5bbe06 | [
"BSD-3-Clause"
] | 10 | 2015-04-08T14:16:52.000Z | 2021-12-15T16:17:57.000Z | filer/admin/forms.py | techdragon/django-filer | 085b880aa6bb738fe582a3ce40c25bd97f5bbe06 | [
"BSD-3-Clause"
] | 3 | 2016-12-28T03:32:44.000Z | 2018-05-28T15:13:38.000Z | from django import forms
from django.db import models
from django.contrib.admin import widgets
from filer.utils.files import get_valid_filename
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
from django.conf import settings
if 'cmsplugin_filer_image' in settings.INSTALLED_APPS:
from cmsplugin_filer_image.models import ThumbnailOption
class AsPWithHelpMixin(object):
    """Mixin giving forms an admin-styled paragraph rendering."""

    def as_p_with_help(self):
        "Returns this form rendered as HTML <p>s with help text formatted for admin."
        return self._html_output(
            normal_row='<p%(html_class_attr)s>%(label)s %(field)s</p>%(help_text)s',
            error_row='%s',
            row_ender='</p>',
            help_text_html='<p class="help">%s</p>',
            errors_on_separate_row=True)
class CopyFilesAndFoldersForm(forms.Form, AsPWithHelpMixin):
    """Admin form asking for a filename suffix to append to copied files."""
    suffix = forms.CharField(required=False, help_text=_("Suffix which will be appended to filenames of copied files."))
    # TODO: We have to find a way to overwrite files with different storage backends first.
    #overwrite_files = forms.BooleanField(required=False, help_text=_("Overwrite a file if there already exists a file with the same filename?"))

    def clean_suffix(self):
        # The suffix must survive filename sanitisation unchanged, or it
        # would silently alter the resulting filenames.
        valid = get_valid_filename(self.cleaned_data['suffix'])
        if valid != self.cleaned_data['suffix']:
            raise forms.ValidationError(_('Suffix should be a valid, simple and lowercase filename part, like "%(valid)s".') % {'valid': valid})
        return self.cleaned_data['suffix']
class RenameFilesForm(forms.Form, AsPWithHelpMixin):
    """Admin form asking for a printf-style rename pattern."""
    rename_format = forms.CharField(required=True)

    def clean_rename_format(self):
        # Validate the pattern by substituting dummy values for every
        # supported placeholder; unknown keys or malformed syntax raise here.
        try:
            self.cleaned_data['rename_format'] % {
                'original_filename': 'filename',
                'original_basename': 'basename',
                'original_extension': 'ext',
                'current_filename': 'filename',
                'current_basename': 'basename',
                'current_extension': 'ext',
                'current_folder': 'folder',
                'counter': 42,
                'global_counter': 42,
            }
        except KeyError as e:
            raise forms.ValidationError(_('Unknown rename format value key "%(key)s".') % {'key': e.args[0]})
        except Exception as e:
            raise forms.ValidationError(_('Invalid rename format: %(error)s.') % {'error': e})
        return self.cleaned_data['rename_format']
class ResizeImagesForm(forms.Form, AsPWithHelpMixin):
    """Admin form collecting parameters for bulk image resizing.

    The ``thumbnail_option`` field is only present when the
    ``cmsplugin_filer_image`` app is installed.
    """
    if 'cmsplugin_filer_image' in settings.INSTALLED_APPS:
        thumbnail_option = models.ForeignKey(ThumbnailOption, null=True, blank=True, verbose_name=_("thumbnail option")).formfield()
    width = models.PositiveIntegerField(_("width"), null=True, blank=True).formfield(widget=widgets.AdminIntegerFieldWidget)
    height = models.PositiveIntegerField(_("height"), null=True, blank=True).formfield(widget=widgets.AdminIntegerFieldWidget)
    crop = models.BooleanField(_("crop"), default=True).formfield()
    upscale = models.BooleanField(_("upscale"), default=True).formfield()

    def clean(self):
        # Either a predefined thumbnail option or at least one explicit
        # dimension (width/height) must be supplied.
        if not (self.cleaned_data.get('thumbnail_option') or ((self.cleaned_data.get('width') or 0) + (self.cleaned_data.get('height') or 0))):
            if 'cmsplugin_filer_image' in settings.INSTALLED_APPS:
                # Typo fix in user-facing message: 'choosen' -> 'chosen'.
                raise ValidationError(_('Thumbnail option or resize parameters must be chosen.'))
            else:
                raise ValidationError(_('Resize parameters must be chosen.'))
        return self.cleaned_data
| 48.066667 | 145 | 0.678225 |
f3a70e8256c665451f7d14aa3b3e3fc224ef4e17 | 277 | py | Python | cacreader/swig-4.0.2/Examples/test-suite/python/python_varargs_typemap_runme.py | kyletanyag/LL-Smartcard | 02abea9de5a13f8bae4d7832ab34cb7f0d9514c9 | [
"BSD-3-Clause"
] | 1,031 | 2015-01-02T14:08:47.000Z | 2022-03-29T02:25:27.000Z | cacreader/swig-4.0.2/Examples/test-suite/python/python_varargs_typemap_runme.py | kyletanyag/LL-Smartcard | 02abea9de5a13f8bae4d7832ab34cb7f0d9514c9 | [
"BSD-3-Clause"
] | 240 | 2015-01-11T04:27:19.000Z | 2022-03-30T00:35:57.000Z | cacreader/swig-4.0.2/Examples/test-suite/python/python_varargs_typemap_runme.py | kyletanyag/LL-Smartcard | 02abea9de5a13f8bae4d7832ab34cb7f0d9514c9 | [
"BSD-3-Clause"
] | 224 | 2015-01-05T06:13:54.000Z | 2022-02-25T14:39:51.000Z | import python_varargs_typemap
if (python_varargs_typemap.testfunc(1, 2.0, "three") != "three"):
raise RuntimeError("testfunc failed!")
if (python_varargs_typemap.testfunc(1, 2.0, "three", "four", "five") != "threefourfive"):
raise RuntimeError("testfunc failed! {}")
| 34.625 | 89 | 0.707581 |
6258118c28c1df5d49310f34182db3e5a730d003 | 601 | py | Python | .history/List of Capstone Projects/prime_factorization_20200516163532.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | 1 | 2020-05-18T17:50:00.000Z | 2020-05-18T17:50:00.000Z | .history/List of Capstone Projects/prime_factorization_20200516163532.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | .history/List of Capstone Projects/prime_factorization_20200516163532.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | '''
Prime Factorization - Have the user enter a number and find all Prime Factors (if there are any) and display them.
'''
import HeaderOfFiles
def prime_factor(number):
    '''
    Find, display and return all prime factors of ``number``.

    Uses trial division: repeatedly divides out each divisor starting
    from 2, so only primes can ever divide the remaining value.  The
    original loop appended composite divisors (e.g. 120 -> [2, 3, 4, 5])
    because it never divided out repeated factors; this version returns
    the true multiset of prime factors (e.g. 120 -> [2, 2, 2, 3, 5]).
    '''
    factors = []
    divisor = 2
    # Trial division up to sqrt(remaining value).
    while divisor * divisor <= number:
        while number % divisor == 0:
            factors.append(divisor)
            number //= divisor
        divisor += 1
    # Whatever remains above 1 is itself prime.
    if number > 1:
        factors.append(number)
    print(factors)
    return factors
# while True:
# try:
# x = int(input("Give me a number to find all Prime Factors: "))
# break
# except ValueError:
# print("Give a number please!")
prime_factor(120) | 22.259259 | 114 | 0.584027 |
c63290259074f83cecb267eaf04b12a08c277200 | 573 | py | Python | config/celery_app.py | Egor4ik325/demanage | f5fd1d2ad264f6d2dc3c95322962569dc591f30c | [
"MIT"
] | null | null | null | config/celery_app.py | Egor4ik325/demanage | f5fd1d2ad264f6d2dc3c95322962569dc591f30c | [
"MIT"
] | 1 | 2022-03-01T11:21:05.000Z | 2022-03-01T11:21:05.000Z | config/celery_app.py | Egor4ik325/demanage | f5fd1d2ad264f6d2dc3c95322962569dc591f30c | [
"MIT"
] | null | null | null | import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery("demanage")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| 31.833333 | 72 | 0.78185 |
7e8103b0ccddb2442f14a38944cefe2df0b5f1dd | 1,706 | py | Python | profile_project/profile_api/migrations/0001_initial.py | Prajnasbhat/profile-rest-api | 23e5fa1c0f60fc00be068f80dd33c73b7bf9183e | [
"MIT"
] | null | null | null | profile_project/profile_api/migrations/0001_initial.py | Prajnasbhat/profile-rest-api | 23e5fa1c0f60fc00be068f80dd33c73b7bf9183e | [
"MIT"
] | 5 | 2020-06-06T01:28:49.000Z | 2022-02-10T11:17:41.000Z | profile_project/profile_api/migrations/0001_initial.py | Prajnasbhat/profile-rest-api | 23e5fa1c0f60fc00be068f80dd33c73b7bf9183e | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-02-23 16:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE: auto-generated by Django's makemigrations; applied migrations
    # should not be rewritten by hand.

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        # Creates a custom user model that logs in via unique email.
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.176471 | 266 | 0.638921 |
6dadf9550fc4ed5efa77d228bbd0845fec839d62 | 16,296 | py | Python | sdk/python/kfp/components/component_factory.py | johnmacnamararseg/pipelines | 340318625c527836af1c9abc0fd0d76c0a466333 | [
"Apache-2.0"
] | null | null | null | sdk/python/kfp/components/component_factory.py | johnmacnamararseg/pipelines | 340318625c527836af1c9abc0fd0d76c0a466333 | [
"Apache-2.0"
] | 1 | 2020-02-06T12:53:44.000Z | 2020-02-06T12:53:44.000Z | sdk/python/kfp/components/component_factory.py | johnmacnamararseg/pipelines | 340318625c527836af1c9abc0fd0d76c0a466333 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021-2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import inspect
import itertools
import pathlib
import re
import textwrap
import warnings
from typing import Callable, List, Optional, Tuple
import docstring_parser
from kfp.components import python_component
from kfp.components import structures
from kfp.components.types import artifact_types
from kfp.components.types import type_annotations
from kfp.components.types import type_utils
_DEFAULT_BASE_IMAGE = 'python:3.7'
@dataclasses.dataclass
class ComponentInfo():
    """A dataclass capturing registered components.

    This will likely be subsumed/augmented with BaseComponent.
    """
    # Human-readable component name.
    name: str
    # Name of the Python function implementing the component.
    function_name: str
    # The function object itself.
    func: Callable
    # Target image for containerized components.
    target_image: str
    # Path of the module in which the component is defined.
    module_path: pathlib.Path
    # The component's inferred interface specification.
    component_spec: structures.ComponentSpec
    # Optional path for writing out the component definition file.
    output_component_file: Optional[str] = None
    # Base container image; defaults to the SDK-wide default.
    base_image: str = _DEFAULT_BASE_IMAGE
# A map from function_name to components. This is always populated when a
# module containing KFP components is loaded. Primarily used by KFP CLI
# component builder to package components in a file into containers.
REGISTERED_MODULES = None
def _python_function_name_to_component_name(name):
name_with_spaces = re.sub(' +', ' ', name.replace('_', ' ')).strip(' ')
return name_with_spaces[0].upper() + name_with_spaces[1:]
def _make_index_url_options(pip_index_urls: Optional[List[str]]) -> str:
if not pip_index_urls:
return ''
index_url = pip_index_urls[0]
extra_index_urls = pip_index_urls[1:]
options = [f'--index-url {index_url} --trusted-host {index_url} ']
options.extend(
f'--extra-index-url {extra_index_url} --trusted-host {extra_index_url} '
for extra_index_url in extra_index_urls)
return ' '.join(options)
_install_python_packages_script_template = '''
if ! [ -x "$(command -v pip)" ]; then
python3 -m ensurepip || python3 -m ensurepip --user || apt-get install python3-pip
fi
PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet \
--no-warn-script-location {index_url_options}{concat_package_list} && "$0" "$@"
'''
def _get_packages_to_install_command(
        package_list: Optional[List[str]] = None,
        pip_index_urls: Optional[List[str]] = None) -> List[str]:
    """Build the container command that pip-installs *package_list*.

    Returns an empty list when there is nothing to install; otherwise a
    ``sh -c`` command whose script installs the quoted packages from the
    given index URLs before handing control back to the component.
    """
    if not package_list:
        return []

    quoted_packages = ' '.join(repr(str(pkg)) for pkg in package_list)
    script = _install_python_packages_script_template.format(
        index_url_options=_make_index_url_options(pip_index_urls),
        concat_package_list=quoted_packages)
    return ['sh', '-c', script]
def _get_default_kfp_package_path() -> str:
    """Return a pip requirement pinning kfp to the running SDK version."""
    import kfp
    return f'kfp=={kfp.__version__}'
def _get_function_source_definition(func: Callable) -> str:
func_code = inspect.getsource(func)
# Function might be defined in some indented scope (e.g. in another
# function). We need to handle this and properly dedent the function source
# code
func_code = textwrap.dedent(func_code)
func_code_lines = func_code.split('\n')
# Removing possible decorators (can be multiline) until the function
# definition is found
func_code_lines = itertools.dropwhile(lambda x: not x.startswith('def'),
func_code_lines)
if not func_code_lines:
raise ValueError(
'Failed to dedent and clean up the source of function "{}". '
'It is probably not properly indented.'.format(func.__name__))
return '\n'.join(func_code_lines)
def _annotation_to_type_struct(annotation):
    """Convert a parameter/return annotation into a KFP type struct.

    Returns ``None`` for missing/empty annotations. Accepts objects with
    a ``to_dict`` method, plain dicts (returned unchanged), classes
    (mapped to canonical KFP type names, with non-``system.`` artifact
    classes using their ``TYPE_NAME`` as-is), ``typing.ForwardRef``
    annotations, and plain strings.
    """
    if not annotation or annotation == inspect.Parameter.empty:
        return None
    if hasattr(annotation, 'to_dict'):
        annotation = annotation.to_dict()
    if isinstance(annotation, dict):
        return annotation
    if isinstance(annotation, type):
        type_struct = type_utils.get_canonical_type_name_for_type(annotation)
        if type_struct:
            return type_struct

        if issubclass(annotation, artifact_types.Artifact
                     ) and not annotation.TYPE_NAME.startswith('system.'):
            # For artifact classes not under the `system` namespace,
            # use its TYPE_NAME as-is.
            type_name = annotation.TYPE_NAME
        else:
            type_name = str(annotation.__name__)

    elif hasattr(annotation,
                 '__forward_arg__'):  # Handling typing.ForwardRef('Type_name')
        type_name = str(annotation.__forward_arg__)
    else:
        type_name = str(annotation)

    # It's also possible to get the converter by type name
    type_struct = type_utils.get_canonical_type_name_for_type(type_name)
    if type_struct:
        return type_struct
    return type_name
def _maybe_make_unique(name: str, names: List[str]):
if name not in names:
return name
for i in range(2, 100):
unique_name = '{}_{}'.format(name, i)
if unique_name not in names:
return unique_name
raise RuntimeError('Too many arguments with the name {}'.format(name))
def extract_component_interface(func: Callable) -> structures.ComponentSpec:
    """Infer a ``ComponentSpec`` from a Python function's signature.

    Inputs and outputs are derived from the function's parameter
    annotations (including ``Input[...]``/``Output[...]`` artifact
    annotations and ``InputPath``/``OutputPath`` markers) and from its
    return annotation (a single value, a ``NamedTuple`` for multiple
    outputs, or the deprecated dict syntax). The component name and
    description come from the function name and docstring unless
    overridden via the legacy ``_component_human_name`` /
    ``_component_description`` attributes.

    Raises:
        ValueError: For unsupported artifact annotations or defaults.
        TypeError: If a parameter lacks a type annotation.
    """
    single_output_name_const = 'Output'

    signature = inspect.signature(func)
    parameters = list(signature.parameters.values())

    parsed_docstring = docstring_parser.parse(inspect.getdoc(func))

    inputs = {}
    outputs = {}

    input_names = set()
    output_names = set()
    for parameter in parameters:
        parameter_type = type_annotations.maybe_strip_optional_from_annotation(
            parameter.annotation)
        passing_style = None
        io_name = parameter.name

        if type_annotations.is_artifact_annotation(parameter_type):
            # passing_style is either type_annotations.InputAnnotation or
            # type_annotations.OutputAnnotation.
            passing_style = type_annotations.get_io_artifact_annotation(
                parameter_type)

            # parameter_type is type_annotations.Artifact or one of its
            # subclasses.
            parameter_type = type_annotations.get_io_artifact_class(
                parameter_type)
            if not issubclass(parameter_type, artifact_types.Artifact):
                raise ValueError(
                    'Input[T] and Output[T] are only supported when T is a '
                    'subclass of Artifact. Found `{} with type {}`'.format(
                        io_name, parameter_type))

            if parameter.default is not inspect.Parameter.empty:
                raise ValueError(
                    'Default values for Input/Output artifacts are not supported.'
                )
        elif isinstance(
                parameter_type,
            (type_annotations.InputPath, type_annotations.OutputPath)):
            passing_style = type(parameter_type)
            parameter_type = parameter_type.type
            if parameter.default is not inspect.Parameter.empty and not (
                    passing_style == type_annotations.InputPath and
                    parameter.default is None):
                raise ValueError(
                    'Path inputs only support default values of None. Default'
                    ' values for outputs are not supported.')

        type_struct = _annotation_to_type_struct(parameter_type)
        if type_struct is None:
            raise TypeError('Missing type annotation for argument: {}'.format(
                parameter.name))

        if passing_style in [
                type_annotations.OutputAnnotation, type_annotations.OutputPath
        ]:
            # Output artifacts/paths become component outputs.
            io_name = _maybe_make_unique(io_name, output_names)
            output_names.add(io_name)
            output_spec = structures.OutputSpec(type=type_struct)
            outputs[io_name] = output_spec
        else:
            io_name = _maybe_make_unique(io_name, input_names)
            input_names.add(io_name)
            if parameter.default is not inspect.Parameter.empty:
                input_spec = structures.InputSpec(
                    type=type_struct,
                    default=parameter.default,
                )
            else:
                input_spec = structures.InputSpec(type=type_struct)
            inputs[io_name] = input_spec

    # Analyzing the return type annotations.
    return_ann = signature.return_annotation
    if hasattr(return_ann, '_fields'):  # NamedTuple
        # Getting field type annotations.
        # __annotations__ does not exist in python 3.5 and earlier
        # _field_types does not exist in python 3.9 and later
        field_annotations = getattr(return_ann,
                                    '__annotations__', None) or getattr(
                                        return_ann, '_field_types', None)
        for field_name in return_ann._fields:
            type_struct = None
            if field_annotations:
                type_struct = _annotation_to_type_struct(
                    field_annotations.get(field_name, None))
            output_name = _maybe_make_unique(field_name, output_names)
            output_names.add(output_name)
            output_spec = structures.OutputSpec(type=type_struct)
            outputs[output_name] = output_spec
    # Deprecated dict-based way of declaring multiple outputs. Was only used by
    # the @component decorator
    elif isinstance(return_ann, dict):
        warnings.warn(
            'The ability to specify multiple outputs using the dict syntax'
            ' has been deprecated. It will be removed soon after release'
            ' 0.1.32. Please use typing.NamedTuple to declare multiple'
            ' outputs.')
        for output_name, output_type_annotation in return_ann.items():
            output_type_struct = _annotation_to_type_struct(
                output_type_annotation)
            output_spec = structures.OutputSpec(type=output_type_struct)
            # Bug fix: previously wrote `outputs[name]`, but `name` is never
            # bound in this function, raising NameError in this branch.
            outputs[output_name] = output_spec
    elif signature.return_annotation is not None and signature.return_annotation != inspect.Parameter.empty:
        output_name = _maybe_make_unique(single_output_name_const, output_names)
        # Fixes exotic, but possible collision:
        # `def func(output_path: OutputPath()) -> str: ...`
        output_names.add(output_name)
        type_struct = _annotation_to_type_struct(signature.return_annotation)
        output_spec = structures.OutputSpec(type=type_struct)
        outputs[output_name] = output_spec

    # Component name and description are derived from the function's name and
    # docstring. The name can be overridden by setting the
    # func._component_human_name attribute (legacy). The description can be
    # overridden by setting the func._component_description attribute
    # (legacy); otherwise the docstring's short description is used.
    component_name = getattr(func, '_component_human_name',
                             None) or _python_function_name_to_component_name(
                                 func.__name__)
    description = getattr(func, '_component_description',
                          None) or parsed_docstring.short_description
    if description:
        description = description.strip()

    component_spec = structures.ComponentSpec(
        name=component_name,
        description=description,
        inputs=inputs if inputs else None,
        outputs=outputs if outputs else None,
        # Dummy implementation to bypass model validation.
        implementation=structures.Implementation(),
    )
    return component_spec
EXECUTOR_INPUT_PLACEHOLDER = "{{$}}"
# Placeholder that the KFP backend substitutes at runtime with the serialized
# executor input for the task.
def _get_command_and_args_for_lightweight_component(
        func: Callable) -> Tuple[List[str], List[str]]:
    """Build the container ``command``/``args`` for a lightweight component.

    The function's source is captured and embedded in a shell command that
    writes it to an ephemeral module file and executes it through
    ``kfp.components.executor_main``.

    Args:
        func: The Python function to run as a component.

    Returns:
        A ``(command, args)`` pair for the component's container spec.
    """
    # Imports prepended to the captured function source so type annotations
    # and dsl symbols resolve inside the ephemeral module.
    imports_source = [
        'import kfp',
        'from kfp import dsl',
        'from kfp.dsl import *',
        'from typing import *',
    ]
    func_source = _get_function_source_definition(func)
    source = textwrap.dedent('''
        {imports_source}
        {func_source}\n''').format(
        imports_source='\n'.join(imports_source), func_source=func_source)
    # "$0" is the embedded source (last list element); "$@" forwards the
    # runtime-provided executor arguments to executor_main.
    command = [
        'sh',
        '-ec',
        textwrap.dedent('''\
                    program_path=$(mktemp -d)
                    printf "%s" "$0" > "$program_path/ephemeral_component.py"
                    python3 -m kfp.components.executor_main \
                        --component_module_path \
                        "$program_path/ephemeral_component.py" \
                        "$@"
                '''),
        source,
    ]
    args = [
        '--executor_input',
        EXECUTOR_INPUT_PLACEHOLDER,
        '--function_to_execute',
        func.__name__,
    ]
    return command, args
def _get_command_and_args_for_containerized_component(
        function_name: str) -> Tuple[List[str], List[str]]:
    """Build the container ``command``/``args`` for a containerized component.

    Unlike the lightweight variant, the component source is already baked into
    the target image, so the command simply invokes the executor entry point.

    Args:
        function_name: Name of the component function inside the image.

    Returns:
        A ``(command, args)`` pair for the component's container spec.
    """
    executor_command = ['python3', '-m', 'kfp.components.executor_main']
    executor_args = [
        '--executor_input',
        EXECUTOR_INPUT_PLACEHOLDER,
        '--function_to_execute',
        function_name,
    ]
    return executor_command, executor_args
def create_component_from_func(func: Callable,
                               base_image: Optional[str] = None,
                               target_image: Optional[str] = None,
                               packages_to_install: Optional[List[str]] = None,
                               pip_index_urls: Optional[List[str]] = None,
                               output_component_file: Optional[str] = None,
                               install_kfp_package: bool = True,
                               kfp_package_path: Optional[str] = None):
    """Implementation for the @component decorator.
    The decorator is defined under component_decorator.py. See the
    decorator for the canonical documentation for this function.
    """
    # Copy the caller's list: the kfp package may be appended below and the
    # argument passed in must not be mutated.
    packages_to_install = list(packages_to_install) if packages_to_install else []
    if install_kfp_package and target_image is None:
        if kfp_package_path is None:
            kfp_package_path = _get_default_kfp_package_path()
        packages_to_install.append(kfp_package_path)
    packages_to_install_command = _get_packages_to_install_command(
        package_list=packages_to_install, pip_index_urls=pip_index_urls)
    command = []
    args = []
    if base_image is None:
        base_image = _DEFAULT_BASE_IMAGE
    # A target image means the component is containerized (source baked into
    # the image); otherwise it is a lightweight component with embedded source.
    component_image = base_image
    if target_image:
        component_image = target_image
        command, args = _get_command_and_args_for_containerized_component(
            function_name=func.__name__,)
    else:
        command, args = _get_command_and_args_for_lightweight_component(
            func=func)
    component_spec = extract_component_interface(func)
    component_spec.implementation = structures.Implementation(
        container=structures.ContainerSpec(
            image=component_image,
            command=packages_to_install_command + command,
            args=args,
        ))
    # Path.resolve() returns a new path object; the original code discarded
    # the result, leaving module_path unresolved.
    module_path = pathlib.Path(inspect.getsourcefile(func)).resolve()
    component_name = _python_function_name_to_component_name(func.__name__)
    component_info = ComponentInfo(
        name=component_name,
        function_name=func.__name__,
        func=func,
        target_image=target_image,
        module_path=module_path,
        component_spec=component_spec,
        output_component_file=output_component_file,
        base_image=base_image)
    # REGISTERED_MODULES is populated only while building containerized
    # components (it is None in normal decorator use).
    if REGISTERED_MODULES is not None:
        REGISTERED_MODULES[component_name] = component_info
    if output_component_file:
        component_spec.save_to_component_yaml(output_component_file)
    return python_component.PythonComponent(
        component_spec=component_spec, python_func=func)
| 37.036364 | 108 | 0.661512 |
60dce09f48575bce9b4307b3956de979f47ce0dc | 12,138 | py | Python | dvmvs/fusionnet/run-testing-online.py | hashi0203/deep-video-mvs | b3943a9249d522dca3e6cd603e427f611cc7bad5 | [
"MIT"
] | null | null | null | dvmvs/fusionnet/run-testing-online.py | hashi0203/deep-video-mvs | b3943a9249d522dca3e6cd603e427f611cc7bad5 | [
"MIT"
] | null | null | null | dvmvs/fusionnet/run-testing-online.py | hashi0203/deep-video-mvs | b3943a9249d522dca3e6cd603e427f611cc7bad5 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.fusionnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, LSTMFusion, CostVolumeDecoder
from dvmvs.keyframe_buffer import KeyframeBuffer
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_non_differentiable_rectangle_depth_estimation, \
get_warp_grid_for_cost_volume_calculation
def predict(evaluate):
    """Run online fusionnet inference over the scene configured in Config.

    Streams the scene's posed frames through a keyframe buffer, fuses a cost
    volume from the selected measurement frames, carries the LSTM hidden
    state between keyframes, and saves per-frame depth predictions.

    @param evaluate: when true, ground-truth depth maps are loaded and passed
        to C{save_results} so errors can be computed.
    """
    dataset_name = Config.test_online_scene_path.split("/")[-2]
    system_name = "keyframe_{}_{}_{}_{}_dvmvs_fusionnet_online".format(dataset_name,
                                                                       Config.test_image_width,
                                                                       Config.test_image_height,
                                                                       Config.test_n_measurement_frames)
    print("Predicting with System:", system_name)
    print("# of Measurement Frames:", Config.test_n_measurement_frames)
    device = torch.device("cuda")
    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    lstm_fusion = LSTMFusion()
    cost_volume_decoder = CostVolumeDecoder()
    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    lstm_fusion = lstm_fusion.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, lstm_fusion, cost_volume_decoder]
    # Load one checkpoint per sub-network; the checkpoint files are assumed to
    # sort in the same order as the modules listed above.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path(Config.fusionnet_test_weights).files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    lstm_fusion = model[3]
    cost_volume_decoder = model[4]
    # Cost volumes are computed at half input resolution.
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)
    # RGB normalization constants (ImageNet statistics) and depth search range.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]
    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64
    scene_folder = Path(Config.test_online_scene_path)
    scene = scene_folder.split("/")[-1]
    print("Predicting for scene:", scene)
    keyframe_buffer = KeyframeBuffer(buffer_size=Config.test_keyframe_buffer_size,
                                     keyframe_pose_distance=Config.test_keyframe_pose_distance,
                                     optimal_t_score=Config.test_optimal_t_measure,
                                     optimal_R_score=Config.test_optimal_R_measure,
                                     store_return_indices=False)
    K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
    # poses.txt holds one flattened 4x4 camera-to-world matrix per frame.
    poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
    image_filenames = sorted((scene_folder / 'images').files("*.png"))
    inference_timer = InferenceTimer()
    lstm_state = None
    previous_depth = None
    previous_pose = None
    predictions = []
    if evaluate:
        reference_depths = []
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
    else:
        # if None the system will not be evaluated and errors will not be calculated
        reference_depths = None
        depth_filenames = None
    with torch.no_grad():
        for i in tqdm(range(0, len(poses))):
            reference_pose = poses[i]
            reference_image = load_image(image_filenames[i])
            # POLL THE KEYFRAME BUFFER
            # Non-keyframe responses are skipped; response 3 signals a
            # tracking break, so the temporal state (previous depth/pose and
            # the LSTM hidden state) is reset before continuing.
            response = keyframe_buffer.try_new_keyframe(reference_pose, reference_image)
            if response == 0 or response == 2 or response == 4 or response == 5:
                continue
            elif response == 3:
                previous_depth = None
                previous_pose = None
                lstm_state = None
                continue
            preprocessor = PreprocessImage(K=K,
                                           old_width=reference_image.shape[1],
                                           old_height=reference_image.shape[0],
                                           new_width=Config.test_image_width,
                                           new_height=Config.test_image_height,
                                           distortion_crop=Config.test_distortion_crop,
                                           perform_crop=Config.test_perform_crop)
            reference_image = preprocessor.apply_rgb(image=reference_image,
                                                     scale_rgb=scale_rgb,
                                                     mean_rgb=mean_rgb,
                                                     std_rgb=std_rgb)
            if reference_depths is not None:
                # Depth PNGs appear to store millimeters; converted to meters here.
                reference_depth = cv2.imread(depth_filenames[i], -1).astype(float) / 1000.0
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_depths.append(reference_depth)
            reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
            reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
            # Intrinsics scaled to the cost-volume (1/2) and LSTM-bottleneck
            # (1/32) resolutions.
            full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
            half_K_torch = full_K_torch.clone().cuda()
            half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0
            lstm_K_bottom = full_K_torch.clone().cuda()
            lstm_K_bottom[:, 0:2, :] = lstm_K_bottom[:, 0:2, :] / 32.0
            measurement_poses_torch = []
            measurement_images_torch = []
            measurement_frames = keyframe_buffer.get_best_measurement_frames(Config.test_n_measurement_frames)
            for (measurement_pose, measurement_image) in measurement_frames:
                measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                           scale_rgb=scale_rgb,
                                                           mean_rgb=mean_rgb,
                                                           std_rgb=std_rgb)
                measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0)
                measurement_images_torch.append(measurement_image_torch)
                measurement_poses_torch.append(measurement_pose_torch)
            inference_timer.record_start_time()
            measurement_feature_halfs = []
            for measurement_image_torch in measurement_images_torch:
                measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                measurement_feature_halfs.append(measurement_feature_half)
            reference_feature_half, reference_feature_quarter, \
            reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))
            cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                             image2s=measurement_feature_halfs,
                                             pose1=reference_pose_torch,
                                             pose2s=measurement_poses_torch,
                                             K=half_K_torch,
                                             warp_grid=warp_grid,
                                             min_depth=min_depth,
                                             max_depth=max_depth,
                                             n_depth_levels=n_depth_levels,
                                             device=device,
                                             dot_product=True)
            skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                     features_quarter=reference_feature_quarter,
                                                                     features_one_eight=reference_feature_one_eight,
                                                                     features_one_sixteen=reference_feature_one_sixteen,
                                                                     cost_volume=cost_volume)
            # Prime the LSTM with the previous prediction warped into the
            # current view; fall back to zeros when no previous keyframe exists.
            if previous_depth is not None:
                depth_estimation = get_non_differentiable_rectangle_depth_estimation(reference_pose_torch=reference_pose_torch,
                                                                                     measurement_pose_torch=previous_pose,
                                                                                     previous_depth_torch=previous_depth,
                                                                                     full_K_torch=full_K_torch,
                                                                                     half_K_torch=half_K_torch,
                                                                                     original_height=Config.test_image_height,
                                                                                     original_width=Config.test_image_width)
                depth_estimation = torch.nn.functional.interpolate(input=depth_estimation,
                                                                   scale_factor=(1.0 / 16.0),
                                                                   mode="nearest")
            else:
                depth_estimation = torch.zeros(size=(1, 1, int(Config.test_image_height / 32.0), int(Config.test_image_width / 32.0))).to(device)
            lstm_state = lstm_fusion(current_encoding=bottom,
                                     current_state=lstm_state,
                                     previous_pose=previous_pose,
                                     current_pose=reference_pose_torch,
                                     estimated_current_depth=depth_estimation,
                                     camera_matrix=lstm_K_bottom)
            prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, lstm_state[0])
            previous_depth = prediction.view(1, 1, Config.test_image_height, Config.test_image_width)
            previous_pose = reference_pose_torch
            inference_timer.record_end_time_and_elapsed_time()
            prediction = prediction.cpu().numpy().squeeze()
            predictions.append(prediction)
            if Config.test_visualize:
                visualize_predictions(numpy_reference_image=reference_image,
                                      numpy_measurement_image=measurement_image,
                                      numpy_predicted_depth=prediction,
                                      normalization_mean=mean_rgb,
                                      normalization_std=std_rgb,
                                      normalization_scale=scale_rgb,
                                      depth_multiplier_for_visualization=5000)
    inference_timer.print_statistics()
    save_results(predictions=predictions,
                 groundtruths=reference_depths,
                 system_name=system_name,
                 scene_name=scene,
                 save_folder=".")
if __name__ == '__main__':
    predict(evaluate=True)
| 51.432203 | 149 | 0.550091 |
1234de9c3b6f1810b6112087250519dee9d7227b | 612 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractAnimeMangaTranslations.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractAnimeMangaTranslations.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractAnimeMangaTranslations.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
def extractAnimeMangaTranslations(item):
    """
    Anime, manga, translations
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Require at least one volume/chapter/fragment marker; skip preview posts.
    if not (vol or chp or frag):
        return None
    if 'preview' in title.lower():
        return None
    # Posts carrying any of these tags are not translation releases.
    excluded_tags = [
        'Read Online',
        'Download',
        'comic',
        'Anime',
        'Manga',
        'Robotech',
        'Alpen Rose',
        'Watch Online',
        'Generation Tank',
        'Noboru Miyama',
        'Godo',
    ]
    if any(tag in item['tags'] for tag in excluded_tags):
        return None
    if '[Chang Sheng] BABY' in title or '[RAW]' in title:
        return None
    # Not recognised as a release by this parser.
    return False
3cf6259d4b09604e85ef9ef4c2f6a274bc6383b7 | 250 | py | Python | thenewboston_node/project/celery.py | olegtropinin/thenewboston-node | 2de4e14ef6855646121840224a82fcfc505b213c | [
"MIT"
] | 30 | 2021-03-05T22:08:17.000Z | 2021-09-23T02:45:45.000Z | thenewboston_node/project/celery.py | olegtropinin/thenewboston-node | 2de4e14ef6855646121840224a82fcfc505b213c | [
"MIT"
] | 148 | 2021-03-05T23:37:50.000Z | 2021-11-02T02:18:58.000Z | thenewboston_node/project/celery.py | olegtropinin/thenewboston-node | 2de4e14ef6855646121840224a82fcfc505b213c | [
"MIT"
] | 14 | 2021-03-05T21:58:46.000Z | 2021-10-15T17:27:52.000Z | import os
from celery import Celery
# Point Celery workers at the Django settings module before creating the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'thenewboston_node.project.settings')
app = Celery('thenewboston_node')
# Read every CELERY_-prefixed value from the Django settings.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover tasks modules in all installed Django apps.
app.autodiscover_tasks()
| 27.777778 | 85 | 0.816 |
dbeb9bcc835323170e5d6fed001f0dbd3978b9bd | 207 | py | Python | currencyware/__init__.py | un33k/django-currencyware | 1ed93a53bc5156eae3f362dabda3763161794ae8 | [
"MIT"
] | null | null | null | currencyware/__init__.py | un33k/django-currencyware | 1ed93a53bc5156eae3f362dabda3763161794ae8 | [
"MIT"
] | null | null | null | currencyware/__init__.py | un33k/django-currencyware | 1ed93a53bc5156eae3f362dabda3763161794ae8 | [
"MIT"
] | null | null | null | default_app_config = 'currencyware.apps.CurrencywareConfig'
# Package metadata for the currencyware distribution.
__author__ = 'Val Neekman @ Neekware Inc. [@vneekman]'
__description__ = "A Django application to translate currency names"
__version__ = '0.1.3'
| 34.5 | 68 | 0.782609 |
ceaf539a3e50265c3cf9e39bf7e02511a22844cd | 2,415 | py | Python | utils/spacy/__init__.py | natsheh/sensem | 6c2fff4659a1ffd39dbb737f0227ab98b2e5a185 | [
"BSD-3-Clause"
] | 14 | 2017-03-03T19:01:35.000Z | 2020-10-13T15:34:38.000Z | utils/spacy/__init__.py | natsheh/sensem | 6c2fff4659a1ffd39dbb737f0227ab98b2e5a185 | [
"BSD-3-Clause"
] | 2 | 2017-10-21T15:53:19.000Z | 2018-04-04T14:40:12.000Z | utils/spacy/__init__.py | natsheh/sensem | 6c2fff4659a1ffd39dbb737f0227ab98b2e5a185 | [
"BSD-3-Clause"
] | 3 | 2018-08-09T10:54:46.000Z | 2020-12-01T08:32:37.000Z | # -*- coding: utf-8 -*-
#
# This file is part of sensim
"""Helpers for sentence semantic similarity model.
.. Author:: Hussein AL-NATSHEH <hussein.al-natsheh@ish-lyon.cnrs.fr>
"""
"""Helper functions."""
from .spacy_wrapper import spacy_organizations
from .spacy_wrapper import spacy_persons
from .spacy_wrapper import spacy_locations
from .spacy_wrapper import spacy_groups
from .spacy_wrapper import spacy_facilities
from .spacy_wrapper import spacy_geo_locations
from .spacy_wrapper import spacy_products
from .spacy_wrapper import spacy_events
from .spacy_wrapper import spacy_work_of_arts
from .spacy_wrapper import spacy_laws
from .spacy_wrapper import spacy_languages
from .spacy_wrapper import PairSpacyVecTransformer
from .spacy_wrapper import spacy_tokens
from .spacy_wrapper import spacy_adj
from .spacy_wrapper import spacy_adp
from .spacy_wrapper import spacy_adv
from .spacy_wrapper import spacy_aux
from .spacy_wrapper import spacy_conj
from .spacy_wrapper import spacy_det
from .spacy_wrapper import spacy_intj
from .spacy_wrapper import spacy_noun
from .spacy_wrapper import spacy_num
from .spacy_wrapper import spacy_part
from .spacy_wrapper import spacy_pron
from .spacy_wrapper import spacy_propn
from .spacy_wrapper import spacy_punct
from .spacy_wrapper import spacy_sconj
from .spacy_wrapper import spacy_sym
from .spacy_wrapper import spacy_verb
from .spacy_wrapper import spacy_x
from .spacy_wrapper import spacy_eol
from .spacy_wrapper import spacy_space
from .spacy_wrapper import sense_tokens
__all__ = ("spacy_organizations",
"spacy_persons",
"spacy_locations",
"spacy_groups",
"spacy_facilities",
"spacy_geo_locations",
"spacy_products",
"spacy_events",
"spacy_work_of_arts",
"spacy_laws",
"spacy_languages",
"PairSpacyVecTransformer",
"spacy_tokens",
"spacy_adj",
"spacy_adp",
"spacy_adv",
"spacy_aux",
"spacy_conj",
"spacy_det",
"spacy_intj",
"spacy_noun",
"spacy_num",
"spacy_part",
"spacy_pron",
"spacy_propn",
"spacy_punct",
"spacy_sconj",
"spacy_sym",
"spacy_verb",
"spacy_x",
"spacy_eol",
"spacy_space",
"sense_tokens")
| 30.1875 | 68 | 0.700207 |
c6a71408d4d5f9ca40e1aee6963984d03df21829 | 1,922 | py | Python | doc/examples/usersguide/brick_stl.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null | doc/examples/usersguide/brick_stl.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | 1 | 2019-01-14T03:07:43.000Z | 2019-01-14T03:07:43.000Z | doc/examples/usersguide/brick_stl.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null | from __future__ import division
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
from esys.escript import *
from esys.pycad import *
from esys.pycad.gmsh import Design
# Corners of the unit cube: p0..p3 on the bottom face (z=0), p4..p7 on top (z=1).
p0=Point(0.,0.,0.)
p1=Point(1.,0.,0.)
p2=Point(0.,1.,0.)
p3=Point(1.,1.,0.)
p4=Point(0.,0.,1.)
p5=Point(1.,0.,1.)
p6=Point(0.,1.,1.)
p7=Point(1.,1.,1.)
# Edges of the bottom face (counter-clockwise loop).
l01=Line(p0,p1)
l13=Line(p1,p3)
l32=Line(p3,p2)
l20=Line(p2,p0)
# Edges of the top face.
l45=Line(p4,p5)
l57=Line(p5,p7)
l76=Line(p7,p6)
l64=Line(p6,p4)
# Vertical edges connecting bottom to top.
l15=Line(p1,p5)
l40=Line(p4,p0)
l37=Line(p3,p7)
l62=Line(p6,p2)
# Six faces of the cube; negated lines reverse edge orientation so each
# curve loop is consistently oriented.
bottom=PlaneSurface(CurveLoop(l01,l13,l32,l20))
top=PlaneSurface(CurveLoop(l45,l57,l76,l64))
front=PlaneSurface(CurveLoop(l01,l15,-l45,l40))
back=PlaneSurface(CurveLoop(l32,-l62,-l76,-l37))
left=PlaneSurface(CurveLoop(-l40,-l64,l62,l20))
right=PlaneSurface(CurveLoop(-l15,l13,l37,-l57))
# Closed surface loop bounding the brick volume (bottom negated for outward normals).
v=Volume(SurfaceLoop(top,-bottom,front,back,left,right))
# Mesh the geometry with second-order elements and export it as an STL file.
des=Design(dim=3, order=2, element_size = 0.1, keep_files=True)
des.setScriptFileName("brick.geo")
des.addItems(v, top, bottom, back, front, left , right)
des.setFileFormat(des.STL)
des.setMeshFileName("brick.stl")
des.generate()
| 28.264706 | 78 | 0.67846 |
dcc491beaaac12e9349b1032d9f9ae8a6f50d918 | 3,664 | py | Python | logdweb/django_jinja2.py | hiidef/logdweb | c80d47f4c5759cadeb3088b9f7fa093c30e11696 | [
"MIT"
] | 1 | 2015-08-30T02:36:13.000Z | 2015-08-30T02:36:13.000Z | logdweb/django_jinja2.py | hiidef/logdweb | c80d47f4c5759cadeb3088b9f7fa093c30e11696 | [
"MIT"
] | null | null | null | logdweb/django_jinja2.py | hiidef/logdweb | c80d47f4c5759cadeb3088b9f7fa093c30e11696 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Django jinja2 integration, inspired by:
http://djangosnippets.org/snippets/1061/
"""
from datetime import datetime
from logdweb import settings, util
from jinja2 import FileSystemLoader, FileSystemBytecodeCache, Environment, \
PackageLoader, ChoiceLoader
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.template import RequestContext
from django.utils.importlib import import_module
from django.http import HttpResponse
from django.conf import settings as dsettings
# logdweb should be an installed app, so the loader can find our templates
loaders = [FileSystemLoader(path) for path in settings.TEMPLATE_DIRS]
loaders += [PackageLoader(app) for app in settings.INSTALLED_APPS]
# Compiled template bytecode is cached on disk so templates are not
# re-parsed on every process start.
cache = FileSystemBytecodeCache(settings.JINJA_BYTECODE_CACHE_DIR, '%s.cache')
def datetimeformat(value, format='%d %b %H:%M:%S'):
    """Jinja filter: format a datetime (or a numeric timestamp) as a string."""
    is_timestamp = isinstance(value, (long, int, float))
    moment = datetime.fromtimestamp(value) if is_timestamp else value
    return moment.strftime(format)
# Shared module-level Jinja environment used by the render helpers below.
env = Environment(
    extensions=settings.JINJA_EXTENSIONS,
    loader=ChoiceLoader(loaders),
    bytecode_cache=cache,
    cache_size=settings.JINJA_CACHE_SIZE,
)
# Expose Django's URL reverser inside templates.
env.globals.update({
    'reverse': reverse,
})
# NOTE(review): this assumes __builtins__ is a dict here; in some import
# contexts __builtins__ is a module, where update() would fail — confirm.
env.globals.update(__builtins__)
env.filters['datetimeformat'] = datetimeformat
env.filters['render_msg'] = util.render_msg
env.filters['jsclass'] = util.jsclass
env.globals['settings'] = dsettings
for name in settings.JINJA_FILTERS:
path = settings.JINJA_FILTERS[ name ]
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing Jinja filter module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable Jinja filter' % (module, attr))
env.filters[ name ] = func
for name in settings.JINJA_TESTS:
path = settings.JINJA_TESTS[ name ]
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing Jinja filter module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable Jinja test' % (module, attr))
env.tests[ name ] = func
for name in settings.JINJA_GLOBALS:
path = settings.JINJA_GLOBALS[ name ]
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing Jinja filter module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable Jinja global' % (module, attr))
env.globals[name] = func
def render_to_string(filename, context=None, request=None):
    """Render to string, similar to django's, but uses jinja.

    The caller's ``context`` mapping is shallow-copied before the request is
    added, so the function no longer mutates its argument.
    """
    context = {} if context is None else dict(context)
    template = env.get_template(filename)
    context['request'] = request
    rendered = template.render(**context)
    return rendered
def render_to_response(filename, context=None, request=None, mimetype=settings.DEFAULT_CONTENT_TYPE):
    """Render to response, similar to django's, but uses jinja."""
    body = render_to_string(filename, context, request)
    # Reset the per-request timing data once the page has been rendered.
    settings.timer.clear()
    return HttpResponse(body, mimetype=mimetype)
| 34.566038 | 111 | 0.706605 |
81ffdbdcba471ca35dbffae33895c344e9c9bf6c | 60,972 | py | Python | src/nevow/test/test_athena.py | winjer/squeal | 20401986e0d1698776f5b482b28e14c57b11833c | [
"Apache-2.0"
] | 2 | 2015-01-30T10:22:12.000Z | 2015-11-05T15:37:23.000Z | src/nevow/test/test_athena.py | winjer/squeal | 20401986e0d1698776f5b482b28e14c57b11833c | [
"Apache-2.0"
] | null | null | null | src/nevow/test/test_athena.py | winjer/squeal | 20401986e0d1698776f5b482b28e14c57b11833c | [
"Apache-2.0"
] | null | null | null |
import os, sets
from itertools import izip
from xml.dom.minidom import parseString
from twisted.trial import unittest
from twisted.python import util
from twisted.internet.defer import Deferred
from twisted.application.service import IServiceMaker
from twisted.application.internet import TCPServer
from twisted.python.reflect import qual
from twisted.python.usage import UsageError
from twisted.plugin import IPlugin
from nevow import athena, rend, tags, flat, loaders, url
from nevow.loaders import stan
from nevow.athena import LiveElement
from nevow.appserver import NevowSite
from nevow.inevow import IRequest
from nevow.context import WovenContext
from nevow.testutil import FakeRequest, renderPage, renderLivePage, CSSModuleTestMixin
from nevow._widget_plugin import WidgetPluginRoot
from nevow._widget_plugin import ElementRenderingLivePage
from twisted.plugins.nevow_widget import widgetServiceMaker
class MappingResourceTests(unittest.TestCase):
    """
    Tests for L{athena.MappingResource}.
    """
    def test_renderMapping(self):
        """
        L{athena.MappingResource} isn't directly renderable.
        """
        m = athena.MappingResource({})
        # Rendering the mapping itself yields a 404 page, not content.
        self.failUnless(isinstance(m.renderHTTP(None), rend.FourOhFour))
    def test_lookupNonExistentKey(self):
        """
        L{athena.MappingResource} should return L{rend.NotFound} when asked
        for a non-existent key.
        """
        m = athena.MappingResource({'name': 'value'})
        self.assertEquals(m.locateChild(None, ('key',)), rend.NotFound)
    def test_lookupKey(self):
        """
        L{athena.MappingResource} should return whatever the
        C{resourceFactory} method products when supplied a valid key.
        """
        m = athena.MappingResource({'name': 'value'})
        # Substitute a trivial factory so the produced resource is comparable.
        m.resourceFactory = sets.Set
        resource, segments = m.locateChild(None, ('name',))
        # The lookup consumes all remaining segments.
        self.assertEquals(segments, [])
        self.assertEquals(resource, sets.Set('value'))
class ModuleRegistryTestMixin:
    """
    Mixin for testing module registry objects.

    Subclasses must define C{registryClass} (the registry under test) and
    C{moduleClass} (the module type it is expected to produce).
    """
    def test_getModuleForName(self):
        """
        C{getModuleForName} should return the right kind of module.
        """
        moduleName = u'test_getModuleForName'
        mapping = {moduleName: self.mktemp()}
        reg = self.registryClass(mapping)
        mod = reg.getModuleForName(moduleName)
        self.assertTrue(isinstance(mod, self.moduleClass))
        self.assertEqual(mod.name, moduleName)
        # The registry hands its mapping (the same object) to the module.
        self.assertIdentical(mod.mapping, mapping)
    def test_getModuleForNameUnknown(self):
        """
        C{getModuleForName} should get angry if we ask for a module which
        doesn't exist.
        """
        moduleName = u'test_getModuleForName'
        reg = self.registryClass({})
        self.assertRaises(
            RuntimeError,
            reg.getModuleForName,
            moduleName)
class CSSRegistryTests(unittest.TestCase, ModuleRegistryTestMixin):
    """
    Tests for L{athena.CSSRegistry}.
    """
    registryClass = athena.CSSRegistry
    moduleClass = athena.CSSModule
    def test_getModuleForNameLoad(self):
        """
        L{athena.CSSRegistry} should initialize its mapping from
        L{athena.allCSSPackages} as needed.
        """
        moduleName = u'test_getModuleForNameLoad'
        # Monkeypatch the package-discovery hook; restore it in the finally
        # block so other tests are unaffected.
        origAllCSSPackages = athena.allCSSPackages
        theCSSPackages = {moduleName: self.mktemp()}
        athena.allCSSPackages = lambda: theCSSPackages
        reg = athena.CSSRegistry()
        try:
            mod = reg.getModuleForName(moduleName)
        finally:
            athena.allCSSPackages = origAllCSSPackages
        self.assertEqual(mod.name, moduleName)
        self.assertEqual(mod.mapping, theCSSPackages)
class JSDependenciesTests(unittest.TestCase, ModuleRegistryTestMixin):
    """
    Tests for L{athena.JSDependencies}.
    """
    registryClass = athena.JSDependencies
    moduleClass = athena.JSModule
    def test_getModuleForNameLoad(self):
        """
        L{athena.JSDependencies} should initialize its mapping from
        L{athena.allCSSPackages} as needed.
        """
        moduleName = u'test_getModuleForNameLoad'
        # Monkeypatch the package-discovery hook; restore it in the finally
        # block so other tests are unaffected.
        origAllJavascriptPackages = athena.allJavascriptPackages
        theJavascriptPackages = {moduleName: self.mktemp()}
        athena.allJavascriptPackages = lambda: theJavascriptPackages
        reg = athena.JSDependencies()
        try:
            mod = reg.getModuleForName(moduleName)
        finally:
            athena.allJavascriptPackages = origAllJavascriptPackages
        self.assertEqual(mod.name, moduleName)
        self.assertEqual(mod.mapping, theJavascriptPackages)
class AthenaModuleTestMixin:
    """
    Mixin for testing L{athena.AthenaModule} and derived classes.

    Subclasses set C{moduleClass} to the concrete module type under test.
    """
    # Sample module source containing three '// import' declarations that the
    # dependency scanner is expected to pick up.
    testModuleImpl = """\
lalal this is javascript honest
// uh oh! a comment! gee I wish javascript had an import system
// import ExampleModule
here is some more javascript code
// import Another
// import Module
the end
"""
    moduleClass = athena.AthenaModule
    def setUp(self):
        """
        Write L{testModuleImpl} to a file.
        """
        self.testModuleFilename = self.mktemp()
        testModule = file(self.testModuleFilename, 'w')
        testModule.write(self.testModuleImpl)
        testModule.close()
    def test_getOrCreate(self):
        """
        L{athena.AthenaModule.getOrCreate} shouldn't make two instances of the
        same module.
        """
        modules = {'testmodule': self.testModuleFilename}
        m1 = self.moduleClass.getOrCreate('testmodule', modules)
        m2 = self.moduleClass.getOrCreate('testmodule', modules)
        self.assertTrue(isinstance(m1, self.moduleClass))
        self.assertEquals(m1.name, 'testmodule')
        self.assertIdentical(m1, m2)
    def _doDependencySetup(self):
        """
        Create a complicated network of module dependencies.
        """
        emptyModulePath = self.mktemp()
        file(emptyModulePath, 'w').close()
        modules = {
            'testmodule': self.testModuleFilename,
            'Another': self.mktemp(),
            'ExampleModule': self.mktemp(),
            'Module': emptyModulePath,
            'SecondaryDependency': emptyModulePath,
            'ExampleDependency': emptyModulePath}
        anotherModule = file(modules['Another'], 'w')
        anotherModule.write('// import SecondaryDependency\n')
        anotherModule.close()
        exampleModule = file(modules['ExampleModule'], 'w')
        exampleModule.write('// import ExampleDependency\n')
        exampleModule.close()
        return modules
    def test_dependencies(self):
        """
        L{athena.AthenaModule.dependencies} should return the direct
        dependencies of the module.
        """
        modules = self._doDependencySetup()
        m = self.moduleClass.getOrCreate('testmodule', modules)
        deps = [d.name for d in m.dependencies()]
        deps.sort()
        self.assertEquals(deps, ['Another', 'ExampleModule', 'Module'])
    def test_allDependencies(self):
        """
        L{athena.AthenaModule.allDependencies} should return all dependencies
        of the module.
        """
        # Expected direct-dependency graph for the fixture modules above.
        depgraph = {
            'Another': ['SecondaryDependency'],
            'ExampleModule': ['ExampleDependency'],
            'Module': [],
            'testmodule': ['Another', 'ExampleModule', 'Module'],
            'SecondaryDependency': [],
            'ExampleDependency': []}
        modules = self._doDependencySetup()
        m = self.moduleClass.getOrCreate('testmodule', modules)
        allDeps = [d.name for d in m.allDependencies()]
        for depMod in allDeps:
            modDeps = depgraph[depMod]
            for d in modDeps:
                # All dependencies should be loaded before the module
                # that depends upon them.
                self.assertIn(d, allDeps)
                self.assertIn(depMod, allDeps)
                self.failUnless(allDeps.index(d) < allDeps.index(depMod))
    def test_crlfNewlines(self):
        """
        L{athena.AthenaModule} should correctly ignore the CR after a module
        name when CR LF newlines are used in a JavaScript source file.
        """
        fooModuleFilename = self.mktemp()
        fooModule = file(fooModuleFilename, 'wb')
        fooModule.write('// import Bar\r\n')
        fooModule.close()
        barModuleFilename = self.mktemp()
        barModule = file(barModuleFilename, 'wb')
        barModule.close()
        modules = {
            'Foo': fooModuleFilename,
            'Bar': barModuleFilename}
        module = self.moduleClass('Foo', modules)
        fooDependencies = list(module.dependencies())
        self.assertEqual(len(fooDependencies), 1)
        self.assertEqual(fooDependencies[0].name, u'Bar')
    def test_dependencyCaching(self):
        """
        L{athena.AthenaModule} should cache module dependencies.
        """
        testModuleFilename = self.mktemp()
        testModule = file(testModuleFilename, 'w')
        testModule.write('')
        testModule.close()
        modules = {'testmodule': testModuleFilename}
        m = self.moduleClass('testmodule', modules)
        # Wrap _extractImports to count how often the file is re-scanned.
        m.extractCounter = 0
        origExtractImports = m._extractImports
        def _extractImports(x):
            m.extractCounter += 1
            return origExtractImports(x)
        m._extractImports = _extractImports
        deps = list(m.dependencies())
        self.assertEquals(m.extractCounter, 1)
        # A second call must hit the cache, not re-scan the file.
        deps2 = list(m.dependencies())
        self.assertEquals(m.extractCounter, 1)
        # Touching the file's mtime must invalidate the cache.
        newTime = m.lastModified
        os.utime(testModuleFilename, (newTime + 1, newTime + 1))
        deps3 = list(m.dependencies())
        self.assertEquals(m.extractCounter, 2)
    def test_packageDependencies(self):
        """
        L{athena.AthenaModule} should include a module's package in its
        dependencies.
        """
        modules = {u'Foo': self.mktemp(), u'Foo.Bar': self.mktemp()}
        file(modules[u'Foo'], 'wb').close()
        file(modules[u'Foo.Bar'], 'wb').close()
        foo = self.moduleClass.getOrCreate(u'Foo', modules)
        bar = self.moduleClass.getOrCreate(u'Foo.Bar', modules)
        self.assertIn(foo, bar.allDependencies())
    def test_repr(self):
        """
        L{athena.AthenaModule} should C{repr} to something helpful.
        """
        moduleName = u'Foo.Bar'
        module = self.moduleClass(
            moduleName, {moduleName: self.mktemp()})
        self.assertEqual(
            repr(module),
            '%s(%r)' % (self.moduleClass.__name__, moduleName))
class AthenaModuleTests(AthenaModuleTestMixin, unittest.TestCase):
    """
    Run the shared L{AthenaModuleTestMixin} tests against
    L{athena.AthenaModule}.
    """
    moduleClass = athena.AthenaModule
class JSModuleTests(AthenaModuleTestMixin, unittest.TestCase):
    """
    Run the shared L{AthenaModuleTestMixin} tests against
    L{athena.JSModule}.
    """
    moduleClass = athena.JSModule
class CSSModuleTests(AthenaModuleTestMixin, unittest.TestCase):
    """
    Run the shared L{AthenaModuleTestMixin} tests against
    L{athena.CSSModule}.
    """
    moduleClass = athena.CSSModule
class _CountingAthenaModule(athena.AthenaModule):
    """
    L{athena.AthenaModule} subclass which records how many times its
    C{dependencies} method has been invoked.
    """
    # Number of calls made to dependencies() on this instance.
    count = 0

    def dependencies(self):
        self.count = self.count + 1
        return super(_CountingAthenaModule, self).dependencies()
class MemoizationTests(unittest.TestCase):
    """
    Tests for dependency memoization.
    """
    def _outputToTempFile(self, s):
        """
        Write the string C{s} to a fresh temporary file and return the
        name of that file.

        @param s: file contents
        @type s: C{str}

        @return: filename
        @rtype: C{str}
        """
        name = self.mktemp()
        out = file(name, 'w')
        try:
            out.write(s)
        finally:
            out.close()
        return name

    def setUp(self):
        # AthenaModule keeps a global name -> module cache in a class
        # attribute; reset it on _CountingAthenaModule so every test run
        # loads all of its modules afresh.
        _CountingAthenaModule._modules = {}
        emptyFile = self._outputToTempFile('')
        dependorFile = self._outputToTempFile('// import Foo')
        topFile = self._outputToTempFile('// import Quux\n'
                                         '// import Quux2')
        self.modules = {'Top': topFile,
                        'Quux': dependorFile,
                        'Quux2': dependorFile,
                        'Foo': emptyFile}

    def test_noGlobalMemo(self):
        """
        L{AthenaModule.allDependencies} with no memo argument will retrieve its
        own dependencies (via L{AthenaModule.dependencies}) exactly once per
        invocation.
        """
        _CountingAthenaModule.getOrCreate('Foo', self.modules)
        top = _CountingAthenaModule.getOrCreate('Top', self.modules)
        self.assertEqual(top.count, 0)
        # Without a shared memo, each traversal re-fetches direct
        # dependencies exactly once.
        list(top.allDependencies())
        self.assertEqual(top.count, 1)
        list(top.allDependencies())
        self.assertEqual(top.count, 2)

    def test_withGlobalMemo(self):
        """
        Direct dependencies for a particular module should only be retrieved
        once across multiple C{allDependencies()} calls if a memo is reused.
        """
        _CountingAthenaModule.getOrCreate('Foo', self.modules)
        top = _CountingAthenaModule.getOrCreate('Top', self.modules)
        memo = {}
        self.assertEqual(top.count, 0)
        # With a shared memo, the second traversal hits the cache and does
        # not re-fetch anything.
        list(top.allDependencies(memo))
        self.assertEqual(top.count, 1)
        list(top.allDependencies(memo))
        self.assertEqual(top.count, 1)
class ModuleInteractionTests(unittest.TestCase):
    """
    Tests for JS/CSS module interactions.
    """
    def test_separateModuleNamespace(self):
        """
        L{athena.CSSModule} and L{athena.JSModule} should use separate module
        namespaces.
        """
        name = u'test_separateModuleNamespace'
        fromCSS = athena.CSSModule.getOrCreate(name, {name: self.mktemp()})
        fromJS = athena.JSModule.getOrCreate(name, {name: self.mktemp()})
        # The same name must yield two distinct module objects, each from
        # its own registry.
        self.assertNotIdentical(fromCSS, fromJS)
        self.assertTrue(isinstance(fromCSS, athena.CSSModule))
        self.assertTrue(isinstance(fromJS, athena.JSModule))
class _AutoPackageTestMixin:
    """
    Mixin for testing L{athena.AutoJSPackage} and L{athena.AutoCSSPackage}.
    """
    # Subclasses supply the package class under test and the file extension
    # it should recognize.
    packageFactory = None
    moduleExtension = None

    def test_package(self):
        """
        L{packageFactory} should correctly construct its mapping from a
        filesystem package layout.
        """
        root = self.mktemp()
        os.makedirs(os.path.join(root, 'Foo', 'Baz'))

        def touch(*segments):
            # Create an empty file under the package root, returning its path.
            path = os.path.join(root, *segments)
            file(path, 'w').close()
            return path

        ext = self.moduleExtension
        expected = {
            u'Foo': touch('Foo', '__init__.' + ext),
            u'Foo.Bar': touch('Foo', 'Bar.' + ext),
            u'Foo.Baz': util.sibpath(athena.__file__, 'empty-module.' + ext),
            u'Foo.Baz.Quux': touch('Foo', 'Baz', 'Quux.' + ext)}
        # Hidden files, hidden directories, and files with unrelated
        # extensions must all be excluded from the mapping.
        touch('Foo', '.foo.' + ext)
        os.mkdir(os.path.join(root, 'Foo', '.test'))
        touch('Foo', '.test', 'Foo.' + ext)
        touch('Foo', 'Bar.other')
        touch('Foo', 'Zot.other')
        package = self.packageFactory(root)
        for moduleName, path in expected.iteritems():
            self.assertEquals(package.mapping.pop(moduleName), path)
        # Nothing unexpected may remain in the mapping.
        self.assertEquals(package.mapping, {})
class AutoJSPackageTests(unittest.TestCase, _AutoPackageTestMixin):
    """
    Run the shared L{_AutoPackageTestMixin} tests against
    L{athena.AutoJSPackage}.
    """
    packageFactory = athena.AutoJSPackage
    moduleExtension = 'js'
class AutoCSSPackageTests(unittest.TestCase, _AutoPackageTestMixin):
    """
    Run the shared L{_AutoPackageTestMixin} tests against
    L{athena.AutoCSSPackage}.
    """
    packageFactory = athena.AutoCSSPackage
    moduleExtension = 'css'
class UtilitiesTests(unittest.TestCase):
    """
    Tests for misc. Athena utilities.
    """
    def test_preprocessorCollection(self):
        """
        Test that preprocessors from all the base classes of an instance are
        found, and that a preprocessor instance attribute overrides all of
        these.
        """
        a, b, c = object(), object(), object()
        class Base(object):
            preprocessors = [a]
        class OtherBase(object):
            preprocessors = [b]
        class Derived(Base, OtherBase):
            preprocessors = [c]
        inst = Derived()
        # Base-class preprocessors come first; the most derived class's own
        # list comes last.
        self.assertEqual(
            rend._getPreprocessors(inst),
            [a, b, c])
        d = object()
        # An instance attribute completely replaces the class-derived list.
        inst.preprocessors = [d]
        self.assertEqual(
            rend._getPreprocessors(inst),
            [d])
    def test_handlerMacro(self):
        """
        Test that the handler macro rewrites athena:handler nodes to the
        appropriate JavaScript.
        """
        expectedOutput = (
            'return Nevow.Athena.Widget.handleEvent('
            'this, "onclick", "bar");')
        tag = tags.span[athena.handler(event='onclick', handler='bar')]
        mutated = athena._rewriteEventHandlerToAttribute(tag)
        output = flat.flatten(mutated)
        self.assertEquals(
            output,
            '<span onclick="' + expectedOutput + '"></span>')
    def test_handlerMacroAgainstList(self):
        """
        Macros need to be runnable on lists of things. Make sure the handler
        macro is.
        """
        tag = ["hello", " ", "world"]
        # A plain list (not a Tag) should pass through the rewriter unchanged.
        self.assertEquals(
            athena._rewriteEventHandlerToAttribute(tag),
            tag)
    def test_athenaIdRewriting(self):
        """
        Test that IDs are correctly rewritten in id, for, and headers
        attributes.
        """
        tag = [tags.label(_for='foo'),
               tags.input(id='foo'),
               tags.th(headers=''),
               tags.th(headers='foo'),
               tags.td(headers='foo bar'),
               tags.td(headers='foo bar baz')]
        element = athena.LiveElement(docFactory=loaders.stan(tag))
        page = athena.LivePage(docFactory=loaders.stan(element))
        element.setFragmentParent(page)
        def _verifyRendering(result):
            self.assertIn('<input id="athenaid:%s-foo"' % (element._athenaID,), result)
            self.assertIn('<label for="athenaid:%s-foo"' % (element._athenaID,), result)
            # An empty headers attribute is left alone.
            self.assertIn('<th headers=""', result)
            self.assertIn('<th headers="athenaid:%s-foo"' % (
                element._athenaID,), result)
            # Space-separated id lists are rewritten element-by-element.
            self.assertIn('<td headers="athenaid:%s-foo athenaid:%s-bar"' % (
                element._athenaID, element._athenaID), result)
            self.assertIn('<td headers="athenaid:%s-foo athenaid:%s-bar athenaid:%s-baz"' % (
                element._athenaID, element._athenaID, element._athenaID), result)
        return renderLivePage(page).addCallback(_verifyRendering)
    def test_elementPreprocessors(self):
        """
        Make sure that LiveElements have their preprocessors applied to their
        document.
        """
        preprocessed = []
        tag = tags.span
        element = athena.LiveElement(docFactory=loaders.stan(tag))
        page = athena.LivePage(docFactory=loaders.stan(element))
        element.preprocessors = [preprocessed.append]
        element.setFragmentParent(page)
        renderDeferred = renderPage(page)
        def rendered(result):
            page.action_close(None)
            # The preprocessor is handed the document as a list of tags.
            self.assertEquals(preprocessed, [[tag]])
        renderDeferred.addCallback(rendered)
        return renderDeferred
    def test_userAgentDetection(self):
        """
        C{LivePage._supportedBrowser} should return True for User-Agent strings
        which are known to be supported and False for those which are known
        to be unsupported.
        """
        page = athena.LivePage()
        supported = ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
                     "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
                     "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.3)"
                     " Gecko/20061201 Firefox/2.0.0.3 (Ubuntu-feisty)",
                     "Mozilla/5.0 (Windows; U; Windows NT 5.2; sv-SE;"
                     " rv:1.8.0.8) Gecko/20061025 Firefox 1.5.0.8",
                     "Opera/9.20 (Windows NT 6.0; U; en)"]
        unsupported = ["Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en)"
                       " AppleWebKit/418.9.1 (KHTML, like Gecko) Safari/419.3",
                       "Opera/8.5 (Windows NT 6.0; U; en)",
                       "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US;"
                       " rv:1.7.10) Gecko/20050716 Firefox/1.0.6",
                       "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)"]
        for ua in supported:
            req = FakeRequest()
            req.received_headers['user-agent'] = ua
            self.assertTrue(page._supportedBrowser(req))
        for ua in unsupported:
            req = FakeRequest()
            req.received_headers['user-agent'] = ua
            self.assertFalse(page._supportedBrowser(req))
    def test_unsupportedBrowserPage(self):
        """
        Test that unsupported browsers get told they're unsupported.
        """
        ctx = WovenContext()
        page = athena.LivePage()
        req = FakeRequest()
        req.received_headers['user-agent'] = "Mozilla/4.0 (compatible; MSIE 2.0; Windows NT 5.1)"
        ctx.remember(req, IRequest)
        d = renderPage(page, reqFactory=lambda: req)
        # The rendered output must be exactly the unsupported-browser page.
        d.addCallback(
            self.assertEqual,
            flat.flatten(page.unsupportedBrowserLoader))
        return d
class StandardLibraryTestCase(unittest.TestCase):
    """
    Test all the Nevow JavaScript "standard library" modules.
    """
    def setUp(self):
        # JSDependencies knows where the bundled JavaScript modules live.
        self.deps = athena.JSDependencies()
    def _importTest(self, moduleName):
        """
        Assert that the module named C{moduleName} can be located and that
        it appears in its own dependency chain.
        """
        mod = self.deps.getModuleForName(moduleName)
        inspect = [dep for dep in mod.allDependencies() if dep.name == moduleName]
        self.failUnless(inspect)
    def test_divmodImport(self):
        """
        Test that Divmod can be imported.
        """
        return self._importTest('Divmod')
    def test_baseImport(self):
        """
        Test that Divmod.Base can be imported.
        """
        return self._importTest('Divmod.Base')
    def test_deferImport(self):
        """
        Test that Divmod.Defer can be imported.
        """
        return self._importTest('Divmod.Defer')
    def test_inspectImport(self):
        """
        Test that Divmod.Inspect can be imported.
        """
        return self._importTest('Divmod.Inspect')
    def test_runtimeImport(self):
        """
        Test that Divmod.Runtime can be imported.
        """
        return self._importTest('Divmod.Runtime')
    def test_xmlImport(self):
        """
        Test that Divmod.XML can be imported.
        """
        return self._importTest('Divmod.XML')
    def test_nevowImport(self):
        """
        Test that Nevow can be imported.
        """
        return self._importTest('Nevow')
    def test_athenaImport(self):
        """
        Test that Nevow.Athena can be imported.
        """
        return self._importTest('Nevow.Athena')
    def test_testImport(self):
        """
        Test that Nevow.Athena.Test can be imported.
        """
        return self._importTest('Nevow.Athena.Test')
    def test_tagLibraryImport(self):
        """
        Test that Nevow.TagLibrary can be imported.
        """
        return self._importTest('Nevow.TagLibrary')
    def test_tabbedPaneImport(self):
        """
        Test that Nevow.TagLibrary.TabbedPane can be imported.
        """
        return self._importTest('Nevow.TagLibrary.TabbedPane')
class TestFragment(athena.LiveFragment):
    """
    Minimal L{athena.LiveFragment} subclass used by the nesting tests below.
    """
    pass
class Nesting(unittest.TestCase):
    """
    Tests for parent/child/page relationships between live fragments.
    """
    def testFragmentNesting(self):
        """
        Attaching fragments from the outside in should produce correct
        parent, child, and page references.
        """
        page = athena.LivePage()
        outer = TestFragment()
        inner = TestFragment()
        outer.setFragmentParent(page)
        inner.setFragmentParent(outer)
        self.assertEquals(page.liveFragmentChildren, [outer])
        self.assertEquals(outer.liveFragmentChildren, [inner])
        self.assertEquals(inner.liveFragmentChildren, [])
        self.assertEquals(inner.fragmentParent, outer)
        self.assertEquals(outer.fragmentParent, page)
        # Both fragments should resolve to the same page.
        self.assertEquals(inner.page, page)
        self.assertEquals(outer.page, page)

    def testInsideOutFragmentNesting(self):
        """
        Test that even if LiveFragments have their parents assigned from the
        inside out, parent/child relationships still end up correct.
        """
        inner = TestFragment()
        outer = TestFragment()
        page = athena.LivePage()
        # Attach the child before its parent is itself attached.
        inner.setFragmentParent(outer)
        outer.setFragmentParent(page)
        self.assertEquals(page.liveFragmentChildren, [outer])
        self.assertEquals(outer.fragmentParent, page)
        self.assertEquals(outer.page, page)
        self.assertEquals(outer.liveFragmentChildren, [inner])
        self.assertEquals(inner.fragmentParent, outer)
        self.assertEquals(inner.page, page)
class Tracebacks(unittest.TestCase):
    """
    Tests for parsing JavaScript stack traces sent by the client.
    """
    # (function, file, line) triples as they would appear in a client-side
    # stack trace, outermost frame first.
    frames = (('Error()', '', 0),
              ('someFunction()', 'http://somesite.com:8080/someFile', 42),
              ('anotherFunction([object Object])', 'http://user:pass@somesite.com:8080/someOtherFile', 69))
    stack = '\n'.join(['%s@%s:%d' % frame for frame in frames])
    exc = {u'name': 'SomeError',
           u'message': 'An error occurred.',
           u'stack': stack}

    def testStackParsing(self):
        # parseStack yields frames innermost-first, i.e. reversed relative
        # to self.frames.
        parsed = athena.parseStack(self.stack)
        for expected, actual in izip(self.frames[::-1], parsed):
            self.assertEquals(actual, expected)

    def testStackLengthAndOrder(self):
        failure = athena.getJSFailure(self.exc, {})
        self.assertEqual(len(failure.frames), len(self.frames))
        # The failure's first frame corresponds to the innermost source frame.
        self.assertEqual(failure.frames[0][0], self.frames[-1][0])
class _DelayedCall(object):
def __init__(self, container, element):
self.container = container
self.element = element
def cancel(self):
self.container.remove(self.element)
def mappend(transport):
    """
    Create an output callable which records delivered message batches on
    C{transport}.

    @param transport: a list to which each delivered batch of messages is
        appended.  A copy of the batch is stored, so later mutation of the
        caller's list is not reflected in C{transport}.

    @return: a one-argument callable accepting an C{(ack, messages)} pair,
        suitable for use as an output with
        L{athena.ReliableMessageDelivery.addOutput}.
    """
    def send(basket):
        # Tuple parameters -- def send((ack, messages)) -- were removed in
        # Python 3 (PEP 3113); unpack explicitly so this works on both.
        ack, messages = basket
        transport.append(messages[:])
    return send
class Transport(unittest.TestCase):
    """
    Test the various states and events which can occur that are related to the
    server's ability to convey a message to the client.
    This includes things such as the receipt of a new request or the depletion
    of an existing request.
    """
    # Distinct timeout values let the tests tell which timer was scheduled.
    theMessage = "Immediately Send This Message"
    connectTimeout = 1
    transportlessTimeout = 2
    idleTimeout = 3
    clientID = 'FAKE ATHENA PAGE'
    def liveTransportMessageReceived(self, ctx, outgoingMessage):
        """
        The test case itself stands in for the live page; the deliverer
        calls this to hand over each message received from the client.
        """
        self.outgoingMessages.append((ctx, outgoingMessage))
    def setUp(self):
        self.transport = []
        self.scheduled = []
        self.events = []
        self.outgoingMessages = []
        self.rdm = athena.ReliableMessageDelivery(
            self,
            connectTimeout=self.connectTimeout,
            transportlessTimeout=self.transportlessTimeout,
            idleTimeout=self.idleTimeout,
            connectionLost=lambda reason: self.events.append(reason),
            scheduler=self._schedule)
    def _schedule(self, n, f, *a, **kw):
        """
        Deterministic, rigidly controlled stand-in for reactor.callLater().
        """
        t = (n, f, a, kw)
        self.scheduled.append(t)
        return _DelayedCall(self.scheduled, t)
    def testSendMessageImmediately(self):
        """
        Test that if there is an output channel for messages, trying to send a
        message immediately does so, consuming the output channel.
        """
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.addMessage(self.theMessage)
        self.assertEquals(self.transport, [[(0, self.theMessage)]])
        # No output remains, so the second message must not be delivered yet.
        self.rdm.addMessage(self.theMessage)
        self.assertEquals(self.transport, [[(0, self.theMessage)]])
    def testSendMessageQueued(self):
        """
        Test that if there is no output channel when a message is sent, it will
        be sent once an output channel becomes available.
        """
        self.rdm.addMessage(self.theMessage)
        self.rdm.addOutput(mappend(self.transport))
        self.assertEquals(self.transport, [[(0, self.theMessage)]])
    def testMultipleQueuedMessages(self):
        """
        Test that if there are several messages queued they are all sent at
        once when an output channel becomes available.
        """
        self.rdm.addMessage(self.theMessage)
        self.rdm.addMessage(self.theMessage.encode('hex'))
        self.rdm.addOutput(mappend(self.transport))
        # Messages are sequence-numbered in the order they were queued.
        self.assertEquals(self.transport, [[(0, self.theMessage), (1, self.theMessage.encode('hex'))]])
    def testMultipleQueuedOutputs(self):
        """
        Test that if there are several output channels available, each message
        only consumes the first of them.
        """
        secondTransport = []
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.addOutput(mappend(secondTransport))
        self.rdm.addMessage(self.theMessage)
        self.assertEquals(self.transport, [[(0, self.theMessage)]])
        self.assertEquals(secondTransport, [])
    def testMessageRedelivery(self):
        """
        Test that outputs added while there are unacknowledged messages result
        in re-transmits of those messages.
        """
        secondMessage = self.theMessage + '-2'
        secondTransport = []
        thirdTransport = []
        fourthTransport = []
        self.rdm.addMessage(self.theMessage)
        self.rdm.addMessage(secondMessage)
        self.rdm.addOutput(mappend(self.transport))
        self.assertEquals(self.transport, [[(0, self.theMessage), (1, secondMessage)]])
        # Unacknowledged messages are re-sent on the new output.
        self.rdm.addOutput(mappend(secondTransport))
        self.assertEquals(secondTransport, [[(0, self.theMessage), (1, secondMessage)]])
        # Acknowledging sequence number 0 drops it from redelivery.
        self.rdm.basketCaseReceived(None, [0, []])
        self.rdm.addOutput(mappend(thirdTransport))
        self.assertEquals(thirdTransport, [[(1, secondMessage)]])
        # Acknowledging everything leaves nothing to redeliver.
        self.rdm.basketCaseReceived(None, [1, []])
        self.rdm.addOutput(mappend(fourthTransport))
        self.assertEquals(fourthTransport, [])
    def testConnectTimeout(self):
        """
        Test that a connection timeout is set up which, if allowed to expire,
        will cause notification of the fact that the connection was never
        established.
        """
        n, f, a, kw = self.scheduled.pop()
        self.failIf(self.scheduled, "Too many tasks scheduled.")
        self.assertEquals(n, self.connectTimeout)
        # Fire the timeout by hand; the deliverer must report ConnectFailed.
        f(*a, **kw)
        self.assertEquals(len(self.events), 1)
        self.events[0].trap(athena.ConnectFailed)
        self.failIf(self.scheduled, "Unexpected task scheduled after connect failed.")
    def testConnectSucceeds(self):
        """
        Test that the connection timeout is cancelled when an output channel is
        added.
        """
        self.failUnless(self.scheduled, "No connect timeout scheduled.") # Sanity check
        self.rdm.addOutput(mappend(self.transport))
        # The connect timeout is replaced by an idle timeout for the output.
        n, f, a, kw = self.scheduled.pop()
        self.assertEquals(n, self.idleTimeout)
        self.failIf(self.scheduled, "Output channel added but there is still a task pending.")
        self.assertEquals(self.transport, [], "Received unexpected output.")
    def testOutputConsumedMessageTimeout(self):
        """
        Test that a timeout is set up when the last output is used and that if
        it expires, notification of the connection being lost is delivered. In
        particular, test that if there is a message waiting and a new output is
        added, the timeout behavior is correct.
        """
        self.rdm.addMessage(self.theMessage)
        self.rdm.addOutput(mappend(self.transport))
        n, f, a, kw = self.scheduled.pop()
        self.failIf(self.scheduled, "Too many tasks scheduled.")
        self.assertEquals(n, self.transportlessTimeout)
        f(*a, **kw)
        self.assertEquals(len(self.events), 1)
        self.events[0].trap(athena.ConnectionLost)
        self.failIf(self.scheduled, "Unexpected task scheduled after connection lost.")
    def testMessageConsumedOutputTimeout(self):
        """
        Very similar to testOutputConsumedMessageTimeout, but test the case
        where there is an existing output and a message is added, causing it
        to be used.
        """
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.addMessage(self.theMessage)
        n, f, a, kw = self.scheduled.pop()
        self.failIf(self.scheduled, "Too many tasks scheduled.")
        self.assertEquals(n, self.transportlessTimeout)
        f(*a, **kw)
        self.assertEquals(len(self.events), 1)
        self.events[0].trap(athena.ConnectionLost)
        self.failIf(self.scheduled, "Unexpected task scheduled after connection lost.")
    def testOutputConnectionAdded(self):
        """
        Test that the timeout created when the last output is used is cancelled
        when a new output is added.
        """
        self.rdm.addMessage(self.theMessage)
        self.rdm.addOutput(mappend(self.transport))
        self.assertEquals(len(self.scheduled), 1, "Transportless timeout not created.")
        n, f, a, kw = self.scheduled[0]
        self.assertEquals(n, self.transportlessTimeout, "Unexpected task still scheduled after output added.")
        # Receiving a basket case counts as a new output arriving.
        self.rdm.basketCaseReceived(None, [0, []])
        n, f, a, kw = self.scheduled.pop()
        self.assertEquals(n, self.idleTimeout)
        self.failIf(self.scheduled, "Unexpected task still scheduled after output added.")
        self.failIf(self.events, "Unexpectedly received some kind of event.")
    def testIdleOutputTimeout(self):
        """
        Test that outputs are discarded with an empty message list if they are
        not used within the specified interval.
        """
        self.rdm.addOutput(mappend(self.transport))
        n, f, a, kw = self.scheduled.pop()
        self.assertEquals(n, self.idleTimeout)
        self.failIf(self.scheduled, "Unexpected tasks still scheduled in addition to idle timeout task.")
        f(*a, **kw)
        # The idle output is flushed with an empty batch.
        self.assertEquals(self.transport, [[]])
    def testIdleTimeoutStartsOutputlessTimeout(self):
        """
        Test that if the last output is removed due to idleness that another
        timeout for the lack of any outputs is started.
        """
        self.rdm.addOutput(mappend(self.transport))
        n, f, a, kw = self.scheduled.pop()
        self.assertEquals(n, self.idleTimeout)
        f(*a, **kw)
        self.failIf(self.events, "Unexpectedly received some events.")
        n, f, a, kw = self.scheduled.pop()
        self.assertEquals(n, self.transportlessTimeout)
        f(*a, **kw)
        self.assertEquals(len(self.events), 1)
        self.events[0].trap(athena.ConnectionLost)
    def testPreConnectPause(self):
        """
        Test that no outputs are used while the reliable message
        deliverer is paused before the first connection is made.
        """
        self.rdm.pause()
        self.rdm.addOutput(mappend(self.transport))
        # The connection timeout should have been cancelled and
        # replaced with an idle timeout.
        self.assertEquals(len(self.scheduled), 1)
        n, f, a, kw = self.scheduled[0]
        self.assertEquals(n, self.idleTimeout)
        self.rdm.addMessage(self.theMessage)
        self.assertEquals(self.transport, [])
        # Unpausing flushes the queued message to the waiting output.
        self.rdm.unpause()
        self.assertEquals(self.transport, [[(0, self.theMessage)]])
    def testTransportlessPause(self):
        """
        Test that if the message deliverer is paused while it has no
        transports, it remains so and does not use an output which is
        added to it.
        """
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.pause()
        self.rdm.addMessage(self.theMessage)
        self.assertEquals(self.transport, [])
        self.rdm.unpause()
        self.assertEquals(self.transport, [[(0, self.theMessage)]])
    def testMessagelessPause(self):
        """
        Test that if the message deliverer is paused while it has no
        messages, it remains so and does not use an output when a
        message is added.
        """
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.pause()
        self.rdm.addMessage(self.theMessage)
        self.assertEquals(self.transport, [])
        self.rdm.unpause()
        self.assertEquals(self.transport, [[(0, self.theMessage)]])
    def testStaleMessages(self):
        """
        Test that if an older basket case with fewer messages in it arrives
        after a more recent, complete basket case is processed, that it is
        properly disregarded.
        """
        # First delivery: all three messages are passed through to the page.
        self.rdm.basketCaseReceived(
            None,
            [-1, [[0, self.theMessage],
                  [1, self.theMessage + "-1"],
                  [2, self.theMessage + "-2"]]])
        self.assertEquals(
            self.outgoingMessages,
            [(None, self.theMessage),
             (None, self.theMessage + "-1"),
             (None, self.theMessage + "-2")])
        self.outgoingMessages = []
        # Re-delivery of already-seen sequence numbers must be ignored.
        self.rdm.basketCaseReceived(
            None,
            [-1, [[1, self.theMessage + "-1"]]])
        self.assertEquals(
            self.outgoingMessages,
            [])
        self.rdm.basketCaseReceived(
            None,
            [-1, [[2, self.theMessage + "-2"]]])
        self.assertEquals(
            self.outgoingMessages,
            [])
    def testClosing(self):
        """
        Test that closing a reliable message deliverer causes all of outs
        remaining outputs to be used up with a close message and that any
        future outputs added to it are immediately used in a similar
        manner.
        """
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.close()
        self.assertEquals(self.transport, [[(0, (athena.CLOSE, []))], [(0, (athena.CLOSE, []))]])
        self.transport = []
        # Outputs added after close() are consumed immediately.
        self.rdm.addOutput(mappend(self.transport))
        self.assertEquals(self.transport, [[(0, (athena.CLOSE, []))]])
    def testCloseBeforeConnect(self):
        """
        Test that closing the reliable message deliverer before a connection is
        ever established properly cleans up any timeouts.
        """
        self.rdm.close()
        self.failIf(self.scheduled, "Expected no scheduled calls.")
    def test_closeExcessOnReceived(self):
        """
        Test that any excess idle transports are closed when a message is received.
        """
        secondTransport = []
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.addOutput(mappend(secondTransport))
        d = self.rdm.basketCaseReceived(None, [0, []])
        self.assertEquals(self.transport, [[]])
        self.assertEquals(secondTransport, [[]])
        # The Deferred for the newly received request remains outstanding.
        self.failIf(d.called)
    def test_closeExcessOnUnpaused(self):
        """
        Test that any excess idle transports are closed when the message
        deliverer is unpaused.
        """
        secondTransport = []
        # Pause twice; each unpause() undoes exactly one pause().
        self.rdm.pause()
        self.rdm.pause()
        self.rdm.addOutput(mappend(self.transport))
        self.rdm.addOutput(mappend(secondTransport))
        self.rdm.unpause()
        self.assertEqual(self.transport, [])
        self.assertEqual(secondTransport, [])
        self.rdm.unpause()
        self.assertEqual(self.transport, [[]])
        self.assertEqual(secondTransport, [])
    def test_specialUnloadSequence(self):
        """
        When the page is unloading in the brosser, it needs to pre-empt its own
        message queue and send a special identifier to express that this is a
        "last gasp" message and must be dealt with immediately.
        """
        self.rdm.basketCaseReceived(None, [0, [[athena.UNLOAD, [athena.CLOSE, []]]]])
        self.assertEqual(self.outgoingMessages, [(None, [athena.CLOSE, []])])
class LiveMixinTestsMixin(CSSModuleTestMixin):
    """
    Test-method defining mixin class for L{LiveElement} and L{LiveFragment}
    testing.
    @ivar elementFactory: No-argument callable which returns an object against
        which tests will be run.
    @ivar liveGlueRenderer: The name of the live glue renderer on objects
        returned from L{elementFactory}.
    """
    liveGlueRenderer = None
    def elementFactory(self):
        raise NotImplementedError("%s did not implement elementFactory" % (self,))
    def test_localDetach(self):
        """
        Verify that L{_athenaDetachServer} removes the element from its parent
        and disassociates it from the page locally.
        """
        page = athena.LivePage()
        element = self.elementFactory()
        element.setFragmentParent(page)
        element._athenaDetachServer()
        self.assertNotIn(element, page.liveFragmentChildren)
        self.assertIdentical(element.fragmentParent, None)
        self.assertIdentical(element.page, None)
    def test_localDetachWithChildren(self):
        """
        Similar to L{test_localDetach}, but cover the case where the removed
        element has a child of its own and verify that that child is also
        detached.
        """
        page = athena.LivePage()
        element = self.elementFactory()
        element.setFragmentParent(page)
        child = self.elementFactory()
        child.setFragmentParent(element)
        element._athenaDetachServer()
        self.assertNotIn(element, page.liveFragmentChildren)
        self.assertIdentical(element.fragmentParent, None)
        self.assertIdentical(element.page, None)
        # Detachment must cascade to the child as well.
        self.assertNotIn(child, element.liveFragmentChildren)
        self.assertIdentical(child.fragmentParent, None)
        self.assertIdentical(child.page, None)
    def test_localDetachOrphaned(self):
        """
        L{_athenaDetachServer} should raise L{athena.OrphanedFragment} if the
        element is not attached.
        """
        element = self.elementFactory()
        # Never attached: detaching must fail.
        self.assertRaises(athena.OrphanedFragment, element._athenaDetachServer)
        page = athena.LivePage()
        element.setFragmentParent(page)
        element._athenaDetachServer()
        # Already detached: detaching again must also fail.
        self.assertRaises(athena.OrphanedFragment, element._athenaDetachServer)
    def test_detach(self):
        """
        Verify that L{detach} informs the client of the event and returns a
        Deferred which fires when the client acknowledges this.
        """
        page = athena.LivePage()
        element = self.elementFactory()
        element.setFragmentParent(page)
        calls = []
        # Stub out callRemote so we can observe the client-side call and
        # control when its Deferred fires.
        def callRemote(methodName):
            d = Deferred()
            calls.append((methodName, d))
            return d
        element.callRemote = callRemote
        d = element.detach()
        self.assertEqual(len(calls), 1)
        self.assertEqual(calls[0][0], '_athenaDetachClient')
        # Simulate the client acknowledging the detach.
        calls[0][1].callback(None)
        self.assertNotIn(element, page.liveFragmentChildren)
        self.assertIdentical(element.fragmentParent, None)
        self.assertIdentical(element.page, None)
    def test_detachWithChildren(self):
        """
        Similar to L{test_detach}, but cover the case where the removed element
        has a child of its own and verify that that child is also detached.
        """
        page = athena.LivePage()
        element = self.elementFactory()
        element.setFragmentParent(page)
        child = self.elementFactory()
        child.setFragmentParent(element)
        calls = []
        def callRemote(methodName):
            d = Deferred()
            calls.append((methodName, d))
            return d
        element.callRemote = callRemote
        child.callRemote = callRemote
        d = element.detach()
        # Only the detached element itself notifies the client.
        self.assertEqual(len(calls), 1)
        self.assertEqual(calls[0][0], '_athenaDetachClient')
        calls[0][1].callback(None)
        self.assertNotIn(element, page.liveFragmentChildren)
        self.assertIdentical(element.fragmentParent, None)
        self.assertIdentical(element.page, None)
        self.assertNotIn(child, element.liveFragmentChildren)
        self.assertIdentical(child.fragmentParent, None)
        self.assertIdentical(child.page, None)
    def test_localDetachCallback(self):
        """
        Verify that C{detached} is called when C{_athenaDetachServer} is
        called.
        """
        page = athena.LivePage()
        element = self.elementFactory()
        element.setFragmentParent(page)
        detachCall = []
        def detached():
            # Record what state the element is in when the hook runs.
            detachCall.append((element.fragmentParent, element.page))
        element.detached = detached
        element._athenaDetachServer()
        # The hook must run after the element is fully disassociated.
        self.assertEqual(detachCall, [(None, None)])
    def test_detachCallback(self):
        """
        Verify that C{detached} is called C{detach} is called locally.
        """
        page = athena.LivePage()
        element = self.elementFactory()
        element.setFragmentParent(page)
        detachCall = []
        def detached():
            detachCall.append((element.fragmentParent, element.page))
        element.detached = detached
        calls = []
        def callRemote(methodName):
            d = Deferred()
            calls.append(d)
            return d
        element.callRemote = callRemote
        d = element.detach()
        # No callback until the client acknowledges.
        self.assertEqual(detachCall, [])
        calls[0].callback(None)
        self.assertEqual(detachCall, [(None, None)])
    def test_glueIncludesStylesheets(self):
        """
        Our element's glue should include inline stylesheet references.
        """
        element = self.elementFactory()
        element.cssModule = u'TestCSSModuleDependencies.Dependor'
        element.docFactory = loaders.stan(
            tags.div(render=tags.directive(self.liveGlueRenderer)))
        page = ElementRenderingLivePage(element)
        page.cssModules = self._makeCSSRegistry()
        D = renderLivePage(page)
        def cbRendered(result):
            # The dependee's stylesheet must be listed before the dependor's.
            expected = flat.flatten(
                page.getStylesheetStan(
                    [page.getCSSModuleURL(u'TestCSSModuleDependencies.Dependee'),
                     page.getCSSModuleURL(u'TestCSSModuleDependencies.Dependor')]))
            self.assertIn(expected, result)
        D.addCallback(cbRendered)
        return D
    def test_glueIncludesStylesheetsOnce(self):
        """
        Our element's glue shouldn't include redundant stylesheet references.
        """
        element = self.elementFactory()
        element.cssModule = u'TestCSSModuleDependencies.Dependor'
        element.docFactory = loaders.stan(
            tags.div(render=tags.directive(self.liveGlueRenderer)))
        page = ElementRenderingLivePage(element)
        # Render the same element twice on one page; the stylesheets should
        # still appear only once.
        page.docFactory = loaders.stan(tags.invisible[
                tags.invisible(render=tags.directive('liveglue')),
                tags.invisible(render=tags.directive('element')),
                tags.invisible(render=tags.directive('element'))])
        page.cssModules = self._makeCSSRegistry()
        D = renderLivePage(page)
        def cbRendered(result):
            expected = flat.flatten(
                page.getStylesheetStan(
                    [page.getCSSModuleURL(u'TestCSSModuleDependencies.Dependee'),
                     page.getCSSModuleURL(u'TestCSSModuleDependencies.Dependor')]))
            self.assertIn(expected, result)
        D.addCallback(cbRendered)
        return D
class LiveElementTests(LiveMixinTestsMixin, unittest.TestCase):
    """
    Run the shared L{LiveMixinTestsMixin} tests against
    L{nevow.athena.LiveElement}.
    """
    elementFactory = athena.LiveElement
    liveGlueRenderer = 'liveElement'
class LiveFragmentTests(LiveMixinTestsMixin, unittest.TestCase):
    """
    Run the shared L{LiveMixinTestsMixin} tests against
    L{nevow.athena.LiveFragment}.
    """
    elementFactory = athena.LiveFragment
    liveGlueRenderer = 'liveFragment'
class DummyLiveElement(LiveElement):
    """
    A "counting" Athena element used for tests involving the plugin system
    (e.g., supplied as the argument to the "--element" option).
    """
    # Class-wide counter, bumped once per instantiation.
    classCounter = 0

    def __init__(self):
        """
        Create a L{DummyLiveElement} with a 'counter' attribute set to a
        unique, incremented ID, used for comparing instances.
        """
        LiveElement.__init__(self)
        cls = DummyLiveElement
        cls.classCounter += 1
        self.counter = cls.classCounter
class LivePageTests(unittest.TestCase, CSSModuleTestMixin):
    """
    Tests for L{nevow.athena.LivePage}
    """
    def setUp(self):
        """
        Create and remember a L{LivePage} instance.
        """
        self.page = athena.LivePage()
    def tearDown(self):
        """
        Shut this test's L{LivePage} timers down, if the test started them up.
        """
        # _messageDeliverer only exists once the page has gone live.
        if hasattr(self.page, '_messageDeliverer'):
            self.page._messageDeliverer.close()
    def test_bootstrapCall(self):
        """
        L{LivePage.bootstrapCall} should generate a JSON-serialized string for
        calling a single JavaScript function.
        """
        bc = self.page._bootstrapCall(
            "SomeModule.someMethod", [u"one", 2, {u"three": 4.1}])
        self.assertEqual(
            bc, 'SomeModule.someMethod("one", 2, {"three":4.1});')
    def test_pageJsClassDependencies(self):
        """
        L{LivePage.render_liveglue} should include modules that the
        L{LivePage}'s jsClass depends on.
        """
        self.page.jsClass = u'PythonTestSupport.Dependor.PageTest'
        freq = FakeRequest()
        self.page._becomeLive(url.URL.fromRequest(freq))
        ctx = WovenContext(tag=tags.div())
        ctx.remember(freq, IRequest)
        # render_liveglue returns the tag it was given, mutated in place.
        self.assertEqual(self.page.render_liveglue(ctx, None), ctx.tag)
        expectDependor = flat.flatten(self.page.getImportStan(u'PythonTestSupport.Dependor'))
        expectDependee = flat.flatten(self.page.getImportStan(u'PythonTestSupport.Dependee'))
        result = flat.flatten(ctx.tag, ctx)
        self.assertIn(expectDependor, result)
        self.assertIn(expectDependee, result)
    def test_pageCSSModuleDependencies(self):
        """
        L{athena.LivePage.render_liveglue} should include CSS modules that
        the top-level C{cssModule} depends on.
        """
        self.page.cssModule = u'TestCSSModuleDependencies.Dependor'
        self.page.cssModules = self._makeCSSRegistry()
        self.page._becomeLive(url.URL())
        ctx = WovenContext(tag=tags.div())
        ctx.remember(FakeRequest(), IRequest)
        self.assertEqual(self.page.render_liveglue(ctx, None), ctx.tag)
        # Dependee must be listed before Dependor (dependency order).
        expected = flat.flatten(
            self.page.getStylesheetStan(
                [self.page.getCSSModuleURL(u'TestCSSModuleDependencies.Dependee'),
                 self.page.getCSSModuleURL(u'TestCSSModuleDependencies.Dependor')]))
        self.assertIn(expected, flat.flatten(ctx.tag, ctx))
    def test_bootstraps(self):
        """
        L{LivePage._bootstraps} should return a list of 2-tuples of
        (initialization method, arguments) of methods to call in JavaScript.
        Specifically, it should invoke Divmod.bootstrap with the page's own
        URL, and Nevow.Athena.bootstrap with the name of the client-side Page
        class to instantiate and the URL to instantiate it with.
        """
        # A URL segment containing both quote characters, to exercise quoting.
        SEG = "'" + '"'
        URI = "http://localhost/" + SEG
        req = FakeRequest(uri='/' + SEG, currentSegments=[SEG])
        ctx = WovenContext()
        ctx.remember(req, IRequest)
        self.page.clientID = 'asdf'
        self.assertEqual(
            self.page._bootstraps(ctx),
            [("Divmod.bootstrap",
              # Nevow's URL quoting rules are weird, but this is the URL
              # flattener's fault, not mine.  Adjust to taste if that changes
              # (it won't) -glyph
              [u"http://localhost/'%22"]),
             ("Nevow.Athena.bootstrap",
              [u'Nevow.Athena.PageWidget', u'asdf'])])
    def test_renderReconnect(self):
        """
        L{LivePage.renderHTTP} should render a JSON-encoded version of its
        clientID rather than a rendered version of its template when provided
        with a special __athena_reconnect__ parameter.
        """
        req = FakeRequest(args={athena.ATHENA_RECONNECT: ["1"]})
        ctx = WovenContext()
        ctx.remember(req, IRequest)
        string = self.page.renderHTTP(ctx)
        jsonifiedID = '"%s"' % (self.page.clientID,)
        self.assertEqual(string, jsonifiedID)
    def test_cssModules(self):
        """
        L{athena.LivePage.cssModules} should default to
        L{athena._theCSSRegistry}.
        """
        self.assertIdentical(
            athena.LivePage().cssModules, athena._theCSSRegistry)
    def test_cssmoduleChild(self):
        """
        L{athena.LivePage}'s C{cssmodule} child should return a correctly
        initialized L{athena.MappingResource}.
        """
        theCSSMapping = {}
        # Stand-in registry exposing only the attribute locateChild reads.
        class MyCSSModules:
            mapping = theCSSMapping
        page = athena.LivePage()
        page.cssModules = MyCSSModules()
        (res, segments) = page.locateChild(None, ('cssmodule',))
        self.assertTrue(isinstance(res, athena.MappingResource))
        self.assertIdentical(res.mapping, theCSSMapping)
    def test_cssModuleRoot(self):
        """
        L{athena.LivePage}'s C{cssModuleRoot} argument should be observed by
        L{athena.LivePage.getCSSModuleURL}.
        """
        theCSSModuleRoot = url.URL.fromString('/test_cssModuleRoot')
        page = athena.LivePage(
            cssModuleRoot=theCSSModuleRoot)
        self.assertEqual(
            page.getCSSModuleURL(u'X.Y'),
            theCSSModuleRoot.child('X.Y'))
class WidgetSubcommandTests(unittest.TestCase):
    """
    Test the twistd subcommand which runs a server to render a single Athena
    widget.
    """
    # NOTE: the deprecated trial/unittest aliases (failUnless, assertEquals)
    # previously used here have been normalized to assertTrue/assertEqual,
    # matching the rest of this class.
    def test_portOption(self):
        """
        Verify that the --port option adds an integer to the Options' port key.
        """
        options = widgetServiceMaker.options()
        options['element'] = DummyLiveElement()
        options.parseOptions(['--port', '3874'])
        self.assertEqual(options['port'], 3874)
        options.parseOptions(['--port', '65535'])
        self.assertEqual(options['port'], 65535)
    def test_invalidPortOption(self):
        """
        Verify that non-integer and out-of-range port numbers are rejected.
        """
        options = widgetServiceMaker.options()
        options['element'] = DummyLiveElement()
        self.assertRaises(UsageError, options.parseOptions, ['--port', 'hello world'])
        self.assertRaises(UsageError, options.parseOptions, ['--port', '-7'])
        self.assertRaises(UsageError, options.parseOptions, ['--port', '70000'])
        self.assertRaises(UsageError, options.parseOptions, ['--port', '65536'])
    def test_widgetOption(self):
        """
        Verify that the --element option adds a class to the Options' element
        key.
        """
        options = widgetServiceMaker.options()
        options.parseOptions(['--element', qual(DummyLiveElement)])
        self.assertEqual(options['element'], DummyLiveElement)
    def test_invalidWidgetOption(self):
        """
        Verify that specifying a non-existent class is rejected.
        """
        options = widgetServiceMaker.options()
        self.assertRaises(
            UsageError,
            options.parseOptions, ['--element', qual(DummyLiveElement) + 'xxx'])
        self.assertRaises(
            UsageError,
            options.parseOptions, ['--element', '-'])
    def test_invalidMissingWidget(self):
        """
        Verify that a missing widget class is rejected.
        """
        options = widgetServiceMaker.options()
        self.assertRaises(UsageError, options.parseOptions, [])
    def test_defaultPort(self):
        """
        Verify that the default port number is 8080.
        """
        options = widgetServiceMaker.options()
        options['element'] = DummyLiveElement
        options.parseOptions([])
        self.assertEqual(options['port'], 8080)
    def test_providesInterfaces(self):
        """
        Verify that the necessary interfaces for the object to be found as a
        twistd subcommand plugin are provided.
        """
        self.assertTrue(IPlugin.providedBy(widgetServiceMaker))
        self.assertTrue(IServiceMaker.providedBy(widgetServiceMaker))
    def test_makeService(self):
        """
        Verify that the L{IService} creation function returns a service which
        will run a Nevow site.
        """
        service = widgetServiceMaker.makeService({
            'element': DummyLiveElement,
            'port': 8080,
        })
        self.assertTrue(isinstance(service, TCPServer))
        self.assertEqual(service.args[0], 8080)
        self.assertTrue(isinstance(service.args[1], NevowSite))
        self.assertTrue(isinstance(service.args[1].resource, WidgetPluginRoot))
        self.assertTrue(isinstance(service.args[1].resource.elementFactory(),
                                   DummyLiveElement))
    def test_livePageRendering(self):
        """
        Verify that an L{ElementRenderingLivePage} instantiated with a
        particular LiveElement properly renders that element.
        """
        element = DummyLiveElement()
        element.jsClass = u'Dummy.ClassName'
        element.docFactory = stan('the element')
        page = ElementRenderingLivePage(element)
        renderDeferred = renderLivePage(page)
        def cbRendered(result):
            document = parseString(result)
            # The page title should be the element class's name.
            titles = document.getElementsByTagName('title')
            self.assertEqual(len(titles), 1)
            self.assertEqual(titles[0].firstChild.nodeValue, DummyLiveElement.__name__)
            # The element's own markup should appear exactly once.
            divs = document.getElementsByTagName('div')
            self.assertEqual(len(divs), 1)
            self.assertEqual(divs[0].firstChild.nodeValue, 'the element')
        renderDeferred.addCallback(cbRendered)
        return renderDeferred
    def test_multipleRendersMultipleWidgets(self):
        """
        Each hit to the top-level page created by makeService should result in a
        new element being created by the specified element factory, so that it
        can be rendered multiple times.
        """
        w = WidgetPluginRoot(DummyLiveElement)
        page1, seg = w.locateChild(None, [''])
        page2, seg = w.locateChild(None, [''])
        # Make sure the pages aren't the same.
        self.assertTrue(isinstance(page1, ElementRenderingLivePage))
        self.assertTrue(isinstance(page2, ElementRenderingLivePage))
        self.assertNotIdentical(page1, page2)
        # Make sure the elements aren't the same.
        self.assertNotEqual(page1.element.counter, page2.element.counter)
    def test_transportHookup(self):
        """
        When a LivePage is rendered, it needs to hook up to its transport,
        which is a special resource (associated with the particular LivePage
        object in memory). This hookup is done by some special logic in
        LivePage.locateChild, among other places. Let's make sure that we can
        look up the live page by its client ID with the default transport root.
        Athena's default transport root is whatever URL the page is rendered
        at. In the case of this plugin, that will usually be
        http://localhost:8080/
        """
        w = WidgetPluginRoot(DummyLiveElement)
        page1, seg = w.locateChild(None, [''])
        page1.element.docFactory = stan('the element')
        page1.element.jsClass = u'Dummy.ClassName'
        def cbCheckPageByClientID(result):
            # After rendering, the live page must be reachable again through
            # its clientID child segment.
            req = FakeRequest()
            ctx = WovenContext()
            ctx.remember(req, IRequest)
            page1prime, seg = w.locateChild(ctx, [page1.clientID])
            self.assertIdentical(page1prime, page1)
        renderDeferred = renderLivePage(page1)
        renderDeferred.addCallback(cbCheckPageByClientID)
        return renderDeferred
| 33.779501 | 110 | 0.626812 |
264b7f89559d3e950d8bb460bcb8a65baf009ab3 | 27,293 | py | Python | tests/models/luke/test_modeling_luke.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | 5 | 2020-09-01T09:15:48.000Z | 2020-09-15T03:25:05.000Z | tests/models/luke/test_modeling_luke.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | null | null | null | tests/models/luke/test_modeling_luke.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | 3 | 2020-08-20T04:46:25.000Z | 2020-10-14T08:39:13.000Z | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LUKE model. """
import unittest
from transformers import LukeConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeModel,
LukeTokenizer,
)
from transformers.models.luke.modeling_luke import LUKE_PRETRAINED_MODEL_ARCHIVE_LIST
class LukeModelTester:
    """
    Builds small LUKE configurations and synthetic inputs, and runs
    shape/consistency checks for each task head.  Driven by LukeModelTest.

    Fix: create_and_check_for_entity_pair_classification previously
    instantiated LukeForEntityClassification, so the pair-classification
    head was never actually exercised; it now builds
    LukeForEntityPairClassification.
    """
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        entity_length=3,
        mention_length=5,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_entity_ids=True,
        use_entity_attention_mask=True,
        use_entity_token_type_ids=True,
        use_entity_position_ids=True,
        use_labels=True,
        vocab_size=99,
        entity_vocab_size=10,
        entity_emb_size=6,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_entity_classification_labels=9,
        num_entity_pair_classification_labels=6,
        num_entity_span_classification_labels=4,
        use_entity_aware_attention=True,
        scope=None,
    ):
        """Store the configuration knobs; `parent` is the unittest.TestCase."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.entity_length = entity_length
        self.mention_length = mention_length
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_entity_ids = use_entity_ids
        self.use_entity_attention_mask = use_entity_attention_mask
        self.use_entity_token_type_ids = use_entity_token_type_ids
        self.use_entity_position_ids = use_entity_position_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.entity_emb_size = entity_emb_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_entity_classification_labels = num_entity_classification_labels
        self.num_entity_pair_classification_labels = num_entity_pair_classification_labels
        self.num_entity_span_classification_labels = num_entity_span_classification_labels
        self.scope = scope
        self.use_entity_aware_attention = use_entity_aware_attention
        self.encoder_seq_length = seq_length
        self.key_length = seq_length
        self.num_hidden_states_types = 2  # hidden_states and entity_hidden_states
    def prepare_config_and_inputs(self):
        """Create a config plus random word/entity inputs and all label tensors."""
        # prepare words
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        # prepare entities
        entity_ids = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
        entity_attention_mask = None
        if self.use_entity_attention_mask:
            entity_attention_mask = random_attention_mask([self.batch_size, self.entity_length])
        entity_token_type_ids = None
        if self.use_token_type_ids:
            entity_token_type_ids = ids_tensor([self.batch_size, self.entity_length], self.type_vocab_size)
        entity_position_ids = None
        if self.use_entity_position_ids:
            entity_position_ids = ids_tensor(
                [self.batch_size, self.entity_length, self.mention_length], self.mention_length
            )
        sequence_labels = None
        labels = None
        entity_labels = None
        entity_classification_labels = None
        entity_pair_classification_labels = None
        entity_span_classification_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            entity_labels = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
            entity_classification_labels = ids_tensor([self.batch_size], self.num_entity_classification_labels)
            entity_pair_classification_labels = ids_tensor(
                [self.batch_size], self.num_entity_pair_classification_labels
            )
            entity_span_classification_labels = ids_tensor(
                [self.batch_size, self.entity_length], self.num_entity_span_classification_labels
            )
        config = self.get_config()
        return (
            config,
            input_ids,
            attention_mask,
            token_type_ids,
            entity_ids,
            entity_attention_mask,
            entity_token_type_ids,
            entity_position_ids,
            sequence_labels,
            labels,
            entity_labels,
            entity_classification_labels,
            entity_pair_classification_labels,
            entity_span_classification_labels,
        )
    def get_config(self):
        """Return a small LukeConfig built from this tester's attributes."""
        return LukeConfig(
            vocab_size=self.vocab_size,
            entity_vocab_size=self.entity_vocab_size,
            entity_emb_size=self.entity_emb_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_entity_aware_attention=self.use_entity_aware_attention,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        """Check base LukeModel output shapes, with and without entity inputs."""
        model = LukeModel(config=config)
        model.to(torch_device)
        model.eval()
        # test with words + entities
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(
            result.entity_last_hidden_state.shape, (self.batch_size, self.entity_length, self.hidden_size)
        )
        # test with words only
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        """Check LukeForMaskedLM logits shapes for word and entity vocabularies."""
        config.num_labels = self.num_entity_classification_labels
        model = LukeForMaskedLM(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=labels,
            entity_labels=entity_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        if entity_ids is not None:
            self.parent.assertEqual(
                result.entity_logits.shape, (self.batch_size, self.entity_length, self.entity_vocab_size)
            )
        else:
            # Word-only invocation: no entity logits are produced.
            self.parent.assertIsNone(result.entity_logits)
    def create_and_check_for_entity_classification(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        """Check LukeForEntityClassification logits shape."""
        config.num_labels = self.num_entity_classification_labels
        model = LukeForEntityClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=entity_classification_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_classification_labels))
    def create_and_check_for_entity_pair_classification(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        """Check LukeForEntityPairClassification logits shape."""
        config.num_labels = self.num_entity_pair_classification_labels
        # BUGFIX: was LukeForEntityClassification, which left the pair
        # classification head untested.
        model = LukeForEntityPairClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            labels=entity_pair_classification_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_pair_classification_labels))
    def create_and_check_for_entity_span_classification(
        self,
        config,
        input_ids,
        attention_mask,
        token_type_ids,
        entity_ids,
        entity_attention_mask,
        entity_token_type_ids,
        entity_position_ids,
        sequence_labels,
        labels,
        entity_labels,
        entity_classification_labels,
        entity_pair_classification_labels,
        entity_span_classification_labels,
    ):
        """Check LukeForEntitySpanClassification logits shape."""
        config.num_labels = self.num_entity_span_classification_labels
        model = LukeForEntitySpanClassification(config)
        model.to(torch_device)
        model.eval()
        # Span heads additionally need start/end positions for each entity.
        entity_start_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
        entity_end_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
        result = model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            entity_ids=entity_ids,
            entity_attention_mask=entity_attention_mask,
            entity_token_type_ids=entity_token_type_ids,
            entity_position_ids=entity_position_ids,
            entity_start_positions=entity_start_positions,
            entity_end_positions=entity_end_positions,
            labels=entity_span_classification_labels,
        )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.entity_length, self.num_entity_span_classification_labels)
        )
    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape
        expected by the shared ModelTesterMixin machinery (labels dropped)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
            token_type_ids,
            entity_ids,
            entity_attention_mask,
            entity_token_type_ids,
            entity_position_ids,
            sequence_labels,
            labels,
            entity_labels,
            entity_classification_labels,
            entity_pair_classification_labels,
            entity_span_classification_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
            "entity_ids": entity_ids,
            "entity_token_type_ids": entity_token_type_ids,
            "entity_attention_mask": entity_attention_mask,
            "entity_position_ids": entity_position_ids,
        }
        return config, inputs_dict
@require_torch
class LukeModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Common-model-test suite for LUKE, driven by L{LukeModelTester} (the
    helper defined above in this file).
    """
    all_model_classes = (
        (
            LukeModel,
            LukeForMaskedLM,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
        )
        if is_torch_available()
        else ()
    )
    # Feature flags consumed by ModelTesterMixin.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = True
    test_head_masking = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Augment the shared inputs with the extra tensors each LUKE head needs."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if model_class == LukeForEntitySpanClassification:
            # Span classification additionally requires start/end positions.
            inputs_dict["entity_start_positions"] = torch.zeros(
                (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device
            )
            inputs_dict["entity_end_positions"] = torch.ones(
                (self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device
            )
        if return_labels:
            # Label tensor shape differs per head.
            if model_class in (LukeForEntityClassification, LukeForEntityPairClassification):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class == LukeForEntitySpanClassification:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.entity_length),
                    dtype=torch.long,
                    device=torch_device,
                )
            elif model_class == LukeForMaskedLM:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["entity_labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.entity_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def setUp(self):
        """Create the model tester and config tester used by every test."""
        self.model_tester = LukeModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LukeConfig, hidden_size=37)
    def test_config(self):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def test_model(self):
        """Exercise the base model shape checks."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """Every published checkpoint should load into LukeModel."""
        for model_name in LUKE_PRETRAINED_MODEL_ARCHIVE_LIST:
            model = LukeModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_for_masked_lm(self):
        """Exercise the masked-LM head with full word + entity inputs."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_masked_lm_with_word_only(self):
        """Exercise the masked-LM head with the entity inputs all set to None."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        # Keep config + the three word tensors; null out everything entity-related.
        config_and_inputs = (*config_and_inputs[:4], *((None,) * len(config_and_inputs[4:])))
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_entity_classification(self):
        """Exercise the entity-classification head."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_entity_classification(*config_and_inputs)
    def test_for_entity_pair_classification(self):
        """Exercise the entity-pair-classification head."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_entity_pair_classification(*config_and_inputs)
    def test_for_entity_span_classification(self):
        """Exercise the entity-span-classification head."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_entity_span_classification(*config_and_inputs)
    def test_attention_outputs(self):
        """
        Attention outputs must cover the concatenated word+entity sequence,
        whether requested via kwargs or via the config.
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        entity_length = self.model_tester.entity_length
        # Keys span both word and entity positions.
        key_length = seq_length + entity_length
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = self.model_tester.num_hidden_states_types
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
            )
    def test_entity_hidden_states_output(self):
        """Entity hidden states must be emitted per layer with the right shape."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            entity_hidden_states = outputs.entity_hidden_states
            # One entry per layer plus the embedding output.
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(entity_hidden_states), expected_num_layers)
            entity_length = self.model_tester.entity_length
            self.assertListEqual(
                list(entity_hidden_states[0].shape[-2:]),
                [entity_length, self.model_tester.hidden_size],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_retain_grad_entity_hidden_states(self):
        """Gradients must flow back to the entity hidden states after retain_grad()."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0]
        entity_hidden_states = outputs.entity_hidden_states[0]
        entity_hidden_states.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(entity_hidden_states.grad)
@require_torch
class LukeModelIntegrationTests(unittest.TestCase):
    """
    Slow integration tests that load real LUKE checkpoints and compare a few
    output values against recorded references.
    """
    @slow
    def test_inference_base_model(self):
        """Forward pass of studio-ousia/luke-base against recorded slices."""
        model = LukeModel.from_pretrained("studio-ousia/luke-base").eval()
        model.to(torch_device)
        tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification")
        text = (
            "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped"
            " the new world number one avoid a humiliating second- round exit at Wimbledon ."
        )
        # Character span covering the entity mention in `text`.
        span = (39, 42)
        encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
        # move all values to device
        for key, value in encoding.items():
            encoding[key] = encoding[key].to(torch_device)
        outputs = model(**encoding)
        # Verify word hidden states
        expected_shape = torch.Size((1, 42, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
        # Verify entity hidden states
        expected_shape = torch.Size((1, 1, 768))
        self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_large_model(self):
        """Forward pass of studio-ousia/luke-large against recorded slices."""
        model = LukeModel.from_pretrained("studio-ousia/luke-large").eval()
        model.to(torch_device)
        tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large", task="entity_classification")
        text = (
            "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped"
            " the new world number one avoid a humiliating second- round exit at Wimbledon ."
        )
        # Character span covering the entity mention in `text`.
        span = (39, 42)
        encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
        # move all values to device
        for key, value in encoding.items():
            encoding[key] = encoding[key].to(torch_device)
        outputs = model(**encoding)
        # Verify word hidden states
        expected_shape = torch.Size((1, 42, 1024))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
        # Verify entity hidden states
        expected_shape = torch.Size((1, 1, 1024))
        self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 39.101719 | 118 | 0.670245 |
2e4b480115c829fcbf4f667202a38dbc2c0273df | 14,550 | py | Python | kubernetes_asyncio/client/models/v1beta1_storage_class.py | icamposrivera/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1beta1_storage_class.py | icamposrivera/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1beta1_storage_class.py | icamposrivera/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1beta1StorageClass(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE(review): generated model for the Kubernetes StorageClass resource.
    # Behavioural changes belong in the OpenAPI generator templates, not here.
    openapi_types = {
        'allow_volume_expansion': 'bool',
        'allowed_topologies': 'list[V1TopologySelectorTerm]',
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'mount_options': 'list[str]',
        'parameters': 'dict(str, str)',
        'provisioner': 'str',
        'reclaim_policy': 'str',
        'volume_binding_mode': 'str'
    }
    attribute_map = {
        'allow_volume_expansion': 'allowVolumeExpansion',
        'allowed_topologies': 'allowedTopologies',
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'mount_options': 'mountOptions',
        'parameters': 'parameters',
        'provisioner': 'provisioner',
        'reclaim_policy': 'reclaimPolicy',
        'volume_binding_mode': 'volumeBindingMode'
    }
    def __init__(self, allow_volume_expansion=None, allowed_topologies=None, api_version=None, kind=None, metadata=None, mount_options=None, parameters=None, provisioner=None, reclaim_policy=None, volume_binding_mode=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1StorageClass - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Private backing fields; public access goes through the properties.
        self._allow_volume_expansion = None
        self._allowed_topologies = None
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._mount_options = None
        self._parameters = None
        self._provisioner = None
        self._reclaim_policy = None
        self._volume_binding_mode = None
        self.discriminator = None
        # Optional fields are only assigned when explicitly provided.
        if allow_volume_expansion is not None:
            self.allow_volume_expansion = allow_volume_expansion
        if allowed_topologies is not None:
            self.allowed_topologies = allowed_topologies
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if mount_options is not None:
            self.mount_options = mount_options
        if parameters is not None:
            self.parameters = parameters
        # `provisioner` is required: assigned unconditionally so the setter's
        # client-side validation can reject None.
        self.provisioner = provisioner
        if reclaim_policy is not None:
            self.reclaim_policy = reclaim_policy
        if volume_binding_mode is not None:
            self.volume_binding_mode = volume_binding_mode
    @property
    def allow_volume_expansion(self):
        """Gets the allow_volume_expansion of this V1beta1StorageClass.  # noqa: E501
        AllowVolumeExpansion shows whether the storage class allow volume expand  # noqa: E501
        :return: The allow_volume_expansion of this V1beta1StorageClass.  # noqa: E501
        :rtype: bool
        """
        return self._allow_volume_expansion
    @allow_volume_expansion.setter
    def allow_volume_expansion(self, allow_volume_expansion):
        """Sets the allow_volume_expansion of this V1beta1StorageClass.
        AllowVolumeExpansion shows whether the storage class allow volume expand  # noqa: E501
        :param allow_volume_expansion: The allow_volume_expansion of this V1beta1StorageClass.  # noqa: E501
        :type: bool
        """
        self._allow_volume_expansion = allow_volume_expansion
    @property
    def allowed_topologies(self):
        """Gets the allowed_topologies of this V1beta1StorageClass.  # noqa: E501
        Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
        :return: The allowed_topologies of this V1beta1StorageClass.  # noqa: E501
        :rtype: list[V1TopologySelectorTerm]
        """
        return self._allowed_topologies
    @allowed_topologies.setter
    def allowed_topologies(self, allowed_topologies):
        """Sets the allowed_topologies of this V1beta1StorageClass.
        Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
        :param allowed_topologies: The allowed_topologies of this V1beta1StorageClass.  # noqa: E501
        :type: list[V1TopologySelectorTerm]
        """
        self._allowed_topologies = allowed_topologies
    @property
    def api_version(self):
        """Gets the api_version of this V1beta1StorageClass.  # noqa: E501
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :return: The api_version of this V1beta1StorageClass.  # noqa: E501
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1beta1StorageClass.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :param api_version: The api_version of this V1beta1StorageClass.  # noqa: E501
        :type: str
        """
        self._api_version = api_version
    @property
    def kind(self):
        """Gets the kind of this V1beta1StorageClass.  # noqa: E501
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :return: The kind of this V1beta1StorageClass.  # noqa: E501
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1beta1StorageClass.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :param kind: The kind of this V1beta1StorageClass.  # noqa: E501
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata of this V1beta1StorageClass.  # noqa: E501
        :return: The metadata of this V1beta1StorageClass.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1beta1StorageClass.
        :param metadata: The metadata of this V1beta1StorageClass.  # noqa: E501
        :type: V1ObjectMeta
        """
        self._metadata = metadata
    @property
    def mount_options(self):
        """Gets the mount_options of this V1beta1StorageClass.  # noqa: E501
        Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.  # noqa: E501
        :return: The mount_options of this V1beta1StorageClass.  # noqa: E501
        :rtype: list[str]
        """
        return self._mount_options
    @mount_options.setter
    def mount_options(self, mount_options):
        """Sets the mount_options of this V1beta1StorageClass.
        Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.  # noqa: E501
        :param mount_options: The mount_options of this V1beta1StorageClass.  # noqa: E501
        :type: list[str]
        """
        self._mount_options = mount_options
    @property
    def parameters(self):
        """Gets the parameters of this V1beta1StorageClass.  # noqa: E501
        Parameters holds the parameters for the provisioner that should create volumes of this storage class.  # noqa: E501
        :return: The parameters of this V1beta1StorageClass.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._parameters
    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this V1beta1StorageClass.
        Parameters holds the parameters for the provisioner that should create volumes of this storage class.  # noqa: E501
        :param parameters: The parameters of this V1beta1StorageClass.  # noqa: E501
        :type: dict(str, str)
        """
        self._parameters = parameters
    @property
    def provisioner(self):
        """Gets the provisioner of this V1beta1StorageClass.  # noqa: E501
        Provisioner indicates the type of the provisioner.  # noqa: E501
        :return: The provisioner of this V1beta1StorageClass.  # noqa: E501
        :rtype: str
        """
        return self._provisioner
    @provisioner.setter
    def provisioner(self, provisioner):
        """Sets the provisioner of this V1beta1StorageClass.
        Provisioner indicates the type of the provisioner.  # noqa: E501
        :param provisioner: The provisioner of this V1beta1StorageClass.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and provisioner is None:  # noqa: E501
            raise ValueError("Invalid value for `provisioner`, must not be `None`")  # noqa: E501
        self._provisioner = provisioner
    @property
    def reclaim_policy(self):
        """Gets the reclaim_policy of this V1beta1StorageClass.  # noqa: E501
        Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.  # noqa: E501
        :return: The reclaim_policy of this V1beta1StorageClass.  # noqa: E501
        :rtype: str
        """
        return self._reclaim_policy
    @reclaim_policy.setter
    def reclaim_policy(self, reclaim_policy):
        """Sets the reclaim_policy of this V1beta1StorageClass.
        Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.  # noqa: E501
        :param reclaim_policy: The reclaim_policy of this V1beta1StorageClass.  # noqa: E501
        :type: str
        """
        self._reclaim_policy = reclaim_policy
    @property
    def volume_binding_mode(self):
        """Gets the volume_binding_mode of this V1beta1StorageClass.  # noqa: E501
        VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
        :return: The volume_binding_mode of this V1beta1StorageClass.  # noqa: E501
        :rtype: str
        """
        return self._volume_binding_mode
    @volume_binding_mode.setter
    def volume_binding_mode(self, volume_binding_mode):
        """Sets the volume_binding_mode of this V1beta1StorageClass.
        VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.  # noqa: E501
        :param volume_binding_mode: The volume_binding_mode of this V1beta1StorageClass.  # noqa: E501
        :type: str
        """
        self._volume_binding_mode = volume_binding_mode
    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recurses into nested generated models (anything with a to_dict),
        # both inside lists and inside dict values.
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Value-based equality via the serialized dict representation.
        if not isinstance(other, V1beta1StorageClass):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta1StorageClass):
            return True
        return self.to_dict() != other.to_dict()
eb7010698508988dc184c6076b22253f156f6733 | 3,175 | py | Python | res_mlp/core.py | jaketae/res-mlp | 6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd | [
"MIT"
] | null | null | null | res_mlp/core.py | jaketae/res-mlp | 6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd | [
"MIT"
] | null | null | null | res_mlp/core.py | jaketae/res-mlp | 6c957e4fe67a2f13d9b4fd3fa36b7eddcf5323fd | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch.nn import functional as F
class AffineTransform(nn.Module):
    """Learnable per-feature affine map ``y = alpha * x + beta``.

    Initialised to the identity (alpha = 1, beta = 0); the ResMLP "Aff"
    layer used in place of normalisation.
    """

    def __init__(self, num_features):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, 1, num_features))
        self.beta = nn.Parameter(torch.zeros_like(self.alpha))

    def forward(self, x):
        # Broadcasts over (batch, patches, features).
        return x * self.alpha + self.beta
class CommunicationLayer(nn.Module):
    """Cross-patch sublayer of ResMLP.

    A single linear layer applied along the *patch* axis, wrapped in affine
    transforms and a residual (skip) connection.
    """

    def __init__(self, num_features, num_patches):
        super().__init__()
        self.aff1 = AffineTransform(num_features)
        self.fc1 = nn.Linear(num_patches, num_patches)
        self.aff2 = AffineTransform(num_features)

    def forward(self, x):
        # x: (batch, patches, features)
        skip = self.aff1(x)
        # Transpose so fc1 mixes information across patches, then restore.
        mixed = self.fc1(skip.transpose(1, 2)).transpose(1, 2)
        return skip + self.aff2(mixed)
class FeedForward(nn.Module):
    """Per-patch MLP sublayer of ResMLP.

    affine -> linear (expand) -> GELU -> linear (project) -> affine, with a
    residual (skip) connection.
    """

    def __init__(self, num_features, expansion_factor):
        super().__init__()
        num_hidden = num_features * expansion_factor
        self.aff1 = AffineTransform(num_features)
        self.fc1 = nn.Linear(num_features, num_hidden)
        self.fc2 = nn.Linear(num_hidden, num_features)
        self.aff2 = AffineTransform(num_features)

    def forward(self, x):
        skip = self.aff1(x)
        hidden = F.gelu(self.fc1(skip))
        return skip + self.aff2(self.fc2(hidden))
class ResMLPLayer(nn.Module):
    """One ResMLP block: cross-patch communication, then the per-patch MLP."""

    def __init__(self, num_features, num_patches, expansion_factor):
        super().__init__()
        self.cl = CommunicationLayer(num_features, num_patches)
        self.ff = FeedForward(num_features, expansion_factor)

    def forward(self, x):
        return self.ff(self.cl(x))
def check_sizes(image_size, patch_size):
    """Validate that the image divides evenly into patches.

    :param image_size: height/width of the (square) input image, in pixels.
    :param patch_size: height/width of each (square) patch, in pixels.
    :return: total number of patches, ``(image_size // patch_size) ** 2``.
    :raises AssertionError: if ``image_size`` is not divisible by
        ``patch_size``.  (NOTE: assert-based validation is stripped under
        ``python -O``.)
    """
    sqrt_num_patches, remainder = divmod(image_size, patch_size)
    # Include the offending values in the message (and fix the "divisibe"
    # typo from the original).
    assert remainder == 0, (
        f"`image_size` ({image_size}) must be divisible by "
        f"`patch_size` ({patch_size})"
    )
    return sqrt_num_patches ** 2
class ResMLP(nn.Module):
    """ResMLP image classifier.

    Conv patch embedding -> stack of ResMLP blocks -> mean pooling over
    patches -> linear classification head.
    """

    def __init__(
        self,
        image_size=256,
        patch_size=16,
        in_channels=3,
        num_features=128,
        expansion_factor=2,
        num_layers=8,
        num_classes=10,
    ):
        num_patches = check_sizes(image_size, patch_size)
        super().__init__()
        # Non-overlapping patch embedding: kernel == stride == patch_size.
        self.patcher = nn.Conv2d(
            in_channels, num_features, kernel_size=patch_size, stride=patch_size
        )
        blocks = [
            ResMLPLayer(num_features, num_patches, expansion_factor)
            for _ in range(num_layers)
        ]
        self.mlps = nn.Sequential(*blocks)
        self.classifier = nn.Linear(num_features, num_classes)

    def forward(self, x):
        # (batch, channels, H, W) -> (batch, num_features, H/p, W/p)
        patches = self.patcher(x)
        # Flatten the spatial grid and move features last:
        # (batch, num_patches, num_features)
        tokens = patches.flatten(2).transpose(1, 2)
        embedding = self.mlps(tokens).mean(dim=1)
        return self.classifier(embedding)
| 29.95283 | 80 | 0.620157 |
f3ab80f8c3ad49a40b44b66e26dd3c47485dc772 | 10,189 | py | Python | meiduoshop/apps/user/views.py | 1572990942/meiduoshop | 64f4fe04fbcec8ceecf9fa0ce24afe41388f6926 | [
"MIT"
] | null | null | null | meiduoshop/apps/user/views.py | 1572990942/meiduoshop | 64f4fe04fbcec8ceecf9fa0ce24afe41388f6926 | [
"MIT"
] | null | null | null | meiduoshop/apps/user/views.py | 1572990942/meiduoshop | 64f4fe04fbcec8ceecf9fa0ce24afe41388f6926 | [
"MIT"
] | null | null | null | import json
import logging
import re
import jwt
from QQLoginTool.QQtool import OAuthQQ
from django.conf import settings
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.core.cache import caches
from django.http import HttpRequest, JsonResponse, HttpResponseForbidden, HttpResponseNotAllowed
from django.views import View
from apps.user.models import User, QQUser
# Create your views here.
# AJAX: check whether a username is still available
from apps.user.tasks import async_send_active_email
from utils.converters import MobileConverter, UserEmailConverter
from utils.functions import jwt_decode, jwt_encode
class UsernameCountView(View):
    """AJAX endpoint: report whether a username is already taken.

    The frontend reads ``count`` (0 or 1 -- usernames are unique) to decide
    whether the name is still available.
    """

    def get(self, request: HttpRequest, username):
        # .exists() issues a cheap EXISTS query instead of evaluating the
        # whole queryset just to test its truthiness.
        taken = User.objects.filter(username=username).exists()
        return JsonResponse({
            "code": 200,
            "count": int(taken)
        })
# AJAX: check whether a mobile number is still available
class MobileCountView(View):
    """AJAX endpoint: report whether a mobile number is already registered.

    The frontend reads ``count`` (0 or 1 -- mobile numbers are unique) to
    decide whether the number is still available.
    """

    def get(self, request: HttpRequest, mobile):
        # .exists() issues a cheap EXISTS query instead of evaluating the
        # whole queryset just to test its truthiness.
        taken = User.objects.filter(mobile=mobile).exists()
        return JsonResponse({
            "code": 200,
            "count": int(taken)
        })
# Handle the registration request submitted by the sign-up form
class RegisterView(View):
    """Handle the registration form: validate, create the user, log in."""

    def post(self, request: HttpRequest):
        # Extract the submitted fields.
        register_data = json.loads(request.body)
        username = register_data.get("username")
        password = register_data.get("password")
        password2 = register_data.get("password2")
        mobile = register_data.get("mobile")
        allow = register_data.get("allow")
        sms_code = register_data.get("sms_code")
        # Validate everything in one pass; any failure yields a generic 400.
        # Regexes are raw strings now (the originals triggered invalid-escape
        # warnings for \S and \d).
        # NOTE(review): re.match(...) raises TypeError when a field is
        # missing (None) -- presumably the frontend always sends all keys;
        # confirm before hardening.
        if not all((
                re.match(r'^[a-zA-Z0-9-_]{5,20}$', username),
                re.match(r'^\S{8,20}$', password),
                password == password2,
                re.match(r'^1[345789]\d{9}$', mobile),
                allow,
                str(caches['sms_code'].get(mobile)) == str(sms_code)
        )):
            return JsonResponse({
                "code": 400,
                "errmsg": "用户信息参数出错"
            })
        # Creation can fail under concurrency (duplicate username).
        try:
            user = User.objects.create_user(
                username=username,
                password=password,
                mobile=mobile,
            )
        except Exception as e:
            # Was a bare print(e); use logging like the rest of this module.
            logging.error(e)
            return JsonResponse({
                "code": 400,
                "errmsg": "用户名出错或该用户已存在"
            })
        # Keep the user logged in (the session defaults to two weeks).
        login(request, user)
        response = JsonResponse({"code": 0})
        max_age = settings.SESSION_COOKIE_AGE
        response.set_cookie('username', username, max_age)
        return response
# Login view: username + password, or mobile number + password
class LoginView(View):
    """Log a user in with either username+password or mobile+password."""

    def post(self, request):
        # Extract the submitted fields.
        data = json.loads(request.body)
        username = data.get("username")
        password = data.get("password")
        remembered = data.get("remembered")
        # Validate: both credentials must be present.
        if not all([username, password]):
            return JsonResponse({"code": 400, "errmsg": "登陆时缺少必要参数"})
        # NOTE(review): mutating User.USERNAME_FIELD is a process-global side
        # effect -- concurrent requests in the same process can race here.
        if re.match(MobileConverter.regex, username):  # looks like a mobile number
            User.USERNAME_FIELD = 'mobile'
        else:  # otherwise treat it as a username
            User.USERNAME_FIELD = 'username'
        if not (user := authenticate(username=username, password=password)):
            return JsonResponse({"code": 400, "errmsg": "用户名或密码错误"})
        login(request, user)
        # "Remember me": 0 expires the session when the browser closes,
        # otherwise keep it for SESSION_COOKIE_AGE.
        # NOTE(review): indexing a tuple with `remembered` assumes it is a
        # bool; a missing key (None) would raise TypeError -- confirm.
        max_age = (0, settings.SESSION_COOKIE_AGE)[remembered]  # 0 => expire on browser close
        request.session.set_expiry(max_age)
        # Mirror the login state in a cookie for the frontend.
        response = JsonResponse({"code": 0})
        response.set_cookie('username', username, max_age or None)  # None => cookie expires on browser close
        return response
# Log out
class LogoutView(View):
    """Log the user out and clear the cached username cookie."""

    def delete(self, request):
        logout(request)  # flushes the session server-side
        resp = JsonResponse({"code": 200})
        resp.delete_cookie('username')
        return resp
class UserCenterView(View):
    """Return the logged-in user's profile data for the user-center page."""

    def get(self, request):
        user = request.user
        # (removed a leftover debug ``print(user)``)
        return JsonResponse({
            "code": 200,
            "errmsg": "个人中心",
            "info_data": {
                "username": user.username,
                "mobile": user.mobile,
                "email": user.email,
                # Was hard-coded to "" -- report the real activation flag,
                # consistent with get_user_info() elsewhere in this module.
                "email_active": user.email_active
            }
        })
# The frontend QQ-login button sends an AJAX request to fetch the third-party login URL
class GetQQLoginUriView(View):
    """Build the third-party QQ login URL for the frontend login button."""

    def get(self, request: HttpRequest):
        # ``next`` is the page the user came from; it rides through the OAuth
        # round-trip in ``state`` (which also guards against CSRF) so the
        # user can be sent back there after a successful login.
        next_uri = request.GET.get('next')
        oauth = OAuthQQ(
            client_id=settings.QQ_CLIENT_ID,
            client_secret=settings.QQ_CLIENT_SECRET,
            redirect_uri=settings.QQ_REDIRECT_URI,  # callback hit after authorisation
            state=next_uri,
        )
        return JsonResponse({"code": 0, 'login_url': oauth.get_qq_url()})
# After a successful QQ login the frontend sends the code to the callback URL;
# the backend exchanges code -> access_token -> openid
class GetQQAccessTokenView(View):
    """QQ OAuth callback: GET resolves the openid, POST binds/creates a user."""

    def get(self, request: HttpRequest):
        """ http://www.meiduo.site:8000/oauth_callback/?code=A623D1E33FEFD88213A05BC26C290736
        """
        # Grab the authorisation code issued after the user logged in at QQ.
        code = request.GET.get('code')
        if not code:
            return JsonResponse({"code": 400, "errmsg": "参数不全"})
        # Exchange code -> access_token, then access_token -> openid.
        qq = OAuthQQ(
            client_id=settings.QQ_CLIENT_ID,
            client_secret=settings.QQ_CLIENT_SECRET,
            redirect_uri=settings.QQ_REDIRECT_URI,
        )
        access_token = qq.get_access_token(code=code)
        openid = qq.get_open_id(access_token)
        # If this openid is already bound to a local account, log in and
        # return to the home page (qquser.user is guaranteed here).
        if qquser := (QQUser.objects.filter(openid=openid).first()):  # already bound
            user = qquser.user
            login(request, user)
            response = JsonResponse({"code": 0})
            response.set_cookie("username", user.username, max_age=settings.SESSION_COOKIE_AGE)
            return response
        else:  # not bound yet: hand the openid back (signed) so POST can bind it
            # access_token = generate_access_token({'openid':openid})
            jwt_openid = jwt_encode({"openid": openid}, max_age=settings.SESSION_COOKIE_AGE)  # JWT-sign so it cannot be tampered with
            return JsonResponse({"code": 300, "access_token": jwt_openid})

    def post(self, request: HttpRequest):
        """After a QQ login with no bound account, the user fills a
        bind/register form; this handles that form's submission."""
        # Extract the submitted fields.
        data = json.loads(request.body)
        password = data.get("password")
        mobile = data.get("mobile")
        sms_code = data.get("sms_code")
        jwt_openid = data.get("access_token")  # the JWT-wrapped openid from GET
        # Validate.
        if not all((
                re.match('^\S{8,20}$', password),
                re.match('^1[345789]\d{9}$', mobile),
                str(caches['sms_code'].get(mobile)) == str(sms_code),
                jwt_openid
        )):
            return JsonResponse({"code": 400, "errmsg": "用户信息参数出错"})
        # Mobile already registered but the password does not match.
        if (user := User.objects.filter(mobile=mobile).first()) and (not user.check_password(password)):
            return JsonResponse({"code": 400, "errmsg": "用户名或密码错误"})
        # No user for this mobile: create one.
        # NOTE(review): two known flaws, acknowledged by the original author:
        # (1) the mobile number doubles as the username, which can collide
        # with an existing username; (2) an EXISTING user with the CORRECT
        # password also falls into this branch, so create_user fails and the
        # bind is rejected with "用户已存在" -- confirm intended behaviour.
        else:
            try:
                user = User.objects.create_user(username=mobile, password=password, mobile=mobile)
            except Exception as e:
                logging.error(e)
                return JsonResponse({"code": 400, "errmsg": "用户已存在"})
        # Bind the QQ identity to the local user.
        try:  # NOTE(review): trusting an access_token round-tripped through the frontend is questionable
            openid = jwt_decode(jwt_openid).get("openid")  # JWT signature protects against tampering
            qquser = QQUser.objects.create(user=user, openid=openid)
        except Exception as e:
            logging.error(e)
            return JsonResponse({"code": 400, "errmsg": "第三方登陆出错"})
        # Keep the user logged in.
        login(request, user)
        response = JsonResponse({"code": 200})
        response.set_cookie("username", user.username, max_age=settings.SESSION_COOKIE_AGE)
        return response
# User center
@login_required(login_url='/login/')
def get_user_info(request: HttpRequest):
    """Serve the personal-information data for the logged-in user."""
    user = request.user
    info = {
        "username": user.username,
        "mobile": user.mobile,
        "email": user.email,
        "email_active": user.email_active,
    }
    return JsonResponse({
        "code": 200,
        "info_data": info
    })
# Add the user's email address
@login_required(login_url='/login/')
def user_email(request: HttpRequest):
    """PUT: attach an email address to the logged-in user and send an
    activation mail.  Any other method gets a proper 405 response."""
    user = request.user

    def put():
        data = json.loads(request.body)
        email = data.get("email")
        if not re.match(UserEmailConverter.regex, email):
            return JsonResponse({"code": 400, "errmsg": "邮箱格式有误"})
        try:
            user.email = email
            user.save()
        except Exception as e:
            logging.error(e)
            return JsonResponse({"code": 400, "errmsg": "添加邮箱失败"})
        # Send the activation mail asynchronously (Celery task).
        async_send_active_email.delay(user.username, user.email)
        return JsonResponse({"code": 0})

    # The original dispatched via locals().get(..., HttpResponseNotAllowed)()
    # which crashed with TypeError on non-PUT methods, because
    # HttpResponseNotAllowed requires the list of permitted methods.
    if request.method.lower() == 'put':
        return put()
    return HttpResponseNotAllowed(['PUT'])
# Activate the email address
# @login_required must NOT be used here: the user arrives via a link in the
# activation email and is usually not logged in at that point
def user_email_verify(request: HttpRequest):
    """PUT: activate a user's email from the ``verifyCode`` query parameter
    (``<username>_<code>``).  Any other method gets a proper 405 response."""

    def put():
        # Extract and sanity-check the verification token.
        verify_code: str = request.GET.get("verifyCode")
        if not verify_code or ('_' not in verify_code):
            return JsonResponse({"code": 400, "errmsg": "验证参数错误"})
        # rsplit keeps usernames containing '_' intact (the username regex
        # allows underscores); plain split("_") raised ValueError for them.
        username, verify_num = verify_code.rsplit("_", 1)
        if (not username) or (not (user := User.objects.filter(username=username).first())) or (str(caches['email_active_code'].get(username)) != verify_num):
            return JsonResponse({"code": 400, "errmsg": "验证参数错误"})
        # Activate, drop the one-time code from the cache, respond.
        user.email_active = True
        user.save()
        caches['email_active_code'].delete(username)
        return JsonResponse({"code": 0})

    # The original dispatched via locals().get(..., HttpResponseNotAllowed)()
    # which crashed with TypeError on non-PUT methods, because
    # HttpResponseNotAllowed requires the list of permitted methods.
    if request.method.lower() == 'put':
        return put()
    return HttpResponseNotAllowed(['PUT'])
dfcf1816b7b9b25f969db88188d589b900a279f8 | 5,717 | py | Python | homeassistant/components/switch/scsgate.py | dotlambda/home-assistant | 68d2851ecf2dcd05bd5197240a31980d4fee8d2e | [
"Apache-2.0"
] | 1 | 2019-02-15T00:07:18.000Z | 2019-02-15T00:07:18.000Z | homeassistant/components/switch/scsgate.py | dotlambda/home-assistant | 68d2851ecf2dcd05bd5197240a31980d4fee8d2e | [
"Apache-2.0"
] | 5 | 2022-03-01T06:31:03.000Z | 2022-03-31T07:20:45.000Z | homeassistant/components/switch/scsgate.py | dotlambda/home-assistant | 68d2851ecf2dcd05bd5197240a31980d4fee8d2e | [
"Apache-2.0"
] | 1 | 2019-12-06T21:18:54.000Z | 2019-12-06T21:18:54.000Z | """
Support for SCSGate switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.scsgate/
"""
import logging
import voluptuous as vol
import homeassistant.components.scsgate as scsgate
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_STATE, CONF_NAME, CONF_DEVICES)
import homeassistant.helpers.config_validation as cv
ATTR_SCENARIO_ID = 'scenario_id'
DEPENDENCIES = ['scsgate']
CONF_TRADITIONAL = 'traditional'
CONF_SCENARIO = 'scenario'
CONF_SCS_ID = 'scs_id'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICES): vol.Schema({cv.slug: scsgate.SCSGATE_SCHEMA}),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the SCSGate switches."""
    log = logging.getLogger(__name__)
    _setup_traditional_switches(
        logger=log, config=config, add_devices_callback=add_devices)
    _setup_scenario_switches(logger=log, config=config, hass=hass)
def _setup_traditional_switches(logger, config, add_devices_callback):
    """Add traditional SCSGate switches."""
    switches = []
    for _, entity_info in (config.get(CONF_TRADITIONAL) or {}).items():
        scs_id = entity_info[scsgate.CONF_SCS_ID]
        # Skip devices the gate already knows about.
        if scs_id in scsgate.SCSGATE.devices:
            continue
        name = entity_info[CONF_NAME]
        logger.info("Adding %s scsgate.traditional_switch", name)
        switches.append(SCSGateSwitch(name=name, scs_id=scs_id, logger=logger))
    # Both calls happen even with an empty list, as in the original flow.
    add_devices_callback(switches)
    scsgate.SCSGATE.add_devices_to_register(switches)
def _setup_scenario_switches(logger, config, hass):
    """Add only SCSGate scenario switches."""
    for _, entity_info in (config.get(CONF_SCENARIO) or {}).items():
        scs_id = entity_info[scsgate.CONF_SCS_ID]
        # Skip devices the gate already knows about.
        if scs_id in scsgate.SCSGATE.devices:
            continue
        name = entity_info[CONF_NAME]
        logger.info("Adding %s scsgate.scenario_switch", name)
        scsgate.SCSGATE.add_device(SCSGateScenarioSwitch(
            name=name, scs_id=scs_id, logger=logger, hass=hass))
class SCSGateSwitch(SwitchDevice):
    """A switch reachable through the SCSGate bus."""

    def __init__(self, scs_id, name, logger):
        """Initialize the switch."""
        self._name = name
        self._scs_id = scs_id
        self._toggled = False
        self._logger = logger

    @property
    def scs_id(self):
        """Return the SCS ID."""
        return self._scs_id

    @property
    def should_poll(self):
        """State is pushed by the gate, so polling is unnecessary."""
        return False

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._toggled

    def _request_toggle(self, toggled):
        """Queue a toggle task on the gate and mirror the state locally."""
        from scsgate.tasks import ToggleStatusTask
        scsgate.SCSGATE.append_task(
            ToggleStatusTask(target=self._scs_id, toggled=toggled))
        self._toggled = toggled
        self.schedule_update_ha_state()

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._request_toggle(True)

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._request_toggle(False)

    def process_event(self, message):
        """Handle a SCSGate message related with this switch."""
        if self._toggled == message.toggled:
            # Already in the reported state; nothing to do.
            self._logger.info(
                "Switch %s, ignoring message %s because state already active",
                self._scs_id, message)
            return
        self._toggled = message.toggled
        self.schedule_update_ha_state()
        self.hass.bus.fire(
            'button_pressed', {
                ATTR_ENTITY_ID: self._scs_id,
                ATTR_STATE: "on" if self._toggled else "off"}
        )
class SCSGateScenarioSwitch(object):
    """Provide a SCSGate scenario switch.

    The switch itself is stateless: whenever the scenario fires on the bus,
    a ``scenario_switch_triggered`` event is emitted on the HA event bus.
    """

    def __init__(self, scs_id, name, logger, hass):
        """Initialize the scenario."""
        self._name = name
        self._scs_id = scs_id
        self._logger = logger
        self._hass = hass

    @property
    def scs_id(self):
        """Return the SCS ID."""
        return self._scs_id

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    def process_event(self, message):
        """Handle a SCSGate message related with this switch."""
        from scsgate.messages import StateMessage, ScenarioTriggeredMessage
        if isinstance(message, StateMessage):
            scenario_id = message.bytes[4]
        elif isinstance(message, ScenarioTriggeredMessage):
            scenario_id = message.scenario
        else:
            # logger.warn() is a deprecated alias; use warning().
            self._logger.warning(
                "Scenario switch: received unknown message %s", message)
            return
        self._hass.bus.fire(
            'scenario_switch_triggered', {
                ATTR_ENTITY_ID: int(self._scs_id),
                # NOTE(review): int(x, 16) requires a str; message.bytes[4]
                # may already be an int depending on the scsgate library
                # version -- verify against the installed scsgate release.
                ATTR_SCENARIO_ID: int(scenario_id, 16)
            }
        )
| 28.873737 | 78 | 0.641245 |
d31055cac80a98fe6992054f443ae04c2e091ada | 730 | py | Python | ledDrop.py | scivision/power-harvesting-voltage-multiplier | ff592e87a641a4e7221f4ab3d9505f7c8f5a7d16 | [
"MIT"
] | 1 | 2022-03-24T22:32:22.000Z | 2022-03-24T22:32:22.000Z | ledDrop.py | scivision/power-harvesting-voltage-multiplier | ff592e87a641a4e7221f4ab3d9505f7c8f5a7d16 | [
"MIT"
] | null | null | null | ledDrop.py | scivision/power-harvesting-voltage-multiplier | ff592e87a641a4e7221f4ab3d9505f7c8f5a7d16 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pandas
import io
import subprocess
from matplotlib.pyplot import figure, show
from scipy.interpolate import interp1d
# %% brightness from XHP50 datasheet
# Map LED drive current (A) -> relative brightness, interpolated cubically.
B = [.2, .4, .6, .8, 1, 1.2]
# Renamed from `I` (PEP 8 E741: ambiguous single-letter name).
drive_current = [.1, .25, .4, .55, .7, .85]
to_brightness = interp1d(drive_current, B, 'cubic')
# %% run sim
# Run the gnucap circuit simulation and capture its text output.
ret = subprocess.check_output(
    ['gnucap', '-b', 'ledDrop.net'], universal_newlines=True)
# Raw string for the whitespace separator (bare '\s+' is an invalid escape).
dat = pandas.read_csv(io.StringIO(ret), header=10, delimiter=r'\s+').squeeze()
ind = ['i(D1)', 'i(D6)', 'i(D12)']
names = ['D1', 'D6', 'D12']
Iled = dat[ind]
bi = to_brightness(Iled)
# Figure 1: raw simulated quantities; Figure 2: normalized LED brightness.
ax = figure(1).gca()
ax.stem(dat.index.values, dat.values)
ax = figure(2).gca()
ax.set_title('LED brightness')
ax.set_ylabel('brightness (normalized)')
ax.stem(names, bi)
show()
| 24.333333 | 77 | 0.652055 |
b22d06af55ec0adcde92f9e2ccc2ce4041d5ddb9 | 1,258 | py | Python | applications/airflow/dags/kubernetes_operator.py | sjorsvanderleeuwen/dsri-documentation | 8864c5fa88dede65a4a6b5fa7a857417b48d2580 | [
"CC-BY-4.0"
] | 14 | 2019-09-27T16:18:19.000Z | 2022-02-01T16:14:12.000Z | applications/airflow/dags/kubernetes_operator.py | sjorsvanderleeuwen/dsri-documentation | 8864c5fa88dede65a4a6b5fa7a857417b48d2580 | [
"CC-BY-4.0"
] | 32 | 2019-10-23T08:53:51.000Z | 2022-03-25T09:17:01.000Z | applications/airflow/dags/kubernetes_operator.py | sjorsvanderleeuwen/dsri-documentation | 8864c5fa88dede65a4a6b5fa7a857417b48d2580 | [
"CC-BY-4.0"
] | 8 | 2019-09-28T20:43:58.000Z | 2021-12-02T13:01:55.000Z | from airflow import DAG
from datetime import datetime, timedelta
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.operators.dummy_operator import DummyOperator
# Default arguments inherited by every task in this DAG.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    # NOTE(review): a dynamic start_date (utcnow at parse time) is a known
    # Airflow anti-pattern -- it is re-evaluated on every DAG parse.  Confirm
    # a fixed date would not break the intended manual-trigger workflow.
    'start_date': datetime.utcnow(),
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}
# schedule_interval=None: the DAG only runs when triggered manually.
dag = DAG(
    'kubernetes_pod_operator',
    default_args=default_args,
    schedule_interval=None
    # schedule_interval=timedelta(minutes=10)
)
# No-op entry point; the pod tasks hang off it.
start = DummyOperator(task_id='run_this_first', dag=dag)
# Runs a one-liner inside a python:3.6 pod; expected to succeed.
passing = KubernetesPodOperator(
    namespace='bio2kg',
    image="python:3.6",
    cmds=["python","-c"],
    arguments=["print('hello world')"],
    labels={"app": "airflow"},
    name="passing-test",
    task_id="passing-task",
    get_logs=True,
    dag=dag
)
# Same command in a bare ubuntu image (no python installed) -- presumably a
# deliberate failure demo, as the task id suggests; confirm intent.
failing = KubernetesPodOperator(
    namespace='bio2kg',
    image="ubuntu",
    cmds=["python","-c"],
    arguments=["print('hello world')"],
    labels={"app": "airflow"},
    name="fail",
    task_id="failing-task",
    get_logs=True,
    dag=dag
)
passing.set_upstream(start)
failing.set_upstream(start) | 24.192308 | 83 | 0.683625 |
f01a719d4406c1705293e3dd8f0f5695ee44f426 | 2,747 | py | Python | Finals Practice/9.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | Finals Practice/9.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | Finals Practice/9.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | class Squad:
    # Class-level counter: shared by ALL Squad (and subclass) instances,
    # so it counts players across every squad in the process.
    playerCount=0
    def __init__(self,name,squadType):
        # e.g. name="Hogwart's Dragons", squadType="Quidditch"
        self.name = name
        self.squadType = squadType
    def formSquad(self):
        # A squad is viable once at least 5 players exist overall.
        if Squad.playerCount >= 5:
            return True
        else:
            return False
    def __str__(self):
        # Two-line summary: team identity plus the (shared) player count.
        s = f"{self.name} is a {self.squadType} team.\n"
        s+= f"Number of players in {self.name} is {Squad.playerCount}\n"
        return s
#Write your code here
class HogwartsQuidditchSquad(Squad):
    """A Quidditch squad that groups registered players by position."""

    def __init__(self, name, squadType):
        super().__init__(name, squadType)
        # Maps position name -> list of [player name, house] pairs.
        self.dic = {}

    def addPlayer(self, *args):
        """Register one or more players, each given as [name, position, house]."""
        for player in args:
            name, position, house = player[0], player[1], player[2]
            # setdefault replaces the original if/else branching (idiom).
            self.dic.setdefault(position, []).append([name, house])
            Squad.playerCount += 1

    def __str__(self):
        # Printing inside __str__ is unusual, but the driver's expected
        # output depends on this side effect, so it is preserved.
        print(super().__str__().rstrip("\n"))
        s = ""
        for position, players in self.dic.items():
            s += f"{position}s:\n"
            for name, house in players:
                s += f"Player name: {name}, House: {house}\n"
        return s.rstrip("\n")

    def formSquad(self):
        """Report whether a squad (and a 'perfect' squad) can be formed."""
        if not super().formSquad():
            print("We do not have enough players to form a squad.")
            return
        print("We have enough players to form a squad.")
        # Bug fix: the original indexed self.dic["Seeker"] etc. directly and
        # raised KeyError when 5+ players were registered but a position was
        # completely absent; .get with a default handles missing positions.
        perfect = (len(self.dic.get("Seeker", [])) >= 1
                   and len(self.dic.get("Beater", [])) >= 2
                   and len(self.dic.get("Chaser", [])) >= 1
                   and len(self.dic.get("Keeper", [])) >= 1)
        if perfect:
            print("Also we can form a perfect squad!!")
        else:
            print("But we cannot form a perfect squad.")
# Do not change the following lines of code.
# Driver/test harness supplied by the assignment; exercises addPlayer,
# __str__ and formSquad in sequence.
f = HogwartsQuidditchSquad("Hogwart's Dragons","Quidditch")
f.addPlayer(["Harry Potter","Seeker","Gryffindor"],["Katie Bell","Chaser","Gryffindor"])
print("1.====================================")
print(f)
print("2.====================================")
f.formSquad()
print("3.====================================")
f.addPlayer(["Vincent Crabbe","Beater","Slytherin"])
f.addPlayer(["Miles Bletchley","Keeper","Slytherin"])
print("4.====================================")
print(f)
print("5.====================================")
f.formSquad()
print("6.====================================")
f.addPlayer(["Ethan Humberstone","Keeper","Hufflepuff"])
f.formSquad()
print("7.====================================")
f.addPlayer(["Fred Weasley","Beater","Gryffindor"])
print(f)
print("8.====================================")
f.formSquad() | 32.702381 | 88 | 0.470695 |
5398f7925424dc1cb2ec59a83056ba6f4a7aa292 | 11,360 | py | Python | exps/algos/BOHB.py | EM-AutoML/AutoDL-Projects | 8ff416fe5d6cb1b310b885fe376e6f2790fbda14 | [
"MIT"
] | 1 | 2020-05-08T08:58:28.000Z | 2020-05-08T08:58:28.000Z | exps/algos/BOHB.py | EM-AutoML/AutoDL-Projects | 8ff416fe5d6cb1b310b885fe376e6f2790fbda14 | [
"MIT"
] | null | null | null | exps/algos/BOHB.py | EM-AutoML/AutoDL-Projects | 8ff416fe5d6cb1b310b885fe376e6f2790fbda14 | [
"MIT"
] | 2 | 2020-05-08T09:14:36.000Z | 2020-09-28T06:59:30.000Z | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #
###################################################################
# BOHB: Robust and Efficient Hyperparameter Optimization at Scale #
# required to install hpbandster ##################################
# bash ./scripts-search/algos/BOHB.sh -1 ##################
###################################################################
import os, sys, time, glob, random, argparse
import numpy as np, collections
from copy import deepcopy
from pathlib import Path
import torch
import torch.nn as nn
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, SearchDataset
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from nas_201_api import NASBench201API as API
from models import CellStructure, get_search_spaces
# BOHB: Robust and Efficient Hyperparameter Optimization at Scale, ICML 2018
import ConfigSpace
from hpbandster.optimizers.bohb import BOHB
import hpbandster.core.nameserver as hpns
from hpbandster.core.worker import Worker
def get_configuration_space(max_nodes, search_space):
  """Build a ConfigSpace where every cell edge 'dst<-src' gets a categorical
  hyperparameter selecting one operation name from `search_space`."""
  space = ConfigSpace.ConfigurationSpace()
  edges = ['{:}<-{:}'.format(dst, src) for dst in range(1, max_nodes) for src in range(dst)]
  for edge in edges:
    space.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(edge, search_space))
  return space
def config2structure_func(max_nodes):
  """Return a converter that turns a sampled configuration dict into a
  CellStructure for a cell with `max_nodes` nodes."""
  def config2structure(config):
    genotypes = []
    for dst in range(1, max_nodes):
      edges = [(config['{:}<-{:}'.format(dst, src)], src) for src in range(dst)]
      genotypes.append(tuple(edges))
    return CellStructure(genotypes)
  return config2structure
class MyWorker(Worker):
  """hpbandster worker that evaluates sampled architectures by looking them
  up in the NAS-Bench-201 table instead of actually training them."""
  def __init__(self, *args, convert_func=None, nas_bench=None, time_budget=None, **kwargs):
    # convert_func: maps a ConfigSpace sample to a CellStructure.
    # nas_bench: benchmark API used to query precomputed results.
    # time_budget: simulated search budget in seconds.
    super().__init__(*args, **kwargs)
    self.convert_func = convert_func
    self.nas_bench = nas_bench
    self.time_budget = time_budget
    self.seen_archs = []      # indices of architectures evaluated so far
    self.sim_cost_time = 0    # simulated (looked-up) train+valid time consumed
    self.real_cost_time = 0   # actual wall-clock time spent inside compute()
    self.is_end = False       # set once the simulated budget is exhausted
  def get_the_best(self):
    """Return the index of the seen architecture with best validation accuracy."""
    assert len(self.seen_archs) > 0
    best_index, best_acc = -1, None
    for arch_index in self.seen_archs:
      info = self.nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)
      vacc = info['valid-accuracy']
      if best_acc is None or best_acc < vacc:
        best_acc = vacc
        best_index = arch_index
    assert best_index != -1
    return best_index
  def compute(self, config, budget, **kwargs):
    """Evaluate one BOHB-sampled config via benchmark lookup.

    Returns the hpbandster result dict; after the simulated time budget is
    exceeded, every further call reports a constant loss of 100.
    """
    start_time = time.time()
    structure = self.convert_func( config )
    arch_index = self.nas_bench.query_index_by_arch( structure )
    info = self.nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)
    cur_time = info['train-all-time'] + info['valid-per-time']
    cur_vacc = info['valid-accuracy']
    self.real_cost_time += (time.time() - start_time)
    if self.sim_cost_time + cur_time <= self.time_budget and not self.is_end:
      self.sim_cost_time += cur_time
      self.seen_archs.append( arch_index )
      # hpbandster minimizes "loss", so report 100 - accuracy.
      return ({'loss': 100 - float(cur_vacc),
               'info': {'seen-arch' : len(self.seen_archs),
                        'sim-test-time' : self.sim_cost_time,
                        'current-arch' : arch_index}
              })
    else:
      self.is_end = True
      return ({'loss': 100,
               'info': {'seen-arch' : len(self.seen_archs),
                        'sim-test-time' : self.sim_cost_time,
                        'current-arch' : None}
              })
def main(xargs, nas_bench):
  """Run BOHB over the NAS-Bench-201 search space and return
  (log_dir, best architecture index, wall-clock search time)."""
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = False
  torch.backends.cudnn.deterministic = True
  torch.set_num_threads( xargs.workers )
  prepare_seed(xargs.rand_seed)
  # Bug fix: the original called prepare_logger(args), reading the module
  # global created under `if __name__ == '__main__'`; use the xargs
  # parameter so main() also works when imported and called directly.
  logger = prepare_logger(xargs)
  assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
  if xargs.data_path is not None:
    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
    split_Fpath = 'configs/nas-benchmark/cifar-split.txt'
    cifar_split = load_config(split_Fpath, None, None)
    train_split, valid_split = cifar_split.train, cifar_split.valid
    logger.log('Load split file from {:}'.format(split_Fpath))
    config_path = 'configs/nas-benchmark/algos/R-EA.config'
    config = load_config(config_path, {'class_num': class_num, 'xshape': xshape}, logger)
    # To split data: validation uses training images with the eval transform.
    train_data_v2 = deepcopy(train_data)
    train_data_v2.transform = valid_data.transform
    valid_data = train_data_v2
    search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)
    # data loader
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split) , num_workers=xargs.workers, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=xargs.workers, pin_memory=True)
    logger.log('||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(train_loader), len(valid_loader), config.batch_size))
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
    extra_info = {'config': config, 'train_loader': train_loader, 'valid_loader': valid_loader}
  else:
    config_path = 'configs/nas-benchmark/algos/R-EA.config'
    config = load_config(config_path, None, logger)
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
    extra_info = {'config': config, 'train_loader': None, 'valid_loader': None}
  # nas dataset load
  assert xargs.arch_nas_dataset is not None and os.path.isfile(xargs.arch_nas_dataset)
  search_space = get_search_spaces('cell', xargs.search_space_name)
  cs = get_configuration_space(xargs.max_nodes, search_space)
  config2structure = config2structure_func(xargs.max_nodes)
  # Local hpbandster name server coordinates the worker(s) and the optimizer.
  hb_run_id = '0'
  NS = hpns.NameServer(run_id=hb_run_id, host='localhost', port=0)
  ns_host, ns_port = NS.start()
  num_workers = 1
  #nas_bench = AANASBenchAPI(xargs.arch_nas_dataset)
  #logger.log('{:} Create NAS-BENCH-API DONE'.format(time_string()))
  workers = []
  for i in range(num_workers):
    w = MyWorker(nameserver=ns_host, nameserver_port=ns_port, convert_func=config2structure, nas_bench=nas_bench, time_budget=xargs.time_budget, run_id=hb_run_id, id=i)
    w.run(background=True)
    workers.append(w)
  start_time = time.time()
  bohb = BOHB(configspace=cs,
           run_id=hb_run_id,
           eta=3, min_budget=12, max_budget=200,
           nameserver=ns_host,
           nameserver_port=ns_port,
           num_samples=xargs.num_samples,
           random_fraction=xargs.random_fraction, bandwidth_factor=xargs.bandwidth_factor,
           ping_interval=10, min_bandwidth=xargs.min_bandwidth)
  results = bohb.run(xargs.n_iters, min_n_workers=num_workers)
  bohb.shutdown(shutdown_workers=True)
  NS.shutdown()
  real_cost_time = time.time() - start_time
  id2config = results.get_id2config_mapping()
  incumbent = results.get_incumbent_id()
  logger.log('Best found configuration: {:} within {:.3f} s'.format(id2config[incumbent]['config'], real_cost_time))
  best_arch = config2structure( id2config[incumbent]['config'] )
  info = nas_bench.query_by_arch( best_arch )
  if info is None: logger.log('Did not find this architecture : {:}.'.format(best_arch))
  else           : logger.log('{:}'.format(info))
  logger.log('-'*100)
  logger.log('workers : {:.1f}s with {:} archs'.format(workers[0].time_budget, len(workers[0].seen_archs)))
  logger.close()
  return logger.log_dir, nas_bench.query_index_by_arch( best_arch ), real_cost_time
if __name__ == '__main__':
  parser = argparse.ArgumentParser("Regularized Evolution Algorithm")
  parser.add_argument('--data_path', type=str, help='Path to dataset')
  parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')
  # channels and number-of-cells
  parser.add_argument('--search_space_name', type=str, help='The search space name.')
  parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')
  parser.add_argument('--channel', type=int, help='The number of channels.')
  parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')
  parser.add_argument('--time_budget', type=int, help='The total time cost budge for searching (in seconds).')
  # BOHB
  parser.add_argument('--strategy', default="sampling", type=str, nargs='?', help='optimization strategy for the acquisition function')
  parser.add_argument('--min_bandwidth', default=.3, type=float, nargs='?', help='minimum bandwidth for KDE')
  parser.add_argument('--num_samples', default=64, type=int, nargs='?', help='number of samples for the acquisition function')
  parser.add_argument('--random_fraction', default=.33, type=float, nargs='?', help='fraction of random configurations')
  parser.add_argument('--bandwidth_factor', default=3, type=int, nargs='?', help='factor multiplied to the bandwidth')
  parser.add_argument('--n_iters', default=100, type=int, nargs='?', help='number of iterations for optimization method')
  # log
  parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')
  parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')
  parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset (tiny-nas-benchmark).')
  parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')
  parser.add_argument('--rand_seed', type=int, help='manual seed')
  args = parser.parse_args()
  #if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)
  if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):
    nas_bench = None
  else:
    print ('{:} build NAS-Benchmark-API from {:}'.format(time_string(), args.arch_nas_dataset))
    nas_bench = API(args.arch_nas_dataset)
  # Bug fix: argparse leaves rand_seed as None when --rand_seed is omitted,
  # and `None < 0` raises TypeError in Python 3; treat an absent seed like a
  # negative one (run the 500-seed batch, which assigns seeds itself).
  if args.rand_seed is None or args.rand_seed < 0:
    save_dir, all_indexes, num, all_times = None, [], 500, []
    for i in range(num):
      print ('{:} : {:03d}/{:03d}'.format(time_string(), i, num))
      args.rand_seed = random.randint(1, 100000)
      save_dir, index, ctime = main(args, nas_bench)
      all_indexes.append( index )
      all_times.append( ctime )
    print ('\n average time : {:.3f} s'.format(sum(all_times)/len(all_times)))
    torch.save(all_indexes, save_dir / 'results.pth')
  else:
    main(args, nas_bench)
| 48.755365 | 200 | 0.678961 |
a0289cfd55923cc918fde110f6edd5429f055e3a | 212 | py | Python | tcai_AtBSwPython/lists.py | CailleauThierry/tcai_AtBSwPython | e576c5c4e274db3f46ed0c4d559084b5c9b199bf | [
"Apache-2.0"
] | null | null | null | tcai_AtBSwPython/lists.py | CailleauThierry/tcai_AtBSwPython | e576c5c4e274db3f46ed0c4d559084b5c9b199bf | [
"Apache-2.0"
] | null | null | null | tcai_AtBSwPython/lists.py | CailleauThierry/tcai_AtBSwPython | e576c5c4e274db3f46ed0c4d559084b5c9b199bf | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 06_lists.ipynb (unless otherwise specified).
__all__ = ['spam']

# Cell
# The example list: ['cat','bat','rat','elephant']
spam = ['cat', 'bat', 'rat', 'elephant']
spam  # bare expression (notebook cell echo); has no effect when run as a script
print(spam)
spam[0] | 21.2 | 88 | 0.660377 |
21521d51e2a1192355fce836292fca35560334a0 | 2,067 | py | Python | MLibrary/classification.py | imis-lab/book-chapter | 8260a60ec91dd29616eeed80f34bdea00fb73cd7 | [
"MIT"
] | null | null | null | MLibrary/classification.py | imis-lab/book-chapter | 8260a60ec91dd29616eeed80f34bdea00fb73cd7 | [
"MIT"
] | null | null | null | MLibrary/classification.py | imis-lab/book-chapter | 8260a60ec91dd29616eeed80f34bdea00fb73cd7 | [
"MIT"
] | null | null | null | from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
class ClassifierGenerator:
    """Factory for widely-used scikit-learn text classifiers."""

    def naive_bayes(self):
        """Generate a multinomial naive Bayes text classifier.

        :return: a multinomial naive Bayes classifier
        """
        return MultinomialNB()

    def knn(self, n_neighbors: int = 5, weights: str = 'uniform'):
        """Generate a k nearest neighbors text classifier.

        :param n_neighbors: number of neighbors considered
        :param weights: how to treat the considered neighbors
        :return: a k nearest neighbors classifier
        """
        return KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)

    def linear_svm(self):
        """Generate a linear support vector machine text classifier.

        :return: a linear support vector machine classifier
        """
        return LinearSVC()

    def decision_tree(self, max_depth: int = 5, random_state: int = 0):
        """Generate a decision tree text classifier.

        :param max_depth: the maximum depth of the tree
        :param random_state: the random state
        :return: a decision tree classifier
        """
        return DecisionTreeClassifier(max_depth=max_depth, random_state=random_state)

    def logistic_regression(self, multi_class: str = 'multinomial', solver: str = 'lbfgs', random_state: int = 0):
        """Generate a logistic regression text classifier.

        :param multi_class: the type of classification
        :param solver: the utilized solver
        :param random_state: the random state
        :return: a logistic regression classifier
        """
        return LogisticRegression(random_state=random_state, solver=solver, multi_class=multi_class)

    def neural_network(self):
        """Generate a neural network text classifier (not implemented).

        Bug fix: the original body was ``pass``, silently handing callers
        ``None`` as a "classifier"; raise instead so the failure is explicit.
        """
        raise NotImplementedError('neural_network classifier is not implemented yet')
| 34.45 | 114 | 0.688437 |
a3b31e6f82645684a07c77b70b1a27b8dbb10e7e | 2,100 | py | Python | zomboid/scripting/objects/sound.py | FWolfe/pyZomboid | 4ee0e113321cb50d860762cdef2e88bd504c20d0 | [
"MIT"
] | 1 | 2021-04-24T01:46:16.000Z | 2021-04-24T01:46:16.000Z | zomboid/scripting/objects/sound.py | FWolfe/pyZomboid | 4ee0e113321cb50d860762cdef2e88bd504c20d0 | [
"MIT"
] | null | null | null | zomboid/scripting/objects/sound.py | FWolfe/pyZomboid | 4ee0e113321cb50d860762cdef2e88bd504c20d0 | [
"MIT"
] | 1 | 2021-04-24T01:46:19.000Z | 2021-04-24T01:46:19.000Z | # -*- coding: utf-8 -*-
from zomboid.audio.sound import GameSound, MasterVolume, GameSoundClip
from .base import BaseScriptObject
from ..parser import ScriptParser, Block
class GameSoundScript(BaseScriptObject):
    """Parses a sound-script definition into a GameSound object."""

    gameSound: GameSound = None  # populated by __init__ and filled in by Load()

    def __init__(self):
        self.gameSound = GameSound()

    def Load(self, name: str, data: str) -> None:
        """Parse ``data`` and fill ``self.gameSound``, naming it ``name``."""
        self.gameSound.name = name
        root = ScriptParser.parse(data).children[0]
        # Simple "key = value" pairs configure the sound itself.
        for entry in root.values:
            key, raw = entry.string.split(' = ')
            if key == 'category':
                self.gameSound.category = raw
            elif key == 'is3D':
                self.gameSound.is3D = raw.lower() == 'true'
            elif key == 'loop':
                self.gameSound.loop = raw.lower() == 'true'
            elif key == 'master':
                self.gameSound.master = MasterVolume.valueOf(raw)
        # Nested "clip" blocks each describe one audio clip.
        for child in root.children:
            if child.type == 'clip':
                self.gameSound.clips.add(self.LoadClip(child))

    def LoadClip(self, block: Block) -> GameSoundClip:
        """Build a GameSoundClip from one ``clip`` sub-block."""
        clip = GameSoundClip(self.gameSound)
        # Keys match the clip attribute names exactly, so the raw string can
        # be converted and assigned via setattr; unknown keys are ignored,
        # matching the original if/elif chain.
        converters = {
            'distanceMax': int,  # NOTE kept from original: should arguably be a float
            'distanceMin': int,  # NOTE kept from original: should arguably be a float
            'event': str,
            'file': str,
            'pitch': float,
            'volume': float,
            'reverbFactor': float,
            'reverbMaxRange': float,
        }
        for entry in block.values:
            key, raw = entry.string.split(' = ')
            convert = converters.get(key)
            if convert is not None:
                setattr(clip, key, convert(raw))
        return clip

    def reset(self) -> None:
        """Reset the underlying GameSound state."""
        self.gameSound.reset()
| 30 | 77 | 0.538095 |
94cd07c85c308dffea4cd8812b61ccdb4cbfe785 | 932 | py | Python | 0404 Linked List Union.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 1 | 2020-12-29T21:17:26.000Z | 2020-12-29T21:17:26.000Z | 0404 Linked List Union.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | null | null | null | 0404 Linked List Union.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 4 | 2021-09-09T17:42:43.000Z | 2022-03-18T04:54:03.000Z | # class LLNode:
# def __init__(self, val, next=None):
# self.val = val
# self.next = next
class Solution:
    def solve(self, ll0, ll1):
        """Merge two sorted linked lists into a single sorted list with no
        consecutive duplicates (the union of the two sorted inputs)."""
        dummy = LLNode(None)
        tail = dummy

        def append_unique(value):
            # Only emit values that differ from the last emitted one.
            nonlocal tail
            if value != tail.val:
                tail.next = LLNode(value)
                tail = tail.next

        # Standard merge: on ties the second list's node is consumed first,
        # exactly like the original's else-branch.
        while ll0 and ll1:
            if ll0.val < ll1.val:
                append_unique(ll0.val)
                ll0 = ll0.next
            else:
                append_unique(ll1.val)
                ll1 = ll1.next
        # Drain whichever list still has nodes.
        while ll0:
            append_unique(ll0.val)
            ll0 = ll0.next
        while ll1:
            append_unique(ll1.val)
            ll1 = ll1.next
        return dummy.next
| 25.888889 | 46 | 0.409871 |
6122707d1becb62e809d1cc05bc0af78a65488cd | 1,608 | py | Python | banditMachine/banditArm.py | arielbarreiros96/nArmedBanditProblem | af76455ac17f1f57cce57dc56713f7af133b4021 | [
"MIT"
] | 1 | 2021-03-25T03:03:16.000Z | 2021-03-25T03:03:16.000Z | banditMachine/banditArm.py | arielbarreiros96/nArmedBanditProblem | af76455ac17f1f57cce57dc56713f7af133b4021 | [
"MIT"
] | null | null | null | banditMachine/banditArm.py | arielbarreiros96/nArmedBanditProblem | af76455ac17f1f57cce57dc56713f7af133b4021 | [
"MIT"
] | null | null | null | import numpy as np
import random as rnd
__author__ = "Ariel Barreiros and Richar Sosa"
__status__ = "Development"
class BanditArm:
    """One arm of an n-armed bandit slot machine.

    Pays out Gaussian-distributed rewards and records every reward it has
    delivered so far.
    """

    def __init__(self, arm_id, mean, standard_deviation):
        """
        :param arm_id: identifier for this arm
        :param mean: mean of the rewards this arm generates
        :param standard_deviation: spread of the rewards around the mean
        """
        self._arm_id = arm_id
        self._mean = mean
        self._std = standard_deviation
        self._history = np.empty((1, 0))

    def calculate_immediate_reward(self):
        """Pull the arm: draw one Gaussian reward, log it, and return it."""
        reward = rnd.gauss(self._mean, self._std)
        self._history = np.append(self._history, reward)
        return reward

    def get_reward_history(self):
        """Return the array of rewards delivered so far."""
        return self._history

    def get_arm_id(self):
        """Return this arm's identifier."""
        return self._arm_id
| 28.210526 | 116 | 0.639925 |
973ce3c09ea01a972981c1650e62c3daa415a510 | 2,384 | py | Python | website/snat/views.py | vincent1016/FlexGW-1 | 16f7ffe7b261bddebc96c0f87ff0c62dc22157dc | [
"BSD-3-Clause"
] | 1 | 2021-11-08T12:23:37.000Z | 2021-11-08T12:23:37.000Z | website/snat/views.py | vincent1016/FlexGW2.0 | 16f7ffe7b261bddebc96c0f87ff0c62dc22157dc | [
"BSD-3-Clause"
] | null | null | null | website/snat/views.py | vincent1016/FlexGW2.0 | 16f7ffe7b261bddebc96c0f87ff0c62dc22157dc | [
"BSD-3-Clause"
] | 1 | 2022-03-03T14:27:30.000Z | 2022-03-03T14:27:30.000Z | # -*- coding: utf-8 -*-
"""
website.snat.views
~~~~~~~~~~~~~~~~~~
vpn views:
/snat
"""
from flask import Blueprint, render_template
from flask import url_for, redirect, flash
from flask import request
from flask.ext.login import login_required
from flask.ext.babel import gettext
from website import __version__
from website.snat.forms import ConsoleForm
from website.snat.forms import SnatForm
from website.snat.services import iptables_get_snat_rules, iptables_set_snat_rules
from website.snat.services import ensure_iptables, reset_iptables
# Blueprint collecting all SNAT (source NAT) views under the /snat URL prefix.
snat = Blueprint('snat', __name__, url_prefix='/snat',
                 template_folder='templates',
                 static_folder='static')
@snat.route('/')
@login_required
def index():
    """List the currently configured SNAT rules."""
    rules = iptables_get_snat_rules()
    # An empty list means iptables answered successfully but has no rules yet.
    if isinstance(rules, list) and not rules:
        # NOTE(review): the msgid contains a typo ("ruls"); changing it would
        # also require updating the translation catalogs, so it is kept.
        flash(gettext('there is no snat ruls yet.'), 'info')
    return render_template('index.html', rules=rules, version=__version__)
@snat.route('/add', methods=['GET', 'POST'])
@login_required
def add():
    """Show the add-rule form (GET) and create a new SNAT rule (POST)."""
    form = SnatForm()
    if form.validate_on_submit():
        # Only flash a success message if iptables actually accepted the rule.
        if iptables_set_snat_rules('add', form.source.data, form.gateway.data):
            message = gettext("snat rule is added: %(source)s ==> %(gateway)s.", source=form.source.data, gateway=form.gateway.data)
            flash(message, 'success')
        return redirect(url_for('snat.index'))
    return render_template('add.html', form=form, version=__version__)
@snat.route('/del', methods=['POST'])
@login_required
def delete():
    """Delete one SNAT rule identified by its source and gateway."""
    source = request.form['source']
    gateway = request.form['gateway']
    if iptables_set_snat_rules('del', source, gateway):
        # Bug fix: the flashed message read "snat rule is gettextd" — an
        # editing slip where the word "deleted" was replaced by "gettext".
        message = gettext("snat rule is deleted: %(source)s ==> %(gateway)s.", source=source, gateway=gateway)
        flash(message, 'success')
    return redirect(url_for('snat.index'))
@snat.route('/console', methods=['GET', 'POST'])
@login_required
def console():
    """Console page: start ("ensure") or reset all SNAT rules at once."""
    form = ConsoleForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            # Each submit button on the form triggers one bulk action.
            if form.ensure.data:
                ensure_iptables()
                flash(gettext('all snat started!'), 'success')
            if form.reset.data:
                reset_iptables()
                flash(gettext('all snat reseted!'), 'success')
    return render_template('console.html', form=form, version=__version__)
| 31.786667 | 132 | 0.663171 |
3ccb3c954ee97e82d1b7e489c5ffb9f929818094 | 3,277 | py | Python | ColoreRegioni/__init__.py | MCilento93/ColoriRegioni | cd20344c083c1ed8a6c1301593823e8f2d1973bf | [
"MIT"
] | 7 | 2021-01-08T16:43:52.000Z | 2021-03-01T21:30:54.000Z | ColoreRegioni/__init__.py | MCilento93/ColoriRegioni | cd20344c083c1ed8a6c1301593823e8f2d1973bf | [
"MIT"
] | 3 | 2021-01-29T12:00:38.000Z | 2021-03-10T08:33:27.000Z | ColoreRegioni/__init__.py | MCilento93/ColoriRegioni | cd20344c083c1ed8a6c1301593823e8f2d1973bf | [
"MIT"
] | 1 | 2022-02-06T20:54:24.000Z | 2022-02-06T20:54:24.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 20:31:58 2020
@author: mario
"""
from bs4 import BeautifulSoup as Soup
import requests
class ColoreRegioni():
    """Scrapes the Italian government's COVID FAQ page and exposes each
    region's tier color (and matching emoji) parsed from the inline SVG map."""

    url='http://www.governo.it/it/articolo/domande-frequenti-sulle-misure-adottate-dal-governo/15638?gclid=CjwKCAiAwrf-BRA9EiwAUWwKXicC1bzopYynHP9pvRxHUza7Ar4dte9hWHi55Uj4xfuAHanOCf7a1BoCTggQAvD_BwE'

    @staticmethod
    def get_color(onclick):
        """Map a path's ``onclick`` text to a (color name, emoji) tuple.

        Bug fix: the original returned a bare ``None`` when the attribute was
        missing, which crashed the tuple unpacking in ``__init__``; it now
        always returns a 2-tuple.
        """
        if onclick is None:
            return None, None
        if 'rosso' in onclick:
            return 'rosso', '🔴'
        if 'arancione' in onclick:
            return 'arancione', '🟠'
        if 'giallo' in onclick:
            return 'giallo', '🟡'
        if 'verde' in onclick:
            return 'verde', '🟢'
        # The original fell through via `'' in onclick`, which is true for
        # every string, so any other handler text means the region is white.
        return 'bianca', '⚪️'

    def __init__(self):
        """Fetch the page and build the region -> [color, emoji] mapping."""
        response = requests.request("GET", ColoreRegioni.url)
        # NOTE(review): [1:-1] drops the first and last character of the
        # payload — presumably stray wrapper characters; confirm.
        page = response.text[1:-1]
        soup = Soup(page, "html.parser")
        results = {}
        for elem in soup.find_all("path"):
            reg_name = elem.attrs.get('id')
            reg_color, reg_emoji = ColoreRegioni.get_color(elem.attrs.get('onclick'))
            if reg_name:
                results[reg_name] = [reg_color, reg_emoji]
        self.__dict = results
        self.__cambio_denominazione()

    def __cambio_denominazione(self):
        """Renaming according to istat nomenclature https://www.istat.it/it/archivio/6789"""
        keys_dict = {'piemonte': 'Piemonte',
                     'veneto': 'Veneto',
                     'lombardia': 'Lombardia',
                     'emiliaromagna': 'Emilia-Romagna',
                     'umbria': 'Umbria',
                     'lazio': 'Lazio',
                     'toscana': 'Toscana',
                     # Bug fix: was 'Abbruzzo' — the ISTAT name is 'Abruzzo'.
                     'abruzzo': 'Abruzzo',
                     'molise': 'Molise',
                     'basilicata': 'Basilicata',
                     'puglia': 'Puglia',
                     'marche': 'Marche',
                     'sicilia': 'Sicilia',
                     'sardegna': 'Sardegna',
                     'liguria': 'Liguria',
                     'trento': 'Trentino-Alto Adige/Südtirol',
                     'bolzano': 'Bolzano',
                     'friuliveneziagiulia': 'Friuli-Venezia Giulia',
                     'valledaosta': "Valle d'Aosta/Vallée d'Aoste",
                     'campania': 'Campania',
                     'calabria': 'Calabria'}
        self.__dict = {keys_dict[key]: value for key, value in self.__dict.items()}

    @property
    def denominazioni(self):
        """List of region names (ISTAT denominations)."""
        return list(self.__dict.keys())

    @property
    def colori_emoji(self):
        """Full mapping: region -> [color name, emoji]."""
        return self.__dict

    @property
    def colori(self):
        """Mapping: region -> color name only."""
        return {region: pair[0] for region, pair in self.__dict.items()}

    @property
    def emoji(self):
        """Mapping: region -> emoji only."""
        return {region: pair[1] for region, pair in self.__dict.items()}
if __name__=='__main__':
    # Demo: each ColoreRegioni() call re-fetches the page, so the four
    # prints below perform four separate HTTP requests.
    print(f'\nDenominazione regioni: {ColoreRegioni().denominazioni}')
    print(f'\nDizionario completo: {ColoreRegioni().colori_emoji}')
    print(f'\nSolo colori: {ColoreRegioni().colori}')
    print(f'\nSolo emoji: {ColoreRegioni().emoji}')
| 32.127451 | 199 | 0.545621 |
8c7b4b520bbff6d4f1c3ae96101502fdd9c8fb32 | 6,395 | py | Python | asposewordscloud/models/requests/delete_form_field_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 14 | 2018-07-15T17:01:52.000Z | 2018-11-29T06:15:33.000Z | asposewordscloud/models/requests/delete_form_field_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 1 | 2018-09-28T12:59:34.000Z | 2019-10-08T08:42:59.000Z | asposewordscloud/models/requests/delete_form_field_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 2 | 2020-12-21T07:59:17.000Z | 2022-02-16T21:41:25.000Z | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="delete_form_field_request.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import json
from six.moves.urllib.parse import quote
from asposewordscloud import *
from asposewordscloud.models import *
from asposewordscloud.models.requests import *
from asposewordscloud.models.responses import *
class DeleteFormFieldRequest(BaseRequestObject):
"""
Request model for delete_form_field operation.
Initializes a new instance.
:param name The filename of the input document.
:param index Object index.
:param node_path The path to the node in the document tree.
:param folder Original document folder.
:param storage Original document storage.
:param load_encoding Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
:param password Password for opening an encrypted document.
:param dest_file_name Result path of the document after the operation. If this parameter is omitted then result of the operation will be saved as the source document.
:param revision_author Initials of the author to use for revisions.If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
:param revision_date_time The date and time to use for revisions.
"""
def __init__(self, name, index, node_path=None, folder=None, storage=None, load_encoding=None, password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
self.name = name
self.index = index
self.node_path = node_path
self.folder = folder
self.storage = storage
self.load_encoding = load_encoding
self.password = password
self.dest_file_name = dest_file_name
self.revision_author = revision_author
self.revision_date_time = revision_date_time
def create_http_request(self, api_client):
# verify the required parameter 'name' is set
if self.name is None:
raise ValueError("Missing the required parameter `name` when calling `delete_form_field`") # noqa: E501
# verify the required parameter 'index' is set
if self.index is None:
raise ValueError("Missing the required parameter `index` when calling `delete_form_field`") # noqa: E501
path = '/v4.0/words/{name}/{nodePath}/formfields/{index}'
path_params = {}
if self.name is not None:
path_params['name'] = self.name # noqa: E501
else:
path_params['name'] = '' # noqa: E501
if self.index is not None:
path_params['index'] = self.index # noqa: E501
else:
path_params['index'] = '' # noqa: E501
if self.node_path is not None:
path_params['nodePath'] = self.node_path # noqa: E501
else:
path_params['nodePath'] = '' # noqa: E501
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.folder is not None:
query_params.append(('folder', self.folder)) # noqa: E501
if self.storage is not None:
query_params.append(('storage', self.storage)) # noqa: E501
if self.load_encoding is not None:
query_params.append(('loadEncoding', self.load_encoding)) # noqa: E501
if self.password is not None:
query_params.append(('password', self.password)) # noqa: E501
if self.dest_file_name is not None:
query_params.append(('destFileName', self.dest_file_name)) # noqa: E501
if self.revision_author is not None:
query_params.append(('revisionAuthor', self.revision_author)) # noqa: E501
if self.revision_date_time is not None:
query_params.append(('revisionDateTime', self.revision_date_time)) # noqa: E501
header_params = {}
form_params = []
body_params = None
return {
"method": "DELETE",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": 'None' # noqa: E501
}
def get_response_type(self):
    """Name of the swagger response type; 'None' means an empty body."""
    response_type = 'None'  # noqa: E501
    return response_type
def deserialize_response(self, api_client, response):
    """This DELETE endpoint returns no payload, so deserialization yields None.

    api_client and response are accepted only for interface uniformity.
    """
    return None
| 46.678832 | 255 | 0.651759 |
a5685dd4288fcd2b43d57449dd13b0b19d19de2c | 609 | py | Python | lib/galaxy/job_metrics/collectl/stats.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/job_metrics/collectl/stats.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | 2 | 2017-05-18T16:12:55.000Z | 2022-03-08T12:08:43.000Z | lib/galaxy/job_metrics/collectl/stats.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | """ Primitive module for tracking running statistics without storing values in
memory.
"""
class StatisticsTracker:
    """Track running min/max/count/sum (and mean) without storing values."""

    def __init__(self):
        self.min = None
        self.max = None
        self.count = 0
        self.sum = 0

    def track(self, value):
        """Fold a single observation into the running statistics."""
        self.min = value if self.min is None else min(self.min, value)
        self.max = value if self.max is None else max(self.max, value)
        self.count += 1
        self.sum += value

    @property
    def avg(self):
        """Mean of all tracked values, or None if nothing has been tracked."""
        return self.sum / self.count if self.count > 0 else None
500535be6b9a195034a2774a7cbb1f1ca13f417c | 4,708 | py | Python | TSP_rawcord.py | Saurabhbhati/pointer-networks-experiments | 2b51d5088f9e106441813d53d6a02f6631b6d368 | [
"BSD-2-Clause"
] | null | null | null | TSP_rawcord.py | Saurabhbhati/pointer-networks-experiments | 2b51d5088f9e106441813d53d6a02f6631b6d368 | [
"BSD-2-Clause"
] | null | null | null | TSP_rawcord.py | Saurabhbhati/pointer-networks-experiments | 2b51d5088f9e106441813d53d6a02f6631b6d368 | [
"BSD-2-Clause"
] | null | null | null | # Pointer network for TSP
# uses raw coordinates as inputs
import numpy as np
from keras.models import Model
from keras.layers import LSTM, Input, Dense,Bidirectional, TimeDistributed, Conv1D
from keras.utils.np_utils import to_categorical
from PointerLSTM import PointerLSTM
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import SGD, Adam, RMSprop
from keras import initializations
TSP_size = 10      # number of cities in each TSP instance
hidden_size = 128  # encoder / decoder hidden dimension
## Please download the dataset here: http://goo.gl/NDcOIG and put in tsp_data/
# input data loader
def get_TSP_data(TSP_size, mode):
    """Load TSP instances and optimal tours from the tsp_data/ directory.

    Parameters
    ----------
    TSP_size : int
        Number of cities per instance.
    mode : str
        Split to load ("train" or "test"); the file read is
        ``tsp_data/tsp_<TSP_size>_<mode>_exact.txt``.

    Returns
    -------
    x_train : ndarray of shape (N, TSP_size, 2)
        City coordinates for each instance.
    y_train : ndarray of shape (N, TSP_size, TSP_size)
        One-hot encoding of the optimal tour (one row per decoding step).
    """
    fname = "tsp_data/tsp_" + str(TSP_size) + "_" + mode + "_exact.txt"
    # First pass: count instances so the arrays can be preallocated.
    with open(fname) as infile:
        num_lines = sum(1 for _ in infile)
    x_train = np.zeros((num_lines, TSP_size, 2))
    y_train = np.zeros((num_lines, TSP_size, TSP_size))
    eye = np.eye(TSP_size)
    with open(fname) as infile:
        for count, line in enumerate(infile):
            coords, tour = line.split(' output ')
            # `np.float` was removed in NumPy >= 1.24; use the builtin float.
            x_train[count, :, :] = np.array(coords.split(), dtype=float).reshape(-1, 2)
            # Tour indices are 1-based and repeat the first city at the end;
            # drop the closing index and shift to 0-based before one-hot.
            labels = np.array(tour.split()[:-1], dtype=int) - 1
            # One-hot with a fixed class count (TSP_size) instead of Keras'
            # to_categorical, which inferred the count from max(labels)+1.
            y_train[count, :, :] = eye[labels]
    return x_train, y_train
def get_path_loss(x, ind):
    """Total Euclidean length of the closed tour visiting x[ind] in order.

    Includes the final leg from the last city back to the first.
    """
    tour = x[ind, :]
    # Pairwise legs between consecutive cities; np.roll(-1) pairs each city
    # with its successor and wraps the last city back to the first.
    legs = np.sqrt(np.sum((tour - np.roll(tour, -1, axis=0)) ** 2, axis=1))
    return np.sum(legs)
# Load the full training split once at module level.
x_train,y_train = get_TSP_data(TSP_size,'train')
def my_init(shape, name=None):
    """Keras-1 weight initializer: uniform in [-0.08, 0.08] (Ptr-Net setting)."""
    return initializations.uniform(shape, scale=0.08, name=name)
# Build the pointer network: a Conv1D encoder over the raw (x, y) city
# coordinates feeding a PointerLSTM decoder that emits, per step, a
# distribution over input positions. (Keras 1.x API.)
main_input = Input( shape=(TSP_size, 2), name='main_input')
encoder = Conv1D(hidden_size,filter_length=2,border_mode="same")(main_input)
#encoder = TimeDistributed(Dense(output_dim=20))(main_input)
#encoder = Dense(output_dim=hidden_size,activation='tanh')(main_input)
#ncoder = Dense(output_dim=hidden_size,activation='tanh')(encoder)
#encoder = LSTM(output_dim = hidden_size, return_sequences = True, name="encoder2")(encoder)
decoder = PointerLSTM(hidden_size, output_dim=hidden_size,name="decoder")(encoder)
#decoder = Dense(10,activation='softmax')(encoder)
model = Model( input=main_input, output=decoder )
## data append experiments (disabled: kept as a string literal, never executed)
'''
N = 500000
ind = np.arange(N)
x_train = np.concatenate((x_train,x_train[ind,:,:]),axis=0)
y_train = np.concatenate((y_train,y_train[ind,:,:]),axis=0)
for i in range(N):
    ind = np.arange(TSP_size-1)
    np.random.shuffle(ind)
    ind = np.concatenate(([0],ind+1))
    x_train[i,:,:] = x_train[i,ind,:]
    y_train[i,:,:] = y_train[i,ind,:]
'''
checkpointer = ModelCheckpoint(filepath='./mlp_test1.h5', verbose=1, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
# Two training rounds; the learning rate is halved between rounds.
lr=0.01
for i in range(0,2):
    sgd = SGD(lr=lr, decay=0, momentum=0.9, nesterov=True,clipvalue=2)
    rms = RMSprop()
    #adam = Adam()
    # NOTE(review): the model is compiled with `rms`; the SGD instance above
    # (and hence `lr`) is created but unused -- confirm which was intended.
    model.compile(optimizer=rms,loss='categorical_crossentropy',metrics=['accuracy'])
    #if i>0:
    #model.load_weights('./mlp_DNN_segmentation_basic.h5')
    model.fit( x_train, y_train, validation_split=0.1,nb_epoch = 100, batch_size = 128,callbacks=[early_stopping])
    lr=lr/2
# Evaluate on the first 1000 training instances: compare the tour length of
# the ground-truth permutation against the beam-search decoded prediction.
# NOTE(review): `beam_search_decoder` is defined further down in this file,
# so running the script top-to-bottom raises NameError here -- this section
# appears intended for interactive use; confirm before relying on it.
pred =model.predict(x_train,batch_size=512,verbose=True)
err_true = 0
err_pred = 0
for i in range(1000):
    x = x_train[i,:,:]
    ind_true = np.argmax(y_train[i,:,:],axis=1)
    result = beam_search_decoder(pred[i,:,:], 3)
    ind_pred = result[0][0]
    err_true += get_path_loss(x,ind_true)
    err_pred += get_path_loss(x,ind_pred)

# Debug: same comparison, but keep per-instance tour lengths in arrays.
N = 1000
pred =model.predict(x_train[:N,:,:],batch_size=512,verbose=True)
err_true = np.zeros((N,1))
err_pred = np.zeros((N,1))
for i in range(1000):
    x = x_train[i,:,:]
    ind_true = np.argmax(y_train[i,:,:],axis=1)
    result = beam_search_decoder(pred[i,:,:], 3)
    ind_pred = result[0][0]
    err_true[i,:] = get_path_loss(x,ind_true)
    err_pred[i,:] = get_path_loss(x,ind_pred)
def beam_search_decoder(data, k):
    """Beam search over per-step probability rows, forbidding revisits.

    Parameters
    ----------
    data : array of shape (steps, nodes)
        Per-step probabilities over the nodes.
    k : int
        Beam width.

    Returns
    -------
    list of [sequence, score] pairs, best (lowest score) first; the score
    starts at 1.0 and accumulates negative log-probabilities.
    """
    beams = [[list(), 1.0]]
    for row in data:
        expanded = list()
        for seq, score in beams:
            # Only extend with nodes not already on this partial tour.
            for j in np.setdiff1d(np.arange(len(row)), seq):
                expanded.append([seq + [j], score - np.log(row[j])])
        # Keep the k lowest-cost partial sequences.
        beams = sorted(expanded, key=lambda cand: cand[1])[:k]
    return beams
# Final evaluation on the held-out test split: accumulate total tour length
# for the exact solutions vs. the beam-search (width 3) predictions.
x_test,y_test = get_TSP_data(TSP_size,'test')
pred =model.predict(x_test,batch_size=512,verbose=True)
err_true = 0
err_pred = 0
for i in range(10000):
    x = x_test[i,:,:]
    ind_true = np.argmax(y_test[i,:,:],axis=1)
    result = beam_search_decoder(pred[i,:,:], 3)
    ind_pred = result[0][0]
    err_true += get_path_loss(x,ind_true)
    err_pred += get_path_loss(x,ind_pred)
e44d6a3a411826df973334195a721a3f977409ad | 2,963 | py | Python | ncp/scripts/generate_toy_plots.py | pierresegonne/ncp | 2decbf7dbf2125353be6f5f030c5bce12beadefd | [
"Apache-2.0"
] | null | null | null | ncp/scripts/generate_toy_plots.py | pierresegonne/ncp | 2decbf7dbf2125353be6f5f030c5bce12beadefd | [
"Apache-2.0"
] | null | null | null | ncp/scripts/generate_toy_plots.py | pierresegonne/ncp | 2decbf7dbf2125353be6f5f030c5bce12beadefd | [
"Apache-2.0"
] | null | null | null | import pathlib
import matplotlib.pyplot as plt
import numpy as np
from ncp import datasets
plt.rcParams.update(
{
"text.usetex": True,
}
)
def dewhiten_x(x):
    """Map whitened inputs back to data space (scale by std, shift by mean)."""
    scale, offset = 3.0126251862413524, 5.04480454393157
    return x * scale + offset
def dewhiten_y(y):
    """Map whitened targets back to data space (scale by std, shift by mean)."""
    scale, offset = 4.147721555561024, 0.864837661287082
    return y * scale + offset
def data_mean(x):
    """Ground-truth mean of the toy regression target: x * sin(x)."""
    return np.sin(x) * x
def data_std(x):
    """Ground-truth std of the toy target: 0.3 * sqrt(1 + x^2) (always >= 0)."""
    return 0.3 * np.hypot(1.0, x)
def main():
    """Load saved BBB / BBB+NCP predictions and render the toy-data figure.

    Reads `.npz` outputs produced by earlier runs under
    ``logs_toy_plot{,_more_ood}/ours``, overlays their predicted means on the
    analytic ground truth and the training points, and shows the plot.
    """
    save_fn = "outputs_for_aistats_plot.npz"
    root_path = pathlib.Path(__file__).parent.parent.parent.resolve()
    normal_ood_path = root_path / "logs_toy_plot" / "ours"
    more_ood_path = root_path / "logs_toy_plot_more_ood" / "ours"

    # Saved model outputs; keys used below: "inputs" and "mean".
    outputs = {}
    outputs["bbb"] = np.load(normal_ood_path / "bbb-0" / save_fn)
    outputs["bbb_ncp_normal_ood"] = np.load(normal_ood_path / "bbb_ncp-0" / save_fn)
    outputs["bbb_ncp_more_ood"] = np.load(more_ood_path / "bbb_ncp-0" / save_fn)

    dataset = datasets.generate_toy_ours_dataset()

    # Analytic ground truth with a 95% (1.96 sigma) band.
    x_plot = np.linspace(-4, 14, 1000)
    y_plot = data_mean(x_plot)
    y_plot_mstd = y_plot - 1.96 * data_std(x_plot)
    y_plot_pstd = y_plot + 1.96 * data_std(x_plot)

    aspect_ratio = 2.35 / 1
    colour_navy_blue = (3 / 255, 15 / 255, 79 / 255)

    fig, ax = plt.subplots(figsize=(2 * aspect_ratio, 2))
    ax.set_facecolor("#F7F8F6")
    ax.grid(True, color="white")
    ax.set_xlim([-4, 14])
    ax.set_xticks([0, 5, 10])
    ax.set_xlabel(r"$x$", fontsize=14, labelpad=2)
    ax.set_ylim(-15, 15)
    ax.set_yticks([-10, 0, 10])
    ax.set_ylabel(r"$y$", fontsize=14, labelpad=1)
    # Ground-truth mean (dashed) with its confidence band (dotted).
    ax.plot(
        x_plot,
        y_plot,
        color="black",
        linestyle="dashed",
        linewidth=1,
        label=r"$\mathrm{Truth}$",
    )
    ax.plot(x_plot, y_plot_mstd, color="black", linestyle="dotted", linewidth=0.5)
    ax.plot(x_plot, y_plot_pstd, color="black", linestyle="dotted", linewidth=0.5)
    # Training points, de-whitened back to data space.
    ax.plot(
        dewhiten_x(dataset.train.inputs),
        dewhiten_y(dataset.train.targets),
        "o",
        markersize=2.5,
        markerfacecolor=(*colour_navy_blue, 0.6),
        markeredgewidth=1,
        markeredgecolor=(*colour_navy_blue, 0.1),
        zorder=5,
    )
    # One predicted-mean curve per saved run.
    colors = ["forestgreen", "gold", "darkred"]
    labels = [
        r"$\mathrm{BBB}$",
        r"$\mathrm{BBB\!+\!NCP}$",
        r"$\mathrm{BBB\!+\!NCP^*}$",
    ]
    for i, run in enumerate(["bbb", "bbb_ncp_normal_ood", "bbb_ncp_more_ood"]):
        ax.plot(
            dewhiten_x(outputs[run]["inputs"]),
            dewhiten_y(outputs[run]["mean"]),
            color=colors[i],
            label=labels[i],
            alpha=0.95,
            linewidth=2,
            zorder=5,
        )
    ax.legend(
        loc="upper left",
        # bbox_to_anchor=(-0.05, 1.55),
        ncol=4,
        edgecolor="black",
        handlelength=1.2,
        fancybox=False,
        columnspacing=0.85,
    )
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
main()
| 25.324786 | 84 | 0.591968 |
8aa6215af2b20fedf4748b9daf944b4de79f1592 | 7,702 | py | Python | models/RanGer.py | Culturenotes/Network-Slimming | 9004ab4c1f6bcbf8f317a37984ed3f8db39ecbe2 | [
"Apache-2.0"
] | 12 | 2020-10-03T14:32:01.000Z | 2021-12-16T09:13:04.000Z | models/RanGer.py | Culturenotes/Network-Slimming | 9004ab4c1f6bcbf8f317a37984ed3f8db39ecbe2 | [
"Apache-2.0"
] | null | null | null | models/RanGer.py | Culturenotes/Network-Slimming | 9004ab4c1f6bcbf8f317a37984ed3f8db39ecbe2 | [
"Apache-2.0"
] | null | null | null | import math
import torch
from torch.optim.optimizer import Optimizer
def centralized_gradient(x, use_gc=True, gc_conv_only=False):
    """Apply gradient centralization in place and return x.

    Subtracts the mean over all non-leading dimensions. With gc_conv_only,
    only tensors with ndim > 3 (conv kernels) are touched; otherwise any
    tensor with ndim > 1 is centralized. 1-D tensors (biases) are untouched.
    """
    if use_gc:
        # Conv-only mode raises the rank threshold from 1 to 3.
        ndim_threshold = 3 if gc_conv_only else 1
        if x.dim() > ndim_threshold:
            reduce_dims = tuple(range(1, x.dim()))
            x.add_(-x.mean(dim=reduce_dims, keepdim=True))
    return x
class Ranger(Optimizer):
    """Ranger optimizer: RAdam + LookAhead + optional Gradient Centralization.

    Combines the rectified-Adam variance correction, LookAhead slow/fast
    weight interpolation (every ``k`` steps, mixing factor ``alpha``), and
    gradient centralization applied either before the moment updates
    (``gc_loc=True``) or to the final update direction (``gc_loc=False``).
    """

    def __init__(self, params, lr=1e-3,  # lr
                 alpha=0.5, k=6, N_sma_threshhold=5,  # Ranger options
                 betas=(.95, 0.999), eps=1e-5, weight_decay=1e-5,  # Adam options
                 # Gradient centralization on or off, applied to conv layers only or conv + fc layers
                 use_gc=True, gc_conv_only=False, gc_loc=True
                 ):
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')

        # parameter comments:
        # beta1 (momentum) of .95 seems to work better than .90...
        # N_sma_threshold of 5 seems better in testing than 4.
        # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.

        # prep defaults and init torch.optim base
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas,
                        N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)

        # adjustable threshold: below this SMA length the rectification term
        # is considered unreliable and plain (un-adapted) momentum is used
        self.N_sma_threshhold = N_sma_threshhold

        # look ahead params
        self.alpha = alpha
        self.k = k

        # radam buffer caches (N_sma, step_size) per step index modulo 10,
        # so params sharing a step count reuse the scalar computation
        self.radam_buffer = [[None, None, None] for ind in range(10)]

        # gc on or off
        self.gc_loc = gc_loc
        self.use_gc = use_gc
        self.gc_conv_only = gc_conv_only
        # level of gradient centralization
        #self.gc_gradient_threshold = 3 if gc_conv_only else 1

        print(
            f"Ranger optimizer loaded. \nGradient Centralization usage = {self.use_gc}")
        if (self.use_gc and self.gc_conv_only == False):
            print(f"GC applied to both conv and fc layers")
        elif (self.use_gc and self.gc_conv_only == True):
            print(f"GC applied to conv layers only")

    def __setstate__(self, state):
        """Restore optimizer state (e.g. after unpickling)."""
        print("set state called")
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step over all parameter groups.

        Note: ``closure`` is accepted for torch.optim API compatibility but
        is intentionally not evaluated here (see comment below), so the
        returned ``loss`` is always None.
        """
        loss = None
        # note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
        # Uncomment if you need to use the actual closure...

        # if closure is not None:
        #loss = closure()

        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()

                if grad.is_sparse:
                    raise RuntimeError(
                        'Ranger optimizer does not support sparse gradients')

                p_data_fp32 = p.data.float()

                state = self.state[p]  # get state dict for this param

                if len(state) == 0:  # if first time to run...init dictionary with our desired entries
                    # if self.first_run_check==0:
                    # self.first_run_check=1
                    #print("Initializing slow buffer...should not see this at load from saved model!")
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)

                    # look ahead weight storage now in state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)

                else:
                    # keep moment tensors in the same dtype as the fp32 copy
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
                        p_data_fp32)

                # begin computations
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # GC operation for Conv layers and FC layers
                # if grad.dim() > self.gc_gradient_threshold:
                #    grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))
                if self.gc_loc:
                    # centralize the raw gradient before the moment updates
                    grad = centralized_gradient(grad, use_gc=self.use_gc, gc_conv_only=self.gc_conv_only)

                state['step'] += 1

                # compute variance mov avg
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                # compute mean moving avg
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                # cached RAdam scalars: recompute only when this step count
                # has not been seen yet in the modulo-10 buffer slot
                buffered = self.radam_buffer[int(state['step'] % 10)]

                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * \
                        state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        # RAdam variance rectification term (also folds in
                        # the bias correction for the first moment)
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (
                            N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                # if group['weight_decay'] != 0:
                #    p_data_fp32.add_(-group['weight_decay']
                #                     * group['lr'], p_data_fp32)

                # apply lr: adaptive (Adam-like) update once the SMA is long
                # enough, plain momentum otherwise
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    G_grad = exp_avg / denom
                else:
                    G_grad = exp_avg

                if group['weight_decay'] != 0:
                    # decoupled weight decay folded into the update direction
                    G_grad.add_(p_data_fp32, alpha=group['weight_decay'])
                # GC operation
                if self.gc_loc == False:
                    # centralize the final update direction instead
                    G_grad = centralized_gradient(G_grad, use_gc=self.use_gc, gc_conv_only=self.gc_conv_only)

                p_data_fp32.add_(G_grad, alpha=-step_size * group['lr'])
                p.data.copy_(p_data_fp32)

                # integrated look ahead...
                # we do it at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    # get access to slow param tensor
                    slow_p = state['slow_buffer']
                    # (fast weights - slow weights) * alpha
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)
                    # copy interpolated weights to RAdam param tensor
                    p.data.copy_(slow_p)

        return loss
| 42.318681 | 133 | 0.513373 |
78af01d5b09ddc07832260477247ed90cc310016 | 2,812 | py | Python | core/helpers/app_manager.py | echim/pySteps | c33ac3446593b545aece475062d140527dcb443c | [
"MIT"
] | 8 | 2018-05-15T21:20:40.000Z | 2021-08-19T00:25:18.000Z | core/helpers/app_manager.py | echim/pySteps | c33ac3446593b545aece475062d140527dcb443c | [
"MIT"
] | null | null | null | core/helpers/app_manager.py | echim/pySteps | c33ac3446593b545aece475062d140527dcb443c | [
"MIT"
] | 2 | 2018-09-12T01:33:54.000Z | 2021-01-25T02:21:58.000Z | import os
import subprocess
from core.default_settings import DefaultSettings
from core.helpers.app_details import AppDetails
from core.helpers.os_helpers import get_app_full_name
from core.helpers.os_helpers import platform_is_linux, platform_is_windows, platform_is_darwin
from core.helpers.webdriver_helpers import get_driver_by_app_name
from core.keyboard_commands.keyboard_commands import maximize_current_window, close_current_window, \
wait_window_maximize_finish
from core.screen.screen import Screen
class AppManager:
    """Launch and close a desktop application, optionally via a WebDriver."""

    def __init__(self, app_details: AppDetails, with_web_driver: bool = False):
        self._app_details: AppDetails = app_details
        self._img_assets: dict = self._app_details.get_image_assets()
        # True -> control the app through a WebDriver instead of a subprocess.
        self._driver = with_web_driver
        self.browser = None

    @property
    def app_name(self):
        """Short name of the managed application."""
        return self._app_details.app_name

    @property
    def app_path(self):
        """Filesystem path to the application's executable."""
        return self._app_details.app_path

    def get_image_assets(self) -> dict:
        """Return the image assets declared for this app.

        Raises
        ------
        Exception
            If no image assets were loaded from the app details.
        """
        if self._img_assets is not None:
            return self._img_assets
        else:
            raise Exception('Unable to retrieve Image Assets')

    def launch_app(self, extra_params: str = None):
        """Start the application, wait for the launch marker and maximize it.

        Parameters
        ----------
        extra_params : str, optional
            Extra command-line arguments passed through to the application.
        """
        if self._driver:
            try:
                self.browser = get_driver_by_app_name(self.app_name, extra_params)
            except Exception as error:
                raise Exception('Unable to launch %s , please close any previous instance. %s' % (self.app_name, error))
            try:
                self.browser.maximize_window()
                wait_window_maximize_finish()
            except Exception:
                # Some drivers cannot maximize; the launch wait below still runs.
                pass
            Screen.image_wait(DefaultSettings.CONFIRM_LAUNCH_NAME.value, wait_seconds=10.0)
        else:
            launch_cmd = [self._app_details.app_path]
            if extra_params is not None and isinstance(extra_params, str):
                launch_cmd.append(extra_params)
            if platform_is_darwin():
                # Fix: only append "--args" when there are extra parameters;
                # previously the literal string "None" was passed to the app.
                launch_cmd = "open -a %s" % self._app_details.app_name
                if extra_params is not None:
                    launch_cmd += " --args %s" % extra_params
            # NOTE(review): on POSIX, shell=True with a *list* executes only the
            # first element; consider shell=isinstance(launch_cmd, str). Kept
            # as-is to preserve current platform behavior -- confirm.
            subprocess.Popen(launch_cmd, shell=True)
            Screen.image_wait(DefaultSettings.CONFIRM_LAUNCH_NAME.value, wait_seconds=10.0)
            maximize_current_window()

    def close_app(self):
        """Close the current window and kill any leftover app processes."""
        if self._driver:
            try:
                self.browser.quit()
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            except Exception:
                pass
        else:
            close_current_window()
        app_full_name = get_app_full_name(self.app_name)
        if platform_is_windows():
            os.system('taskkill /f /im %s' % app_full_name)
        elif platform_is_linux():
            os.system('pkill %s' % app_full_name)
        elif platform_is_darwin():
            os.system('killall %s' % app_full_name)
| 35.15 | 120 | 0.651849 |
38d6ecb9931bae41d657dcc6de23f97ac077bbae | 24,890 | py | Python | ch_pipeline/core/dataquery.py | chime-experiment/ch_pipeline | e09539f18cbe4cfafe05c362a5e3c1a19f32d5e6 | [
"MIT"
] | 1 | 2022-01-24T23:22:47.000Z | 2022-01-24T23:22:47.000Z | ch_pipeline/core/dataquery.py | chime-experiment/ch_pipeline | e09539f18cbe4cfafe05c362a5e3c1a19f32d5e6 | [
"MIT"
] | 19 | 2021-01-23T02:01:21.000Z | 2022-02-18T22:34:22.000Z | ch_pipeline/core/dataquery.py | chime-experiment/ch_pipeline | e09539f18cbe4cfafe05c362a5e3c1a19f32d5e6 | [
"MIT"
] | null | null | null | """
Dataset Specification
Lookup information from the database about the data, particularly which files
are contained in a specific dataset (defined by a `run` global flag) and what
each correlator input is connected to.
Dataspec Format
===============
.. deprecated:: pass1
Use `run` global flag instead.
A dataspec is just a dictionary with three required keys.
:name:
A short name given to the dataset.
:instrument:
The name of the instrument that took the data.
:timerange:
A specification of the time range, or ranges that make up the dataset.
The time range is specified either as a dictionary with `start` and `end`
keys, containing datetime objects (in UTC). Or it can be a list of such
ditionaries, to specify multiple time ranges to include. This can be contained
in a dataspec YAML file, and loaded using :class:`LoadDataspec`. Example:
.. code-block:: yaml
datasets:
- name: A
instrument: blanchard
timerange:
- start: 2014-07-26 03:00:00
end: 2014-07-28 01:00:00
- start: 2014-07-28 11:00:00
end: 2014-07-31 00:00:00
"""
import os
from caput import mpiutil, config, pipeline
from draco.core import task
from chimedb import data_index as di
from ch_util import tools, ephemeris, finder, layout
_DEFAULT_NODE_SPOOF = {"cedar_online": "/project/rpp-krs/chime/chime_online/"}
def _force_list(val):
"""Ensure configuration property is a list."""
if val is None:
return []
elif hasattr(val, "__iter__"):
return val
else:
return [val]
class QueryDatabase(task.MPILoggedTask):
    """Find files from specified database queries.

    This routine will query the database as specified in the runtime
    configuration file.

    Attributes
    ----------
    node_spoof : dictionary
        (default: {'cedar_online': '/project/rpp-krs/chime/chime_online/'} )
        host and directory in which to find data.
    start_time, end_time : string (default: None)
        start and end times to restrict the database search to
        can be in any format ensure_unix will support, including e.g.
        20190116T150323 and 2019-1-16 08:03:23 -7
    acqtype : string (default: 'corr')
        Type of acquisition. Options for acqtype are: 'corr', 'hk', 'weather',
        'rawadc', 'gain', 'flaginput', 'digitalgain'.
    instrument : string (optional)
        Set the instrument name. Common ArchiveInst names are: 'chimeN2',
        'chimestack', 'chime26m', 'chimetiming', 'chimecal', 'mingun' etc.
        While acqtype returns all 'corr' data, one must specify the instrument
        to get e.g. only stacked data (i.e. instrument = 'chimestack')
    source_26m : string (default: None)
        holography source to include. If None, do not include holography data.
    exclude_daytime : bool (default: False)
        exclude daytime data
    exclude_sun : bool (default: False)
        exclude data around Sun
    exclude_sun_time_delta : float (default: None)
        time_delta parameter passed to exclude_sun()
    exclude_sun_time_delta_rise_set : float (default: None)
        time_delta_rise_set parameter passed to exclude_sun()
    exclude_transits : list of string or float (default: [])
        if set, call exclude_transits(). Pass list of sources or RA to exclude.
    exclude_transits_time_delta : list of float (default : [])
        time in seconds to exclude around each source transit given in `exclude_transits`.
        if single value is passed, then that value will be applied to all source transits.
    include_transits : list of string or float (default : [])
        if set, call include_transits(). Pass list of sources or RA to include.
    include_transits_time_delta : list of float (default : [])
        time in seconds to include around each source transit given in `include_transits`.
        if single value is passed, then that value will be applied to all source transits.
    start_RA, end_RA : float (default: None)
        starting and ending RA to include. Both values must be included or
        no effect
    run_name : string (default: None)
        run name to include. If used, all other parameters will be ignored.
    accept_all_global_flags : bool (default: False)
        Accept all global flags. Due to a bug as of 2019-1-16, this may need to
        be set to True
    exclude_data_flag_types: list of string
        Reject time intervals that overlap with DataFlags of these types.
    return_intervals : bool (default: False)
        Return the full interval from the Finder. Otherwise only a list of file names.
    """

    return_intervals = config.Property(proptype=bool, default=False)
    node_spoof = config.Property(proptype=dict, default=_DEFAULT_NODE_SPOOF)
    acqtype = config.Property(proptype=str, default="corr")
    instrument = config.Property(proptype=str, default=None)
    source_26m = config.Property(proptype=str, default=None)
    start_time = config.Property(default=None)
    end_time = config.Property(default=None)
    start_csd = config.Property(proptype=float, default=None)
    end_csd = config.Property(proptype=float, default=None)
    exclude_daytime = config.Property(proptype=bool, default=False)
    exclude_sun = config.Property(proptype=bool, default=False)
    exclude_sun_time_delta = config.Property(proptype=float, default=None)
    exclude_sun_time_delta_rise_set = config.Property(proptype=float, default=None)
    exclude_transits = config.Property(proptype=_force_list, default=[])
    exclude_transits_time_delta = config.Property(proptype=_force_list, default=[])
    include_transits = config.Property(proptype=_force_list, default=[])
    include_transits_time_delta = config.Property(proptype=_force_list, default=[])
    start_RA = config.Property(proptype=float, default=None)
    end_RA = config.Property(proptype=float, default=None)
    run_name = config.Property(proptype=str, default=None)
    accept_all_global_flags = config.Property(proptype=bool, default=False)
    exclude_data_flag_types = config.Property(proptype=list, default=[])

    def setup(self):
        """Query the database and fetch the files

        Returns
        -------
        files : list
            List of files to load
        """
        files = None

        # Query the database on rank=0 only, and broadcast to everywhere else
        if mpiutil.rank0:
            if self.run_name:
                # NOTE(review): `QueryRun` is a module-level task, not a method
                # of this class, so this call raises AttributeError at run
                # time -- confirm the intended delegation before using
                # `run_name` with this task.
                return self.QueryRun()

            layout.connect_database()

            f = finder.Finder(node_spoof=self.node_spoof)
            f.filter_acqs(di.AcqType.name == self.acqtype)
            if self.instrument is not None:
                f.filter_acqs(di.ArchiveInst.name == self.instrument)

            if self.accept_all_global_flags:
                f.accept_all_global_flags()

            # Use start and end times if set, or try and use the start and end CSDs.
            # Fix: `st`/`et` used to be unbound (UnboundLocalError) when
            # neither a time nor a CSD range was configured; now the range is
            # only applied when one was given.
            st = et = None
            if self.start_time:
                st, et = self.start_time, self.end_time
            elif self.start_csd:
                st = ephemeris.csd_to_unix(self.start_csd)
                et = (
                    ephemeris.csd_to_unix(self.end_csd)
                    if self.end_csd is not None
                    else None
                )

            # Note: include_time_interval includes the specified time interval
            # Using this instead of set_time_range, which only narrows the interval
            # f.include_time_interval(self.start_time, self.end_time)
            if st is not None or et is not None:
                f.set_time_range(st, et)

            # Fix: compare against None so that RA == 0.0 counts as set, and
            # the warning branch tests end_RA (it previously re-tested
            # start_RA, silently ignoring a lone end_RA).
            if self.start_RA is not None and self.end_RA is not None:
                f.include_RA_interval(self.start_RA, self.end_RA)
            elif self.start_RA is not None or self.end_RA is not None:
                self.log.warning(
                    "One but not both of start_RA and end_RA " "are set. Ignoring both."
                )

            # Fix: removed a duplicate, unguarded
            # `f.filter_acqs(di.ArchiveInst.name == self.instrument)` that
            # filtered against NULL whenever `instrument` was left unset.

            if self.exclude_daytime:
                f.exclude_daytime()

            if self.exclude_sun:
                f.exclude_sun(
                    time_delta=self.exclude_sun_time_delta,
                    time_delta_rise_set=self.exclude_sun_time_delta_rise_set,
                )

            if self.include_transits:
                time_delta = self.include_transits_time_delta
                ntime_delta = len(time_delta)
                if (ntime_delta > 1) and (ntime_delta < len(self.include_transits)):
                    raise ValueError(
                        "Must specify `time_delta` for each source in "
                        "`include_transits` or provide single value for all sources."
                    )

                for ss, src in enumerate(self.include_transits):
                    tdelta = time_delta[ss % ntime_delta] if ntime_delta > 0 else None
                    bdy = (
                        ephemeris.source_dictionary[src]
                        if isinstance(src, str)
                        else src
                    )
                    f.include_transits(bdy, time_delta=tdelta)

            if self.exclude_transits:
                time_delta = self.exclude_transits_time_delta
                ntime_delta = len(time_delta)
                if (ntime_delta > 1) and (ntime_delta < len(self.exclude_transits)):
                    raise ValueError(
                        "Must specify `time_delta` for each source in "
                        "`exclude_transits` or provide single value for all sources."
                    )

                for ss, src in enumerate(self.exclude_transits):
                    tdelta = time_delta[ss % ntime_delta] if ntime_delta > 0 else None
                    bdy = (
                        ephemeris.source_dictionary[src]
                        if isinstance(src, str)
                        else src
                    )
                    f.exclude_transits(bdy, time_delta=tdelta)

            if self.source_26m:
                f.include_26m_obs(self.source_26m)

            if len(self.exclude_data_flag_types) > 0:
                f.exclude_data_flag_type(self.exclude_data_flag_types)

            results = f.get_results()
            if not self.return_intervals:
                # Flatten the per-acquisition file lists into one sorted list.
                files = [fname for result in results for fname in result[0]]
                files.sort()
            else:
                # Keep the full (files, (start, end)) intervals, time-ordered.
                files = results
                files.sort(key=lambda x: x[1][0])

        files = mpiutil.world.bcast(files, root=0)

        # Make sure all nodes have container before return
        mpiutil.world.Barrier()

        return files
class QueryRun(task.MPILoggedTask):
    """Find the files belonging to a specific `run`.

    This routine will query the database for the global flag corresponding to
    the given run, and will use the start and end times (as well as the
    instrument) to return a list of the contained files.

    Attributes
    ----------
    run_name : str
        Name of the `run` defined in the database.
    node_spoof : str, optional
        Node spoof argument. See documentation of :class:`ch_util.finder.Finder`.
    """

    run_name = config.Property(proptype=str)
    node_spoof = config.Property(proptype=dict, default=_DEFAULT_NODE_SPOOF)

    def setup(self):
        """Fetch the files in the specified run.

        Returns
        -------
        files : list
            List of files to load

        Raises
        ------
        RuntimeError
            If the run is missing, ambiguous, or has no instrument set.
        """
        from ch_util import layout
        from chimedb import data_index as di

        files = None

        # Query the database on rank=0 only, and broadcast to everywhere else
        if mpiutil.rank0:
            layout.connect_database()

            # The "run" global-flag category groups run definitions.
            cat_run = (
                layout.global_flag_category.select()
                .where(layout.global_flag_category.name == "run")
                .get()
            )

            # Find run in database
            run_query = layout.global_flag.select().where(
                layout.global_flag.category == cat_run,
                layout.global_flag.name == self.run_name,
            )

            # Make sure we only have flags with active events
            run_query = (
                run_query.join(layout.graph_obj)
                .join(layout.event)
                .where(layout.event.active)
            )

            if run_query.count() == 0:
                raise RuntimeError("Run %s not found in database" % self.run_name)
            elif run_query.count() > 1:
                raise RuntimeError(
                    "Multiple global flags found in database for run %s" % self.run_name
                )

            run = run_query.get()

            # Fetch run start and end time
            run_event = run.event().get()
            start, end = run_event.start.time, run_event.end.time

            # Fetch the instrument
            if run.inst is None:
                raise RuntimeError("Instrument is not specified in database.")
            inst_obj = run.inst

            # Create a finder object limited to the relevant time
            fi = finder.Finder(node_spoof=self.node_spoof)
            fi.only_corr()

            # Set the time range that encapsulates all the intervals
            fi.set_time_range(start, end)

            # Add in all the time ranges
            # for ti in timerange:
            #     fi.include_time_interval(ti['start'], ti['end'])

            # Only include the required instrument
            fi.filter_acqs(di.ArchiveAcq.inst == inst_obj)

            # Pull out the results and extract all the files
            results = fi.get_results()
            files = [fname for result in results for fname in result[0]]
            files.sort()

        files = mpiutil.world.bcast(files, root=0)

        # Make sure all nodes have container before return
        mpiutil.world.Barrier()

        return files
class QueryDataspecFile(task.MPILoggedTask):
    """Find the available files given a dataspec from a file.

    .. deprecated:: pass1
        Use the `run` global flag in the database,
        combined with :class:`LoadRun` instead.

    Attributes
    ----------
    dataset_file : str
        YAML file containing dataset specification. If not specified, use the
        one contained within the ch_pipeline repository.
    dataset_name : str
        Name of dataset to use.
    archive_root : str
        Root of archive to add to file paths.
    """

    dataset_file = config.Property(proptype=str, default="")
    dataset_name = config.Property(proptype=str, default="")
    node_spoof = config.Property(proptype=dict, default=_DEFAULT_NODE_SPOOF)

    def setup(self):
        """Fetch the files in the given dataspec.

        Returns
        -------
        files : list
            List of files to load

        Raises
        ------
        Exception
            If the datasets file, the named dataset, or its required keys
            ("instrument" and "timerange") cannot be found.
        """
        import yaml

        # Set to default datasets file shipped with the repository
        if self.dataset_file == "":
            self.dataset_file = os.path.dirname(__file__) + "/data/datasets.yaml"

        # Check existense and read yaml datasets file
        if not os.path.exists(self.dataset_file):
            raise Exception("Dataset file not found.")

        with open(self.dataset_file, "r") as f:
            dconf = yaml.safe_load(f)

        if "datasets" not in dconf:
            raise Exception("No datasets.")

        dsets = dconf["datasets"]

        # Find the correct dataset by its "name" key
        dspec = None
        for ds in dsets:
            if ds["name"] == self.dataset_name:
                dspec = ds
                break

        # Raise exception if it's not found
        if dspec is None:
            raise Exception(
                "Dataset %s not found in %s." % (self.dataset_name, self.dataset_file)
            )

        if ("instrument" not in dspec) or ("timerange" not in dspec):
            raise Exception("Invalid dataset.")

        # Add archive root if exists
        if self.node_spoof is not None:
            dspec["node_spoof"] = self.node_spoof

        files = files_from_spec(dspec, node_spoof=self.node_spoof)

        return files
class QueryDataspec(task.MPILoggedTask):
    """Find the available files given a dataspec in the config file.

    Attributes
    ----------
    instrument : str
        Name of the instrument.
    timerange : list
        List of time ranges as documented above.
    node_spoof : dict, optional
        Optional node spoof argument.
    """

    instrument = config.Property(proptype=str)
    timerange = config.Property(proptype=list)
    node_spoof = config.Property(proptype=dict, default=_DEFAULT_NODE_SPOOF)

    def setup(self):
        """Fetch the files in the given dataspec.

        Returns
        -------
        files : list
            List of files to load
        """
        # Assemble the dataspec dictionary from the configured properties.
        spec = {"instrument": self.instrument, "timerange": self.timerange}

        # Attach the node spoof, if one was configured.
        if self.node_spoof is not None:
            spec["node_spoof"] = self.node_spoof

        return files_from_spec(spec, node_spoof=self.node_spoof)
class QueryAcquisitions(task.MPILoggedTask):
"""Iterate over acquisitions.
This routine will query the database as specified in the runtime
configuration file. It will iterate over the returned acquisitions
in chronological order, outputing a list of the corresponding files.
Attributes
----------
node_spoof : dict
Host and directory in which to find data.
start_time, end_time : str
Find all acquisitions between this start and end time.
instrument : str
Find all acquisitions from this instrument.
accept_all_global_flags : bool
Accept all global flags.
min_num_files : int
Do not process acquisitions that contain less than this number of files.
max_num_files : int
Maximum number of files to return at once. If an acquisition
contains more than this number of files, then it will be split
up into multiple blocks (pipeline iterations) of size roughly
equal to max_num_files.
"""
node_spoof = config.Property(proptype=dict, default=_DEFAULT_NODE_SPOOF)
instrument = config.Property(proptype=str, default="chimestack")
start_time = config.Property(default=None)
end_time = config.Property(default=None)
accept_all_global_flags = config.Property(proptype=bool, default=False)
min_num_files = config.Property(proptype=int, default=None)
max_num_files = config.Property(proptype=int, default=None)
def setup(self):
"""Query the database, fetch the files, and save to attribute."""
from ch_util import layout
from chimedb import data_index as di
# Function to break a list of files into groups of roughly the same size
def _choose_group_size(n, m, accept):
if (n % m) < accept:
return m
l, u = m - 1, m + 1
while ((n % l) > accept) and ((n % u) > accept):
l, u = l - 1, u + 1
if (n % l) < (n % u):
return l
else:
return u
# Query the database on rank=0 only, and broadcast to everywhere else
files = None
if self.comm.rank == 0:
layout.connect_database()
fi = finder.Finder(node_spoof=self.node_spoof)
fi.only_corr()
if self.accept_all_global_flags:
fi.accept_all_global_flags()
fi.set_time_range(self.start_time, self.end_time)
fi.filter_acqs(di.ArchiveInst.name == self.instrument)
files = []
for aa, acq in enumerate(fi.acqs):
acq_results = fi.get_results_acq(aa)
filelist = [ff for acqr in acq_results for ff in acqr[0]]
nfiles = len(filelist)
if (self.min_num_files is not None) and (nfiles < self.min_num_files):
continue
if (self.max_num_files is None) or (nfiles <= self.max_num_files):
files.append(filelist)
else:
group_size = _choose_group_size(
nfiles,
self.max_num_files,
max(1, int(0.10 * self.max_num_files)),
)
ngroup, offset = nfiles // group_size, (nfiles % group_size) // 2
bnd = [offset + gg * group_size for gg in range(ngroup + 1)]
bnd[0], bnd[-1] = 0, nfiles
files += [
filelist[bnd[ii] : bnd[ii + 1]] for ii in range(len(bnd) - 1)
]
# Broadcast the files to the other nodes
files = self.comm.bcast(files, root=0)
self.comm.Barrier()
self.files = files
def next(self):
"""Return the files from the next acquisition.
Returns
-------
files : list
List of files to load
"""
if len(self.files) == 0:
raise pipeline.PipelineStopIteration
files = self.files.pop(0)
return files
class QueryInputs(task.MPILoggedTask):
"""From a dataspec describing the data create a list of objects describing
the inputs in the files.
Attributes
----------
cache : bool
Only query for the inputs for the first container received. For all
subsequent files just return the initial set of inputs. This can help
minimise the number of potentially fragile database operations.
"""
cache = config.Property(proptype=bool, default=False)
_cached_inputs = None
def next(self, ts):
"""Generate an input description from the timestream passed in.
Parameters
----------
ts : andata.CorrData
Timestream container.
Returns
-------
inputs : list of :class:`CorrInput`
A list of describing the inputs as they are in the file.
"""
# Fetch from the cache if we can
if self.cache and self._cached_inputs:
self.log.debug("Using cached inputs.")
return self._cached_inputs
inputs = None
if mpiutil.rank0:
# Get the datetime of the middle of the file
time = ephemeris.unix_to_datetime(0.5 * (ts.time[0] + ts.time[-1]))
inputs = tools.get_correlator_inputs(time)
inputs = tools.reorder_correlator_inputs(ts.index_map["input"], inputs)
# Broadcast input description to all ranks
inputs = mpiutil.world.bcast(inputs, root=0)
# Save into the cache for the next iteration
if self.cache:
self._cached_inputs = inputs
# Make sure all nodes have container before return
mpiutil.world.Barrier()
return inputs
def finder_from_spec(spec, node_spoof=None):
"""Get a `Finder` object from the dataspec.
Parameters
----------
dspec : dict
Dataspec dictionary.
Returns
-------
fi : ch_util.finder.Finder
"""
instrument = spec["instrument"]
timerange = spec["timerange"]
fi = None
if mpiutil.rank0:
# Get instrument
inst_obj = (
di.ArchiveInst.select().where(di.ArchiveInst.name == instrument).get()
)
# Ensure timerange is a list
if not isinstance(timerange, list):
timerange = [timerange]
# Find the earliest and latest times
earliest = min([tr["start"] for tr in timerange])
latest = max([tr["end"] for tr in timerange])
# Set the archive_root
if node_spoof is None and "node_spoof" in spec:
node_spoof = spec["node_spoof"]
# Create a finder object limited to the relevant time
fi = finder.Finder(node_spoof=node_spoof)
# Set the time range that encapsulates all the intervals
fi.set_time_range(earliest, latest)
# Add in all the time ranges
for ti in timerange:
fi.include_time_interval(ti["start"], ti["end"])
# Only include the required instrument
fi.filter_acqs(di.ArchiveAcq.inst == inst_obj)
return fi
def files_from_spec(spec, node_spoof=None):
"""Get the names of files in a dataset.
Parameters
----------
dspec : dict
Dataspec dictionary.
Returns
-------
files : list
"""
# Get a list of files in a dataset from an instrument name and timerange.
files = None
if mpiutil.rank0:
# Get the finder object
fi = finder_from_spec(spec, node_spoof)
# Pull out the results and extract all the files
results = fi.get_results()
files = [fname for result in results for fname in result[0]]
files.sort()
files = mpiutil.world.bcast(files, root=0)
return files
| 33.726287 | 90 | 0.60675 |
4f111dca95ac4737729bfd91fdf965e2f3b2429b | 5,530 | py | Python | src/illumidesk/authenticators/validator.py | 1kastner/illumidesk | d4b3a6b964d7cf9f705ff70513491247e01e701f | [
"MIT"
] | null | null | null | src/illumidesk/authenticators/validator.py | 1kastner/illumidesk | d4b3a6b964d7cf9f705ff70513491247e01e701f | [
"MIT"
] | 1 | 2020-04-24T16:58:52.000Z | 2020-04-24T16:58:52.000Z | src/illumidesk/authenticators/validator.py | netoisc/illumidesk | 41b99594ac576247f3432b750157c6384b93bc47 | [
"MIT"
] | null | null | null | import os
import sys
import time
from collections import OrderedDict
from jupyterhub.utils import url_path_join
from ltiauthenticator import LTILaunchValidator
from oauthlib.oauth1 import RequestValidator
from oauthlib.oauth1.rfc5849 import signature
from tornado.auth import OAuthMixin
from tornado.web import HTTPError
from tornado.web import RequestHandler
from traitlets.config import LoggingConfigurable
from typing import Any
from typing import Dict
from typing import Optional
from .utils import LTI11_LAUNCH_PARAMS_REQUIRED
from .utils import LTI11_OAUTH_ARGS
from .utils import LTIUtils
class LTI11LaunchValidator(LoggingConfigurable):
"""
This class closely mimics the jupyterhub/ltiauthenticator LTILaunchValidator
base class. Inherits from the LoggingConfigurable traitlet to support logging.
Allows JupyterHub to verify LTI 1.1 compatible requests as a tool
provider (TP).
For an instance of this class to work, you need to set the consumer key and
shared secret key(s)/value(s) in `LTI11Authenticator` settings, which inherits
from the ``ltiauthenticator.LTIAuthenticator`` class. The key/value pairs are
set as are defined as a dict using the ``consumers`` attribute.
Attributes:
consumers: consumer key and shared secret key/value pair(s)
"""
# Keep a class-wide, global list of nonces so we can detect & reject
# replay attacks. This possibly makes this non-threadsafe, however.
nonces = OrderedDict()
def __init__(self, consumers):
self.consumers = consumers
def validate_launch_request(
self, launch_url: str, headers: Dict[str, Any], args: Dict[str, Any],
) -> bool:
"""
Validate a given LTI 1.1 launch request. The arguments' k/v's are either
required, recommended, or optional. The required/recommended/optional
keys are listed in the utils.LTI11Utils class.
Args:
launch_url: URL (base_url + path) that receives the launch request,
usually from a tool consumer.
headers: HTTP headers included with the POST request
args: the body sent to the launch url.
Returns:
True if the validation passes, False otherwise.
Raises:
HTTPError if a required argument is not inclued in the POST request.
"""
# Ensure that required oauth_* body arguments are included in the request
for param in LTI11_OAUTH_ARGS:
if param not in args.keys():
raise HTTPError(
400, 'Required oauth arg %s not included in request' % param
)
if not args.get(param):
raise HTTPError(
400, 'Required oauth arg %s does not have a value' % param
)
# Ensure that consumer key is registered in in jupyterhub_config.py
# LTIAuthenticator.consumers defined in parent class
if args['oauth_consumer_key'] not in self.consumers:
raise HTTPError(401, 'unknown oauth_consumer_key')
# Ensure that required LTI 1.1 body arguments are included in the request
for param in LTI11_LAUNCH_PARAMS_REQUIRED:
if param not in args.keys():
raise HTTPError(
400, 'Required LTI arg %s not included in request' % param
)
if not args.get(param):
raise HTTPError(
400, 'Required LTI arg %s does not have a value' % param
)
# Inspiration to validate nonces/timestamps from OAuthlib
# https://github.com/oauthlib/oauthlib/blob/master/oauthlib/oauth1/rfc5849/endpoints/base.py#L147
if len(str(int(args['oauth_timestamp']))) != 10:
raise HTTPError(401, 'Invalid timestamp size')
try:
ts = int(args['oauth_timestamp'])
except ValueError:
raise HTTPError(401, 'Timestamp must be an integer')
else:
# Reject timestamps that are older than 30 seconds
if abs(time.time() - ts) > 30:
raise HTTPError(
401,
'Timestamp given is invalid, differ from '
'allowed by over %s seconds.' % str(int(time.time() - ts)),
)
if (
ts in LTI11LaunchValidator.nonces
and args['oauth_nonce'] in LTI11LaunchValidator.nonces[ts]
):
raise HTTPError(401, 'oauth_nonce + oauth_timestamp already used')
LTI11LaunchValidator.nonces.setdefault(ts, set()).add(args['oauth_nonce'])
# convert arguments dict back to a list of tuples for signature
args_list = [(k, v) for k, v in args.items()]
base_string = signature.signature_base_string(
'POST',
signature.base_string_uri(launch_url),
signature.normalize_parameters(
signature.collect_parameters(body=args_list, headers=headers)
),
)
consumer_secret = self.consumers[args['oauth_consumer_key']]
sign = signature.sign_hmac_sha1(base_string, consumer_secret, None)
is_valid = signature.safe_string_equals(sign, args['oauth_signature'])
self.log.debug('signature in request: %s' % args['oauth_signature'])
self.log.debug('calculated signature: %s' % sign)
if not is_valid:
raise HTTPError(401, 'Invalid oauth_signature')
return True
| 38.943662 | 105 | 0.643761 |
00075af5c869be044adb79c87e6b367072672348 | 2,700 | py | Python | jatek_fuggvenyek.py | SzanSzabolcs/my-first-github-repo | 9e9299a2fe7bf927b4cde3a60883a875a9a0ec7c | [
"MIT"
] | null | null | null | jatek_fuggvenyek.py | SzanSzabolcs/my-first-github-repo | 9e9299a2fe7bf927b4cde3a60883a875a9a0ec7c | [
"MIT"
] | 1 | 2019-05-29T09:45:59.000Z | 2019-05-29T09:45:59.000Z | jatek_fuggvenyek.py | SzanSzabolcs/my-first-github-repo | 9e9299a2fe7bf927b4cde3a60883a875a9a0ec7c | [
"MIT"
] | null | null | null | from enum import Enum
from random import randint
#-----------------------------------------------------------------
# Függvény definíciók
# -----------------------------------------------------------------
def sorsol():
targyak = ["balta", "páncél", "pajzs", "láda", "kard", "sisak"]
index = randint(0, len(targyak) - 1)
return targyak[index]
def pontszam(targylista):
pontertekek = {
"balta": 1,
"páncél": 5,
"pajzs": 3,
"láda": 3,
"kard": 4,
"sisak": 2
}
osszpontszam = 0
for targy in targylista:
osszpontszam += pontertekek[targy]
return osszpontszam
def kiir(targyak):
for targy in targyak:
print(targy)
def kiir_fejlett(targyak):
pontertekek = {
"balta": 1,
"páncél": 5,
"pajzs": 3,
"láda": 3,
"kard": 4,
"sisak": 2
}
print("")
print("Ezeket a targyakat szerezted:")
for targy in targyak:
print(" - " + targy + ": " + str(pontertekek[targy]))
print("")
print("Összpontszám: " + str(pontszam(targyak)))
print("")
def nevvalasztas():
nev = input("Add meg a neved! ")
with open("nev.txt", 'w') as f:
f.write(nev)
return nev
def file_kiiras(filenev):
with open(filenev, 'r') as f:
tartalom = f.read()
print(tartalom)
def nevolvasas():
with open("nev.txt", 'r') as f:
nev = f.read()
return nev
def udvozlet():
print("")
print("------------------------")
print("Üdvözöllek a játékomban!")
print("------------------------")
print("")
print("Juss messzebb az erdőben!")
print("")
def viszlat(megtett_tavolsag, gyujtott_arany):
print("")
print("-------------------------")
print("Sajnos vége a játéknak...")
print("-------------------------")
print("")
print("Megtett távolság: " + str(megtett_tavolsag))
print("Gyűjtött arany: " + str(gyujtott_arany))
print("")
def aranyat_talaltal(uj_arany, osszes_arany):
osszes_arany += uj_arany
print("")
print("Megbotlasz valamiben...")
print("Lenézel a lábad elé...")
print("Találtál " + str(uj_arany) + " aranyat!")
print("Most " + str(osszes_arany) + " aranyad van.")
return osszes_arany
def csapdara_leptel(osszes_elet):
osszes_elet -= 1
if osszes_elet == 0:
print("Sajnos meghaltál!")
else:
print("Csapda! Már csak " + str(osszes_elet) + " életed van!")
return osszes_elet
class Irany(Enum):
SEMERRE = 0
BALRA = 1
JOBBRA = 2
FEL = 3
LE = 4 | 23.684211 | 71 | 0.494074 |
c59860387b15a9707e6afb50d85c2658d30a75d6 | 3,270 | py | Python | research/cv/resnet50_adv_pruning/eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/resnet50_adv_pruning/eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/resnet50_adv_pruning/eval.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
eval.
"""
import os
import argparse
import numpy as np
from mindspore import context, Tensor
from mindspore import nn
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import dtype as mstype
from src.pet_dataset import create_dataset
from src.config import cfg
from src.resnet_imgnet import resnet50
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--checkpoint_path', type=str,
default='resnet50-imgnet-0.65x-80.24.ckpt', help='Checkpoint file path')
parser.add_argument('--dataset_path', type=str,
default='/home/hankai/xiaoan/data/test.mindrecord', help='Dataset path')
parser.add_argument('--platform', type=str, default='GPU', help='run platform')
args_opt = parser.parse_args()
if __name__ == '__main__':
config_platform = cfg
if args_opt.platform == "Ascend":
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
device_id=device_id, save_graphs=False)
elif args_opt.platform == "GPU":
context.set_context(mode=context.GRAPH_MODE,
device_target="GPU", save_graphs=False)
else:
raise ValueError("Unsupported platform.")
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
dataset = create_dataset(dataset_path=args_opt.dataset_path,
do_train=False,
config=config_platform,
platform=args_opt.platform,
batch_size=config_platform.batch_size)
step_size = dataset.get_dataset_size()
index = []
with open('index.txt', 'r') as f:
for line in f:
ind = Tensor((np.array(line.strip('\n').split(' ')[:-1])).astype(np.int32).reshape(-1, 1))
index.append(ind)
net = resnet50(
rate=0.65, class_num=config_platform.num_classes, index=index)
if args_opt.platform == "Ascend":
net.to_float(mstype.float16)
for _, cell in net.cells_and_names():
if isinstance(cell, nn.Dense):
cell.to_float(mstype.float32)
if args_opt.checkpoint_path:
param_dict = load_checkpoint(args_opt.checkpoint_path)
load_param_into_net(net, param_dict)
net.set_train(False)
model = Model(net, loss_fn=loss, metrics={'acc'})
res = model.eval(dataset)
print("result:", res, "ckpt=", args_opt.checkpoint_path)
| 37.586207 | 102 | 0.662385 |
cebfebd42ae8f85815cabe968789716951c1fa4e | 12,646 | py | Python | venv/lib/python3.8/site-packages/plotly/validators/_treemap.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | venv/lib/python3.8/site-packages/plotly/validators/_treemap.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 1 | 2021-08-03T12:23:01.000Z | 2021-08-10T08:35:22.000Z | venv/lib/python3.8/site-packages/plotly/validators/_treemap.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 3 | 2021-01-31T16:40:52.000Z | 2021-08-29T18:32:34.000Z | import _plotly_utils.basevalidators
class TreemapValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="treemap", parent_name="", **kwargs):
super(TreemapValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Treemap"),
data_docs=kwargs.pop(
"data_docs",
"""
branchvalues
Determines how the items in `values` are
summed. When set to "total", items in `values`
are taken to be value of all its descendants.
When set to "remainder", items in `values`
corresponding to the root and the branches
sectors are taken to be the extra part not part
of the sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the
"leaves" and/or "branches", otherwise 0.
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
domain
:class:`plotly.graph_objects.treemap.Domain`
instance or dict with compatible properties
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.treemap.Hoverlabel
` instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event
data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
variables `currentPath`, `root`, `entry`,
`percentRoot`, `percentEntry` and
`percentParent`. Anything contained in tag
`<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>".
To hide the secondary box completely, use an
empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for hovertemplate .
hovertext
Sets hover text elements associated with each
sector. If a single string, the same string
appears for all data points. If an array of
string, the items are mapped in order of this
trace's sectors. To be seen, trace `hoverinfo`
must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
insidetextfont
Sets the font used for `textinfo` lying inside
the sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud
for labels .
level
Sets the level from which this trace hierarchy
is rendered. Set `level` to `''` to start from
the root node in the hierarchy. Must be an "id"
if `ids` is filled in, otherwise plotly
attempts to find a matching item in `labels`.
marker
:class:`plotly.graph_objects.treemap.Marker`
instance or dict with compatible properties
maxdepth
Sets the number of rendered sectors from any
given `level`. Set `maxdepth` to "-1" to render
all the levels in the hierarchy.
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside
the sector. This option refers to the root of
the hierarchy presented on top left corner of a
treemap graph. Please note that if a hierarchy
has multiple root nodes, this option won't have
any effect and `insidetextfont` would be used.
parents
Sets the parent sectors for each of the
sectors. Empty string items '' are understood
to reference the root node in the hierarchy. If
`ids` is filled, `parents` items are understood
to be "ids" themselves. When `ids` is not set,
plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud
for parents .
pathbar
:class:`plotly.graph_objects.treemap.Pathbar`
instance or dict with compatible properties
root
:class:`plotly.graph_objects.treemap.Root`
instance or dict with compatible properties
sort
Determines whether or not the sectors are
reordered from largest to smallest.
stream
:class:`plotly.graph_objects.treemap.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each sector.
If trace `textinfo` contains a "text" flag,
these elements will be seen on the chart. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on
the graph.
textposition
Sets the positions of the `text` elements.
textsrc
Sets the source reference on Chart Studio Cloud
for text .
texttemplate
Template string used for rendering the
information text that appear on points. Note
that this will override `textinfo`. Variables
are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-time-
format#locale_format for details on the date
formatting syntax. Every attributes that can be
specified per-point (the ones that are
`arrayOk: true`) are available. variables
`currentPath`, `root`, `entry`, `percentRoot`,
`percentEntry`, `percentParent`, `label` and
`value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud
for texttemplate .
tiling
:class:`plotly.graph_objects.treemap.Tiling`
instance or dict with compatible properties
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
values
Sets the values associated with each of the
sectors. Use with `branchvalues` to determine
how the values are summed.
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
""",
),
**kwargs
)
| 48.638462 | 72 | 0.546022 |
47d4df3eb76f1f3671923db0847c6cf1b4130862 | 4,081 | py | Python | tests/unit_tests/test_lm/test_base.py | radical-cybertools/radical.pilot | 4ce3efbf3e2f045b5c48fb848e9f65f9f5ea17e9 | [
"MIT"
] | 47 | 2015-03-16T01:08:11.000Z | 2022-02-02T10:36:39.000Z | tests/unit_tests/test_lm/test_base.py | radical-cybertools/radical.pilot | 4ce3efbf3e2f045b5c48fb848e9f65f9f5ea17e9 | [
"MIT"
] | 1,856 | 2015-01-02T09:32:20.000Z | 2022-03-31T21:45:06.000Z | tests/unit_tests/test_lm/test_base.py | radical-cybertools/radical.pilot | 4ce3efbf3e2f045b5c48fb848e9f65f9f5ea17e9 | [
"MIT"
] | 28 | 2015-06-10T18:15:14.000Z | 2021-11-07T04:36:45.000Z | # pylint: disable=protected-access,unused-argument,no-value-for-parameter,abstract-method
import radical.utils as ru
from unittest import mock, TestCase
from radical.pilot.agent.launch_method.base import LaunchMethod
# ------------------------------------------------------------------------------
#
class TestBaseLaunchMethod(TestCase):
# --------------------------------------------------------------------------
#
@mock.patch('radical.utils.zmq.server.Logger')
@mock.patch('radical.utils.zmq.server.Profiler')
@mock.patch('radical.utils.env_eval')
def test_init_from_registry(self, mocked_env_eval, mocked_prof, mocked_log):
class NewLaunchMethod(LaunchMethod):
def _init_from_info(self, lm_info):
self._env = lm_info['env']
self._env_sh = lm_info['env_sh']
self._command = lm_info['command']
assert self._command
# check initialization from registry data only,
reg = ru.zmq.Registry()
reg.start()
lm_name = 'NewLaunchMethod'
lm_info = {'env' : {'test_env': 'test_value'},
'env_sh' : 'env/lm_new.sh',
'command': '/usr/bin/test'}
c = ru.zmq.RegistryClient(url=reg.addr)
c.put('lm.%s' % lm_name.lower(), lm_info)
c.close()
lm = NewLaunchMethod(lm_name, ru.Munch({'reg_addr': reg.addr}), None,
mock.Mock(), mock.Mock())
self.assertEqual(lm._env, lm_info['env'])
self.assertEqual(lm._env_sh, lm_info['env_sh'])
self.assertEqual(lm._command, lm_info['command'])
reg.stop()
reg.wait()
# --------------------------------------------------------------------------
#
@mock.patch.object(LaunchMethod, '__init__', return_value=None)
@mock.patch('radical.utils.sh_callout')
@mock.patch('radical.utils.Logger')
def test_get_mpi_info(self, mocked_logger, mocked_sh_callout, mocked_init):
lm = LaunchMethod('', {}, None, None, None)
lm._log = mocked_logger
with self.assertRaises(ValueError):
# no executable found
lm._get_mpi_info(exe='')
mocked_sh_callout.return_value = ['19.05.2', '', 0]
version, flavor = lm._get_mpi_info('mpirun')
self.assertIsNone(version) # correct version is not set
self.assertEqual(flavor, LaunchMethod.MPI_FLAVOR_UNKNOWN)
mocked_sh_callout.return_value = [
'mpirun (Open MPI) 2.1.2\n\n'
'Report bugs to https://www.open-mpi.org/community/help/\n', '', 0]
version, flavor = lm._get_mpi_info('mpirun')
self.assertEqual(version, '2.1.2')
self.assertEqual(flavor, LaunchMethod.MPI_FLAVOR_OMPI)
mocked_sh_callout.return_value = ['HYDRA build details:', '', 0]
version, flavor = lm._get_mpi_info('mpirun')
self.assertEqual(version, '')
self.assertEqual(flavor, LaunchMethod.MPI_FLAVOR_HYDRA)
mocked_sh_callout.return_value = [
'Intel(R) MPI Library for Linux* OS,\n\n'
'Version 2019 Update 5 Build 20190806\n\n'
'Copyright 2003-2019, Intel Corporation.', '', 0]
version, flavor = lm._get_mpi_info('mpirun')
self.assertEqual(version, '')
self.assertEqual(flavor, LaunchMethod.MPI_FLAVOR_HYDRA)
mocked_sh_callout.return_value = [
'HYDRA build details:\n\n'
'Version: 3.2\n\n'
'Release Date: unreleased development copy\n\n'
'/var/tmp/Intel-mvapich2/OFEDRPMS/BUILD/mvapich2\n\n'
'2.3b-10/src/openpa/src', '', 0]
version, flavor = lm._get_mpi_info('mpirun')
self.assertEqual(version, '')
self.assertEqual(flavor, LaunchMethod.MPI_FLAVOR_HYDRA)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
tc = TestBaseLaunchMethod()
tc.test_get_mpi_info()
# ------------------------------------------------------------------------------
| 36.4375 | 89 | 0.558197 |
1013b6b94846bacf56a7423d89e4213644942717 | 13,953 | py | Python | rewriter/decode.py | yangon99/unified-summarization | 3727a52e91d07061e494ad3ae2178e9e86c73029 | [
"MIT"
] | 1 | 2021-05-30T12:05:53.000Z | 2021-05-30T12:05:53.000Z | rewriter/decode.py | Li-Ming-Fan/unified-summarization | 80d9894c0142a3da3e81d61ab509c37ba15f82ec | [
"MIT"
] | null | null | null | rewriter/decode.py | Li-Ming-Fan/unified-summarization | 80d9894c0142a3da3e81d61ab509c37ba15f82ec | [
"MIT"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to run beam search decoding, including running ROUGE evaluation and producing JSON datafiles for the in-browser attention visualizer, which can be found here https://github.com/abisee/attn_vis"""
import os
import time
import tensorflow as tf
from batcher import Batcher
import beam_search
import data
import cPickle as pk
import json
import pyrouge
import util
import logging
import numpy as np
import pdb
# Global TF flag container; command-line flags are defined by the caller
# (see the run script) and read throughout this module.
FLAGS = tf.app.flags.FLAGS
SECS_UNTIL_NEW_CKPT = 60  # max number of seconds before loading new checkpoint
class BeamSearchDecoder(object):
  """Beam search decoder.

  Drives decoding of a trained seq2seq model (greedy or beam search,
  selected via FLAGS.decode_method) and, in single_pass mode, writes
  reference/decoded summaries in the layout pyrouge expects and runs
  ROUGE evaluation at the end.
  """

  def __init__(self, model, batcher, vocab):
    """Initialize decoder.

    Args:
      model: a Seq2SeqAttentionModel object.
      batcher: a Batcher object.
      vocab: Vocabulary object
    """
    self._model = model
    self._model.build_graph()
    self._batcher = batcher
    self._vocab = vocab
    self._saver = tf.train.Saver(max_to_keep=3)  # we use this to load checkpoints for decoding
    self._sess = tf.Session(config=util.get_config())
    # In 'evalall' mode the checkpoint and output dirs are prepared once,
    # up front; in 'eval' mode prepare_evaluate() is called per checkpoint.
    if FLAGS.mode == 'evalall':
      self.prepare_evaluate()

  def prepare_evaluate(self, ckpt_path=None):
    """Load a checkpoint and create the decode output directories.

    Args:
      ckpt_path: checkpoint to restore when FLAGS.mode == 'eval';
        ignored in 'evalall' mode (the checkpoint is chosen from flags).

    Returns:
      False if (in 'eval' mode) this checkpoint's decode dir already
      exists, i.e. it was evaluated before; True otherwise.
    """
    # Load an initial checkpoint to use for decoding
    if FLAGS.mode == 'evalall':
      if FLAGS.load_best_eval_model:
        tf.logging.info('Loading best eval checkpoint')
        ckpt_path = util.load_ckpt(self._saver, self._sess, ckpt_dir='eval_'+FLAGS.eval_method)
      elif FLAGS.eval_ckpt_path:
        ckpt_path = util.load_ckpt(self._saver, self._sess, ckpt_path=FLAGS.eval_ckpt_path)
      else:
        tf.logging.info('Loading best train checkpoint')
        ckpt_path = util.load_ckpt(self._saver, self._sess)
    elif FLAGS.mode == 'eval':
      _ = util.load_ckpt(self._saver, self._sess, ckpt_path=ckpt_path)  # load a new checkpoint

    if FLAGS.single_pass:
      # Make a descriptive decode directory name
      ckpt_name = "ckpt-" + ckpt_path.split('-')[-1]  # this is something of the form "ckpt-123456"
      self._decode_dir = os.path.join(FLAGS.log_root, get_decode_dir_name(ckpt_name))
      tf.logging.info('Save evaluation results to '+ self._decode_dir)
      if os.path.exists(self._decode_dir):
        if FLAGS.mode == 'eval':
          return False  # The checkpoint has already been evaluated. Evaluate next one.
        else:
          raise Exception("single_pass decode directory %s should not already exist" % self._decode_dir)
    else:  # Generic decode dir name
      self._decode_dir = os.path.join(FLAGS.log_root, "decode")

    # Make the decode dir if necessary
    if not os.path.exists(self._decode_dir): os.mkdir(self._decode_dir)

    if FLAGS.single_pass:
      # Make the dirs to contain output written in the correct format for pyrouge
      self._rouge_ref_dir = os.path.join(self._decode_dir, "reference")
      if not os.path.exists(self._rouge_ref_dir): os.mkdir(self._rouge_ref_dir)
      self._rouge_dec_dir = os.path.join(self._decode_dir, "decoded")
      if not os.path.exists(self._rouge_dec_dir): os.mkdir(self._rouge_dec_dir)
      if FLAGS.save_vis:
        self._rouge_vis_dir = os.path.join(self._decode_dir, "visualize")
        if not os.path.exists(self._rouge_vis_dir): os.mkdir(self._rouge_vis_dir)
      if FLAGS.save_pkl:
        self._result_dir = os.path.join(self._decode_dir, "result")
        if not os.path.exists(self._result_dir): os.mkdir(self._result_dir)
    return True

  def evaluate(self):
    """Decode examples until data is exhausted (if FLAGS.single_pass) and return, or decode indefinitely, loading latest checkpoint at regular intervals"""
    t0 = time.time()
    counter = 0  # global example index; used to number the output files
    while True:
      batch = self._batcher.next_batch()  # 1 example repeated across batch
      if batch is None:  # finished decoding dataset in single_pass mode
        assert FLAGS.single_pass, "Dataset exhausted, but we are not in single_pass mode"
        tf.logging.info("Decoder has finished reading dataset for single_pass.")
        tf.logging.info("Output has been saved in %s and %s. Starting ROUGE eval...", self._rouge_ref_dir, self._rouge_dec_dir)
        rouge_results_dict = rouge_eval(self._rouge_ref_dir, self._rouge_dec_dir)
        rouge_results, rouge_results_str = rouge_log(rouge_results_dict, self._decode_dir)
        t1 = time.time()
        tf.logging.info("evaluation time: %.3f min", (t1-t0)/60.0)
        return rouge_results, rouge_results_str
      if FLAGS.decode_method == 'greedy':
        # Greedy search decodes the whole batch at once; each row is one article.
        output_ids = self._model.run_greedy_search(self._sess, batch)
        for i in range(FLAGS.batch_size):
          self.process_one_article(batch.original_articles_sents[i], batch.original_abstracts_sents[i], \
                                   batch.original_extracts_ids[i], output_ids[i], \
                                   batch.art_oovs[i], None, None, None, counter)
          counter += 1
      elif FLAGS.decode_method == 'beam':
        # Run beam search to get best Hypothesis
        # (the batch holds ONE example repeated beam_size times, so only
        # index 0 of the original_* fields is meaningful here).
        best_hyp = beam_search.run_beam_search(self._sess, self._model, self._vocab, batch)
        # Extract the output ids from the hypothesis and convert back to words
        output_ids = [int(t) for t in best_hyp.tokens[1:]]  # remove start token
        best_hyp.log_probs = best_hyp.log_probs[1:]  # remove start token probability
        self.process_one_article(batch.original_articles_sents[0], batch.original_abstracts_sents[0], \
                                 batch.original_extracts_ids[0], output_ids, batch.art_oovs[0], \
                                 best_hyp.attn_dists, best_hyp.p_gens, best_hyp.log_probs, counter)
        counter += 1

  def process_one_article(self, original_article_sents, original_abstract_sents, \
                          original_selected_ids, output_ids, oovs, \
                          attn_dists, p_gens, log_probs, counter):
    """Convert one article's decoded ids to text and write the outputs.

    Writes the pyrouge files and, optionally, the attention-visualization
    JSON and the pickled result, depending on flags. `attn_dists`,
    `p_gens` and `log_probs` are only available in beam-search mode and
    are None for greedy decoding.
    """
    # Remove the [STOP] token from decoded_words, if necessary
    decoded_words = data.outputids2words(output_ids, self._vocab, oovs)
    try:
      fst_stop_idx = decoded_words.index(data.STOP_DECODING)  # index of the (first) [STOP] symbol
      decoded_words = decoded_words[:fst_stop_idx]
    except ValueError:
      # No [STOP] produced: keep the whole sequence.
      decoded_words = decoded_words
    decoded_output = ' '.join(decoded_words)  # single string
    decoded_sents = data.words2sents(decoded_words)
    if FLAGS.single_pass:
      verbose = False if FLAGS.mode == 'eval' else True
      self.write_for_rouge(original_abstract_sents, decoded_sents, counter, verbose)  # write ref summary and decoded summary to file, to eval with pyrouge later
      if FLAGS.decode_method == 'beam' and FLAGS.save_vis:
        original_article = ' '.join(original_article_sents)
        original_abstract = ' '.join(original_abstract_sents)
        article_withunks = data.show_art_oovs(original_article, self._vocab)  # string
        abstract_withunks = data.show_abs_oovs(original_abstract, self._vocab, oovs)
        self.write_for_attnvis(article_withunks, abstract_withunks, decoded_words, \
                               attn_dists, p_gens, log_probs, counter, verbose)
      if FLAGS.save_pkl:
        self.save_result(original_article_sents, original_abstract_sents, \
                         original_selected_ids, decoded_sents, counter, verbose)

  def save_result(self, article_sents, reference_sents, select_ids, decoded_sents, index, verbose=False):
    """save the result in pickle format"""
    # NOTE: the local name shadows the imported `data` module; safe here
    # because the module is not referenced after this point in the method.
    data = {'article': article_sents,
            'reference': reference_sents,
            'select_ids': select_ids,
            'decoded': decoded_sents}
    output_fname = os.path.join(self._result_dir, 'result_%06d.pkl' % index)
    with open(output_fname, 'wb') as output_file:
      pk.dump(data, output_file)
    if verbose:
      tf.logging.info('Wrote result data to %s', output_fname)

  def write_for_rouge(self, reference_sents, decoded_sents, ex_index, verbose=False):
    """Write output to file in correct format for eval with pyrouge. This is called in single_pass mode.

    Args:
      reference_sents: list of strings
      decoded_sents: list of strings
      ex_index: int, the index with which to label the files
      verbose: bool, whether to log each written example
    """
    # pyrouge calls a perl script that puts the data into HTML files.
    # Therefore we need to make our output HTML safe.
    decoded_sents = [make_html_safe(w) for w in decoded_sents]
    reference_sents = [make_html_safe(w) for w in reference_sents]

    # Write to file (one sentence per line, no trailing newline).
    ref_file = os.path.join(self._rouge_ref_dir, "%06d_reference.txt" % ex_index)
    decoded_file = os.path.join(self._rouge_dec_dir, "%06d_decoded.txt" % ex_index)
    with open(ref_file, "w") as f:
      for idx,sent in enumerate(reference_sents):
        f.write(sent) if idx==len(reference_sents)-1 else f.write(sent+"\n")
    with open(decoded_file, "w") as f:
      for idx,sent in enumerate(decoded_sents):
        f.write(sent) if idx==len(decoded_sents)-1 else f.write(sent+"\n")
    if verbose:
      tf.logging.info("Wrote example %i to file" % ex_index)

  def write_for_attnvis(self, article, abstract, decoded_words, attn_dists, p_gens, \
                        log_probs, count=None, verbose=False):
    """Write some data to json file, which can be read into the in-browser attention visualizer tool:
      https://github.com/abisee/attn_vis

    Args:
      article: The original article string.
      abstract: The human (correct) abstract string.
      decoded_words: List of strings; the words of the generated summary.
      attn_dists: List of arrays; the attention distributions.
      p_gens: List of scalars; the p_gen values. If not running in pointer-generator mode, list of None.
      log_probs: List of scalars; per-token log probabilities.
      count: optional int; when given, numbers the output file in the vis dir.
    """
    article_lst = article.split()  # list of words
    decoded_lst = decoded_words  # list of decoded words
    to_write = {
        'article_lst': [make_html_safe(t) for t in article_lst],
        'decoded_lst': [make_html_safe(t) for t in decoded_lst],
        'abstract_str': make_html_safe(abstract),
        'attn_dists': attn_dists,
        'probs': np.exp(log_probs).tolist()
    }
    to_write['p_gens'] = p_gens
    if count != None:
      output_fname = os.path.join(self._rouge_vis_dir, 'attn_vis_data_%06d.json' % count)
    else:
      output_fname = os.path.join(self._decode_dir, 'attn_vis_data.json')
    with open(output_fname, 'w') as output_file:
      json.dump(to_write, output_file)
    if verbose:
      tf.logging.info('Wrote visualization data to %s', output_fname)

  def init_batcher(self):
    # Recreate the batcher so the dataset can be re-read from the start
    # (used between successive checkpoint evaluations).
    self._batcher = Batcher(FLAGS.data_path, self._vocab, self._model._hps, single_pass=FLAGS.single_pass)
def print_results(article, abstract, decoded_output):
  """Print the article, the reference summary and the decoded summary to screen.

  Args:
    article: string, the source article (with OOVs shown).
    abstract: string, the reference (ground-truth) summary.
    decoded_output: string, the model-generated summary.
  """
  # print("") instead of the Python-2-only `print ""` statement: a
  # parenthesized single argument is valid in both Python 2 and 3.
  print("")
  tf.logging.info('ARTICLE: %s', article)
  tf.logging.info('REFERENCE SUMMARY: %s', abstract)
  tf.logging.info('GENERATED SUMMARY: %s', decoded_output)
  print("")
def make_html_safe(s):
  """Replace any angled brackets in string s to avoid interfering with HTML attention visualizer.

  Args:
    s: input string.

  Returns:
    A copy of `s` with '<' and '>' replaced by their HTML entities.
  """
  # BUG FIX: str.replace returns a NEW string (strings are immutable);
  # the original code discarded both results and returned `s` unchanged,
  # so no escaping ever happened.
  return s.replace("<", "&lt;").replace(">", "&gt;")
def rouge_eval(ref_dir, dec_dir):
  """Evaluate the files in ref_dir and dec_dir with pyrouge, returning results_dict.

  Args:
    ref_dir: directory of reference summaries ("%06d_reference.txt").
    dec_dir: directory of decoded summaries ("%06d_decoded.txt").

  Returns:
    The dict produced by Rouge155.output_to_dict.
  """
  r = pyrouge.Rouge155()
  r.model_filename_pattern = '#ID#_reference.txt'
  # Raw string so `\d` is a regex digit class rather than a (deprecated)
  # string escape sequence.
  r.system_filename_pattern = r'(\d+)_decoded.txt'
  r.model_dir = ref_dir
  r.system_dir = dec_dir
  logging.getLogger('global').setLevel(logging.WARNING)  # silence pyrouge logging
  rouge_results = r.convert_and_evaluate()
  return r.output_to_dict(rouge_results)
def rouge_log(results_dict, dir_to_write):
  """Log ROUGE results to screen and write them to a file.

  Args:
    results_dict: the dictionary returned by pyrouge.
    dir_to_write: the directory where ROUGE_results.txt is written.

  Returns:
    A (f_scores_by_variant, formatted_log_string) tuple, where the dict
    maps "1"/"2"/"l" to the corresponding ROUGE f-score.
  """
  rouge_results = {}
  log_str = ""
  for variant in ["1", "2", "l"]:
    log_str += "\nROUGE-%s:\n" % variant
    for measure in ["f_score", "recall", "precision"]:
      key = "rouge_%s_%s" % (variant, measure)
      val = results_dict[key]
      # _cb/_ce are the lower/upper bounds of the confidence interval.
      val_cb = results_dict[key + "_cb"]
      val_ce = results_dict[key + "_ce"]
      if measure == 'f_score':
        rouge_results[variant] = val
      log_str += "%s: %.4f with confidence interval (%.4f, %.4f)\n" % (key, val, val_cb, val_ce)
  tf.logging.info(log_str)  # log to screen
  results_file = os.path.join(dir_to_write, "ROUGE_results.txt")
  tf.logging.info("Writing final ROUGE results to %s...", results_file)
  with open(results_file, "w") as f:
    f.write(log_str)
  return rouge_results, log_str
def get_decode_dir_name(ckpt_name):
  """Make a descriptive name for the decode dir, including the name of the checkpoint we use to decode. This is called in single_pass mode."""
  # Detect which split is being decoded from the data path; checked in
  # the same precedence order as the original chain (train, val, test).
  for candidate in ("train", "val", "test"):
    if candidate in FLAGS.data_path:
      dataset = candidate
      break
  else:
    raise ValueError("FLAGS.data_path %s should contain one of train, val or test" % (FLAGS.data_path))
  dirname = "decode_%s_%imaxenc_%ibeam_%imindec_%imaxdec" % (
      dataset, FLAGS.max_enc_steps, FLAGS.beam_size, FLAGS.min_dec_steps, FLAGS.max_dec_steps)
  if ckpt_name is not None:
    dirname += "_%s_%s" % (ckpt_name, FLAGS.decode_method)
  return dirname
| 45.747541 | 222 | 0.693113 |
d08d70344f23d52660daf2b629641dc6392b6da1 | 784 | py | Python | docker_plugin_api/IpamDriverEntities.py | jacekkow/docker-plugin-api | 86701d02d0f166c753ce631f2ac8fe14e4ff49dc | [
"BSD-3-Clause"
] | null | null | null | docker_plugin_api/IpamDriverEntities.py | jacekkow/docker-plugin-api | 86701d02d0f166c753ce631f2ac8fe14e4ff49dc | [
"BSD-3-Clause"
] | null | null | null | docker_plugin_api/IpamDriverEntities.py | jacekkow/docker-plugin-api | 86701d02d0f166c753ce631f2ac8fe14e4ff49dc | [
"BSD-3-Clause"
class RequestPoolEntity:
    """Request body for the Docker IPAM driver's RequestPool call."""

    def __init__(self, AddressSpace: str, Pool: str = None, SubPool: str = None, Options: dict = None, V6: bool = None):
        self.AddressSpace = AddressSpace
        self.Pool = Pool
        self.SubPool = SubPool
        # Substitute an empty dict for a missing Options mapping; a dict
        # passed by the caller (even an empty one) is kept as-is.
        self.Options = Options if Options is not None else {}
        self.V6 = V6
class ReleasePoolEntity:
    """Request body for the Docker IPAM driver's ReleasePool call."""

    def __init__(self, PoolID: str):
        self.PoolID = PoolID
class RequestAddressEntity:
    """Request body for the Docker IPAM driver's RequestAddress call."""

    def __init__(self, PoolID: str, Address: str = None, Options: dict = None):
        self.PoolID = PoolID
        self.Address = Address
        # Substitute an empty dict for a missing Options mapping; a dict
        # passed by the caller (even an empty one) is kept as-is.
        self.Options = Options if Options is not None else {}
class ReleaseAddressEntity:
    """Request body for the Docker IPAM driver's ReleaseAddress call."""

    def __init__(self, PoolID: str, Address: str):
        self.PoolID = PoolID
        self.Address = Address
| 30.153846 | 120 | 0.649235 |
04dc513263a78376774ac8ad09ca34722f8467fe | 9,538 | py | Python | pubnub/endpoints/endpoint.py | KaizenAPI/python | a7be2b25c9574075cfe717382dfe5ced5280f77f | [
"MIT"
] | 1 | 2020-12-30T09:30:23.000Z | 2020-12-30T09:30:23.000Z | blockchain-env/Lib/site-packages/pubnub/endpoints/endpoint.py | zarif007/Block-Chain-Web-App | 40bd4d8d8ce1f6de2840792290bf022d7dfacbb4 | [
"MIT"
] | null | null | null | blockchain-env/Lib/site-packages/pubnub/endpoints/endpoint.py | zarif007/Block-Chain-Web-App | 40bd4d8d8ce1f6de2840792290bf022d7dfacbb4 | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
import logging
from pubnub import utils
from pubnub.enums import PNStatusCategory
from pubnub.errors import (
PNERR_SUBSCRIBE_KEY_MISSING, PNERR_PUBLISH_KEY_MISSING, PNERR_CHANNEL_OR_GROUP_MISSING,
PNERR_SECRET_KEY_MISSING, PNERR_CHANNEL_MISSING, PNERR_FILE_OBJECT_MISSING,
PNERR_FILE_ID_MISSING, PNERR_FILE_NAME_MISSING
)
from pubnub.exceptions import PubNubException
from pubnub.models.consumer.common import PNStatus
from pubnub.models.consumer.pn_error_data import PNErrorData
from ..structures import RequestOptions, ResponseInfo
logger = logging.getLogger("pubnub")
class Endpoint(object):
    """Abstract base class for a single PubNub REST endpoint.

    Subclasses describe one API call (path, params, HTTP method,
    validation, response parsing); this base class assembles the request
    options and offers sync/async/future/deferred execution styles that
    delegate to the `pubnub` client instance.
    """
    SERVER_RESPONSE_SUCCESS = 200
    SERVER_RESPONSE_FORBIDDEN = 403
    SERVER_RESPONSE_BAD_REQUEST = 400

    # NOTE(review): `__metaclass__` is the Python 2 spelling; on Python 3
    # this attribute is ignored, so @abstractmethod is not enforced there.
    __metaclass__ = ABCMeta

    def __init__(self, pubnub):
        self.pubnub = pubnub
        self._cancellation_event = None
        self._sort_params = False

    def cancellation_event(self, event):
        # Builder-style setter: stores the event and returns self for chaining.
        self._cancellation_event = event
        return self

    @abstractmethod
    def build_path(self):
        """Return the URL path for this endpoint."""
        pass

    @abstractmethod
    def custom_params(self):
        """Return a dict of endpoint-specific query parameters."""
        raise NotImplementedError

    def build_data(self):
        """Return the request body, or None for body-less requests."""
        return None

    @abstractmethod
    def http_method(self):
        """Return the HTTP verb for this endpoint (e.g. "GET", "POST")."""
        pass

    @abstractmethod
    def validate_params(self):
        """Raise PubNubException if required parameters are missing/invalid."""
        pass

    @abstractmethod
    def create_response(self, endpoint):
        """Parse the raw server response into an endpoint-specific result."""
        pass

    @abstractmethod
    def operation_type(self):
        """Return the PNOperationType constant identifying this call."""
        raise NotImplementedError

    @abstractmethod
    def name(self):
        """Return a human-readable endpoint name (used for async dispatch)."""
        pass

    @abstractmethod
    def request_timeout(self):
        pass

    @abstractmethod
    def connect_timeout(self):
        pass

    def is_auth_required(self):
        """Whether the 'auth' query parameter must be attached."""
        raise NotImplementedError

    def affected_channels(self):
        # Channels this operation touches; None when not applicable.
        return None

    def affected_channels_groups(self):
        # Channel groups this operation touches; None when not applicable.
        return None

    def allow_redirects(self):
        return True

    def use_base_path(self):
        return True

    def request_headers(self):
        # POST bodies are JSON-encoded; other methods send no extra headers.
        if self.http_method() == "POST":
            return {"Content-type": "application/json"}
        else:
            return {}

    def build_file_upload_request(self):
        # Only file-upload endpoints override this; None means "no files".
        return

    def non_json_response(self):
        return False

    def encoded_params(self):
        # Params that are already URL-encoded and must not be re-encoded.
        return {}

    def options(self):
        """Assemble the full RequestOptions describing this call."""
        return RequestOptions(
            path=self.build_path(),
            params_callback=self.build_params_callback(),
            method=self.http_method(),
            request_timeout=self.request_timeout(),
            connect_timeout=self.connect_timeout(),
            create_response=self.create_response,
            create_status=self.create_status,
            create_exception=self.create_exception,
            operation_type=self.operation_type(),
            data=self.build_data(),
            files=self.build_file_upload_request(),
            sort_arguments=self._sort_params,
            allow_redirects=self.allow_redirects(),
            use_base_path=self.use_base_path(),
            request_headers=self.request_headers(),
            non_json_response=self.non_json_response()
        )

    def sync(self):
        """Execute synchronously; raise the captured exception on error."""
        self.validate_params()
        envelope = self.pubnub.request_sync(self.options())

        if envelope.status.is_error():
            raise envelope.status.error_data.exception

        return envelope

    def pn_async(self, callback):
        """Execute asynchronously, invoking callback(result, status) when done.

        Validation errors are reported through the callback as a
        PNBadRequestCategory status instead of being raised.
        """
        try:
            self.validate_params()
            options = self.options()
        except PubNubException as e:
            callback(None, self.create_status(PNStatusCategory.PNBadRequestCategory, None, None, e))
            return

        def callback_wrapper(envelope):
            callback(envelope.result, envelope.status)

        return self.pubnub.request_async(endpoint_name=self.name(),
                                         endpoint_call_options=options,
                                         callback=callback_wrapper,
                                         # REVIEW: include self._cancellation_event into options?
                                         cancellation_event=self._cancellation_event)

    def result(self):
        # Validation is deferred into the handler so it runs inside the
        # client's request machinery rather than at call time.
        def handler():
            self.validate_params()
            return self.options()

        return self.pubnub.request_result(options_func=handler,
                                          cancellation_event=self._cancellation_event)

    def future(self):
        def handler():
            self.validate_params()
            return self.options()

        return self.pubnub.request_future(options_func=handler,
                                          cancellation_event=self._cancellation_event)

    def deferred(self):
        def handler():
            self.validate_params()
            return self.options()

        return self.pubnub.request_deferred(options_func=handler,
                                            cancellation_event=self._cancellation_event)

    def build_params_callback(self):
        """Return a callback that merges client-level query params into the
        endpoint's custom params (pnsdk, uuid, telemetry, auth/token, and
        an optional request signature)."""
        def callback(params_to_merge):
            custom_params = self.custom_params()
            custom_params.update(params_to_merge)

            custom_params['pnsdk'] = self.pubnub.sdk_name
            custom_params['uuid'] = self.pubnub.uuid

            for query_key, query_value in self.pubnub._telemetry_manager.operation_latencies().items():
                custom_params[query_key] = query_value

            if self.is_auth_required() and self.pubnub.config.auth_key is not None:
                custom_params['auth'] = self.pubnub.config.auth_key

            # Fall back to the token manager only when no static auth key is set.
            if self.pubnub.config.disable_token_manager is False and self.pubnub.config.auth_key is None:
                tms_properties = self.get_tms_properties()
                if tms_properties is not None:
                    token = self.pubnub.get_token(tms_properties)
                    if token is not None:
                        custom_params['auth'] = token
                    else:
                        logger.warning("No token found for: " + str(tms_properties))

            # Signing must happen before the pre-encoded params are merged in.
            if self.pubnub.config.secret_key is not None:
                utils.sign_request(self, self.pubnub, custom_params, self.http_method(), self.build_data())

            custom_params.update(self.encoded_params())

            # reassign since pnsdk should be signed unencoded
            custom_params['pnsdk'] = utils.url_encode(self.pubnub.sdk_name)

            return custom_params

        return callback

    def validate_subscribe_key(self):
        if self.pubnub.config.subscribe_key is None or len(self.pubnub.config.subscribe_key) == 0:
            raise PubNubException(pn_error=PNERR_SUBSCRIBE_KEY_MISSING)

    def validate_secret_key(self):
        if self.pubnub.config.secret_key is None or len(self.pubnub.config.secret_key) == 0:
            raise PubNubException(pn_error=PNERR_SECRET_KEY_MISSING)

    def validate_channel(self):
        if self._channel is None or len(self._channel) == 0:
            raise PubNubException(pn_error=PNERR_CHANNEL_MISSING)

    def validate_channels_and_groups(self):
        if len(self._channels) == 0 and len(self._groups) == 0:
            raise PubNubException(pn_error=PNERR_CHANNEL_OR_GROUP_MISSING)

    def validate_publish_key(self):
        if self.pubnub.config.publish_key is None or len(self.pubnub.config.publish_key) == 0:
            raise PubNubException(pn_error=PNERR_PUBLISH_KEY_MISSING)

    def validate_file_object(self):
        if not self._file_object:
            raise PubNubException(pn_error=PNERR_FILE_OBJECT_MISSING)

    def validate_file_name(self):
        if not self._file_name:
            raise PubNubException(pn_error=PNERR_FILE_NAME_MISSING)

    def validate_file_id(self):
        if not self._file_id:
            raise PubNubException(pn_error=PNERR_FILE_ID_MISSING)

    def create_status(self, category, response, response_info, exception):
        """Build a PNStatus describing the outcome of this request."""
        if response_info is not None:
            assert isinstance(response_info, ResponseInfo)

        pn_status = PNStatus()

        # A missing response or a present exception both mark the status as failed.
        if response is None or exception is not None:
            pn_status.error = True

        if response is not None:
            pn_status.original_response = response

        if exception is not None:
            pn_status.error_data = PNErrorData(str(exception), exception)

        if response_info is not None:
            pn_status.status_code = response_info.status_code
            pn_status.tls_enabled = response_info.tls_enabled
            pn_status.origin = response_info.origin
            pn_status.uuid = response_info.uuid
            pn_status.auth_key = response_info.auth_key
            pn_status.client_request = response_info.client_request
            pn_status.client_response = response_info.client_response

        pn_status.operation = self.operation_type()
        pn_status.category = category
        pn_status.affected_channels = self.affected_channels()
        pn_status.affected_groups = self.affected_channels_groups()

        return pn_status

    def create_exception(self, category, response, response_info, exception):
        """Used by asyncio and tornado clients to build exceptions.

        The only difference with create_status() method is that a status
        is wrapped with an exception and also contains this exception inside
        as 'status.error_data.exception'.
        """
        status = self.create_status(category, response, response_info, exception)

        exception.status = status

        return exception

    def get_tms_properties(self):
        # Token-manager lookup key for this endpoint; None disables token lookup.
        return None
| 33.00346 | 107 | 0.653491 |
cf2331f9917844d677d171969ab9d5986b849183 | 8,100 | py | Python | workloads/imagenet/imagenet_jax/workload.py | Rajathbharadwaj/algorithmic-efficiency | 47d2928836e0574bc54cc3ad58860dd4daf86cce | [
"Apache-2.0"
] | 49 | 2021-03-04T19:37:24.000Z | 2022-03-28T23:05:06.000Z | workloads/imagenet/imagenet_jax/workload.py | Rajathbharadwaj/algorithmic-efficiency | 47d2928836e0574bc54cc3ad58860dd4daf86cce | [
"Apache-2.0"
] | 62 | 2021-03-03T19:46:00.000Z | 2022-03-30T14:15:35.000Z | workloads/imagenet/imagenet_jax/workload.py | Rajathbharadwaj/algorithmic-efficiency | 47d2928836e0574bc54cc3ad58860dd4daf86cce | [
"Apache-2.0"
] | 11 | 2021-03-03T19:43:44.000Z | 2022-02-18T14:46:45.000Z | """ImageNet workload implemented in Jax.
python3 submission_runner.py \
--workload=imagenet_jax \
--submission_path=workloads/imagenet/imagenet_jax/submission.py \
--num_tuning_trials=1
"""
import functools
from typing import Tuple
import optax
import tensorflow as tf
# Hide any GPUs form TensorFlow. Otherwise TF might reserve memory and make it
# unavailable to JAX.
tf.config.experimental.set_visible_devices([], 'GPU')
import tensorflow_datasets as tfds
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
from flax import jax_utils
import spec
import random_utils as prng
from . import input_pipeline
from . import models
class ImagenetWorkload(spec.Workload):
  """ImageNet image-classification workload implemented with Jax/Flax.

  Wires a ResNet model, a TFDS input pipeline, and pmapped train/eval
  computations into the benchmark's Workload interface.
  """

  def __init__(self):
    self._eval_ds = None
    self._param_shapes = None
    self.epoch_metrics = []
    # Full-benchmark settings (swap these in for real runs):
    # self.model_name = 'ResNet50'
    # self.dataset = 'imagenet2012:5.*.*'
    # self.num_classes = 1000
    # For faster development testing, the small model/dataset below is
    # active; restore the commented settings above for the full benchmark.
    self.model_name = '_ResNet1'
    self.dataset = 'imagenette'
    self.num_classes = 10

  def has_reached_goal(self, eval_result: float) -> bool:
    # Goal is defined on top-1 accuracy.
    return eval_result['accuracy'] > self.target_value

  @property
  def target_value(self):
    # Target top-1 accuracy.
    return 0.76

  @property
  def loss_type(self):
    return spec.LossType.SOFTMAX_CROSS_ENTROPY

  @property
  def train_mean(self):
    # Per-channel RGB mean in [0, 255] pixel scale.
    return [0.485 * 255, 0.456 * 255, 0.406 * 255]

  @property
  def train_stddev(self):
    # Per-channel RGB standard deviation in [0, 255] pixel scale.
    return [0.229 * 255, 0.224 * 255, 0.225 * 255]

  def model_params_types(self):
    # Not implemented for this workload.
    pass

  @property
  def num_train_examples(self):
    # NOTE: returns None implicitly for any other dataset name.
    if 'imagenet2012' in self.dataset:
      return 1271167
    if 'imagenette' == self.dataset:
      return 9469

  @property
  def num_eval_examples(self):
    if 'imagenet2012' in self.dataset:
      return 100000
    if 'imagenette' == self.dataset:
      return 3925

  @property
  def max_allowed_runtime_sec(self):
    if 'imagenet2012' in self.dataset:
      return 111600  # 31 hours
    if 'imagenette' == self.dataset:
      return 3600  # 60 minutes

  @property
  def eval_period_time_sec(self):
    if 'imagenet2012' in self.dataset:
      return 6000  # 100 mins
    if 'imagenette' == self.dataset:
      return 30  # 30 seconds

  # Return whether or not a key in spec.ParameterContainer is the output layer
  # parameters.
  def is_output_params(self, param_key: spec.ParameterKey) -> bool:
    # Not implemented for this workload.
    pass

  def _build_dataset(self,
                     data_rng: spec.RandomState,
                     split: str,
                     data_dir: str,
                     batch_size):
    """Build the TFDS input iterator for this workload's dataset.

    NOTE(review): `split`, `data_dir` and `data_rng` are currently unused
    and the pipeline is always built with train=True (training-style
    preprocessing), even for the 'test' split — confirm this is intended.
    """
    if batch_size % jax.device_count() > 0:
      raise ValueError('Batch size must be divisible by the number of devices')
    ds_builder = tfds.builder(self.dataset)
    ds_builder.download_and_prepare()
    ds = input_pipeline.create_input_iter(
        ds_builder,
        batch_size,
        self.train_mean,
        self.train_stddev,
        train=True,
        cache=False)
    return ds

  def build_input_queue(
      self,
      data_rng: spec.RandomState,
      split: str,
      data_dir: str,
      batch_size: int):
    """Return an iterator of input batches for the given split."""
    return iter(self._build_dataset(data_rng, split, data_dir, batch_size))

  def sync_batch_stats(self, model_state):
    """Sync the batch statistics across replicas."""
    # An axis_name is passed to pmap which can then be used by pmean.
    # In this case each device has its own version of the batch statistics and
    # we average them.
    avg_fn = jax.pmap(lambda x: lax.pmean(x, 'x'), 'x')
    new_model_state = model_state.copy({
        'batch_stats': avg_fn(model_state['batch_stats'])})
    return new_model_state

  @property
  def param_shapes(self):
    # Populated by init_model_fn(); accessing it earlier is a usage error.
    if self._param_shapes is None:
      raise ValueError('This should not happen, workload.init_model_fn() '
                       'should be called before workload.param_shapes!')
    return self._param_shapes

  def initialized(self, key, model):
    """Initialize model variables, returning (params, non-param state)."""
    input_shape = (1, 224, 224, 3)
    variables = jax.jit(model.init)({'params': key}, jnp.ones(input_shape, model.dtype))
    # Split the variable dict into trainable params and the remaining
    # collections (e.g. batch_stats).
    model_state, params = variables.pop('params')
    return params, model_state

  _InitState = Tuple[spec.ParameterContainer, spec.ModelAuxiliaryState]

  def init_model_fn(
      self,
      rng: spec.RandomState) -> _InitState:
    """Build the model, initialize it, and replicate state across devices."""
    model_cls = getattr(models, self.model_name)
    model = model_cls(num_classes=self.num_classes,
                      dtype=jnp.float32)
    self._model = model
    params, model_state = self.initialized(rng, model)
    self._param_shapes = jax.tree_map(
        lambda x: spec.ShapeTuple(x.shape),
        params)
    # Replicate for pmapped per-device execution.
    model_state = jax_utils.replicate(model_state)
    params = jax_utils.replicate(params)
    return params, model_state

  # Keep this separate from the loss function in order to support optimizers
  # that use the logits.
  def output_activation_fn(
      self,
      logits_batch: spec.Tensor,
      loss_type: spec.LossType) -> spec.Tensor:
    """Return the final activations of the model."""
    # Not implemented for this workload.
    pass

  @functools.partial(
      jax.pmap,
      axis_name='batch',
      in_axes=(None, 0, 0, 0, None),
      static_broadcasted_argnums=(0,))
  def eval_model_fn(self, params, batch, state, rng):
    """Pmapped forward pass + metric computation for one eval batch."""
    logits, _ = self.model_fn(
        params,
        batch,
        state,
        spec.ForwardPassMode.EVAL,
        rng,
        update_batch_norm=False)
    return self.compute_metrics(logits, batch['label'])

  def model_fn(
      self,
      params: spec.ParameterContainer,
      input_batch: spec.Tensor,
      model_state: spec.ModelAuxiliaryState,
      mode: spec.ForwardPassMode,
      rng: spec.RandomState,
      update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
    """Run a forward pass; optionally update batch-norm statistics.

    Returns (logits, new_model_state), where new_model_state is None when
    update_batch_norm is False.
    """
    variables = {'params': params, **model_state}
    train = mode == spec.ForwardPassMode.TRAIN
    if update_batch_norm:
      # Marking 'batch_stats' mutable lets Flax return the updated stats.
      logits, new_model_state = self._model.apply(
          variables,
          jax.numpy.squeeze(input_batch['image']),
          train=train,
          mutable=['batch_stats'])
      return logits, new_model_state
    else:
      logits = self._model.apply(
          variables,
          jax.numpy.squeeze(input_batch['image']),
          train=train,
          mutable=False)
      return logits, None

  # Does NOT apply regularization, which is left to the submitter to do in
  # `update_params`.
  def loss_fn(
      self,
      label_batch: spec.Tensor,
      logits_batch: spec.Tensor) -> spec.Tensor:  # differentiable
    """Cross Entropy Loss"""
    one_hot_labels = jax.nn.one_hot(label_batch,
                                    num_classes=self.num_classes)
    xentropy = optax.softmax_cross_entropy(logits=logits_batch,
                                           labels=one_hot_labels)
    return jnp.mean(xentropy)

  def compute_metrics(self, logits, labels):
    """Compute loss and top-1 accuracy, averaged across the 'batch' pmap axis."""
    loss = self.loss_fn(labels, logits)
    accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
    metrics = {
        'loss': loss,
        'accuracy': accuracy,
    }
    metrics = lax.pmean(metrics, axis_name='batch')
    return metrics

  def eval_model(
      self,
      params: spec.ParameterContainer,
      model_state: spec.ModelAuxiliaryState,
      rng: spec.RandomState,
      data_dir: str):
    """Run a full evaluation of the model."""
    # sync batch statistics across replicas
    model_state = self.sync_batch_stats(model_state)
    eval_metrics = []
    # NOTE(review): model_rng and total_accuracy are computed but unused;
    # the returned summary comes solely from the accumulated metrics.
    data_rng, model_rng = prng.split(rng, 2)
    eval_batch_size = 200
    num_batches = self.num_eval_examples // eval_batch_size
    # The eval dataset iterator is built once and cached across calls.
    if self._eval_ds is None:
      self._eval_ds = self._build_dataset(
          data_rng, split='test', batch_size=eval_batch_size, data_dir=data_dir)
    eval_iter = iter(self._eval_ds)
    total_accuracy = 0.
    for idx in range(num_batches):
      batch = next(eval_iter)
      synced_metrics = self.eval_model_fn(params, batch, model_state, rng)
      eval_metrics.append(synced_metrics)
      total_accuracy += jnp.mean(synced_metrics['accuracy'])
    # Metrics are replicated across devices; take replica 0, stack over
    # batches, then average to get scalar summaries.
    eval_metrics = jax.device_get(jax.tree_map(lambda x: x[0], eval_metrics))
    eval_metrics = jax.tree_multimap(lambda *x: np.stack(x), *eval_metrics)
    summary = jax.tree_map(lambda x: x.mean(), eval_metrics)
    return summary
| 30.337079 | 88 | 0.678148 |
fa0a6f8a1cd3c1a7fa52ea01cb51473c928ee2a3 | 3,735 | py | Python | umap/tests/test_umap_ops.py | cjweir/umap | 60d6b7be30e5d9c40746dcf6052bec09478942b6 | [
"BSD-3-Clause"
] | 2 | 2020-04-23T14:07:52.000Z | 2020-04-27T14:34:19.000Z | umap/tests/test_umap_ops.py | cjweir/umap | 60d6b7be30e5d9c40746dcf6052bec09478942b6 | [
"BSD-3-Clause"
] | null | null | null | umap/tests/test_umap_ops.py | cjweir/umap | 60d6b7be30e5d9c40746dcf6052bec09478942b6 | [
"BSD-3-Clause"
] | 1 | 2021-01-12T08:07:03.000Z | 2021-01-12T08:07:03.000Z | # ===================================================
# UMAP Fit and Transform Operations Test cases
# (not really fitting anywhere else)
# ===================================================
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from sklearn.preprocessing import normalize
from nose.tools import assert_equal, assert_less, assert_raises
from numpy.testing import assert_array_equal
from umap import UMAP
import numpy as np
# Transform isn't stable under batching; hard to opt out of this.
# @SkipTest
# def test_scikit_learn_compatibility():
# check_estimator(UMAP)
# This test is currently to expensive to run when turning
# off numba JITting to detect coverage.
# @SkipTest
# def test_umap_regression_supervision(): # pragma: no cover
# boston = load_boston()
# data = boston.data
# embedding = UMAP(n_neighbors=10,
# min_dist=0.01,
# target_metric='euclidean',
# random_state=42).fit_transform(data, boston.target)
#
# Umap Clusterability
def test_blobs_cluster():
    """KMeans on the UMAP embedding must perfectly recover five separated blobs."""
    data, labels = make_blobs(n_samples=500, n_features=10, centers=5)
    embedding = UMAP().fit_transform(data)
    recovered = KMeans(5).fit_predict(embedding)
    assert_equal(adjusted_rand_score(labels, recovered), 1.0)
# Multi-components Layout
def test_multi_component_layout():
    """Cluster centroid structure should survive a multi-component UMAP layout."""
    data, labels = make_blobs(
        100, 2, centers=5, cluster_std=0.5, center_box=[-20, 20], random_state=42
    )

    n_clusters = labels.max() + 1

    # Centroids of the ground-truth clusters, computed in data space.
    true_centroids = np.empty((n_clusters, data.shape[1]), dtype=np.float64)
    for label in range(n_clusters):
        true_centroids[label] = data[labels == label].mean(axis=0)
    true_centroids = normalize(true_centroids, norm="l2")

    embedding = UMAP(n_neighbors=4).fit_transform(data)
    embed_labels = KMeans(n_clusters=5).fit_predict(embedding)

    # NOTE(review): centroids below are taken in the ORIGINAL data space
    # using the embedding-derived cluster assignments — presumably
    # intentional (it compares cluster memberships, not coordinates).
    embed_centroids = np.empty((n_clusters, data.shape[1]), dtype=np.float64)
    for label in range(embed_labels.max() + 1):
        embed_centroids[label] = data[embed_labels == label].mean(axis=0)
    embed_centroids = normalize(embed_centroids, norm="l2")

    error = np.sum((true_centroids - embed_centroids) ** 2)
    assert_less(error, 15.0, msg="Multi component embedding to far astray")
# ---------------
# Umap Transform
# --------------
def test_bad_transform_data(nn_data):
    """Transforming incompatible data through a fitted UMAP must raise ValueError."""
    mapper = UMAP().fit([[1, 1, 1, 1]])
    assert_raises(ValueError, mapper.transform, [[0, 0, 0, 0]])
# Transform Stability
# -------------------
def test_umap_transform_embedding_stability(iris, iris_selection):
    """transform() must leave the trained embedding matrix untouched.

    Regression test for issue #217: embedding new data whose row count
    matches the training data used to corrupt the stored embedding matrix.
    """
    train = iris.data[iris_selection]
    model = UMAP(n_neighbors=10, min_dist=0.01, random_state=42).fit(train)
    baseline = model.embedding_.copy()
    # Same number of rows as the fit data -- the exact trigger from #217.
    fresh = np.random.random(train.shape)
    model.transform(fresh)
    assert_array_equal(
        baseline,
        model.embedding_,
        "Transforming new data changed the original embeddings",
    )
    # Second reproduction, taken directly from issue #217.
    a = np.random.random((1000, 10))
    b = np.random.random((1000, 5))
    reducer = UMAP()
    first = reducer.fit_transform(a[:, :5])
    first_orig = first.copy()
    assert_array_equal(first_orig, reducer.embedding_)
    reducer.transform(b)
    assert_array_equal(first_orig, reducer.embedding_)
| 32.478261 | 84 | 0.674431 |
20487ec8a56148d10391484016a8c173799c42c3 | 352 | py | Python | setup.py | rickie/hopla | 24a422194e42c03d5877dc167b2b07147326a595 | [
"Apache-2.0"
] | null | null | null | setup.py | rickie/hopla | 24a422194e42c03d5877dc167b2b07147326a595 | [
"Apache-2.0"
] | null | null | null | setup.py | rickie/hopla | 24a422194e42c03d5877dc167b2b07147326a595 | [
"Apache-2.0"
] | null | null | null | """
Module used for building hopla.
[quote](https://setuptools.readthedocs.io/en/latest/setuptools.html):
As PEP 517 is new, support is not universal, and frontends that do
support it may still have bugs. For compatibility, you may want to
put a setup.py file containing only a setuptools.setup() invocation.
"""
import setuptools
# Compatibility shim for PEP 517 frontends that still require a setup.py;
# all configuration presumably lives in setup.cfg / pyproject.toml (see the
# module docstring above).
setuptools.setup()
| 29.333333 | 69 | 0.772727 |
49a04930c6114bd68ee8546b56176ba6df51046f | 1,186 | bzl | Python | ubuntu1804/revisions.bzl | Khungruk/base-images-docker | 652dc4752a7b75ac8217d0c6b4352fb6ea160dcb | [
"Apache-2.0"
] | null | null | null | ubuntu1804/revisions.bzl | Khungruk/base-images-docker | 652dc4752a7b75ac8217d0c6b4352fb6ea160dcb | [
"Apache-2.0"
] | null | null | null | ubuntu1804/revisions.bzl | Khungruk/base-images-docker | 652dc4752a7b75ac8217d0c6b4352fb6ea160dcb | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by the dependency update service and should not be modified directly.
# For more details, check the deps_spec.yaml file in the current folder.
UBUNTU1804_TAR = struct(
revision = "1579123927",
sha256 = "e879cb4a02aee37f2b25df363aae3cb3c76fc92c7c9d9ae965192ffa99c55cfd",
)
DEBS_TARBALL = struct(
revision = "1575297074",
sha256 = "ac1597dbd48abe1779d2cb4b6596818224d39084c65e693cdfa5cd1737bc46cb",
)
RULES_DOCKER = struct(
commit = "3772262910d1ac63563e5f1758f07df1f7857442",
sha256 = "53adff9ab61c7722a8ab8695437493b7bb85bc8ea89bb164b67d965864fadc13",
)
| 37.0625 | 98 | 0.779933 |
f9230ab26d9636143ab4b67367d7e9e7f654f57e | 1,686 | py | Python | tests/test_egg.py | MJKirk/dynesty | 747a6eb9557b1a24b6164887a7d45c0b8fa3eb1f | [
"MIT"
] | null | null | null | tests/test_egg.py | MJKirk/dynesty | 747a6eb9557b1a24b6164887a7d45c0b8fa3eb1f | [
"MIT"
] | null | null | null | tests/test_egg.py | MJKirk/dynesty | 747a6eb9557b1a24b6164887a7d45c0b8fa3eb1f | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib
matplotlib.use('Agg')
import dynesty # noqa
"""
Run a series of basic tests of the 2d eggbox
"""
# seed the random number generator
np.random.seed(5647)  # fixed seed so the stochastic sampler runs are repeatable
nlive = 1000  # number of live points handed to every NestedSampler below
printing = False  # suppress dynesty progress output during the tests
# EGGBOX
# see 1306.2144
def loglike_egg(x):
    """Log-likelihood of the 2-d eggbox toy problem (see arXiv:1306.2144)."""
    return (2 + np.cos(x[0] / 2) * np.cos(x[1] / 2)) ** 5
def prior_transform_egg(x):
    """Map unit-cube samples onto the [0, 10*pi] prior box of the eggbox."""
    # Multiplication order kept identical to preserve bit-exact float results.
    scaled = x * 10 * np.pi
    return scaled
def test_bounds():
    """Stress-test the 'multi', 'balls' and 'cubes' bounds on the 2-d eggbox."""
    ndim = 2
    for bound in ['multi', 'balls', 'cubes']:
        sampler = dynesty.NestedSampler(loglike_egg, prior_transform_egg,
                                        ndim, nlive=nlive, bound=bound,
                                        sample='unif')
        sampler.run_nested(dlogz=0.01, print_progress=printing)
        # The recovered evidence must land within 5 sigma of the known value.
        logz_truth = 235.856
        delta = abs(logz_truth - sampler.results.logz[-1])
        assert delta < 5. * sampler.results.logzerr[-1]
def test_ellipsoids_bootstrap():
    """Stress-test multi-ellipsoid decomposition with bootstrap expansion."""
    ndim = 2
    sampler = dynesty.NestedSampler(loglike_egg, prior_transform_egg, ndim,
                                    nlive=nlive, bound='multi',
                                    sample='unif', bootstrap=5)
    sampler.run_nested(dlogz=0.01, print_progress=printing)
    # The recovered evidence must land within 5 sigma of the known value.
    logz_truth = 235.856
    delta = abs(logz_truth - sampler.results.logz[-1])
    assert delta < 5. * sampler.results.logzerr[-1]
| 28.576271 | 63 | 0.507117 |
af2932e3f50c70da0b6662126e0ec9f01f2573df | 2,499 | py | Python | marathon/util.py | criteo-forks/marathon-python | 1850734b5b916d1455416833f0aed239b308dd9f | [
"MIT"
] | 202 | 2015-01-07T06:55:14.000Z | 2021-11-30T12:05:50.000Z | marathon/util.py | criteo-forks/marathon-python | 1850734b5b916d1455416833f0aed239b308dd9f | [
"MIT"
] | 213 | 2015-03-04T12:54:09.000Z | 2022-02-28T11:36:28.000Z | marathon/util.py | criteo-forks/marathon-python | 1850734b5b916d1455416833f0aed239b308dd9f | [
"MIT"
] | 169 | 2015-02-06T16:48:05.000Z | 2022-01-18T16:20:56.000Z | # collections.abc new as of 3.3, and collections is deprecated. collections
# will be unavailable in 3.9
try:
import collections.abc as collections
except ImportError:
import collections
import datetime
import logging
try:
import json
except ImportError:
import simplejson as json
import re
def get_log():
    """Return the package-level logger (root segment of ``__name__``)."""
    package_name = __name__.partition('.')[0]
    return logging.getLogger(package_name)
class MarathonJsonEncoder(json.JSONEncoder):
    """Custom JSON encoder for Marathon object serialization.

    ``default`` recursively converts Marathon model objects (anything with a
    ``json_repr`` method), datetimes, and nested containers into plain
    JSON-serializable values.
    """
    def default(self, obj):
        # Marathon model objects expose json_repr(); recurse into its result.
        if hasattr(obj, 'json_repr'):
            return self.default(obj.json_repr())
        if isinstance(obj, datetime.datetime):
            # Marathon wire format: ISO-8601 with milliseconds and a 'Z' suffix.
            return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        # 'collections' is bound to collections.abc at import time, so this is
        # abc.Iterable; str is excluded because it is iterable but atomic here.
        if isinstance(obj, collections.Iterable) and not isinstance(obj, str):
            try:
                # Mapping-like objects (have .items()): convert values recursively.
                return {k: self.default(v) for k, v in obj.items()}
            except AttributeError:
                # No .items() -> treat as a plain sequence.
                return [self.default(e) for e in obj]
        return obj
class MarathonMinimalJsonEncoder(json.JSONEncoder):
    """Custom JSON encoder for Marathon object serialization.

    Same shape as MarathonJsonEncoder but asks model objects for their
    *minimal* representation and drops empty container/None/'' values.
    """
    def default(self, obj):
        if hasattr(obj, 'json_repr'):
            # minimal=True asks the model object for its trimmed-down repr.
            return self.default(obj.json_repr(minimal=True))
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        if isinstance(obj, collections.Iterable) and not isinstance(obj, str):
            try:
                # ``(v or v in (False, 0))`` keeps legitimate False/0 values
                # while filtering out None, '', and empty containers.
                return {k: self.default(v) for k, v in obj.items() if (v or v in (False, 0))}
            except AttributeError:
                return [self.default(e) for e in obj if (e or e in (False, 0))]
        return obj
def to_camel_case(snake_str):
    """Convert a snake_case identifier to camelCase."""
    head, *rest = snake_str.split('_')
    return head + ''.join(map(str.capitalize, rest))
def to_snake_case(camel_str):
    """Convert a camelCase/PascalCase identifier to snake_case."""
    # First split runs like 'HTTPResponse' -> 'HTTP_Response', then break the
    # remaining lower/digit-to-upper transitions, and lowercase everything.
    partial = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', camel_str)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
# Accepted Marathon timestamp layouts, tried in order.
DATETIME_FORMATS = [
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%SZ',  # Marathon omits milliseconds when they would be .000
]


def to_datetime(timestamp):
    """Parse a Marathon timestamp string into an aware UTC datetime.

    None and already-parsed datetimes pass through unchanged; an
    unparseable string raises ValueError.
    """
    if timestamp is None or isinstance(timestamp, datetime.datetime):
        return timestamp
    for fmt in DATETIME_FORMATS:
        try:
            parsed = datetime.datetime.strptime(timestamp, fmt)
        except ValueError:
            continue
        return parsed.replace(tzinfo=datetime.timezone.utc)
    raise ValueError(f'Unrecognized datetime format: {timestamp}')
| 28.397727 | 103 | 0.621449 |
b5b2a91b097891e20cc7019cb672621ebf2980ea | 9,391 | py | Python | homeassistant/components/cover/__init__.py | bartocc/home-assistant | 86f5f0226cc5647a04b0f7803a09b58ad1b51741 | [
"Apache-2.0"
] | 2 | 2020-06-17T01:23:01.000Z | 2020-06-18T22:17:14.000Z | homeassistant/components/cover/__init__.py | bartocc/home-assistant | 86f5f0226cc5647a04b0f7803a09b58ad1b51741 | [
"Apache-2.0"
] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | homeassistant/components/cover/__init__.py | bartocc/home-assistant | 86f5f0226cc5647a04b0f7803a09b58ad1b51741 | [
"Apache-2.0"
] | 1 | 2019-08-13T11:54:30.000Z | 2019-08-13T11:54:30.000Z | """
Support for Cover devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover/
"""
from datetime import timedelta
import functools as ft
import logging
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.components import group
from homeassistant.helpers import intent
from homeassistant.const import (
SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION,
SERVICE_STOP_COVER, SERVICE_OPEN_COVER_TILT, SERVICE_CLOSE_COVER_TILT,
SERVICE_STOP_COVER_TILT, SERVICE_SET_COVER_TILT_POSITION, STATE_OPEN,
STATE_CLOSED, STATE_OPENING, STATE_CLOSING, ATTR_ENTITY_ID)
_LOGGER = logging.getLogger(__name__)

DOMAIN = 'cover'
DEPENDENCIES = ['group']
SCAN_INTERVAL = timedelta(seconds=15)

# Group entity that aggregates every cover in the system.
GROUP_NAME_ALL_COVERS = 'all covers'
ENTITY_ID_ALL_COVERS = group.ENTITY_ID_FORMAT.format('all_covers')

ENTITY_ID_FORMAT = DOMAIN + '.{}'

DEVICE_CLASSES = [
    'damper',
    'garage',  # Garage door control
    'window',  # Window control
]

DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))

# Bitmask flags describing which services a cover entity supports; combined
# with | and reported through CoverDevice.supported_features.
SUPPORT_OPEN = 1
SUPPORT_CLOSE = 2
SUPPORT_SET_POSITION = 4
SUPPORT_STOP = 8
SUPPORT_OPEN_TILT = 16
SUPPORT_CLOSE_TILT = 32
SUPPORT_STOP_TILT = 64
SUPPORT_SET_TILT_POSITION = 128

ATTR_CURRENT_POSITION = 'current_position'
ATTR_CURRENT_TILT_POSITION = 'current_tilt_position'
ATTR_POSITION = 'position'
ATTR_TILT_POSITION = 'tilt_position'

INTENT_OPEN_COVER = 'HassOpenCover'
INTENT_CLOSE_COVER = 'HassCloseCover'

# Base service schema plus the two position variants (0 = closed, 100 = open).
COVER_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})

COVER_SET_COVER_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
    vol.Required(ATTR_POSITION):
        vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})

COVER_SET_COVER_TILT_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
    vol.Required(ATTR_TILT_POSITION):
        vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})
@bind_hass
def is_closed(hass, entity_id=None):
    """Return True if the given cover (default: the all-covers group) is closed."""
    # A falsy entity_id (None or '') falls back to the aggregate group entity.
    if not entity_id:
        entity_id = ENTITY_ID_ALL_COVERS
    return hass.states.is_state(entity_id, STATE_CLOSED)
async def async_setup(hass, config):
    """Track states and offer events for covers."""
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_COVERS)
    await component.async_setup(config)

    # (service name, request schema, entity method) triples, registered in
    # the same order as before.
    service_map = (
        (SERVICE_OPEN_COVER, COVER_SERVICE_SCHEMA, 'async_open_cover'),
        (SERVICE_CLOSE_COVER, COVER_SERVICE_SCHEMA, 'async_close_cover'),
        (SERVICE_SET_COVER_POSITION, COVER_SET_COVER_POSITION_SCHEMA,
         'async_set_cover_position'),
        (SERVICE_STOP_COVER, COVER_SERVICE_SCHEMA, 'async_stop_cover'),
        (SERVICE_OPEN_COVER_TILT, COVER_SERVICE_SCHEMA,
         'async_open_cover_tilt'),
        (SERVICE_CLOSE_COVER_TILT, COVER_SERVICE_SCHEMA,
         'async_close_cover_tilt'),
        (SERVICE_STOP_COVER_TILT, COVER_SERVICE_SCHEMA,
         'async_stop_cover_tilt'),
        (SERVICE_SET_COVER_TILT_POSITION,
         COVER_SET_COVER_TILT_POSITION_SCHEMA,
         'async_set_cover_tilt_position'),
    )
    for service, schema, method in service_map:
        component.async_register_entity_service(service, schema, method)

    hass.helpers.intent.async_register(intent.ServiceIntentHandler(
        INTENT_OPEN_COVER, DOMAIN, SERVICE_OPEN_COVER,
        "Opened {}"))
    hass.helpers.intent.async_register(intent.ServiceIntentHandler(
        INTENT_CLOSE_COVER, DOMAIN, SERVICE_CLOSE_COVER,
        "Closed {}"))

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    # Delegates to the EntityComponent that async_setup stored in hass.data.
    return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # Delegates to the EntityComponent that async_setup stored in hass.data.
    return await hass.data[DOMAIN].async_unload_entry(entry)
class CoverDevice(Entity):
    """Representation of a cover.

    Base class for cover entities.  Platforms override the position/state
    properties and the synchronous service methods; the ``async_*`` variants
    below simply schedule the synchronous ones on the executor.
    """
    @property
    def current_cover_position(self):
        """Return current position of cover.

        None is unknown, 0 is closed, 100 is fully open.
        """
        pass
    @property
    def current_cover_tilt_position(self):
        """Return current position of cover tilt.

        None is unknown, 0 is closed, 100 is fully open.
        """
        pass
    @property
    def state(self):
        """Return the state of the cover."""
        # Transitional states win over the settled open/closed state.
        if self.is_opening:
            return STATE_OPENING
        if self.is_closing:
            return STATE_CLOSING
        closed = self.is_closed
        if closed is None:
            return None
        return STATE_CLOSED if closed else STATE_OPEN
    @property
    def state_attributes(self):
        """Return the state attributes."""
        data = {}
        current = self.current_cover_position
        if current is not None:
            # NOTE(review): the property is evaluated a second time here; the
            # local ``current`` could be reused instead — confirm no platform
            # relies on re-reading it.
            data[ATTR_CURRENT_POSITION] = self.current_cover_position
        current_tilt = self.current_cover_tilt_position
        if current_tilt is not None:
            data[ATTR_CURRENT_TILT_POSITION] = self.current_cover_tilt_position
        return data
    @property
    def supported_features(self):
        """Flag supported features as a SUPPORT_* bitmask."""
        # Open/close/stop are always advertised; position and tilt support
        # are inferred from whether the respective position properties report
        # a value.
        supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
        if self.current_cover_position is not None:
            supported_features |= SUPPORT_SET_POSITION
        if self.current_cover_tilt_position is not None:
            supported_features |= (
                SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_STOP_TILT |
                SUPPORT_SET_TILT_POSITION)
        return supported_features
    @property
    def is_opening(self):
        """Return if the cover is opening or not."""
        pass
    @property
    def is_closing(self):
        """Return if the cover is closing or not."""
        pass
    @property
    def is_closed(self):
        """Return if the cover is closed or not."""
        raise NotImplementedError()
    def open_cover(self, **kwargs):
        """Open the cover."""
        raise NotImplementedError()
    def async_open_cover(self, **kwargs):
        """Open the cover.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(ft.partial(self.open_cover, **kwargs))
    def close_cover(self, **kwargs):
        """Close cover."""
        raise NotImplementedError()
    def async_close_cover(self, **kwargs):
        """Close cover.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(ft.partial(self.close_cover, **kwargs))
    def set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        pass
    def async_set_cover_position(self, **kwargs):
        """Move the cover to a specific position.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(
            ft.partial(self.set_cover_position, **kwargs))
    def stop_cover(self, **kwargs):
        """Stop the cover."""
        pass
    def async_stop_cover(self, **kwargs):
        """Stop the cover.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(ft.partial(self.stop_cover, **kwargs))
    def open_cover_tilt(self, **kwargs):
        """Open the cover tilt."""
        pass
    def async_open_cover_tilt(self, **kwargs):
        """Open the cover tilt.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(
            ft.partial(self.open_cover_tilt, **kwargs))
    def close_cover_tilt(self, **kwargs):
        """Close the cover tilt."""
        pass
    def async_close_cover_tilt(self, **kwargs):
        """Close the cover tilt.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(
            ft.partial(self.close_cover_tilt, **kwargs))
    def set_cover_tilt_position(self, **kwargs):
        """Move the cover tilt to a specific position."""
        pass
    def async_set_cover_tilt_position(self, **kwargs):
        """Move the cover tilt to a specific position.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(
            ft.partial(self.set_cover_tilt_position, **kwargs))
    def stop_cover_tilt(self, **kwargs):
        """Stop the cover tilt."""
        pass
    def async_stop_cover_tilt(self, **kwargs):
        """Stop the cover tilt.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(
            ft.partial(self.stop_cover_tilt, **kwargs))
| 29.164596 | 79 | 0.686189 |
11dbd9c4011ecc9a0ac4112e7cf3f873f07edc27 | 1,516 | py | Python | oneshottest.py | lijunyu159/An-automatic-model-to-measure-the-parameters-of-living-pores-based-on-Mask-R-CNN- | 01cc978a86530cac426fa298b2f66f6a8f7738a8 | [
"MIT"
] | 1 | 2021-01-06T14:10:22.000Z | 2021-01-06T14:10:22.000Z | oneshottest.py | lijunyu159/An-automatic-model-to-measure-the-parameters-of-living-pores-based-on-Mask-R-CNN- | 01cc978a86530cac426fa298b2f66f6a8f7738a8 | [
"MIT"
] | 1 | 2021-06-07T09:43:17.000Z | 2021-06-07T09:43:17.000Z | oneshottest.py | lijunyu159/An-automatic-model-to-measure-the-parameters-of-living-pores-based-on-Mask-R-CNN- | 01cc978a86530cac426fa298b2f66f6a8f7738a8 | [
"MIT"
] | null | null | null | import os
import sys
import math
import numpy as np
import cv2
import samples.balloon.mrcnn.model as modellib
import skimage.io
from samples.balloon import balloon
from samples.balloon.mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import random
# Root directory of the project
ROOT_DIR = os.path.abspath("./")
# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
# Directory to save logs and trained model
# MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco_0160.h5")
if not os.path.exists(MODEL_PATH):
    print('no weights!')
# IMAGE_DIR = os.path.join(ROOT_DIR, "/samples/balloon/balloon/test1")
config = balloon.BalloonConfig()
config.display()
# Create model object in inference mode.
# NOTE(review): model_dir is given the weights *file* path, not a logs
# directory — looks unintended; confirm what MaskRCNN expects here.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_PATH, config=config)
model.load_weights(MODEL_PATH, by_name=True)
class_names = ['BG', 'stomatal']
# The original demo cropped the image before using it; see line 1224 of model.py.
# file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(r"D:\poredataset\0024.jpg")
# `image` is a 3-D array; `[image]` is a 4-D list with len([image]) == 1,
# i.e. the whole image is processed as a single batch.
results = model.detect([image], verbose=1)
r = results[0]
# Generate the instance visualization; r['rois'] are the box coordinates on the image.
# visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
#                             class_names, r['scores'])
| 28.603774 | 81 | 0.712401 |
d5118e3307391a85ecac9f94da1b1e017d02fbd2 | 1,204 | py | Python | mlflow/alembic/versions/451aebb31d03_add_metric_step.py | akarloff/mlflow | be9774a76b4b6dcdb8cc2147a93d7c8676438292 | [
"Apache-2.0"
] | 3 | 2019-10-07T01:12:25.000Z | 2020-07-06T04:27:51.000Z | mlflow/alembic/versions/451aebb31d03_add_metric_step.py | akarloff/mlflow | be9774a76b4b6dcdb8cc2147a93d7c8676438292 | [
"Apache-2.0"
] | 15 | 2019-10-07T01:11:46.000Z | 2022-03-08T23:33:53.000Z | mlflow/alembic/versions/451aebb31d03_add_metric_step.py | akarloff/mlflow | be9774a76b4b6dcdb8cc2147a93d7c8676438292 | [
"Apache-2.0"
] | 6 | 2019-11-28T13:23:35.000Z | 2020-07-08T19:22:12.000Z | """add metric step
Revision ID: 451aebb31d03
Revises:
Create Date: 2019-04-22 15:29:24.921354
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '451aebb31d03'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Add the ``step`` column to ``metrics`` and fold it into the primary key."""
    # server_default='0' keeps existing rows valid under the NOT NULL constraint.
    op.add_column('metrics', sa.Column('step', sa.BigInteger(), nullable=False, server_default='0'))
    # Use batch mode so that we can run "ALTER TABLE" statements against SQLite
    # databases (see more info at https://alembic.sqlalchemy.org/en/latest/
    # batch.html#running-batch-migrations-for-sqlite-and-other-databases)
    with op.batch_alter_table("metrics") as batch_op:
        batch_op.drop_constraint(constraint_name='metric_pk', type_="primary")
        batch_op.create_primary_key(
            constraint_name='metric_pk',
            columns=['key', 'timestamp', 'step', 'run_uuid', 'value'])
def downgrade():
    """Intentionally a no-op; see the comment below for why."""
    # This migration cannot safely be downgraded; once metric data with the same
    # (key, timestamp, run_uuid, value) are inserted (differing only in their `step`), we cannot
    # revert to a schema where (key, timestamp, run_uuid, value) is the metric primary key.
    pass
| 33.444444 | 100 | 0.714286 |
3eca36e36496363d19994b5b1e07a9cc4a179793 | 83 | py | Python | docknv/image/__init__.py | sharingcloud/docknv | 6eec6a576a32cb05278b7af045f90859066c9f1d | [
"MIT"
] | null | null | null | docknv/image/__init__.py | sharingcloud/docknv | 6eec6a576a32cb05278b7af045f90859066c9f1d | [
"MIT"
] | null | null | null | docknv/image/__init__.py | sharingcloud/docknv | 6eec6a576a32cb05278b7af045f90859066c9f1d | [
"MIT"
] | null | null | null | """Image handler."""
from .methods import * # noqa
from .models import * # noqa
| 16.6 | 30 | 0.638554 |
161db334175f70d109375ffb8eb9fb8473487f31 | 776 | py | Python | 713. Subarray Product Less Than K/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 713. Subarray Product Less Than K/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 713. Subarray Product Less Than K/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    """Sliding-window solution for LeetCode 713 (Subarray Product Less Than K)."""

    def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
        """Count contiguous subarrays whose element product is strictly < k.

        Classic two-pointer window: grow on the right, shrink from the left
        while the running product is too large; each right endpoint then
        contributes ``right - left + 1`` valid subarrays ending there.
        O(n) time, O(1) extra space.
        """
        if k <= 0:
            return 0
        total = 0
        lo = 0
        window_product = 1
        for hi, value in enumerate(nums):
            window_product *= value
            while window_product >= k and lo <= hi:
                window_product //= nums[lo]
                lo += 1
            total += hi - lo + 1
        return total
if __name__== '__main__':
    # Smoke test: for k=10 only [5], [2] and [6] qualify, so ans should be 3
    # and the script prints True.
    solution = Solution()
    nums = [10,5,2,6]
    k = 10
    ans = solution.numSubarrayProductLessThanK(nums, k)
print(ans == 3) | 27.714286 | 74 | 0.460052 |
c4f75cba514675188fa95386d9dd8eaff59db203 | 2,605 | py | Python | examples/AdafruitDHT.py | a0933732177/Adafruit_Python_DHT | 6ada1625465e59dc706727d38ef9d2d144e7b920 | [
"MIT"
] | null | null | null | examples/AdafruitDHT.py | a0933732177/Adafruit_Python_DHT | 6ada1625465e59dc706727d38ef9d2d144e7b920 | [
"MIT"
] | null | null | null | examples/AdafruitDHT.py | a0933732177/Adafruit_Python_DHT | 6ada1625465e59dc706727d38ef9d2d144e7b920 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time

import Adafruit_DHT

# Parse command line parameters.
sensor_args = { '11': Adafruit_DHT.DHT11,
                '22': Adafruit_DHT.DHT22,
                '2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
    sensor = sensor_args[sys.argv[1]]
    pin = sys.argv[2]
else:
    print('Usage: sudo ./Adafruit_DHT.py [11|22|2302] <GPIO pin number>')
    print('Example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO pin #4')
    sys.exit(1)


def read_and_report(sensor, pin):
    """Take one sensor reading (with retries) and print it.

    read_retry attempts up to 15 reads, waiting 2 seconds between attempts.
    Note that sometimes you won't get a reading and the results will be null
    (because Linux can't guarantee the timing of calls to read the sensor);
    in that case the process exits with status 1, matching the original
    script's behavior.
    """
    humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    if humidity is None or temperature is None:
        print('Failed to get reading. Try again!')
        sys.exit(1)
    # Un-comment the line below to convert the temperature to Fahrenheit.
    # temperature = temperature * 9/5.0 + 32
    print('Temp={0:0.1f}*  Humidity={1:0.1f}%'.format(temperature, humidity))


# The original duplicated the read/print/exit logic before and inside the
# loop; it is now shared via read_and_report().
read_and_report(sensor, pin)
while True:
    # DHT sensors cannot be sampled faster than roughly once every 2 seconds;
    # pause between reads instead of polling back-to-back.
    time.sleep(2.0)
    read_and_report(sensor, pin)
| 42.016129 | 98 | 0.723992 |
59287b9e451224e2f987ac1a5c9982089f94ba86 | 6,854 | py | Python | earthquakes/postgres.py | conradhilley/earthquakes | ac641b080b634f56d1099e1705f2b2bb2acb565c | [
"MIT"
] | null | null | null | earthquakes/postgres.py | conradhilley/earthquakes | ac641b080b634f56d1099e1705f2b2bb2acb565c | [
"MIT"
] | null | null | null | earthquakes/postgres.py | conradhilley/earthquakes | ac641b080b634f56d1099e1705f2b2bb2acb565c | [
"MIT"
] | null | null | null | """
Purpose: Postgres Database update and helper functions
Author: Conrad Hilley (conradhilley@gmail.com)
"""
# TODO create table/cursor classes and add common commands as methods
# TODO build base cursor class
import json
import psycopg2
import psycopg2.extras
from config import config
from earthquakes import usgs
# Commands used when updating gdb
UPDATE_CMDS = {'update_point_geom':
"""UPDATE {table} SET geometry = ST_SetSRID(
ST_MakePoint({table}.longitude,
{table}.latitude), 4326);""",
'update_utc_time':
"""UPDATE {table} SET utc_time =
to_timestamp({table}.time/1000);"""}
# Commonly used sql commands
SQL_CMDS = {'estimate_row_count':
"""SELECT reltuples AS approximate_row_count FROM pg_class
WHERE relname = '{table}';""",
'count_rows':
"""SELECT count(*) FROM {table};"""
}
class PostgresDB(object):
    """Context manager wrapping a psycopg2 connection configured from an ini file."""

    def __init__(self, config_file='database.ini', section='postgresql'):
        # Connection parameters are read eagerly; the connection itself is
        # opened lazily in __enter__.
        self.params = config(config_file=config_file, section=section)
        self.conn = None

    def execute(self, sql):
        """Run *sql* on a fresh cursor and return that cursor."""
        cursor = self.conn.cursor()
        cursor.execute(sql)
        return cursor

    def __enter__(self):
        # Bug fixes versus the original:
        #  * ``except ConnectionError('...')`` named an exception *instance*;
        #    an except clause needs a class, and psycopg2 signals failures
        #    with its own psycopg2.Error hierarchy anyway.
        #  * ``finally: return self`` silently swallowed *every* exception
        #    raised in the try body, not just connection failures.
        # The original best-effort contract is preserved: on a connection
        # failure, return self with ``conn`` left as None.
        try:
            self.conn = psycopg2.connect(**self.params)
            self.cursor = self.conn.cursor()
        except psycopg2.Error:
            self.conn = None
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.conn:
            self.conn.close()
class SearchCursor(object):
    """Chunked iterator over rows of *table*.

    Rows are fetched in ``chunk_size`` batches and yielded either as named
    tuples/dicts (all columns) or as plain tuples/dicts restricted to
    *columns*, optionally filtered by the SQL fragment *query*.  Intended to
    be used as a context manager.
    """

    def __init__(self, conn, table, columns=(), query=None, chunk_size=100000,
                 as_dict=False):
        self.as_dict = as_dict
        self.chunk_size = chunk_size
        self.cursor = conn.cursor(
            cursor_factory=psycopg2.extras.NamedTupleCursor)

        # table and column validation
        self.table = table
        self.search_cols = columns
        self.avail_cols = self._columns()

        # verify column type and presence
        if self.search_cols:
            if isinstance(self.search_cols, str):
                self.search_cols = (self.search_cols, )
            for col in self.search_cols:
                if col not in self.avail_cols:
                    raise KeyError('Column ({}) not in {}'.format(col,
                                                                  self.table))
        else:
            self.search_cols = ('*', )

        # build sql
        self.sql = """SELECT {cols} from {table}""".format(
            cols=', '.join(map(str, self.search_cols)),
            table=self.table)

        # add query
        self.query = query
        if self.query:
            self.sql = '{sql} WHERE {query};'.format(sql=self.sql,
                                                     query=self.query)
        else:
            self.sql += ';'

    def _columns(self):
        """Return the column names of ``self.table``; KeyError if it is absent."""
        try:
            self.cursor.execute("SELECT * FROM {table} LIMIT 0".format(
                table=self.table))
            return [desc[0] for desc in self.cursor.description]
        except Exception:
            # The original used a bare ``except:``, which also trapped
            # KeyboardInterrupt/SystemExit; Exception keeps the same
            # KeyError contract without swallowing those.
            raise KeyError('Table ({}) not in database'.format(self.table))

    def __iter__(self):
        while True:
            self.records = self.cursor.fetchmany(self.chunk_size)
            if not self.records:
                break
            for rec in self.records:
                # If all records yield as dict or named tuple
                if self.search_cols == ('*', ):
                    if self.as_dict:
                        yield rec._asdict()
                    else:
                        yield rec
                else:
                    # Will need dict for all other access methods
                    rec_dict = rec._asdict()
                    if self.as_dict:
                        yield rec_dict
                    else:
                        yield tuple([rec_dict[c] for c in self.search_cols])

    def __enter__(self):
        # Bug fixes versus the original:
        #  * ``except ConnectionError('...')`` named an exception *instance*;
        #    an except clause needs a class, and psycopg2 raises its own
        #    psycopg2.Error hierarchy anyway.
        #  * ``finally: return self`` silently swallowed *every* exception
        #    raised in the try body.
        try:
            self.cursor.execute(self.sql)
            self.records = None
        except psycopg2.Error:
            # Preserve the original best-effort contract: enter anyway with
            # no pending records.
            self.records = None
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.cursor:
            self.cursor.close()
def update_usgs_data(table='earthquakes'):
    """Fetch the USGS earthquake summary feed and insert any new events.

    Inserts are idempotent (``ON CONFLICT DO NOTHING``); prints how many
    rows were added.
    """
    # Request data from USGS
    usgs_data = usgs.USGSSummary()
    r = usgs_data.request_data()

    # Parse earthquake json
    geojson_dict = json.loads(r.text)

    # Open connection to db and activate cursor
    with PostgresDB() as db:
        with db.cursor as cursor:
            cursor.execute(SQL_CMDS['count_rows'].format(table='earthquakes'))
            init_cnt = cursor.fetchone()[0]

            # Iterate over features, add if not previously existing
            for cnt, quake in enumerate(geojson_dict['features']):
                # Add USGS ID Attribute
                quake['properties']['usgs_id'] = quake['id']

                # Read latitude and longitude from coordinates
                quake['properties']['longitude'] = \
                    quake['geometry']['coordinates'][0]
                quake['properties']['latitude'] = \
                    quake['geometry']['coordinates'][1]

                # Add properties to earthquakes table
                p_keys = quake['properties'].keys()

                # Build sql for psycopg2 execute method
                # NOTE(review): values are parameterized, but column names are
                # formatted straight from the feed's JSON keys — confirm the
                # feed is trusted or whitelist the columns.
                placeholders = ', '.join(["%s" for _ in p_keys])
                sql = "INSERT INTO {table} ({columns}) " \
                      "VALUES ({values}) " \
                      "ON CONFLICT DO NOTHING;".format(table=table,
                                                       columns=', '.join(
                                                           p_keys),
                                                       values=placeholders)
                values = [quake['properties'][k] for k in p_keys]

                # Insert record
                cursor.execute(sql, values)
                db.conn.commit()

            # Final Count
            cursor.execute(SQL_CMDS['count_rows'].format(table='earthquakes'))
            final_cnt = cursor.fetchone()[0]
            print(' - {} rows added ({} -> {})'.format(
                final_cnt - init_cnt, init_cnt, final_cnt))
def main():
    """Pull new USGS events, then run the post-load geometry/time updates."""
    print('Reading data from USGS, inserting new records')
    update_usgs_data()
    for name, statement in UPDATE_CMDS.items():
        print('\n - {}'.format(name))
        with PostgresDB() as db:
            with db.cursor as cursor:
                cursor.execute(statement.format(table='earthquakes'))
                db.conn.commit()


if __name__ == '__main__':
    main()
| 32.330189 | 78 | 0.524074 |
a91e1b5f05f553d328086a8fa62223417904e812 | 1,993 | py | Python | app/account/tests/factories.py | rogeriopaulos/gep | e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2 | [
"MIT"
] | null | null | null | app/account/tests/factories.py | rogeriopaulos/gep | e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2 | [
"MIT"
] | 2 | 2021-09-02T04:22:45.000Z | 2021-09-02T04:52:26.000Z | app/account/tests/factories.py | rogeriopaulos/gep | e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2 | [
"MIT"
] | 1 | 2021-09-15T02:16:38.000Z | 2021-09-15T02:16:38.000Z | from random import choice
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from factory import (DjangoModelFactory, Faker, LazyFunction, Sequence,
                     SubFactory, django)

from account.models import Cargo, Orgao, Profile
from account.utils import CARGOS, INSTITUICOES
User = get_user_model()
@django.mute_signals(post_save)
class UserFactory(DjangoModelFactory):
    """Factory for the project's user model (post_save muted so profile
    signal handlers do not fire while building test users)."""
    class Meta:
        model = User
    # Deterministic, collision-free ids/usernames across a test run.
    id = Sequence(lambda x: 12345+x)
    username = Sequence(lambda x: f'agent{x}')
    first_name = Faker('first_name', locale='pt_BR')
    last_name = Faker('last_name', locale='pt_BR')
    email = Faker('ascii_free_email', locale='pt_BR')
    # NOTE(review): stored as a *raw* string, not hashed — fine for factories
    # that never authenticate; confirm, or set it via set_password otherwise.
    password = Faker('password', locale='pt_BR', length=12, digits=True, upper_case=True, lower_case=True)
class OrgaoFactory(DjangoModelFactory):
    """Factory for Orgao.

    ``choice(INSTITUICOES)[1]`` used to run once at class-definition time, so
    every instance shared the same "random" value; LazyFunction re-evaluates
    the choice for each instance as intended.
    """
    class Meta:
        model = Orgao
    orgao = LazyFunction(lambda: choice(INSTITUICOES)[1])
    sigla = Faker('word')
class CargoFactory(DjangoModelFactory):
    """Factory for Cargo.

    As with OrgaoFactory, ``choice(CARGOS)[1]`` used to be evaluated once at
    class-definition time; LazyFunction picks a fresh value per instance.
    """
    class Meta:
        model = Cargo
    orgao = SubFactory(OrgaoFactory)
    cargo = LazyFunction(lambda: choice(CARGOS)[1])
@django.mute_signals(post_save)
class ProfileFactory(DjangoModelFactory):
    """Factory for Profile with Brazilian-locale fake personal data.

    SubFactory targets are given as dotted paths (strings), which avoids
    import-order/circularity issues between the factories in this module.
    """
    class Meta:
        model = Profile
    user = SubFactory('account.tests.factories.UserFactory')
    nascimento = Faker('date_of_birth', minimum_age=18, maximum_age=80)
    orgao_link = SubFactory('account.tests.factories.OrgaoFactory')
    cargo_link = SubFactory('account.tests.factories.CargoFactory')
    lotacao = Faker('word')
    funcao = Faker('word')
    matricula = Faker('random_number', locale='pt_BR', digits=9)
    cpf = Faker('cpf', locale='pt_BR')
    identidade = Faker('rg', locale='pt_BR')
    org_identidade = Faker('lexify', text='???', letters='spiaue')
    # 'lexify' builds phone-like strings such as (8x)9xxxx-xxxx.
    cel_funcional = Faker('lexify', text='(8?)9????-????', letters='1234567890')
    cel_pessoal = Faker('lexify', text='(8?)9????-????', letters='1234567890')
    endereco = Faker('street_address', locale='pt_BR')
    cep = Faker('postcode', locale='pt_BR')
| 29.308824 | 106 | 0.696939 |
aac48c3ccb54005ee1f4e16d51c062f5cadf61c9 | 2,440 | py | Python | src/accounts/views.py | aminhp93/learning_python | f708a61c78e26061cf8b7fa5b1262fb0eea330d7 | [
"MIT"
] | null | null | null | src/accounts/views.py | aminhp93/learning_python | f708a61c78e26061cf8b7fa5b1262fb0eea330d7 | [
"MIT"
] | 7 | 2020-02-11T23:32:33.000Z | 2022-03-11T23:15:53.000Z | src/accounts/views.py | aminhp93/learning_python | f708a61c78e26061cf8b7fa5b1262fb0eea330d7 | [
"MIT"
] | null | null | null | from django.contrib.auth import (authenticate, get_user_model, login, logout)
from django.core.mail import send_mail
from django.conf import settings
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.urls import reverse
from learning_python.utils import generate_confirmation_token, confirm_token
from .forms import UserLoginForm, UserRegisterForm
User = get_user_model()
def login_view(request):
    """Render and process the email/password login form.

    On success the user is logged in and redirected to ``?next=`` (if given)
    or to the site root.
    """
    next = request.GET.get('next')
    title = "Login"
    form = UserLoginForm(request.POST or None)
    if form.is_valid():
        email = form.cleaned_data.get("email")
        password = form.cleaned_data.get('password')
        # NOTE(review): assumes UserLoginForm already validated the
        # credentials -- authenticate() returning None here would make
        # login() fail. Confirm the form's clean() enforces this.
        user = authenticate(email=email, password=password)
        login(request, user)
        # NOTE(review): ``next`` comes from the query string unvalidated --
        # potential open redirect; consider url_has_allowed_host_and_scheme.
        if next:
            return redirect(next)
        return redirect("/")
    return render(request, "accounts/form.html", {"form": form, "title": title})
def register_view(request):
    """Register a new user, e-mail a confirmation link, and log them in.

    The confirmation URL embeds a signed token produced by
    ``generate_confirmation_token`` and is resolved by ``confirm_email``.
    """
    next = request.GET.get('next')
    title = "Register"
    form = UserRegisterForm(request.POST or None)
    if form.is_valid():
        user = form.save(commit=False)
        password = form.cleaned_data.get('password')
        # set_password hashes the raw password before the row is stored.
        user.set_password(password)
        user.save()
        new_user = authenticate(email=user.email, password=password)
        token = generate_confirmation_token(user.email)
        # NOTE(review): host is hard-coded for local development; build it
        # from the request (request.build_absolute_uri) before deploying.
        confirm_url = "http://localhost:8000" + reverse("accounts:confirm_email", kwargs={"token": token})
        # confirm_url = "http://localhost:8000/accounts/confirm_email/{}".format(token)
        message = "TEST"  # plain-text fallback body; the HTML part below is what users see
        html_message = render_to_string('accounts/confirm_email.html', context={"confirm_url": confirm_url})
        subject = "Please confirm your email"
        send_mail(subject, message, settings.EMAIL_HOST_USER, [user.email], fail_silently=False, html_message=html_message)
        login(request, new_user)
        # NOTE(review): unvalidated ``next`` redirect, same caveat as login_view.
        if next:
            return redirect(next)
        return redirect("/")
    context = {
        "form": form,
        "title": title
    }
    return render(request, "accounts/form.html", context)
def confirm_email(request, token):
    """Activate the account matching *token*'s email and render the result page.

    The token is the signed value produced by ``generate_confirmation_token``.
    An invalid/expired token, or a token for an unknown user, is reported and
    the confirmation template is still rendered.
    """
    email = None
    try:
        email = confirm_token(token)
    except Exception:
        # Fixed: the old bare ``except`` left ``email`` undefined, so the
        # lookup below crashed with a NameError on invalid tokens.
        print('The confirmation link is invalid or has expired.', 'danger')
    user = User.objects.filter(email=email).first() if email else None
    if user is None:
        # Fixed: previously ``user.is_activated`` raised AttributeError when
        # no account matched the token's email.
        print('The confirmation link is invalid or has expired.', 'danger')
    elif user.is_activated:
        print('Account already confirmed. Please login.', 'success')
    else:
        user.is_activated = True
        user.save()
        print('You have confirmed your account. Thanks!', 'success')
    return render(request, "accounts/activated_email.html", {})
def logout_view(request):
logout(request)
return redirect("/") | 33.424658 | 117 | 0.74918 |
f0e2e6cfa5b7f39dea20d8c586bf38047042df10 | 2,903 | py | Python | tests/word_tokenizer/test_mecab_tokenizer.py | upura/konoha | 852fc9275ec9cf6d299a3dfc1cfa6ac5eaf166e5 | [
"MIT"
] | null | null | null | tests/word_tokenizer/test_mecab_tokenizer.py | upura/konoha | 852fc9275ec9cf6d299a3dfc1cfa6ac5eaf166e5 | [
"MIT"
] | null | null | null | tests/word_tokenizer/test_mecab_tokenizer.py | upura/konoha | 852fc9275ec9cf6d299a3dfc1cfa6ac5eaf166e5 | [
"MIT"
] | null | null | null | import pytest
from konoha.konoha_token import Token
from konoha.word_tokenizer import WordTokenizer
def test_word_tokenize_with_mecab():
    """WordTokenizer('MeCab') splits a plain sentence into morphemes."""
    try:
        import natto  # availability probe only
        del natto
    except ImportError:
        pytest.skip("natto-py is not installed.")
    tokenizer = WordTokenizer(tokenizer="MeCab")
    expected = [Token(surface=surface) for surface in "吾輩 は 猫 で ある".split(" ")]
    assert tokenizer.tokenize("吾輩は猫である") == expected
def test_word_tokenize_with_mecab_whitespace():
    """Whitespace in the input must not yield a token of its own."""
    try:
        import natto  # availability probe only
        del natto
    except ImportError:
        pytest.skip("natto-py is not installed.")
    tokenizer = WordTokenizer(tokenizer="MeCab")
    expected = [Token(surface=surface) for surface in "吾輩 は で ある".split(" ")]
    assert tokenizer.tokenize("吾輩は である") == expected
"""
$ mecab
吾輩は猫である
吾輩 名詞,一般,*,*,*,*,吾輩,ワガハイ,ワガハイ
は 助詞,係助詞,*,*,*,*,は,ハ,ワ
猫 名詞,一般,*,*,*,*,猫,ネコ,ネコ
で 助動詞,*,*,*,特殊・ダ,連用形,だ,デ,デ
ある 助動詞,*,*,*,五段・ラ行アル,基本形,ある,アル,アル
EOS
"""
# Expected MeCab analyses for 吾輩は猫である, one dict per token, keyed to the
# keyword arguments of konoha's ``Token``. ``base_form``/``normalized_form``
# stay None because those fields are not asserted against dictionary output.
mecab_tokens_list = [
    {
        "surface": "吾輩",
        "postag": "名詞",
        "postag2": "代名詞",
        "postag3": "一般",
        "postag4": "*",
        "inflection": "*",
        "conjugation": "*",
        "base_form": None,
        "normalized_form": None,
        "yomi": "ワガハイ",
        "pron": "ワガハイ",
    },  # NOQA
    {
        "surface": "は",
        "postag": "助詞",
        "postag2": "係助詞",
        "postag3": "*",
        "postag4": "*",
        "inflection": "*",
        "conjugation": "*",
        "base_form": None,
        "normalized_form": None,
        "yomi": "ハ",
        "pron": "ワ",
    },  # NOQA
    {
        "surface": "猫",
        "postag": "名詞",
        "postag2": "一般",
        "postag3": "*",
        "postag4": "*",
        "inflection": "*",
        "conjugation": "*",
        "base_form": None,
        "normalized_form": None,
        "yomi": "ネコ",
        "pron": "ネコ",
    },  # NOQA
    {
        "surface": "で",
        "postag": "助動詞",
        "postag2": "*",
        "postag3": "*",
        "postag4": "*",
        "inflection": "特殊・ダ",
        "conjugation": "連用形",
        "base_form": None,
        "normalized_form": None,
        "yomi": "デ",
        "pron": "デ",
    },  # NOQA
    {
        "surface": "ある",
        "postag": "助動詞",
        "postag2": "*",
        "postag3": "*",
        "postag4": "*",
        "inflection": "五段・ラ行アル",
        "conjugation": "基本形",
        "base_form": None,
        "normalized_form": None,
        "yomi": "アル",
        "pron": "アル",
    },  # NOQA
]
def test_postagging_with_mecab():
"""Test MeCab tokenizer."""
try:
tokenizer = WordTokenizer(tokenizer="mecab", with_postag=True)
except ImportError:
pytest.skip("natto-py is not installed.")
expect = [Token(**kwargs) for kwargs in mecab_tokens_list]
result = tokenizer.tokenize("吾輩は猫である")
assert expect == result
| 23.41129 | 70 | 0.497416 |
6dd2de26e85a8b3a931aeeb01b1f47eb5159bb45 | 9,385 | py | Python | odim/mongo.py | jhuseinovic/odim | 4efb468e3ef9f346a27f73f7b218eea6b807564d | [
"MIT"
] | null | null | null | odim/mongo.py | jhuseinovic/odim | 4efb468e3ef9f346a27f73f7b218eea6b807564d | [
"MIT"
] | null | null | null | odim/mongo.py | jhuseinovic/odim | 4efb468e3ef9f346a27f73f7b218eea6b807564d | [
"MIT"
] | null | null | null | import logging
import re
from datetime import datetime
from decimal import Decimal
from time import sleep
from typing import List, Optional, Union
import bson
from bson import ObjectId as BsonObjectId
from pydantic import Field
from functools import wraps, partial
import asyncio
from pymongo import MongoClient, errors
from pymongo import ASCENDING, DESCENDING
from odim import BaseOdimModel, NotFoundException, Odim, Operation, SearchParams, all_json_encoders
from odim.helper import awaited, get_connection_info
log = logging.getLogger("uvicorn")
client_connections = {}
def async_wrap(func):
    """Turn a blocking callable into an awaitable that runs in an executor."""
    @wraps(func)
    async def run(*args, loop=None, executor=None, **kwargs):
        # Default to the current event loop; callers may pin a loop/executor.
        active_loop = loop if loop is not None else asyncio.get_event_loop()
        call = partial(func, *args, **kwargs)
        return await active_loop.run_in_executor(executor, call)
    return run
# @async_wrap
async def get_mongo_client(alias):
    """Return the (cached) database handle for connection *alias*.

    The first call per alias builds a pymongo ``MongoClient`` from the
    resolved connection info and caches the selected *database* object in
    ``client_connections``; later calls reuse it.
    """
    global client_connections
    if alias not in client_connections:
        cinf = get_connection_info(alias)
        # Note: the cached value is the database, not the raw client.
        client_connections[alias] = MongoClient(cinf.url(withdb=False), cinf.port)[cinf.db]
    return client_connections[alias]
class ObjectId(BsonObjectId):
    """bson ObjectId subclass that plugs into pydantic validation/schema."""

    @classmethod
    def __get_validators__(cls):
        # pydantic v1 hook: yield the validators to run for this type.
        yield cls.validate

    @classmethod
    def validate(cls, v):
        # Accept anything bson considers a valid ObjectId representation.
        if not BsonObjectId.is_valid(v):
            raise ValueError('Invalid objectid')
        return BsonObjectId(v)

    @classmethod
    def __modify_schema__(cls, field_schema):
        # Render as a plain string in generated JSON schema / OpenAPI docs.
        field_schema.update(type='string')
class BaseMongoModel(BaseOdimModel):
    """Base pydantic model for Mongo documents; maps ``id`` to Mongo's ``_id``."""

    # Aliased so the field (de)serializes as the Mongo "_id" key.
    id: Optional[ObjectId] = Field(alias='_id', description="Unique identifier of the object")

    class Config:
        arbitrary_types_allowed = True
        allow_population_by_field_name = True
        # underscore_attrs_are_private = True
        json_encoders = {
            ObjectId: str,
            BsonObjectId: str,
            datetime: lambda dt: dt.isoformat(),
            # NOTE(review): Decimal -> float may lose precision; a string
            # encoding would be lossless.
            Decimal : float
        }


# Register this model's encoders with the library-wide encoder table.
all_json_encoders.update( BaseMongoModel.Config.json_encoders)
def convert_decimal(dict_item):
    """Recursively re-encode every ``Decimal`` as ``bson.Decimal128``.

    Mongo cannot store ``decimal.Decimal`` directly, so nested lists and
    dicts are walked and each Decimal is converted; every other value is
    returned unchanged.
    """
    if dict_item is None:
        return None
    if isinstance(dict_item, list):
        return [convert_decimal(entry) for entry in dict_item]
    if isinstance(dict_item, dict):
        return {key: convert_decimal(value) for key, value in dict_item.items()}
    if isinstance(dict_item, Decimal):
        return bson.Decimal128(str(dict_item))
    return dict_item
class OdimMongo(Odim):
    """MongoDB implementation of the Odim persistence interface.

    All helpers honour the model's soft-delete field (when configured) and
    run the pre/post init, save and remove hooks around DB access.
    """

    protocols = ["mongo", "mongodb"]

    @property
    def get_collection_name(self):
        """Collection to use: ``Model.Config.collection_name`` when declared."""
        if hasattr(self.model, 'Config'):
            if hasattr(self.model.Config, 'collection_name'):
                cn = self.model.Config.collection_name
                return cn
        # NOTE(review): for a model *class* this yields the metaclass name --
        # confirm whether ``self.model.__name__`` was intended.
        return self.model.__class__.__name__

    @property
    async def __mongo(self):
        """Collection handle on the (cached) client for this connection."""
        client = await get_mongo_client(self.get_connection_identifier)
        return client[self.get_collection_name]

    async def get(self, id: Union[str, ObjectId], extend_query: dict = {}, include_deleted: bool = False):
        """Fetch one document by id, raising ``NotFoundException`` when absent.

        ``extend_query`` narrows the lookup further; soft-deleted documents
        are excluded unless ``include_deleted`` is set. (The ``{}`` default is
        safe here: it is only read, never mutated.)
        """
        if isinstance(id, str):
            id = ObjectId(id)
        softdel = {self.softdelete(): False} if self.softdelete() and not include_deleted else {}
        db = await self.__mongo
        ext = self.get_parsed_query(extend_query)
        qry = {"_id": id, **softdel, **ext}
        ret = db.find_one(qry)
        if not ret:
            raise NotFoundException()
        ret = self.execute_hooks("pre_init", ret)  # raw DB document goes through PRE_INIT
        x = self.model(**ret)
        x = self.execute_hooks("post_init", x)  # model instance goes through POST_INIT
        return x

    async def save(self, extend_query: dict = {}, include_deleted: bool = False) -> ObjectId:
        """Insert (no id yet) or fully replace (id present) ``self.instance``.

        Returns the document's ObjectId; runs pre_save/post_save hooks.
        """
        if not self.instance:
            raise AttributeError("Can not save, instance not specified ")
        iii = self.execute_hooks("pre_save", self.instance, created=(not self.instance.id))
        dd = convert_decimal(iii.dict(by_alias=True))
        if not self.instance.id:
            # New document: mint the id up-front so hooks and callers see it.
            dd["_id"] = BsonObjectId()
            iii.id = dd["_id"]
            self.instance.id = dd["_id"]
            softdel = {self.softdelete(): False} if self.softdelete() else {}
            db = await self.__mongo
            ret = db.insert_one({**dd, **extend_query, **softdel})
            created = True
        else:
            softdel = {self.softdelete(): False} if self.softdelete() and not include_deleted else {}
            db = await self.__mongo
            ret = db.replace_one({"_id": self.instance.id, **softdel, **self.get_parsed_query(extend_query)}, dd)
            # NOTE(review): replace_one reports modified_count == 0 when the
            # replacement equals the stored document, which trips this assert.
            assert ret.modified_count > 0, "Not modified error"
            created = False
        iii = self.execute_hooks("post_save", iii, created=created)
        return self.instance.id

    async def update(self, extend_query: dict = {}, include_deleted: bool = False, only_fields: Optional[List['str']] = None):
        """Persist only the fields set on the instance ($set), leaving the rest alone.

        ``only_fields`` further restricts which changed fields are written.
        Raises AttributeError when the instance carries no ``_id``.
        """
        iii = self.execute_hooks("pre_save", self.instance, created=False)
        dd = convert_decimal(iii.dict(exclude_unset=True, by_alias=True))
        if "_id" not in dd:
            raise AttributeError("Can not update document without _id")
        dd_id = dd["_id"]
        if isinstance(dd_id, str):
            dd_id = ObjectId(dd_id)
        del dd["_id"]
        if only_fields and len(only_fields) > 0:
            dd = dict([(key, val) for key, val in dd.items() if key in only_fields])
        softdel = {self.softdelete(): False} if self.softdelete() and not include_deleted else {}
        db = await self.__mongo
        ret = db.find_one_and_update({"_id": dd_id, **softdel, **self.get_parsed_query(extend_query)}, {"$set": dd})
        iii = self.execute_hooks("post_save", iii, created=False)
        return ret

    def get_parsed_query(self, query):
        """Translate ``{field: (Operation, value)}`` pairs into a Mongo filter dict."""
        rsp = {}
        for k, (op, v) in self.parse_query_operations(query).items():
            # perhaps use model to ensure the search value is of correct type
            if op == Operation.exact:
                rsp[k] = v
            elif op == Operation.isnot:
                rsp[k] = {"$ne": v}
            elif op == Operation.contains:
                rsp[k] = {"$regex": '.*' + str(v) + '.*', "$options": "i"}
            elif op == Operation.gt:
                rsp[k] = {"$gt": v}
            elif op == Operation.gte:
                rsp[k] = {"$gte": v}
            elif op == Operation.lt:
                rsp[k] = {"$lt": v}
            elif op == Operation.lte:
                # Fixed: this branch previously tested ``Operation.gt`` a
                # second time (unreachable), so lte queries were dropped.
                rsp[k] = {"$lte": v}
            elif op == Operation.null:
                if v:
                    rsp["$or"] = [{k: {"$exists": False}}, {k: None}]
                else:
                    rsp["$and"] = [{k: {"$exists": True}}, {k: {"$ne": None}}]
        return rsp

    async def find(self, query: dict, params: SearchParams = None, include_deleted: bool = False, retries=0):
        """Run a filtered find and return hydrated model instances.

        Transient errors are retried up to 5 times with a short pause.
        """
        if self.softdelete() and not include_deleted:
            query = {self.softdelete(): False, **query}
        # TODO use projection on model to limit to only desired fields
        find_params = {}
        if params:
            find_params["skip"] = params.offset
            find_params["limit"] = params.limit
            if params.sort not in (None, ''):
                find_params["sort"] = []
                for so in params.sort.split(','):
                    if so[0] == "-":
                        find_params["sort"].append((so[1:], DESCENDING))
                    else:
                        find_params["sort"].append((so, ASCENDING))
        query = self.get_parsed_query(query)
        db = await self.__mongo
        rsplist = []
        try:
            results = db.find(query, **find_params)
            for x in results:
                x2 = self.execute_hooks("pre_init", x)
                m = self.model(**x2)
                rsplist.append(self.execute_hooks("post_init", m))
            return rsplist
        except Exception as e:
            if retries > 5:
                raise
            log.warning(f'Mongo Query returned an error, retrying find({query})! {e}')
            sleep(.2)
            # NOTE(review): the retry passes the already-parsed query back
            # through get_parsed_query/softdelete merging -- confirm that
            # parse_query_operations is idempotent for parsed dicts.
            return await self.find(query, params, include_deleted, retries=retries + 1)

    async def count(self, query: dict, include_deleted: bool = False, retries=0):
        """Count matching documents (query is used as-is, not Operation-parsed)."""
        if self.softdelete() and not include_deleted:
            query = {self.softdelete(): False, **query}
        try:
            db = await self.__mongo
            c = db.count_documents(query)
            return c
        except Exception as e:
            if retries > 5:
                raise
            log.warning(f'Mongo Query returned an error, retrying count({query})! {e}')
            sleep(.2)
            return await self.count(query, include_deleted, retries=retries + 1)

    async def delete(self, obj: Union[str, ObjectId, BaseMongoModel], extend_query: dict = {}, force_harddelete: bool = False):
        """Soft-delete (default, when configured) or hard-delete *obj*.

        ``obj`` may be an id (str/ObjectId) or a model instance; remove hooks
        run only when registered.
        """
        if isinstance(obj, str):
            d = {"_id": ObjectId(obj)}
        elif isinstance(obj, ObjectId):
            d = {"_id": obj}
        else:
            d = obj.dict()
        d.update(self.get_parsed_query(extend_query))
        softdelete = self.softdelete() and not force_harddelete
        db = await self.__mongo
        if self.has_hooks("pre_remove", "post_remove"):
            # Hooks need the hydrated model, so fetch it before removal.
            ret = db.find_one(d)
            if not ret:
                raise NotFoundException()
            ret = self.execute_hooks("pre_init", ret)
            x = self.model(**ret)
            x = self.execute_hooks("post_init", x)
            x = self.execute_hooks("pre_remove", x, softdelete=softdelete)
        if softdelete:
            rsp = db.find_one_and_update(d, {"$set": {self.softdelete(): True}})
        else:
            rsp = db.delete_one(d)
        if self.has_hooks("post_remove"):
            self.execute_hooks("post_remove", x, softdelete=softdelete)
        return rsp
| 33.880866 | 127 | 0.6504 |
04bc4674f0f41fbd10f1214684859051beb0abfb | 1,068 | py | Python | drf_email_project/drf_email_project/urls.py | liquanhui01/drf_email_user | 188ecc3e36edb190cc1cd8f908d3b72c42d49301 | [
"MIT"
] | null | null | null | drf_email_project/drf_email_project/urls.py | liquanhui01/drf_email_user | 188ecc3e36edb190cc1cd8f908d3b72c42d49301 | [
"MIT"
] | null | null | null | drf_email_project/drf_email_project/urls.py | liquanhui01/drf_email_user | 188ecc3e36edb190cc1cd8f908d3b72c42d49301 | [
"MIT"
] | 1 | 2021-11-15T08:23:13.000Z | 2021-11-15T08:23:13.000Z | """drf_email_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
# URL routes: Django admin, the local accounts app, and the SimpleJWT
# token obtain/refresh endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('auth/', include('users.urls')),
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
| 35.6 | 81 | 0.715356 |
37e0ee876b350d4580c518135a723dba75206e20 | 6,105 | py | Python | api/filters.py | raheemazeezabiodun/art-backend | 0bc47f3cf6f403101082f201c7fd1ca8108d5731 | [
"MIT"
] | null | null | null | api/filters.py | raheemazeezabiodun/art-backend | 0bc47f3cf6f403101082f201c7fd1ca8108d5731 | [
"MIT"
] | null | null | null | api/filters.py | raheemazeezabiodun/art-backend | 0bc47f3cf6f403101082f201c7fd1ca8108d5731 | [
"MIT"
] | null | null | null | # Standard Library
import functools
import logging
import operator
# Third-Party Imports
from django.db.models import Q
from django_filters import rest_framework as filters
# App Imports
from core.models import AllocationHistory, Asset, AssetLog, User
logger = logging.getLogger(__name__)
NULL_VALUE = "unspecified"
class BaseFilter(filters.FilterSet):
    """Shared FilterSet helpers for comma-separated multi-value query params.

    Both helpers understand the special value ``unspecified`` (NULL_VALUE),
    which matches rows where the field is NULL.
    """

    def filter_contains_with_multiple_query_values(self, queryset, name, value):
        # Split "a,b,c" into candidate substrings; each is matched with a
        # case-insensitive icontains and the candidates are OR-ed together.
        options = set(value.split(","))
        null_lookup = {}
        if NULL_VALUE in options:
            options.remove(NULL_VALUE)
            null_lookup = {"__".join([name, "isnull"]): True}
        if options:
            lookup = functools.reduce(
                operator.or_,
                {Q(**{"__".join([name, "icontains"]): item}) for item in options},
            )
        else:
            # Only "unspecified" was supplied: Q() is an empty predicate, so
            # the null_lookup alone decides the result.
            lookup = Q(**{})
        return queryset.filter(Q(lookup | Q(**null_lookup)))

    def filter_exact_with_multiple_query_values(self, queryset, name, value):
        # Exact variant: ``field__in`` over all supplied options, OR
        # ``field IS NULL`` when "unspecified" is among them.
        options = set(value.split(","))
        null_lookup = {}
        if NULL_VALUE in options:
            options.remove(NULL_VALUE)
            null_lookup = {"__".join([name, "isnull"]): True}
        lookup = {"__".join([name, "in"]): options}
        return queryset.filter(Q(**lookup) | Q(**null_lookup))
class AssetFilter(BaseFilter):
    """Asset list filters; multi-value fields accept comma-separated options."""

    email = filters.CharFilter(
        field_name="assigned_to__user__email", lookup_expr="icontains"
    )
    model_number = filters.CharFilter(
        field_name="model_number__name",
        method="filter_contains_with_multiple_query_values",
    )
    # NOTE(review): when ``method`` is given, django-filter ignores
    # ``lookup_expr`` -- the icontains below appears redundant.
    serial_number = filters.CharFilter(
        field_name="serial_number",
        lookup_expr="icontains",
        method="filter_contains_with_multiple_query_values",
    )
    asset_type = filters.CharFilter(
        field_name="model_number__asset_make__asset_type__name",
        method="filter_contains_with_multiple_query_values",
    )
    current_status = filters.CharFilter(
        field_name="current_status", lookup_expr="iexact"
    )
    verified = filters.CharFilter(field_name="verified", lookup_expr="iexact")
    department = filters.CharFilter(
        field_name="assigned_to__department__id", lookup_expr="iexact"
    )

    class Meta:
        model = Asset
        fields = ["asset_type", "model_number", "email", "current_status", "verified"]
class AssetLogFilter(BaseFilter):
    """Filters asset logs by asset identity, date parts, people and taxonomy."""

    # filters asset logs by asset type name (case-insensitive)
    asset_type = filters.CharFilter(
        field_name="asset__model_number__asset_make__asset_type__name",
        lookup_expr="iexact",
    )
    asset_serial = filters.CharFilter(
        field_name="asset__serial_number", lookup_expr="iexact"
    )
    asset_code = filters.CharFilter(
        field_name="asset__asset_code", lookup_expr="iexact"
    )
    # filters asset logs by year
    year = filters.NumberFilter(field_name="created_at", lookup_expr="year")
    # filters asset logs by month
    month = filters.NumberFilter(field_name="created_at", lookup_expr="month")
    # filters asset logs by day
    day = filters.NumberFilter(field_name="created_at", lookup_expr="day")
    # filters asset logs by user/owner of the asset
    user = filters.CharFilter(
        field_name="asset__assigned_to__user__email", lookup_expr="icontains"
    )
    # filter asset logs by the e-mail of whoever checked the asset
    checked_by = filters.CharFilter(
        field_name="checked_by__email", lookup_expr="icontains"
    )
    asset_category = filters.CharFilter(
        field_name="asset__model_number__asset_make__asset_type__asset_sub_category__asset_category__name",
        lookup_expr="iexact",
    )
    asset_sub_category = filters.CharFilter(
        field_name="asset__model_number__asset_make__asset_type__asset_sub_category__name",
        lookup_expr="iexact",
    )
    asset_make = filters.CharFilter(
        field_name="asset__model_number__asset_make__name", lookup_expr="iexact"
    )

    class Meta:
        model = AssetLog
        fields = [
            "asset_type",
            "asset_serial",
            "asset_code",
            "user",
            "checked_by",
            "year",
            "month",
            "day",
            "asset_category",
            "asset_sub_category",
            "asset_make",
        ]
class UserFilter(BaseFilter):
    """User list filters, including a computed allocated-asset-count filter."""

    cohort = filters.CharFilter(
        field_name="cohort",
        lookup_expr="iexact",
        method="filter_exact_with_multiple_query_values",
    )
    email = filters.CharFilter(field_name="email", lookup_expr="istartswith")
    asset_count = filters.CharFilter(
        field_name="allocated_asset_count",
        label="Asset count",
        lookup_expr="iexact",
        method="filter_by_allocated_asset_count",
    )
    is_active = filters.CharFilter(field_name="is_active", lookup_expr="iexact")

    def filter_by_allocated_asset_count(self, queryset, name, value):
        # ``name`` is unused; django-filter always passes it to method filters.
        # NOTE(review): this iterates the whole queryset and issues one COUNT
        # per user -- fine for small user sets; consider annotate(Count(...))
        # for large ones.
        users = [
            user.id
            for user in queryset
            if str(user.assetassignee.asset_set.count()) in value.split(",")
        ]
        return queryset.filter(id__in=users)

    class Meta:
        model = User
        fields = ["cohort", "email", "asset_count"]
class AllocationsHistoryFilter(BaseFilter):
    """Filters allocation history by owner, workspace, department and asset."""

    owner = filters.CharFilter(
        field_name="current_owner__user__email", lookup_expr="icontains"
    )
    workspace = filters.CharFilter(
        field_name="current_owner__workspace__id", lookup_expr="iexact"
    )
    department = filters.CharFilter(
        field_name="current_owner__department__id", lookup_expr="iexact"
    )
    asset_serial_number = filters.CharFilter(
        field_name="asset__serial_number", lookup_expr="iexact"
    )
    asset_code = filters.CharFilter(
        field_name="asset__asset_code", lookup_expr="iexact"
    )

    class Meta:
        model = AllocationHistory
        fields = [
            "owner",
            "workspace",
            "department",
            "asset_serial_number",
            "asset_code",
        ]
| 31.632124 | 107 | 0.653726 |
00696a2ff352b797b0251ccbe86fe70e9da2f415 | 488 | py | Python | decay_fns/decay_comparison.py | noelevans/playground | da529e967a15bcb217fff091ac0ec5c4dc1821ce | [
"MIT"
] | 1 | 2015-04-28T20:36:57.000Z | 2015-04-28T20:36:57.000Z | decay_fns/decay_comparison.py | noelevans/playground | da529e967a15bcb217fff091ac0ec5c4dc1821ce | [
"MIT"
] | 2 | 2021-02-03T21:05:54.000Z | 2021-03-23T09:25:43.000Z | decay_fns/decay_comparison.py | noelevans/playground | da529e967a15bcb217fff091ac0ec5c4dc1821ce | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
import numpy as np
def main():
    """Plot two exponential decay curves for visual comparison.

    Curve 1 is a true half-life decay, (1/2)**(x / t_half); curve 2 is
    e**(-x * t_half) as in the original script (a much faster decay, not an
    equivalent parameterisation).
    """
    half_life = 5
    x = np.linspace(0, 20, 100)
    y_1 = 0.5 ** (x / half_life)
    y_2 = np.e ** - (x * half_life)
    label_1 = r'$y = (\frac{1}{2})^{x / t_{\frac{1}{2}}}$'
    # Fixed label: the curve plotted is e^{-x.t_half}; the old legend text
    # omitted the minus sign and so mislabelled the graph.
    label_2 = r'$y = e^{-x.t_{\frac{1}{2}}}$'
    plt.plot(x, y_1, linewidth=2, label=label_1)
    plt.plot(x, y_2, linewidth=2, label=label_2)
    plt.legend(fontsize='xx-large')
    plt.show()
# Script entry point: only plot when executed directly.
if __name__ == '__main__':
    main()
| 23.238095 | 58 | 0.559426 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.