code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import EmptyPage
from django.http import JsonResponse
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from common.models import Sign
from mmapi.serializers import sign
from utils.schema_view import DocParam
from utils import CustomSerialzer, CustomPagination, file_util, constants, common_util
class UploadPhoto(GenericAPIView):
"""
上传照片
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("file", "formData", True, "文件", "file"),
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def post(self, request):
form = file_util.UploadFileForm(request.POST, request.FILES) # 注意获取数据的方式
if form.is_valid():
(file_path, file_src_name, file_path2) = file_util.save_file(constants.sign_photo_path
, request.FILES['file']
, thumbnail=True, thumbnail_width=400)
data = {
"photo_url": file_path,
"thumbnail_url": file_path2
}
result = {"code": 1, "msg": "照片上传成功", "data": data}
else:
result = {"code": 0, "msg": "照片无效"}
return JsonResponse(result)
class DoSign(GenericAPIView):
"""
打卡
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("lng", "formData", True, "经度", "number"),
DocParam("lat", "formData", True, "纬度", "number"),
DocParam("address", "formData", True, "地址", "string"),
DocParam("nation", "formData", True, "国家", "string"),
DocParam("province", "formData", True, "省份", "string"),
DocParam("city", "formData", True, "城市", "string"),
DocParam("district", "formData", True, "区县", "string"),
DocParam("street", "formData", True, "街道", "string"),
DocParam("street_number", "formData", True, "门牌", "string"),
DocParam("remark", "formData", True, "备注说明", "string"),
DocParam("photo_url", "formData", True, "照片地址", "string"),
DocParam("thumbnail_url", "formData", True, "照片缩略图地址", "string")
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def post(self, request):
lng = float(request.POST.get("lng", "0"))
lat = float(request.POST.get("lat", "0"))
address = request.POST.get("address", "")
nation = request.POST.get("nation", "")
province = request.POST.get("province", "")
city = request.POST.get("city", "")
district = request.POST.get("district", "")
street = request.POST.get("street", "")
street_number = request.POST.get("street_number", "")
remark = request.POST.get("remark", "")
photo_url = request.POST.get("photo_url", "")
thumbnail_url = request.POST.get("thumbnail_url", "")
try:
sign_entity = Sign()
sign_entity.lng = lng
sign_entity.lat = lat
sign_entity.address = address
sign_entity.nation = nation
sign_entity.province = province
sign_entity.city = city
sign_entity.district = district
sign_entity.street = street
sign_entity.street_number = street_number
sign_entity.district = district
sign_entity.remark = remark
sign_entity.photo_url = photo_url
sign_entity.thumbnail_url = thumbnail_url
sign_entity.user_id = request.user
sign_entity.save()
result = {"code": 1, "msg": "打卡成功"}
except:
result = {"code": 0, "msg": "打卡失败"}
return JsonResponse(result)
class GetList(GenericAPIView):
"""
获取打卡列表
"""
# 默认查询记录集
queryset = Sign.objects.all().order_by("-id")
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 分页类
pagination_class = CustomPagination
# 接口参数定义
core_api_fields = (
DocParam("current", "formData", True, "当前页码", "integer"),
DocParam("size", "formData", True, "每页记录条数", "integer"),
DocParam("query_date", "formData", False, "查询日期", "String"),
DocParam("query_type", "formData", False, "查询类型", "integer"),
)
# 方法定义
def post(self, request):
# 查询
query_date = request.POST.get("query_date", "")
query_type = int(request.POST.get("query_type", "0"))
queryset = self.get_queryset().filter(user_id=request.user)
if query_date:
begin_date = datetime.strptime(query_date, "%Y-%m-%d")
if query_type == 0: # 月份查询
end_date = common_util.get_first_day_of_next_month(begin_date)
else: # 日期查询
end_date = common_util.get_day(begin_date) + timedelta(days=1)
queryset = queryset.filter(create_time__gte=begin_date, create_time__lt=end_date)
try:
# 分页查询
page = self.paginate_queryset(queryset)
# 数据序列化
sr = sign.QuerySerialzer(instance=page, many=True)
paging_info = self.paginator.get_paging_info()
page = {
"size": paging_info["page_size"],
"current": paging_info["current_page"],
"total": paging_info["total_count"],
"pages": paging_info["total_pages"],
"records": sr.data,
}
data = {
"page": page
}
result = {"code": 1, "msg": "查询成功", "data": data}
except EmptyPage: # 空页,查询页码大于现有页码
result = {"code": 0, "msg": "查询失败,已经是最后一页了"}
return JsonResponse(result)
class GetDetail(GenericAPIView):
"""
获取打卡信息
"""
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("id", "query", False, "id", "integer"),
)
# 方法定义
def get(self, request):
id = int(request.GET.get("id", "-1"))
sign_entity = Sign.objects.select_related("user").filter(id=id).first()
if sign_entity:
data = {
"id": sign_entity.id,
"lng": sign_entity.lng,
"lat": sign_entity.lat,
"address": sign_entity.address,
"photo_url": sign_entity.photo_url,
"thumbnail_url": sign_entity.thumbnail_url,
"remark": sign_entity.remark,
"openid": sign_entity.user.openid,
"create_time": sign_entity.create_time.strftime("%Y-%m-%d %H:%M:%S"),
}
result = {"code": 1, "msg": "查询成功", "data": data}
else:
result = {"code": 0, "msg": "打卡信息不存在"}
return JsonResponse(result)
class Delete(GenericAPIView):
"""
删除打卡信息
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("id", "query", True, "id", "integer"),
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def get(self, request):
id = int(request.GET.get("id", "0"))
try:
Sign.objects.get(id=id).delete()
result = {"code": 1, "msg": "打卡信息删除成功"}
except ObjectDoesNotExist:
result = {"code": 0, "msg": "打卡信息不存在"}
return JsonResponse(result)
class GetCount(GenericAPIView):
"""
获取打卡数
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("query_date", "formData", False, "查询日期", "String"),
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def post(self, request):
# 查询
query_date = request.POST.get("query_date", "")
print(query_date)
queryset = Sign.objects.filter(user_id=request.user)
if query_date:
begin_date = datetime.strptime(query_date, "%Y-%m-%d")
end_date = common_util.get_day(begin_date) + timedelta(days=1)
queryset = queryset.filter(create_time__gte=begin_date, create_time__lt=end_date)
data_count = queryset.count()
data = {
"data_count": data_count
}
result = {"code": 1, "msg": "查询成功", "data": data}
return JsonResponse(result) | mmapi/views/sign.py | from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import EmptyPage
from django.http import JsonResponse
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from common.models import Sign
from mmapi.serializers import sign
from utils.schema_view import DocParam
from utils import CustomSerialzer, CustomPagination, file_util, constants, common_util
class UploadPhoto(GenericAPIView):
"""
上传照片
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("file", "formData", True, "文件", "file"),
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def post(self, request):
form = file_util.UploadFileForm(request.POST, request.FILES) # 注意获取数据的方式
if form.is_valid():
(file_path, file_src_name, file_path2) = file_util.save_file(constants.sign_photo_path
, request.FILES['file']
, thumbnail=True, thumbnail_width=400)
data = {
"photo_url": file_path,
"thumbnail_url": file_path2
}
result = {"code": 1, "msg": "照片上传成功", "data": data}
else:
result = {"code": 0, "msg": "照片无效"}
return JsonResponse(result)
class DoSign(GenericAPIView):
"""
打卡
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("lng", "formData", True, "经度", "number"),
DocParam("lat", "formData", True, "纬度", "number"),
DocParam("address", "formData", True, "地址", "string"),
DocParam("nation", "formData", True, "国家", "string"),
DocParam("province", "formData", True, "省份", "string"),
DocParam("city", "formData", True, "城市", "string"),
DocParam("district", "formData", True, "区县", "string"),
DocParam("street", "formData", True, "街道", "string"),
DocParam("street_number", "formData", True, "门牌", "string"),
DocParam("remark", "formData", True, "备注说明", "string"),
DocParam("photo_url", "formData", True, "照片地址", "string"),
DocParam("thumbnail_url", "formData", True, "照片缩略图地址", "string")
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def post(self, request):
lng = float(request.POST.get("lng", "0"))
lat = float(request.POST.get("lat", "0"))
address = request.POST.get("address", "")
nation = request.POST.get("nation", "")
province = request.POST.get("province", "")
city = request.POST.get("city", "")
district = request.POST.get("district", "")
street = request.POST.get("street", "")
street_number = request.POST.get("street_number", "")
remark = request.POST.get("remark", "")
photo_url = request.POST.get("photo_url", "")
thumbnail_url = request.POST.get("thumbnail_url", "")
try:
sign_entity = Sign()
sign_entity.lng = lng
sign_entity.lat = lat
sign_entity.address = address
sign_entity.nation = nation
sign_entity.province = province
sign_entity.city = city
sign_entity.district = district
sign_entity.street = street
sign_entity.street_number = street_number
sign_entity.district = district
sign_entity.remark = remark
sign_entity.photo_url = photo_url
sign_entity.thumbnail_url = thumbnail_url
sign_entity.user_id = request.user
sign_entity.save()
result = {"code": 1, "msg": "打卡成功"}
except:
result = {"code": 0, "msg": "打卡失败"}
return JsonResponse(result)
class GetList(GenericAPIView):
"""
获取打卡列表
"""
# 默认查询记录集
queryset = Sign.objects.all().order_by("-id")
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 分页类
pagination_class = CustomPagination
# 接口参数定义
core_api_fields = (
DocParam("current", "formData", True, "当前页码", "integer"),
DocParam("size", "formData", True, "每页记录条数", "integer"),
DocParam("query_date", "formData", False, "查询日期", "String"),
DocParam("query_type", "formData", False, "查询类型", "integer"),
)
# 方法定义
def post(self, request):
# 查询
query_date = request.POST.get("query_date", "")
query_type = int(request.POST.get("query_type", "0"))
queryset = self.get_queryset().filter(user_id=request.user)
if query_date:
begin_date = datetime.strptime(query_date, "%Y-%m-%d")
if query_type == 0: # 月份查询
end_date = common_util.get_first_day_of_next_month(begin_date)
else: # 日期查询
end_date = common_util.get_day(begin_date) + timedelta(days=1)
queryset = queryset.filter(create_time__gte=begin_date, create_time__lt=end_date)
try:
# 分页查询
page = self.paginate_queryset(queryset)
# 数据序列化
sr = sign.QuerySerialzer(instance=page, many=True)
paging_info = self.paginator.get_paging_info()
page = {
"size": paging_info["page_size"],
"current": paging_info["current_page"],
"total": paging_info["total_count"],
"pages": paging_info["total_pages"],
"records": sr.data,
}
data = {
"page": page
}
result = {"code": 1, "msg": "查询成功", "data": data}
except EmptyPage: # 空页,查询页码大于现有页码
result = {"code": 0, "msg": "查询失败,已经是最后一页了"}
return JsonResponse(result)
class GetDetail(GenericAPIView):
"""
获取打卡信息
"""
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("id", "query", False, "id", "integer"),
)
# 方法定义
def get(self, request):
id = int(request.GET.get("id", "-1"))
sign_entity = Sign.objects.select_related("user").filter(id=id).first()
if sign_entity:
data = {
"id": sign_entity.id,
"lng": sign_entity.lng,
"lat": sign_entity.lat,
"address": sign_entity.address,
"photo_url": sign_entity.photo_url,
"thumbnail_url": sign_entity.thumbnail_url,
"remark": sign_entity.remark,
"openid": sign_entity.user.openid,
"create_time": sign_entity.create_time.strftime("%Y-%m-%d %H:%M:%S"),
}
result = {"code": 1, "msg": "查询成功", "data": data}
else:
result = {"code": 0, "msg": "打卡信息不存在"}
return JsonResponse(result)
class Delete(GenericAPIView):
"""
删除打卡信息
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("id", "query", True, "id", "integer"),
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def get(self, request):
id = int(request.GET.get("id", "0"))
try:
Sign.objects.get(id=id).delete()
result = {"code": 1, "msg": "打卡信息删除成功"}
except ObjectDoesNotExist:
result = {"code": 0, "msg": "打卡信息不存在"}
return JsonResponse(result)
class GetCount(GenericAPIView):
"""
获取打卡数
"""
# 接口参数定义
core_api_fields = (
DocParam("x-token", "header", True, "用户登录Token", "string"),
DocParam("query_date", "formData", False, "查询日期", "String"),
)
# 默认序列化类
serializer_class = CustomSerialzer
# 权限判断类,不需要权限的接口,配置为[],或[AllowAny, ]
permission_classes = [AllowAny, ]
# 方法定义
def post(self, request):
# 查询
query_date = request.POST.get("query_date", "")
print(query_date)
queryset = Sign.objects.filter(user_id=request.user)
if query_date:
begin_date = datetime.strptime(query_date, "%Y-%m-%d")
end_date = common_util.get_day(begin_date) + timedelta(days=1)
queryset = queryset.filter(create_time__gte=begin_date, create_time__lt=end_date)
data_count = queryset.count()
data = {
"data_count": data_count
}
result = {"code": 1, "msg": "查询成功", "data": data}
return JsonResponse(result) | 0.268462 | 0.143427 |
import threading
from contextlib import contextmanager
from geobox.model.tasks import Task
from geobox.utils import join_threads
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class ProcessThread(threading.Thread):
def __init__(self, app_state, task_class_mapping, task_process_mapping):
threading.Thread.__init__(self)
self.daemon = True
self.app_state = app_state
self.background_threads = {}
self.task_process_mapping = task_process_mapping
self.task_classes = task_class_mapping.values()
self.concurrency = 2
def run(self):
self.cleanup_old_tasks()
while not self.app_state.wait_for_app_shutdown(timeout=2):
self.check_running_tasks()
self.check_new_tasks()
self.stop_running_tasks()
def shutdown(self):
pass
def cleanup_old_tasks(self):
session = self.app_state.user_db_session()
query = session.query(Task).with_polymorphic(self.task_classes)
query = query.filter(Task.is_running == True)
for task in query:
task.is_running = False
session.commit()
def check_new_tasks(self):
free_task_slots = self.concurrency - len(self.background_threads)
if free_task_slots <= 0:
return
session = self.app_state.user_db_session()
query = session.query(Task).with_polymorphic(self.task_classes)
query = query.filter(Task.is_active == True).filter(Task.is_running == False).filter(Task.is_paused == False)
query = query.order_by(Task.time_created)
query = query.limit(free_task_slots)
for task in query:
log.debug('starting %s', task)
self.start_task_process(task)
task.is_running = True
session.commit()
session.close()
def start_task_process(self, task):
log.debug('starting new process for %s', task)
process_class = self.task_process_mapping[task.type]
p = process_class(self.app_state, task)
self.background_threads[task.id] = p
p.start()
def check_running_tasks(self):
log.debug('checking tasks')
session = self.app_state.user_db_session()
for task_id, t in self.background_threads.items():
if not t.is_alive():
log.debug('process %s terminated', t)
del self.background_threads[task_id]
task = session.query(Task).with_polymorphic('*').get(task_id)
task.is_running = False
session.commit()
for task_id, t in self.background_threads.items():
task = session.query(Task).with_polymorphic('*').get(task_id)
if task.is_paused:
log.debug('task %s paused', t)
t.terminate()
def stop_running_tasks(self):
log.debug('stopping task')
for t in self.background_threads.itervalues():
log.debug('stopping task %s', t)
t.terminate()
join_threads(self.background_threads.values(), max_wait_time=5)
self.background_threads.clear()
class ProcessBase(threading.Thread):
def __init__(self, app_state, task):
threading.Thread.__init__(self)
# store only task id, we don't want to keep task object
# around in other thread
self.task_id = task.id
self.app_state = app_state
@contextmanager
def task(self):
"""
Contextmanager for task object. Changes on object will
be saved when no exception is raised.
"""
session = self.app_state.user_db_session()
query = session.query(Task).with_polymorphic('*')
task = query.filter(Task.id == self.task_id).one()
try:
yield task
except Exception:
session.rollback()
raise
else:
session.commit()
def task_done(self):
"""
Mark task as done.
"""
with self.task() as task:
task.refresh_time_updated()
task.is_running = False
task.is_active = False
task.progress = 1.0
log.debug('Task %d done' % self.task_id)
def task_failed(self, e):
"""
Mark task as failed
"""
with self.task() as task:
task.is_running = False
task.is_active = True
task.is_paused = True
task.error = str(e)
log.error('Task %d failed' % self.task_id)
log.exception(e)
def update_task_status(self):
with self.task() as task:
task.refresh_time_updated()
def process(self):
raise NotImplementedError()
def run(self):
self.process()
def terminate(self):
pass | app/geobox/process/base.py |
import threading
from contextlib import contextmanager
from geobox.model.tasks import Task
from geobox.utils import join_threads
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class ProcessThread(threading.Thread):
def __init__(self, app_state, task_class_mapping, task_process_mapping):
threading.Thread.__init__(self)
self.daemon = True
self.app_state = app_state
self.background_threads = {}
self.task_process_mapping = task_process_mapping
self.task_classes = task_class_mapping.values()
self.concurrency = 2
def run(self):
self.cleanup_old_tasks()
while not self.app_state.wait_for_app_shutdown(timeout=2):
self.check_running_tasks()
self.check_new_tasks()
self.stop_running_tasks()
def shutdown(self):
pass
def cleanup_old_tasks(self):
session = self.app_state.user_db_session()
query = session.query(Task).with_polymorphic(self.task_classes)
query = query.filter(Task.is_running == True)
for task in query:
task.is_running = False
session.commit()
def check_new_tasks(self):
free_task_slots = self.concurrency - len(self.background_threads)
if free_task_slots <= 0:
return
session = self.app_state.user_db_session()
query = session.query(Task).with_polymorphic(self.task_classes)
query = query.filter(Task.is_active == True).filter(Task.is_running == False).filter(Task.is_paused == False)
query = query.order_by(Task.time_created)
query = query.limit(free_task_slots)
for task in query:
log.debug('starting %s', task)
self.start_task_process(task)
task.is_running = True
session.commit()
session.close()
def start_task_process(self, task):
log.debug('starting new process for %s', task)
process_class = self.task_process_mapping[task.type]
p = process_class(self.app_state, task)
self.background_threads[task.id] = p
p.start()
def check_running_tasks(self):
log.debug('checking tasks')
session = self.app_state.user_db_session()
for task_id, t in self.background_threads.items():
if not t.is_alive():
log.debug('process %s terminated', t)
del self.background_threads[task_id]
task = session.query(Task).with_polymorphic('*').get(task_id)
task.is_running = False
session.commit()
for task_id, t in self.background_threads.items():
task = session.query(Task).with_polymorphic('*').get(task_id)
if task.is_paused:
log.debug('task %s paused', t)
t.terminate()
def stop_running_tasks(self):
log.debug('stopping task')
for t in self.background_threads.itervalues():
log.debug('stopping task %s', t)
t.terminate()
join_threads(self.background_threads.values(), max_wait_time=5)
self.background_threads.clear()
class ProcessBase(threading.Thread):
def __init__(self, app_state, task):
threading.Thread.__init__(self)
# store only task id, we don't want to keep task object
# around in other thread
self.task_id = task.id
self.app_state = app_state
@contextmanager
def task(self):
"""
Contextmanager for task object. Changes on object will
be saved when no exception is raised.
"""
session = self.app_state.user_db_session()
query = session.query(Task).with_polymorphic('*')
task = query.filter(Task.id == self.task_id).one()
try:
yield task
except Exception:
session.rollback()
raise
else:
session.commit()
def task_done(self):
"""
Mark task as done.
"""
with self.task() as task:
task.refresh_time_updated()
task.is_running = False
task.is_active = False
task.progress = 1.0
log.debug('Task %d done' % self.task_id)
def task_failed(self, e):
"""
Mark task as failed
"""
with self.task() as task:
task.is_running = False
task.is_active = True
task.is_paused = True
task.error = str(e)
log.error('Task %d failed' % self.task_id)
log.exception(e)
def update_task_status(self):
with self.task() as task:
task.refresh_time_updated()
def process(self):
raise NotImplementedError()
def run(self):
self.process()
def terminate(self):
pass | 0.375592 | 0.146423 |
import signature_dispatch as sd, pytest
from typing import List, Callable
@pytest.fixture(autouse=True, params=[False, True])
def currentframe(request, monkeypatch):
# Not all python implementations support `inspect.currentframe()`, so run
# every test with and without it.
if request.param:
import inspect
monkeypatch.setattr(inspect, 'currentframe', lambda: None)
def test_positional_or_keyword():
@sd
def f(a):
return a
@sd
def f(a, b):
return a, b
assert f(1) == 1
assert f(a=1) == 1
assert f(1, 2) == (1, 2)
assert f(1, b=2) == (1, 2)
assert f(a=1, b=2) == (1, 2)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f(1, 2, 3)
def test_var_positional():
@sd
def f(*a):
return a
@sd
def f(*a, b):
return a, b
assert f() == ()
assert f(1) == (1,)
assert f(1, 2) == (1, 2)
assert f(b=1) == ((), 1)
assert f(1, b=2) == ((1,), 2)
assert f(1, 2, b=3) == ((1, 2), 3)
with pytest.raises(TypeError):
f(c=1)
def test_keyword_only():
@sd
def f(*, a):
return a
@sd
def f(*, a, b):
return a, b
assert f(a=1) == 1
assert f(a=1, b=2) == (1, 2)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f(1)
with pytest.raises(TypeError):
f(b=1)
def test_var_keyword():
@sd
def f(**kwargs):
return kwargs
@sd
def f(a, **kwargs):
return a, kwargs
assert f() == {}
assert f(a=1) == {'a': 1}
assert f(b=1) == {'b': 1}
assert f(a=1, b=2) == {'a': 1, 'b': 2}
assert f(1) == (1, {})
assert f(1, b=2) == (1, {'b': 2})
assert f(1, c=2) == (1, {'c': 2})
assert f(1, b=2, c=3) == (1, {'b': 2, 'c': 3})
with pytest.raises(TypeError):
f(1, 2)
with pytest.raises(TypeError):
f(1, a=2) # `a` specified twice
def test_annotation():
@sd
def f(a: int):
return 'int', a
@sd
def f(a: str):
return 'str', a
@sd
def f(a: List[int]):
return 'List[int]', a
@sd
def f(a: Callable):
return 'Callable', a
assert f(1) == ('int', 1)
assert f('a') == ('str', 'a')
assert f([]) == ('List[int]', [])
assert f([1]) == ('List[int]', [1])
assert f(max) == ('Callable', max)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f({})
with pytest.raises(TypeError):
f(['a'])
def test_annotation_default():
@sd
def f(a: int=0):
return 'int', a
@sd
def f(a: str):
return 'str', a
assert f() == ('int', 0)
assert f(1) == ('int', 1)
assert f('a') == ('str', 'a')
def test_annotation_var_positional():
@sd
def f(*a: int):
return 'int', a
@sd
def f(*a: str):
return 'str', a
assert f() == ('int', ())
assert f(1) == ('int', (1,))
assert f(1, 2) == ('int', (1, 2))
assert f('a') == ('str', ('a',))
assert f('a', 'b') == ('str', ('a', 'b'))
def test_annotation_var_keyword():
@sd
def f(**a: int):
return 'int', a
@sd
def f(**a: str):
return 'str', a
assert f() == ('int', {})
assert f(a=1) == ('int', {'a': 1})
assert f(a=1, b=2) == ('int', {'a': 1, 'b': 2})
assert f(a='a') == ('str', {'a': 'a'})
assert f(a='a', b='b') == ('str', {'a': 'a', 'b': 'b'})
def test_method():
class C:
@sd
def m(self, a):
return a
@sd
def m(self, a, b):
return a, b
obj = C()
assert obj.m(1) == 1
assert obj.m(1, 2) == (1, 2)
with pytest.raises(TypeError):
obj.m()
with pytest.raises(TypeError):
obj.m(1, 2, 3)
def test_classmethod():
class C:
@sd
def m(cls, a):
return cls, a
@sd
def m(cls, a, b):
return cls, a, b
m = classmethod(m)
obj = C()
assert obj.m(1) == (C, 1)
assert obj.m(1, 2) == (C, 1, 2)
with pytest.raises(TypeError):
obj.m()
with pytest.raises(TypeError):
obj.m(1, 2, 3)
@pytest.mark.parametrize(
'deco_a,deco_b,expected', [
(sd, sd, 'a'),
(sd(priority=1), sd, 'a'),
(sd, sd(priority=1), 'b'),
(sd(priority=1), sd(priority=1), 'a'),
(sd(priority=-1), sd, 'b'),
(sd, sd(priority=-1), 'a'),
(sd(priority=-1), sd(priority=-1), 'a'),
(sd(priority=1), sd(priority=-1), 'a'),
(sd(priority=-1), sd(priority=1), 'b'),
],
)
def test_priority(deco_a, deco_b, expected):
@deco_a
def f():
return 'a'
@deco_b
def f():
return 'b'
assert f() == expected
def test_overload():
@sd
def f(a):
return a
@f.overload
def _(a, b):
return a, b
assert f(1) == 1
assert f(1, 2) == (1, 2)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f(1, 2, 3)
@pytest.mark.parametrize(
'priority, expected', [
(-1, 'a'),
(0, 'a'),
(1, 'b'),
],
)
def test_overload_priority(priority, expected):
@sd
def f():
return 'a'
@f.overload(priority=priority)
def _():
return 'b'
assert f() == expected
def test_docstring():
@sd
def f(a):
"a"
return a
@sd
def f(a, b):
"a, b"
return a, b
assert f.__doc__ == "a"
def test_error_message():
@sd
def f(a):
return a
@sd
def f(a, b):
return a, b
with pytest.raises(TypeError) as err:
f()
assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
assert err.match(r"(?m)arguments: $")
assert err.match(r"(?m)candidates:$")
assert err.match(r"(?m)\(a\): missing a required argument: 'a'$")
assert err.match(r"(?m)\(a, b\): missing a required argument: 'a'$")
with pytest.raises(TypeError) as err:
f(1, 2, 3)
assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
assert err.match(r"(?m)arguments: 1, 2, 3$")
assert err.match(r"(?m)candidates:$")
assert err.match(r"(?m)\(a\): too many positional arguments$")
assert err.match(r"(?m)\(a, b\): too many positional arguments$")
def test_error_message_annotation():
@sd
def f(a: int):
return a
@sd
def f(a: List[int]):
return a
with pytest.raises(TypeError) as err:
f('a')
assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
assert err.match(r"(?m)arguments: 'a'$")
assert err.match(r"(?m)candidates:$")
assert err.match(r"(?m)\(a: ?int\): type of a must be int; got str instead$")
assert err.match(r"(?m)\(a: ?List\[int\]\): type of a must be a list; got str instead$")
with pytest.raises(TypeError) as err:
f(['a'])
assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
assert err.match(r"(?m)arguments: \['a'\]$")
assert err.match(r"(?m)candidates:$")
assert err.match(r"(?m)\(a: ?int\): type of a must be int; got list instead$")
assert err.match(r"(?m)\(a: ?List\[int\]\): type of a\[0\] must be int; got str instead$")
def test_function_raises_type_error():
@sd
def f(a):
raise TypeError("my error")
@sd
def f(a):
return a
with pytest.raises(TypeError, match="my error"):
f(1)
def test_ignore_local_variables_with_same_name():
f = None
@sd
def f(a):
return a
@sd
def f(a, b):
return a, b
assert f(1) == 1
assert f(1, 2) == (1, 2)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f(1, 2, 3) | tests/test_dispatch.py |
import signature_dispatch as sd, pytest
from typing import List, Callable
@pytest.fixture(autouse=True, params=[False, True])
def currentframe(request, monkeypatch):
# Not all python implementations support `inspect.currentframe()`, so run
# every test with and without it.
if request.param:
import inspect
monkeypatch.setattr(inspect, 'currentframe', lambda: None)
def test_positional_or_keyword():
@sd
def f(a):
return a
@sd
def f(a, b):
return a, b
assert f(1) == 1
assert f(a=1) == 1
assert f(1, 2) == (1, 2)
assert f(1, b=2) == (1, 2)
assert f(a=1, b=2) == (1, 2)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f(1, 2, 3)
def test_var_positional():
@sd
def f(*a):
return a
@sd
def f(*a, b):
return a, b
assert f() == ()
assert f(1) == (1,)
assert f(1, 2) == (1, 2)
assert f(b=1) == ((), 1)
assert f(1, b=2) == ((1,), 2)
assert f(1, 2, b=3) == ((1, 2), 3)
with pytest.raises(TypeError):
f(c=1)
def test_keyword_only():
@sd
def f(*, a):
return a
@sd
def f(*, a, b):
return a, b
assert f(a=1) == 1
assert f(a=1, b=2) == (1, 2)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f(1)
with pytest.raises(TypeError):
f(b=1)
def test_var_keyword():
@sd
def f(**kwargs):
return kwargs
@sd
def f(a, **kwargs):
return a, kwargs
assert f() == {}
assert f(a=1) == {'a': 1}
assert f(b=1) == {'b': 1}
assert f(a=1, b=2) == {'a': 1, 'b': 2}
assert f(1) == (1, {})
assert f(1, b=2) == (1, {'b': 2})
assert f(1, c=2) == (1, {'c': 2})
assert f(1, b=2, c=3) == (1, {'b': 2, 'c': 3})
with pytest.raises(TypeError):
f(1, 2)
with pytest.raises(TypeError):
f(1, a=2) # `a` specified twice
def test_annotation():
@sd
def f(a: int):
return 'int', a
@sd
def f(a: str):
return 'str', a
@sd
def f(a: List[int]):
return 'List[int]', a
@sd
def f(a: Callable):
return 'Callable', a
assert f(1) == ('int', 1)
assert f('a') == ('str', 'a')
assert f([]) == ('List[int]', [])
assert f([1]) == ('List[int]', [1])
assert f(max) == ('Callable', max)
with pytest.raises(TypeError):
f()
with pytest.raises(TypeError):
f({})
with pytest.raises(TypeError):
f(['a'])
def test_annotation_default():
@sd
def f(a: int=0):
return 'int', a
@sd
def f(a: str):
return 'str', a
assert f() == ('int', 0)
assert f(1) == ('int', 1)
assert f('a') == ('str', 'a')
def test_annotation_var_positional():
@sd
def f(*a: int):
return 'int', a
@sd
def f(*a: str):
return 'str', a
assert f() == ('int', ())
assert f(1) == ('int', (1,))
assert f(1, 2) == ('int', (1, 2))
assert f('a') == ('str', ('a',))
assert f('a', 'b') == ('str', ('a', 'b'))
def test_annotation_var_keyword():
@sd
def f(**a: int):
return 'int', a
@sd
def f(**a: str):
return 'str', a
assert f() == ('int', {})
assert f(a=1) == ('int', {'a': 1})
assert f(a=1, b=2) == ('int', {'a': 1, 'b': 2})
assert f(a='a') == ('str', {'a': 'a'})
assert f(a='a', b='b') == ('str', {'a': 'a', 'b': 'b'})
def test_method():
class C:
@sd
def m(self, a):
return a
@sd
def m(self, a, b):
return a, b
obj = C()
assert obj.m(1) == 1
assert obj.m(1, 2) == (1, 2)
with pytest.raises(TypeError):
obj.m()
with pytest.raises(TypeError):
obj.m(1, 2, 3)
def test_classmethod():
class C:
@sd
def m(cls, a):
return cls, a
@sd
def m(cls, a, b):
return cls, a, b
m = classmethod(m)
obj = C()
assert obj.m(1) == (C, 1)
assert obj.m(1, 2) == (C, 1, 2)
with pytest.raises(TypeError):
obj.m()
with pytest.raises(TypeError):
obj.m(1, 2, 3)
@pytest.mark.parametrize(
    'deco_a,deco_b,expected', [
        # Both candidates accept the call, so the table below encodes the
        # selection rule: higher priority is tried first; on a priority tie
        # the earlier-defined candidate wins. Default priority is 0.
        (sd, sd, 'a'),
        (sd(priority=1), sd, 'a'),
        (sd, sd(priority=1), 'b'),
        (sd(priority=1), sd(priority=1), 'a'),
        (sd(priority=-1), sd, 'b'),
        (sd, sd(priority=-1), 'a'),
        (sd(priority=-1), sd(priority=-1), 'a'),
        (sd(priority=1), sd(priority=-1), 'a'),
        (sd(priority=-1), sd(priority=1), 'b'),
    ],
)
def test_priority(deco_a, deco_b, expected):
    """The `priority` option controls which matching candidate is chosen."""
    @deco_a
    def f():
        return 'a'
    @deco_b
    def f():
        return 'b'
    assert f() == expected
def test_overload():
    """`f.overload` registers an extra candidate explicitly, by reference
    rather than by name (the decorated function is deliberately named `_`)."""
    @sd
    def f(a):
        return a
    @f.overload
    def _(a, b):
        return a, b
    assert f(1) == 1
    # The overload-registered candidate participates in dispatch.
    assert f(1, 2) == (1, 2)
    with pytest.raises(TypeError):
        f()
    with pytest.raises(TypeError):
        f(1, 2, 3)
@pytest.mark.parametrize(
    'priority, expected', [
        # Both candidates accept the call; the original wins unless the
        # overload has strictly higher priority (default priority is 0).
        (-1, 'a'),
        (0, 'a'),
        (1, 'b'),
    ],
)
def test_overload_priority(priority, expected):
    """`f.overload(priority=...)` controls precedence of an explicit overload."""
    @sd
    def f():
        return 'a'
    @f.overload(priority=priority)
    def _():
        return 'b'
    assert f() == expected
def test_docstring():
    """The dispatcher exposes the first-registered candidate's docstring."""
    @sd
    def f(a):
        "a"
        return a
    @sd
    def f(a, b):
        "a, b"
        return a, b
    # __doc__ comes from the first candidate, not the most recent one.
    assert f.__doc__ == "a"
def test_error_message():
    """When no candidate matches, the TypeError lists the arguments and a
    per-candidate reason; `(?m)` lets `$` anchor at individual lines."""
    @sd
    def f(a):
        return a
    @sd
    def f(a, b):
        return a, b
    with pytest.raises(TypeError) as err:
        f()
    assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
    # No arguments were passed, so the "arguments:" line is empty.
    assert err.match(r"(?m)arguments: $")
    assert err.match(r"(?m)candidates:$")
    assert err.match(r"(?m)\(a\): missing a required argument: 'a'$")
    assert err.match(r"(?m)\(a, b\): missing a required argument: 'a'$")
    with pytest.raises(TypeError) as err:
        f(1, 2, 3)
    assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
    assert err.match(r"(?m)arguments: 1, 2, 3$")
    assert err.match(r"(?m)candidates:$")
    assert err.match(r"(?m)\(a\): too many positional arguments$")
    assert err.match(r"(?m)\(a, b\): too many positional arguments$")
def test_error_message_annotation():
    """Annotation mismatches are reported per candidate in the TypeError,
    including the failing element path (e.g. `a[0]`) for container types."""
    @sd
    def f(a: int):
        return a
    @sd
    def f(a: List[int]):
        return a
    with pytest.raises(TypeError) as err:
        f('a')
    assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
    # Arguments are shown in repr form ('a', not a).
    assert err.match(r"(?m)arguments: 'a'$")
    assert err.match(r"(?m)candidates:$")
    assert err.match(r"(?m)\(a: ?int\): type of a must be int; got str instead$")
    assert err.match(r"(?m)\(a: ?List\[int\]\): type of a must be a list; got str instead$")
    with pytest.raises(TypeError) as err:
        f(['a'])
    assert err.match(r"(?m)can't dispatch the given arguments to any of the candidate functions:")
    assert err.match(r"(?m)arguments: \['a'\]$")
    assert err.match(r"(?m)candidates:$")
    assert err.match(r"(?m)\(a: ?int\): type of a must be int; got list instead$")
    # The element index of the offending item is included in the message.
    assert err.match(r"(?m)\(a: ?List\[int\]\): type of a\[0\] must be int; got str instead$")
def test_function_raises_type_error():
    """A TypeError raised inside a selected candidate's body propagates;
    it is not mistaken for a dispatch failure and does not fall through
    to the next candidate."""
    @sd
    def f(a):
        raise TypeError("my error")
    @sd
    def f(a):
        return a
    # The first candidate is selected and its own TypeError surfaces.
    with pytest.raises(TypeError, match="my error"):
        f(1)
def test_ignore_local_variables_with_same_name():
    """A pre-existing local variable named `f` does not confuse candidate
    registration: only @sd-decorated definitions are grouped together."""
    # Deliberately shadow the name before any candidate is defined.
    f = None
    @sd
    def f(a):
        return a
    @sd
    def f(a, b):
        return a, b
    assert f(1) == 1
    assert f(1, 2) == (1, 2)
    with pytest.raises(TypeError):
        f()
    with pytest.raises(TypeError):
        f(1, 2, 3)
from BTrees.OOBTree import OOBTree # pylint: disable=import-error
from persistent.list import PersistentList
from pyramid.threadlocal import get_current_registry
from zope.container.interfaces import IContained, IContainer
from zope.container.ordered import OrderedContainer
from zope.lifecycleevent.interfaces import IObjectMovedEvent
from zope.location.interfaces import ISublocations
from pyams_utils.adapter import ContextAdapter, adapter_config
__docformat__ = 'restructuredtext'
class SimpleContainerMixin:
"""Simple container mixin class"""
next_id = 1
def append(self, obj):
"""Append object to container"""
key = str(self.next_id)
self[key] = obj
self.next_id += 1
return obj.__name__
class BTreeOrderedContainer(OrderedContainer):
"""BTree based ordered container
This container maintain a manual order of it's contents
"""
def __init__(self):
# pylint: disable=super-init-not-called
self._data = OOBTree()
self._order = PersistentList()
class ParentSelector:
"""Interface based parent selector
This selector can be used as a subscriber predicate on IObjectAddedEvent to define
an interface that the new parent must support for the event to be applied:
.. code-block:: python
from pyams_utils.interfaces.site import ISiteRoot
@subscriber(IObjectAddedEvent, parent_selector=ISiteRoot)
def siteroot_object_added_event_handler(event):
'''This is an event handler for an ISiteRoot object added event'''
"""
def __init__(self, ifaces, config):
# pylint: disable=unused-argument
if not isinstance(ifaces, (list, tuple, set)):
ifaces = (ifaces,)
self.interfaces = ifaces
def text(self):
"""Predicate string output"""
return 'parent_selector = %s' % str(self.interfaces)
phash = text
def __call__(self, event):
if not IObjectMovedEvent.providedBy(event): # pylint: disable=no-value-for-parameter
return False
for intf in self.interfaces:
try:
if intf.providedBy(event.newParent):
return True
except (AttributeError, TypeError):
if isinstance(event.newParent, intf):
return True
return False
@adapter_config(required=IContained, provides=ISublocations)
class ContainerSublocationsAdapter(ContextAdapter):
"""Contained object sub-locations adapter
This adapter checks for custom ISublocations interface adapters which can
be defined by any component to get access to inner locations, defined for
example via annotations.
"""
def sublocations(self):
"""See `zope.location.interfaces.ISublocations` interface"""
context = self.context
# Check for adapted sub-locations first...
registry = get_current_registry()
for name, adapter in registry.getAdapters((context,), ISublocations):
if not name: # don't reuse default adapter!!
continue
yield from adapter.sublocations()
# then yield container items
if IContainer.providedBy(context):
yield from context.values()
def find_objects_matching(root, condition, ignore_root=False):
"""Find all objects in root that match the condition
The condition is a Python callable object that takes an object as
argument and must return a boolean result.
All sub-objects of the root will also be searched recursively.
:param object root: the parent object from which search is started
:param callable condition: a callable object which may return true for a given
object to be selected
:param boolean ignore_root: if *True*, the root object will not be returned, even if it matches
the given condition
:return: an iterator for all root's sub-objects matching condition
"""
if (not ignore_root) and condition(root):
yield root
locations = ISublocations(root, None)
if locations is not None:
for location in locations.sublocations(): # pylint: disable=too-many-function-args
if condition(location):
yield location
yield from find_objects_matching(location, condition, ignore_root=True)
def find_objects_providing(root, interface, ignore_root=False):
"""Find all objects in root that provide the specified interface
All sub-objects of the root will also be searched recursively.
:param object root: object; the parent object from which search is started
:param Interface interface: interface; an interface that sub-objects should provide
:param boolean ignore_root: if *True*, the root object will not be returned, even if it
provides the given interface
:return: an iterator for all root's sub-objects that provide the given interface
"""
yield from find_objects_matching(root, interface.providedBy, ignore_root) | src/pyams_utils/container.py | from BTrees.OOBTree import OOBTree # pylint: disable=import-error
from persistent.list import PersistentList
from pyramid.threadlocal import get_current_registry
from zope.container.interfaces import IContained, IContainer
from zope.container.ordered import OrderedContainer
from zope.lifecycleevent.interfaces import IObjectMovedEvent
from zope.location.interfaces import ISublocations
from pyams_utils.adapter import ContextAdapter, adapter_config
__docformat__ = 'restructuredtext'
class SimpleContainerMixin:
"""Simple container mixin class"""
next_id = 1
def append(self, obj):
"""Append object to container"""
key = str(self.next_id)
self[key] = obj
self.next_id += 1
return obj.__name__
class BTreeOrderedContainer(OrderedContainer):
"""BTree based ordered container
This container maintain a manual order of it's contents
"""
def __init__(self):
# pylint: disable=super-init-not-called
self._data = OOBTree()
self._order = PersistentList()
class ParentSelector:
"""Interface based parent selector
This selector can be used as a subscriber predicate on IObjectAddedEvent to define
an interface that the new parent must support for the event to be applied:
.. code-block:: python
from pyams_utils.interfaces.site import ISiteRoot
@subscriber(IObjectAddedEvent, parent_selector=ISiteRoot)
def siteroot_object_added_event_handler(event):
'''This is an event handler for an ISiteRoot object added event'''
"""
def __init__(self, ifaces, config):
# pylint: disable=unused-argument
if not isinstance(ifaces, (list, tuple, set)):
ifaces = (ifaces,)
self.interfaces = ifaces
def text(self):
"""Predicate string output"""
return 'parent_selector = %s' % str(self.interfaces)
phash = text
def __call__(self, event):
if not IObjectMovedEvent.providedBy(event): # pylint: disable=no-value-for-parameter
return False
for intf in self.interfaces:
try:
if intf.providedBy(event.newParent):
return True
except (AttributeError, TypeError):
if isinstance(event.newParent, intf):
return True
return False
@adapter_config(required=IContained, provides=ISublocations)
class ContainerSublocationsAdapter(ContextAdapter):
"""Contained object sub-locations adapter
This adapter checks for custom ISublocations interface adapters which can
be defined by any component to get access to inner locations, defined for
example via annotations.
"""
def sublocations(self):
"""See `zope.location.interfaces.ISublocations` interface"""
context = self.context
# Check for adapted sub-locations first...
registry = get_current_registry()
for name, adapter in registry.getAdapters((context,), ISublocations):
if not name: # don't reuse default adapter!!
continue
yield from adapter.sublocations()
# then yield container items
if IContainer.providedBy(context):
yield from context.values()
def find_objects_matching(root, condition, ignore_root=False):
"""Find all objects in root that match the condition
The condition is a Python callable object that takes an object as
argument and must return a boolean result.
All sub-objects of the root will also be searched recursively.
:param object root: the parent object from which search is started
:param callable condition: a callable object which may return true for a given
object to be selected
:param boolean ignore_root: if *True*, the root object will not be returned, even if it matches
the given condition
:return: an iterator for all root's sub-objects matching condition
"""
if (not ignore_root) and condition(root):
yield root
locations = ISublocations(root, None)
if locations is not None:
for location in locations.sublocations(): # pylint: disable=too-many-function-args
if condition(location):
yield location
yield from find_objects_matching(location, condition, ignore_root=True)
def find_objects_providing(root, interface, ignore_root=False):
"""Find all objects in root that provide the specified interface
All sub-objects of the root will also be searched recursively.
:param object root: object; the parent object from which search is started
:param Interface interface: interface; an interface that sub-objects should provide
:param boolean ignore_root: if *True*, the root object will not be returned, even if it
provides the given interface
:return: an iterator for all root's sub-objects that provide the given interface
"""
yield from find_objects_matching(root, interface.providedBy, ignore_root) | 0.768125 | 0.187728 |
import probe_config as conf
import socket
import re
import os
import tempfile
import shutil
class Swift:
def __init__(self, myname, is_storage):
self.myname = myname
print "Myname = " + self.myname
self.allnodes = conf.swift_nodes
print "all nodes=" + str(self.allnodes)
self.all_ips = [socket.gethostbyname(x) for x in self.allnodes]
self.my_ip = socket.gethostbyname(self.myname)
self.base_dir = '/srv/node/%s1' % conf.data_disk
self.is_storage = is_storage
def _grep(self, needle, filename):
with open(filename, "r") as infile:
for line in infile:
if re.search(needle, line):
return True
return False
def _append_to_file(self, line, filename):
with open(filename, "a") as outfile:
outfile.write(line)
def _initialize_container(self):
print "Initializing container"
os.system('swift -A http://localhost:8080/auth/v1.0 -U simba:simba -K simba123 post simbastore')
def _replace_in_file(self, before, after, filename):
with open(filename, "r") as infile:
lines = infile.readlines()
fh, path = tempfile.mkstemp()
with open(path, 'w') as outfile:
for line in lines:
line = re.sub(before, after, line)
outfile.write(line)
os.close(fh)
os.rename(path, filename)
def _build_ring(self, ring_type, port):
b = "%s.builder" % ring_type
dev = "%s1" % conf.data_disk
os.system("swift-ring-builder %s create %d 3 1" % (b, conf.swift_num_partitions))
znum=1
for node in self.all_ips:
os.system("swift-ring-builder %s add z%d-%s:%d/%s 100" % (b, znum, node, port, dev))
znum += 1
os.system("swift-ring-builder %s" % b)
os.system("swift-ring-builder %s rebalance" % b)
def _build_rings(self):
print 'self.all_ips[0]==', self.all_ips[0]
print 'self.my_ip==', self.my_ip
if self.my_ip == self.all_ips[0]:
self._build_ring('account', 6002)
self._build_ring('container', 6001)
self._build_ring('object', 6000)
shutil.copy2('account.ring.gz', '/etc/swift')
shutil.copy2('container.ring.gz', '/etc/swift')
shutil.copy2('object.ring.gz', '/etc/swift')
os.system('chown -R swift:swift /etc/swift')
def _configure_limits(self):
s = """
* soft nofile 999999
* hard nofile 999999
"""
with open('/etc/security/limits.conf','a') as outfile:
outfile.write(s)
os.system('sysctl -p')
def _configure_sysctl(self):
s = """
# disable TIME_WAIT.. wait..
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_tw_reuse=1
# disable syn cookies
net.ipv4.tcp_syncookies = 0
# double amount of allowed conntrack
net.ipv4.netfilter.ip_conntrack_max = 262144
net.core.rmem_max = 8388608
net.core.wmem_max = 8388608
net.core.rmem_default = 65536
net.core.wmem_default = 65536
net.ipv4.tcp_rmem = 4096 87380 8388608
net.ipv4.tcp_wmem = 4096 65536 8388608
net.ipv4.tcp_mem = 8388608 8388608 8388608
net.ipv4.ip_local_port_range = 15000 61000
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.core.somaxconn = 32768
net.ipv4.tcp_max_syn_backlog = 10240
net.core.netdev_max_backlog = 10240
fs.file-max = 999999
"""
with open('/etc/sysctl.conf','w') as outfile:
outfile.write(s)
os.system('sysctl -p')
def _update_users(self):
if not self._grep('swift', '/etc/passwd'):
self._append_to_file('swift:x:109:120::/home/swift:/bin/false', '/etc/passwd')
if not self._grep('swift', '/etc/group'):
self._append_to_file('swift:x:120:', '/etc/group')
os.system('mkdir -p /home/swift')
os.system('chown swift:swift /home/swift')
def _configure_rsync(self):
s="""
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = %s
[account]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/account.lock
[container]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/container.lock
[object]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/object.lock
""" % self.my_ip
with open('/etc/rsyncd.conf', 'w') as outfile:
outfile.write(s)
self._replace_in_file('RSYNC_ENABLE=false', 'RSYNC_ENABLE=true', '/etc/default/rsync')
def _configure_account_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s= """\
[DEFAULT]
bind_ip = %s
workers = 2
devices=/srv/node
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:swift#account
[account-replicator]
concurrency = 4
[account-auditor]
[account-reaper]
concurrency = 4\
""" % self.my_ip
with open('/etc/swift/account-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_container_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s="""\
[DEFAULT]
bind_ip = %s
workers = 2
devices=/srv/node
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:swift#container
[container-replicator]
concurrency = 4
[container-updater]
concurrency = 2
[container-auditor]
[container-sync]\
""" % self.my_ip
with open('/etc/swift/container-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_object_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s="""\
[DEFAULT]
bind_ip = %s
workers = 4
devices=/srv/node
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
network_chunk_size=65536
disk_chunk_size=65536
threads_per_disk=4
replication_concurrency=1
[object-replicator]
concurrency = 1
[object-updater]
concurrency = 1
[object-auditor]
files_per_second = 1
bytes_per_second = 65536
""" % self.my_ip
with open('/etc/swift/object-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_hash(self):
s="""\
[swift-hash]
# random unique strings that can never change (DO NOT LOSE)
swift_hash_path_prefix = 256b3282f8acc0ee0dad2565d1ab670a
swift_hash_path_suffix = 13409460ac1879aff0b161c750fa7db1
"""
with open('/etc/swift/swift.conf', 'w') as outfile:
outfile.write(s)
def _configure_proxy_server(self):
s="""\
[DEFAULT]
bind_port = 8080
workers = 8
user = swift
[pipeline:main]
pipeline = healthcheck cache tempauth proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
account_autocreate = true
[filter:tempauth]
use = egg:swift#tempauth
user_system_root = testpass .admin https://%s:8080/v1/AUTH_system
user_simba_simba = simba123 .admin http://%s:8080/v1/AUTH_system
token_life = 604800
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
""" % (self.my_ip, self.my_ip)
all_proxy_nodes = [socket.gethostbyname(x) for x in conf.proxy_nodes]
m = "memcache_servers = %s:11211," % all_proxy_nodes[0]
for p in all_proxy_nodes[1:]:
m += "%s:11211," % p
m += '\n'
with open('/etc/swift/proxy-server.conf', 'w') as outfile:
outfile.write(s)
outfile.write(m)
def _configure_as_storage_node(self):
self._update_users()
os.system("./partition.sh %s %s" % (conf.data_disk, self.base_dir))
os.system("chown swift:swift %s" % self.base_dir)
self._configure_rsync()
self._configure_account_server()
self._configure_container_server()
self._configure_object_server()
self._configure_hash()
self._build_rings()
self._configure_sysctl()
self._configure_limits()
def _configure_as_proxy_node(self):
self._update_users()
# IF PROXY NODES = SWIFT NODES, LEAVE THIS COMMENTED OUT
#os.system("./partition.sh %s %s" % (conf.data_disk, self.base_dir))
#os.system("chgrp %s %s" % (conf.proj, self.base_dir))
#os.system("chmod g+w %s" % self.base_dir)
self._configure_proxy_server()
self._replace_in_file('^-l.*', '-l %s' % self.my_ip, '/etc/memcached.conf')
self._configure_hash()
self._build_rings()
self._configure_sysctl()
def _start_proxy_node(self):
os.system("service memcached stop")
os.system("service memcached start")
os.system('swift-init proxy start')
if self.myname == self.allnodes[-1]:
self._initialize_container()
def _start_storage_node(self):
os.system("service rsync restart")
os.system('swift-init all start')
def configure(self):
print 'Configure swift...'
if self.is_storage:
print 'Configure as Storage Node'
self._configure_as_storage_node()
else:
print 'Configure as Proxy Node'
self._configure_as_proxy_node()
def start(self):
if self.is_storage:
print 'Start Storage Node'
self._start_storage_node()
else:
print 'Start Proxy Node'
self._start_proxy_node()
def stop(self):
os.system('swift-init all stop')
if not self.is_storage:
os.system('service memcached stop') | server/scripts/probe/swift.py |
import probe_config as conf
import socket
import re
import os
import tempfile
import shutil
class Swift:
def __init__(self, myname, is_storage):
self.myname = myname
print "Myname = " + self.myname
self.allnodes = conf.swift_nodes
print "all nodes=" + str(self.allnodes)
self.all_ips = [socket.gethostbyname(x) for x in self.allnodes]
self.my_ip = socket.gethostbyname(self.myname)
self.base_dir = '/srv/node/%s1' % conf.data_disk
self.is_storage = is_storage
def _grep(self, needle, filename):
with open(filename, "r") as infile:
for line in infile:
if re.search(needle, line):
return True
return False
def _append_to_file(self, line, filename):
with open(filename, "a") as outfile:
outfile.write(line)
def _initialize_container(self):
print "Initializing container"
os.system('swift -A http://localhost:8080/auth/v1.0 -U simba:simba -K simba123 post simbastore')
def _replace_in_file(self, before, after, filename):
with open(filename, "r") as infile:
lines = infile.readlines()
fh, path = tempfile.mkstemp()
with open(path, 'w') as outfile:
for line in lines:
line = re.sub(before, after, line)
outfile.write(line)
os.close(fh)
os.rename(path, filename)
def _build_ring(self, ring_type, port):
b = "%s.builder" % ring_type
dev = "%s1" % conf.data_disk
os.system("swift-ring-builder %s create %d 3 1" % (b, conf.swift_num_partitions))
znum=1
for node in self.all_ips:
os.system("swift-ring-builder %s add z%d-%s:%d/%s 100" % (b, znum, node, port, dev))
znum += 1
os.system("swift-ring-builder %s" % b)
os.system("swift-ring-builder %s rebalance" % b)
def _build_rings(self):
print 'self.all_ips[0]==', self.all_ips[0]
print 'self.my_ip==', self.my_ip
if self.my_ip == self.all_ips[0]:
self._build_ring('account', 6002)
self._build_ring('container', 6001)
self._build_ring('object', 6000)
shutil.copy2('account.ring.gz', '/etc/swift')
shutil.copy2('container.ring.gz', '/etc/swift')
shutil.copy2('object.ring.gz', '/etc/swift')
os.system('chown -R swift:swift /etc/swift')
def _configure_limits(self):
s = """
* soft nofile 999999
* hard nofile 999999
"""
with open('/etc/security/limits.conf','a') as outfile:
outfile.write(s)
os.system('sysctl -p')
def _configure_sysctl(self):
s = """
# disable TIME_WAIT.. wait..
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_tw_reuse=1
# disable syn cookies
net.ipv4.tcp_syncookies = 0
# double amount of allowed conntrack
net.ipv4.netfilter.ip_conntrack_max = 262144
net.core.rmem_max = 8388608
net.core.wmem_max = 8388608
net.core.rmem_default = 65536
net.core.wmem_default = 65536
net.ipv4.tcp_rmem = 4096 87380 8388608
net.ipv4.tcp_wmem = 4096 65536 8388608
net.ipv4.tcp_mem = 8388608 8388608 8388608
net.ipv4.ip_local_port_range = 15000 61000
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.core.somaxconn = 32768
net.ipv4.tcp_max_syn_backlog = 10240
net.core.netdev_max_backlog = 10240
fs.file-max = 999999
"""
with open('/etc/sysctl.conf','w') as outfile:
outfile.write(s)
os.system('sysctl -p')
def _update_users(self):
if not self._grep('swift', '/etc/passwd'):
self._append_to_file('swift:x:109:120::/home/swift:/bin/false', '/etc/passwd')
if not self._grep('swift', '/etc/group'):
self._append_to_file('swift:x:120:', '/etc/group')
os.system('mkdir -p /home/swift')
os.system('chown swift:swift /home/swift')
def _configure_rsync(self):
s="""
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = %s
[account]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/account.lock
[container]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/container.lock
[object]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/object.lock
""" % self.my_ip
with open('/etc/rsyncd.conf', 'w') as outfile:
outfile.write(s)
self._replace_in_file('RSYNC_ENABLE=false', 'RSYNC_ENABLE=true', '/etc/default/rsync')
def _configure_account_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s= """\
[DEFAULT]
bind_ip = %s
workers = 2
devices=/srv/node
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:swift#account
[account-replicator]
concurrency = 4
[account-auditor]
[account-reaper]
concurrency = 4\
""" % self.my_ip
with open('/etc/swift/account-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_container_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s="""\
[DEFAULT]
bind_ip = %s
workers = 2
devices=/srv/node
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:swift#container
[container-replicator]
concurrency = 4
[container-updater]
concurrency = 2
[container-auditor]
[container-sync]\
""" % self.my_ip
with open('/etc/swift/container-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_object_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s="""\
[DEFAULT]
bind_ip = %s
workers = 4
devices=/srv/node
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
network_chunk_size=65536
disk_chunk_size=65536
threads_per_disk=4
replication_concurrency=1
[object-replicator]
concurrency = 1
[object-updater]
concurrency = 1
[object-auditor]
files_per_second = 1
bytes_per_second = 65536
""" % self.my_ip
with open('/etc/swift/object-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_hash(self):
s="""\
[swift-hash]
# random unique strings that can never change (DO NOT LOSE)
swift_hash_path_prefix = 256b3282f8acc0ee0dad2565d1ab670a
swift_hash_path_suffix = 13409460ac1879aff0b161c750fa7db1
"""
with open('/etc/swift/swift.conf', 'w') as outfile:
outfile.write(s)
def _configure_proxy_server(self):
s="""\
[DEFAULT]
bind_port = 8080
workers = 8
user = swift
[pipeline:main]
pipeline = healthcheck cache tempauth proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
account_autocreate = true
[filter:tempauth]
use = egg:swift#tempauth
user_system_root = testpass .admin https://%s:8080/v1/AUTH_system
user_simba_simba = simba123 .admin http://%s:8080/v1/AUTH_system
token_life = 604800
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
""" % (self.my_ip, self.my_ip)
all_proxy_nodes = [socket.gethostbyname(x) for x in conf.proxy_nodes]
m = "memcache_servers = %s:11211," % all_proxy_nodes[0]
for p in all_proxy_nodes[1:]:
m += "%s:11211," % p
m += '\n'
with open('/etc/swift/proxy-server.conf', 'w') as outfile:
outfile.write(s)
outfile.write(m)
def _configure_as_storage_node(self):
self._update_users()
os.system("./partition.sh %s %s" % (conf.data_disk, self.base_dir))
os.system("chown swift:swift %s" % self.base_dir)
self._configure_rsync()
self._configure_account_server()
self._configure_container_server()
self._configure_object_server()
self._configure_hash()
self._build_rings()
self._configure_sysctl()
self._configure_limits()
def _configure_as_proxy_node(self):
self._update_users()
# IF PROXY NODES = SWIFT NODES, LEAVE THIS COMMENTED OUT
#os.system("./partition.sh %s %s" % (conf.data_disk, self.base_dir))
#os.system("chgrp %s %s" % (conf.proj, self.base_dir))
#os.system("chmod g+w %s" % self.base_dir)
self._configure_proxy_server()
self._replace_in_file('^-l.*', '-l %s' % self.my_ip, '/etc/memcached.conf')
self._configure_hash()
self._build_rings()
self._configure_sysctl()
def _start_proxy_node(self):
os.system("service memcached stop")
os.system("service memcached start")
os.system('swift-init proxy start')
if self.myname == self.allnodes[-1]:
self._initialize_container()
def _start_storage_node(self):
os.system("service rsync restart")
os.system('swift-init all start')
def configure(self):
print 'Configure swift...'
if self.is_storage:
print 'Configure as Storage Node'
self._configure_as_storage_node()
else:
print 'Configure as Proxy Node'
self._configure_as_proxy_node()
def start(self):
if self.is_storage:
print 'Start Storage Node'
self._start_storage_node()
else:
print 'Start Proxy Node'
self._start_proxy_node()
def stop(self):
os.system('swift-init all stop')
if not self.is_storage:
os.system('service memcached stop') | 0.122143 | 0.069795 |
import subprocess
def run():
subprocess.call(["python", "incremental_learning.py",
"--train_data_path", "../data/slovenian/slo_train_binarized.tsv",
"--test_data_path", "../data/slovenian/slo_internal_test_binarized.tsv",
"--eval_data_path", "../data/slovenian/slo_val_binarized.tsv",
"--output_dir", "../models/shebert_slovenian1",
"--data_column", "data",
"--label_column", "label",
"--tokenizer_file", "..models/shebert_en_finetune/vocab.txt",
"--config_file", "..models/shebert_en_finetune/config.json",
"--model_file", "..models/shebert_en_finetune/pytorch_model.bin",
"--random_seed", "42"])
subprocess.call(["python", "incremental_learning.py",
"--train_data_path", "../data/slovenian/slo_train_binarized.tsv",
"--test_data_path", "../data/slovenian/slo_internal_test_binarized.tsv",
"--eval_data_path", "../data/slovenian/slo_val_binarized.tsv",
"--output_dir", "../models/shebert_slovenian2",
"--data_column", "data",
"--label_column", "label",
"--tokenizer_file", "..models/shebert_en_finetune/vocab.txt",
"--config_file", "..models/shebert_en_finetune/config.json",
"--model_file", "..models/shebert_en_finetune/pytorch_model.bin",
"--random_seed", "84"])
subprocess.call(["python", "incremental_learning.py",
"--train_data_path", "../data/slovenian/slo_train_binarized.tsv",
"--test_data_path", "../data/slovenian/slo_internal_test_binarized.tsv",
"--eval_data_path", "../data/slovenian/slo_val_binarized.tsv",
"--output_dir", "../models/shebert_slovenian3",
"--data_column", "data",
"--label_column", "label",
"--tokenizer_file", "..models/shebert_en_finetune/vocab.txt",
"--config_file", "..models/shebert_en_finetune/config.json",
"--model_file", "..models/shebert_en_finetune/pytorch_model.bin",
"--random_seed", "126"])
if __name__ == "__main__":
run() | src/start_script_shebert2.py | import subprocess
def run():
subprocess.call(["python", "incremental_learning.py",
"--train_data_path", "../data/slovenian/slo_train_binarized.tsv",
"--test_data_path", "../data/slovenian/slo_internal_test_binarized.tsv",
"--eval_data_path", "../data/slovenian/slo_val_binarized.tsv",
"--output_dir", "../models/shebert_slovenian1",
"--data_column", "data",
"--label_column", "label",
"--tokenizer_file", "..models/shebert_en_finetune/vocab.txt",
"--config_file", "..models/shebert_en_finetune/config.json",
"--model_file", "..models/shebert_en_finetune/pytorch_model.bin",
"--random_seed", "42"])
subprocess.call(["python", "incremental_learning.py",
"--train_data_path", "../data/slovenian/slo_train_binarized.tsv",
"--test_data_path", "../data/slovenian/slo_internal_test_binarized.tsv",
"--eval_data_path", "../data/slovenian/slo_val_binarized.tsv",
"--output_dir", "../models/shebert_slovenian2",
"--data_column", "data",
"--label_column", "label",
"--tokenizer_file", "..models/shebert_en_finetune/vocab.txt",
"--config_file", "..models/shebert_en_finetune/config.json",
"--model_file", "..models/shebert_en_finetune/pytorch_model.bin",
"--random_seed", "84"])
subprocess.call(["python", "incremental_learning.py",
"--train_data_path", "../data/slovenian/slo_train_binarized.tsv",
"--test_data_path", "../data/slovenian/slo_internal_test_binarized.tsv",
"--eval_data_path", "../data/slovenian/slo_val_binarized.tsv",
"--output_dir", "../models/shebert_slovenian3",
"--data_column", "data",
"--label_column", "label",
"--tokenizer_file", "..models/shebert_en_finetune/vocab.txt",
"--config_file", "..models/shebert_en_finetune/config.json",
"--model_file", "..models/shebert_en_finetune/pytorch_model.bin",
"--random_seed", "126"])
if __name__ == "__main__":
run() | 0.3295 | 0.135289 |
import json
import pickle
import numpy as np
import pytest
from mockredis import MockRedis
from .conftest import models
from cf_predict import __version__
from cf_predict.resources import get_db
from cf_predict.errors import NoPredictMethod
@pytest.mark.usefixtures("client_class")
class TestCf_predict:
def test_catalogue(self):
rv = self.client.get("/")
assert rv.status_code == 200
assert rv.json == {
"predict_url": "http://localhost/predict",
"api_version": __version__
}
def test_get_db(self):
r = get_db()
r.set("test", 5)
assert int(r.get("test")) == 5
def test_no_model_in_db(self, monkeypatch, caplog):
monkeypatch.setattr("cf_predict.resources.get_db", MockRedis)
pytest.raises(ValueError, self.client.get, "/predict")
assert "No model" in caplog.text()
def test_model_pickle_error(self, monkeypatch, caplog):
def broken_pickle(anything):
raise IOError
monkeypatch.setattr("pickle.loads", broken_pickle)
pytest.raises(IOError, self.client.get, "/predict")
assert "could not be unpickled" in caplog.text()
def test_model_no_predict_error(self, monkeypatch, caplog, broken_model):
monkeypatch.setattr("cf_predict.resources.get_db", broken_model)
pytest.raises(NoPredictMethod, self.client.get, "/predict")
assert "has no predict method" in caplog.text()
def test_get_version(self):
rv = self.client.get("/predict")
assert rv.status_code == 200
assert rv.json == {
"model_version": "1.2.0"
}
def test_post_prediction_valid_features_one_record(self):
features = {"features": [1, 2, 3, 4, 5]}
model = pickle.loads(models().get("1.2.0"))
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 200
assert rv.json == {
"model_version": "1.2.0",
"prediction": list(model.predict(np.array(features["features"]).reshape(1, -1)))
}
def test_post_prediction_valid_features_multiple_records(self):
features = {"features": [[1, 2, 3, 4, 5],
[6, 7, 8, 9, 1],
[2, 3, 4, 5, 6]]}
model = pickle.loads(models().get("1.2.0"))
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 200
assert rv.json == {
"model_version": "1.2.0",
"prediction": list(model.predict(np.array(features["features"])))
}
def test_post_prediction_invalid_features(self):
features = {"features": [1, 2, "lol", 4, 5]}
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 400
assert rv.json == {
"message": "Features [1, 2, 'lol', 4, 5] do not match expected input for model version 1.2.0"
}
def test_post_prediction_invalid_json(self):
features = '{"features: [1, 2, 3, 4, 5]'
rv = self.client.post("/predict",
data=features,
content_type="application/json")
assert rv.status_code == 400
assert rv.json == {
"message": "Failed to decode JSON object: Unterminated string starting at: line 1 column 2 (char 1)"
}
def test_post_prediction_wrong_key(self):
features = {"lol": [1, 2, 3, 4, 5]}
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 400
assert rv.json == {
"message": "Features not found in {'lol': [1, 2, 3, 4, 5]}"
} | cf_predict/test/test_cf_predict.py | import json
import pickle
import numpy as np
import pytest
from mockredis import MockRedis
from .conftest import models
from cf_predict import __version__
from cf_predict.resources import get_db
from cf_predict.errors import NoPredictMethod
@pytest.mark.usefixtures("client_class")
class TestCf_predict:
def test_catalogue(self):
rv = self.client.get("/")
assert rv.status_code == 200
assert rv.json == {
"predict_url": "http://localhost/predict",
"api_version": __version__
}
def test_get_db(self):
r = get_db()
r.set("test", 5)
assert int(r.get("test")) == 5
def test_no_model_in_db(self, monkeypatch, caplog):
monkeypatch.setattr("cf_predict.resources.get_db", MockRedis)
pytest.raises(ValueError, self.client.get, "/predict")
assert "No model" in caplog.text()
def test_model_pickle_error(self, monkeypatch, caplog):
def broken_pickle(anything):
raise IOError
monkeypatch.setattr("pickle.loads", broken_pickle)
pytest.raises(IOError, self.client.get, "/predict")
assert "could not be unpickled" in caplog.text()
def test_model_no_predict_error(self, monkeypatch, caplog, broken_model):
monkeypatch.setattr("cf_predict.resources.get_db", broken_model)
pytest.raises(NoPredictMethod, self.client.get, "/predict")
assert "has no predict method" in caplog.text()
def test_get_version(self):
rv = self.client.get("/predict")
assert rv.status_code == 200
assert rv.json == {
"model_version": "1.2.0"
}
def test_post_prediction_valid_features_one_record(self):
features = {"features": [1, 2, 3, 4, 5]}
model = pickle.loads(models().get("1.2.0"))
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 200
assert rv.json == {
"model_version": "1.2.0",
"prediction": list(model.predict(np.array(features["features"]).reshape(1, -1)))
}
def test_post_prediction_valid_features_multiple_records(self):
features = {"features": [[1, 2, 3, 4, 5],
[6, 7, 8, 9, 1],
[2, 3, 4, 5, 6]]}
model = pickle.loads(models().get("1.2.0"))
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 200
assert rv.json == {
"model_version": "1.2.0",
"prediction": list(model.predict(np.array(features["features"])))
}
def test_post_prediction_invalid_features(self):
features = {"features": [1, 2, "lol", 4, 5]}
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 400
assert rv.json == {
"message": "Features [1, 2, 'lol', 4, 5] do not match expected input for model version 1.2.0"
}
def test_post_prediction_invalid_json(self):
features = '{"features: [1, 2, 3, 4, 5]'
rv = self.client.post("/predict",
data=features,
content_type="application/json")
assert rv.status_code == 400
assert rv.json == {
"message": "Failed to decode JSON object: Unterminated string starting at: line 1 column 2 (char 1)"
}
def test_post_prediction_wrong_key(self):
features = {"lol": [1, 2, 3, 4, 5]}
rv = self.client.post("/predict",
data=json.dumps(features),
content_type="application/json")
assert rv.status_code == 400
assert rv.json == {
"message": "Features not found in {'lol': [1, 2, 3, 4, 5]}"
} | 0.584271 | 0.32342 |
from pathlib import Path
import diplib as dip
import numpy as np
import os
import pandas as pd
def GaussianSmoothing(x, sigma, mask=None):
    """Apply an n-dimensional Gaussian filter to an array.

    Parameters
    ----------
    x : numpy nd-array
        Input array to be smoothed.
    sigma : float
        Standard deviation of the Gaussian kernel (smoothing strength).
    mask : tuple of n 1d arrays of length l, optional
        Coordinates of the l points at which the smoothed values are wanted.

    Returns
    -------
    numpy nd-array or numpy 1d-array
        The fully smoothed array, or, when ``mask`` is given, only the
        smoothed values at the masked points.
    """
    smoothed = np.asarray(dip.Gauss(x, sigma))
    return smoothed if mask is None else smoothed[mask]
def Laplacian(x, mask=None):
    """Compute the n-dimensional Laplacian of an array.

    Parameters
    ----------
    x : numpy nd-array
        Input array on which the Laplacian is computed.
    mask : tuple of n 1d arrays of length l, optional
        Coordinates of the l points at which the Laplacian is wanted.

    Returns
    -------
    numpy nd-array or numpy 1d-array
        The Laplacian of ``x`` (same shape as ``x``), or, when ``mask`` is
        given, only the values at the masked points.
    """
    lap = np.asarray(dip.Laplace(x))
    return lap if mask is None else lap[mask]
def HessianEigenvalues(x, mask=None):
    """Compute the eigenvalues of the Hessian of an n-dimensional array.

    Parameters
    ----------
    x : numpy nd-array
        Input array on which the Hessian eigenvalues are computed.
    mask : tuple of n 1d arrays of length l, optional
        Coordinates of the l points at which the eigenvalues are wanted.

    Returns
    -------
    numpy nd-array or numpy 2d-array
        Array holding all n eigenvalues (shape of ``x`` plus a trailing
        axis), or, when ``mask`` is given, only the eigenvalues at the
        masked points.
    """
    eig = np.asarray(dip.Eigenvalues(dip.Hessian(x)))
    return eig if mask is None else eig[mask]
def BuildFeatureFiles(x, features=("Gau", "Lap", "HGE"),
                      sigma=1.0, dir_path=Path("")):
    """Build all feature files of an input nd array for a given sigma.

    Files are written as ``<feat>_sigma<sigma>.npy`` (the three Hessian
    eigenvalues as ``HGE0_/HGE1_/HGE2_``), plus ``Raw.npy`` when "Raw" is
    requested.

    Parameters
    ----------
    x : numpy nd-array
        Input array on which the features are computed.
    features : sequence of str
        Names of the features to compute ("Gau", "Lap", "HGE", "Raw").
    sigma : float
        Standard deviation of the Gaussian smoothing.
    dir_path : Path or str
        Directory in which the feature files are stored (default: cwd).
    """
    # BUG FIX: the default used to be a mutable list that the function
    # mutated ("Raw" removed, then re-appended), corrupting the shared
    # default and any list passed by the caller. Work on a private copy.
    feature_dic = {"Gau": GaussianSmoothing,
                   "Lap": Laplacian,
                   "HGE": HessianEigenvalues}
    features = list(features)
    add_raw = "Raw" in features
    if add_raw:
        features.remove("Raw")
    # All filters are applied to the smoothed volume, so compute it once.
    gauss = feature_dic["Gau"](x, sigma)
    for feat in features:
        if feat == "HGE":
            eig = feature_dic[feat](gauss)
            # One file per eigenvalue: HGE0_, HGE1_, HGE2_.
            for k in range(3):
                name = Path(feat + str(k) + "_sigma" + str(sigma))
                np.save(dir_path / name, eig[:, :, :, k])
            del eig  # free the large eigenvalue volume immediately
        elif feat != "Gau":
            name = Path(feat + "_sigma" + str(sigma))
            np.save(dir_path / name, feature_dic[feat](gauss))
        else:
            name = Path(feat + "_sigma" + str(sigma))
            np.save(dir_path / name, gauss)
    if add_raw:
        np.save(dir_path / "Raw", x)
def BuildFeatureFilesFromList(image_file, features_list, dir_path):
    """Build the feature files named in ``features_list`` for one image.

    Parameters
    ----------
    image_file : Path or str
        Path to the 3d image array (``.npy``) the features are computed on.
    features_list : list of str
        Feature names in the format ``<type>_sigma<value>`` (e.g.
        ``Gau_sigma2.0``). Supported types: "Gau" (Gaussian filter),
        "Lap" (Laplacian), "HGE1"/"HGE2"/"HGE3" (Hessian eigenvalues).
        Unsupported features are skipped with a message; they must be
        computed separately and dropped into ``dir_path`` by hand.
    dir_path : Path
        Directory in which the feature files are stored.
    """
    image = np.load(image_file)
    for feature in features_list:
        if ("HGE" in feature) or ("Gau" in feature) or ("Lap" in feature):
            parts = feature.split("_sigma")
            feat = parts[0]
            sigma = float(parts[1])
            # Every supported filter operates on the smoothed volume.
            gauss = GaussianSmoothing(image, sigma)
            if "HGE" in feat:
                index = int(feat.split("HGE")[1])
                eigenvalues = HessianEigenvalues(gauss)
                np.save(dir_path / Path(feature), eigenvalues[:, :, :, index])
            elif "Gau" in feat:
                np.save(dir_path / Path(feature), gauss)
            elif "Lap" in feat:
                np.save(dir_path / Path(feature), Laplacian(gauss))
        else:
            print("Please manualy add ", feature, "feature in ", dir_path)
def MultiFilesBuildFeatureFilesFromList(image_list, features_list, dir_path):
    """Build one feature sub-folder per image file.

    For each image in ``image_list`` a sub-folder ``feature_folder<i>`` is
    created under ``dir_path`` and filled with the features named in
    ``features_list`` (see ``BuildFeatureFilesFromList`` for the format).

    Parameters
    ----------
    image_list : list
        Image files for which the features are computed.
    features_list : list of str
        Feature names in the ``<type>_sigma<value>`` format.
    dir_path : str or Path
        Directory under which the per-image sub-folders are created.

    Returns
    -------
    list of Path
        The created ``feature_folder<i>`` paths, one per image, in order.
    """
    dir_path = Path(dir_path)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() + mkdir() pair.
    dir_path.mkdir(exist_ok=True)
    feature_files_list = []
    for i, image_file in enumerate(image_list):
        sub_folder = dir_path / ("feature_folder" + str(i))
        sub_folder.mkdir(exist_ok=True)
        BuildFeatureFilesFromList(image_file, features_list, sub_folder)
        # Collect the folders as they are created instead of rebuilding
        # the list afterwards.
        feature_files_list.append(sub_folder)
    return feature_files_list
def LoadFeaturesDir(dir_path, mask, features_list=None):
    """Load feature values at the masked points into a DataFrame.

    Assumes the target directory contains only feature files (``.npy``
    arrays, each the same shape as the image the mask indexes into).

    Parameters
    ----------
    dir_path : Path
        Directory in which the feature files are stored.
    mask : tuple of n 1d arrays of length l
        Coordinates of the l points at which the features are wanted.
    features_list : list of str, optional
        Feature file names to load, without the ``.npy`` suffix.
        When None (default), every file in the directory is loaded.

    Returns
    -------
    pandas.DataFrame
        An l x m frame (m = number of features), columns sorted by name.
    """
    available = os.listdir(dir_path)
    if features_list is None:
        features_list = [f.split(".npy")[0] for f in available]
    data = np.zeros((len(mask[0]), len(features_list)))
    for col, name in enumerate(features_list):
        if name + ".npy" in available:
            data[:, col] = np.load(dir_path / Path(name + ".npy"))[mask]
        else:
            print("file :" + name + ".npy" + " not found in " + str(dir_path))
    df = pd.DataFrame(data=data, columns=features_list)
    return df[sorted(df.columns)]
import diplib as dip
import numpy as np
import os
import pandas as pd
def GaussianSmoothing(x, sigma, mask=None):
""" Compute n-dimentional gaussian smoothing on nd array.
Parameters
----------
x : numpy nd-array
The imput array to be smoothed
sigma : float
The gaussian standar deviation (smoothing coeficient)
mask : tuple of n 1d array of size l
coordonate of l points one wich the smoothing is wanted.
Returns
-------
gaussian_s : numpy nd-array
The nd array smoothed with a coeficient sigma
gaussian_s[mask] : numpy 1d array
if mask is not None, return an array of the value of the gaussian
smoothing for all points in mask
"""
gaussian_s = np.asarray(dip.Gauss(x, sigma))
if mask is not None:
return gaussian_s[mask]
else:
return gaussian_s
def Laplacian(x, mask=None):
""" Compute n-dimentional laplacian on nd array.
Parameters
----------
x : numpy nd-array
The imput array on wich the laplacian will be computed
mask : tuple of n 1d array of size l
coordonate of l points one wich the laplacian is wanted.
Returns
-------
laplacian : numpy nd-array
The nd array laplacian (same shape as x)
laplacian[mask] : numpy 1d array
if mask is not None, return an array with the value of the laplacian
for all points in mask
"""
laplacian = np.asarray(dip.Laplace(x))
if mask is not None:
return laplacian[mask]
else:
return laplacian
def HessianEigenvalues(x, mask=None):
""" Compute n-dimentional hessian eigenvalues on nd array.
Parameters
----------
x : numpy nd-array
The imput array on wich the hessian eigenvalues will be computed
mask : tuple of n 1d array of size l
coordonate of l points one wich the hessian eigenvalues are wanted.
Returns
-------
eigenvalues : numpy nd-array
The nd array with all n eigenvalues (shape as x shape + 1)
eigenvalues[mask] : numpy 2d array
if mask is not None, return an array with the value of the hessian
eigenvalues for all points in mask
"""
eigenvalues = np.asarray(dip.Eigenvalues(dip.Hessian(x)))
if mask is not None:
return eigenvalues[mask]
else:
return eigenvalues
def BuildFeatureFiles(x, features=["Gau", "Lap", "HGE"],
sigma = 1.0, dir_path = Path("")):
""" Build all feature files of an imput nd array for a given sigma
smoothing.
Parameters
----------
x : numpy nd-array
The imput array on wich the features will be computed
features : list
name of the features to be computed
sigma : float
The gaussian standar deviation (smoothing coeficient)
dir_path : str
path of the directory in wich the feature files are to be stored
"""
feature_dic={"Gau": GaussianSmoothing,
"Lap": Laplacian,
"HGE": HessianEigenvalues}
add_raw = False
if "Raw" in features:
add_raw = True
features.remove("Raw")
i=0
gauss = feature_dic["Gau"](x, sigma)
for feat in features:
if feat == "HGE":
eig = feature_dic[feat](gauss)
name = Path(feat + str(0) + "_sigma" + str(sigma))
np.save(dir_path / name, eig[:,:,:,0])
i += 1
name = Path(feat + str(1) + "_sigma" + str(sigma))
np.save(dir_path / name, eig[:,:,:,1])
i += 1
name = Path(feat + str(2) + "_sigma" + str(sigma))
np.save(dir_path / name, eig[:,:,:,2])
del eig
elif feat != "Gau":
name = Path(feat+"_sigma" + str(sigma))
np.save(dir_path / name, feature_dic[feat](gauss))
else:
name = Path(feat+"_sigma" + str(sigma))
np.save(dir_path / name, gauss)
i+=1
if add_raw:
np.save(dir_path / "Raw", x)
features.append("Raw")
def BuildFeatureFilesFromList(image_file, features_list, dir_path):
""" Build all feature files in a targeted directory from an imput
features list.
Parameters
----------
image_file : Path or str
Path to the immage 3d array on wich the features will be computed
features_list : list
name of the features to be computed.
Each feature must be writen in the following format,
<<feature_type>>_sigma<<range>>, ex: Gau_sigma2.0 or Lap_sigma1.0.
Only compatible features are computed, non compatible features
are to be separatly computed and mannualy added into the features
directory as numpy ndarray.
Compatible feature type:
- "Gau" for Gaussian filter,
- "Lap" for Laplacian filter,
- "HGE1", "HGE2", "HGE3" for 1st, 2nd and 3rd Hessian eigenvalues,
dir_path : str
path of the directory in wich the feature files are to be stored
"""
x = np.load(image_file)
for feature in features_list:
if "HGE" in feature or "Gau" in feature or "Lap" in feature:
split = feature.split("_sigma")
sigma = float(split[1])
feat = split[0]
gauss = GaussianSmoothing(x, sigma)
if "HGE" in feat:
eig = int(feat.split("HGE")[1])
eigenvalues = HessianEigenvalues(gauss)
np.save(dir_path / Path(feature), eigenvalues[:,:,:,eig])
elif "Gau" in feat:
np.save(dir_path / Path(feature), gauss)
elif "Lap" in feat:
np.save(dir_path / Path(feature), Laplacian(gauss))
else:
print("Please manualy add ", feature, "feature in " , dir_path)
def MultiFilesBuildFeatureFilesFromList(image_list, features_list, dir_path):
""" For a list of image files, build in the tageted folder a list of
sub_folder in wich for each image, all the features in the feature file
list are built
Parameters
----------
image_list : list
list of image files for wich the features are to be computed
features_list : list
name of the features to be computed.
Each feature must be writen in the following format,
<<feature_type>>_sigma<<range>>, ex: Gau_sigma2.0 or Lap_sigma1.0.
Only compatible features are computed, non compatible features
are to be separatly computed and mannualy added into the features
directory as numpy ndarray.
Compatible feature type:
- "Gau" for Gaussian filter,
- "Lap" for Laplacian filter,
- "HGE1", "HGE2", "HGE3" for 1st, 2nd and 3rd Hessian eigenvalues,
dir_path : str or Path
path of the directory in wich the subfolder for each image feature files
are to be stored
"""
dir_path = Path(dir_path)
if not(os.path.exists(dir_path)):
dir_path.mkdir()
for i, image_file in enumerate(image_list):
sub_folder = dir_path / ("feature_folder" + str(i))
if not(os.path.exists(sub_folder)):
sub_folder.mkdir()
BuildFeatureFilesFromList(image_file, features_list, sub_folder)
feature_files_list = [dir_path / ("feature_folder" + str(i))
for i in range(len(image_list))]
return feature_files_list
def LoadFeaturesDir(dir_path, mask, features_list = None):
""" Assuming only feature files are in a target directory, build a
dataframe with the value of each feature at each points of the mask
Parameters
----------
dir_path : str
path of the directory in wich the feature files are stored
mask : tuple of n 1d array of size l
coordonate of l points one wich the features are wanted.
features_list : list, optional
names of the features files to be loded without the ".npy".
Each feature must a nd numpy array the same size as the mask.
The default is None
if None, all feature files in the directory will be loded
Returns
-------
df : pandas DataFrame
a n * m dataframe where n is the number of points in mask and m the
number of features
"""
features_file_list = os.listdir(dir_path)
if features_list is None:
features_list = [feat.split(".npy")[0] for feat in features_file_list]
data = np.zeros((len(mask[0]), len(features_list)))
for i, name in enumerate(features_list):
if name + ".npy" in features_file_list:
feat = np.load(dir_path / Path(name + ".npy"))[mask]
data[:,i] = feat
else:
print("file :" + name + ".npy" +" not found in " + str(dir_path))
df = pd.DataFrame(data=data, columns=features_list)
df = df[sorted(df.columns)]
return df | 0.898805 | 0.739281 |
from __future__ import print_function
import numpy as np
from paddle.io import IterableDataset
import cv2
import os
class RecDataset(IterableDataset):
    """Omniglot episode dataset for MAML-style few-shot training.

    Loads every character folder referenced by ``file_list`` into memory
    once, then each iteration yields one n-way / k-shot episode as
    ``[x_spt, y_spt, x_qry, y_qry]``.
    """

    def __init__(self, file_list, config):
        super(RecDataset, self).__init__()
        self.file_list = file_list
        self.config = config
        self.n_way = 5       # classes sampled per episode
        self.k_spt = 1       # support examples per class
        self.k_query = 15    # query examples per class
        self.imgsize = 28
        # Fixed seed so sampled episodes are reproducible across runs.
        np.random.seed(12345)
        character_folders = [
            os.path.join(family, character) for family in self.file_list
            if os.path.isdir(family) for character in os.listdir(family)
        ]
        imgs_list = []
        for char_fold in character_folders:
            char_list = []
            for file in [
                    os.path.join(char_fold, f) for f in os.listdir(char_fold)
            ]:
                img = cv2.imread(file)
                img = cv2.resize(img, (28, 28))
                img = np.transpose(img, (2, 0, 1))
                img = img[0].astype('float32')  # 只取零通道 (keep channel 0 only)
                img = img / 255.0
                img = img * 2.0 - 1.0           # rescale to [-1, 1]
                char_list.append(img)
            imgs_list.append(np.array(char_list))
        # Shape: [num_characters, 20, 1, 28, 28]
        self.train_imgs = np.array(imgs_list)[:, :, np.newaxis, :, :]

    def __iter__(self):
        """Yield 3200 randomly sampled episodes per epoch."""
        # CLEANUP: removed the unused `full_lines` local and the unused
        # `self.data` attribute that the original initialized here.
        for _ in range(3200):
            x_spt, y_spt, x_qry, y_qry = [], [], [], []
            selected_cls = np.random.choice(
                self.train_imgs.shape[0], self.n_way, replace=False)
            for label, cur_class in enumerate(selected_cls):
                selected_img = np.random.choice(
                    20, self.k_spt + self.k_query, replace=False)
                # 构造support集和query集 (build the support and query sets)
                x_spt.append(self.train_imgs[cur_class][
                    selected_img[:self.k_spt]])
                x_qry.append(self.train_imgs[cur_class][selected_img[
                    self.k_spt:]])
                y_spt.append([label for _ in range(self.k_spt)])
                y_qry.append([label for _ in range(self.k_query)])
            # Shuffle support and query sets independently.
            perm = np.random.permutation(self.n_way * self.k_spt)
            x_spt = np.array(x_spt).reshape(
                self.n_way * self.k_spt, 1, self.imgsize,
                self.imgsize)[perm]  # [5,1,1,28,28] => [5,1,28,28]
            y_spt = np.array(y_spt).reshape(self.n_way *
                                            self.k_spt)[perm]  # => [5,]
            perm = np.random.permutation(self.n_way * self.k_query)
            x_qry = np.array(x_qry).reshape(
                self.n_way * self.k_query, 1, self.imgsize,
                self.imgsize)[perm]  # [5,15,1,28,28] => [75,1,28,28]
            y_qry = np.array(y_qry).reshape(
                self.n_way * self.k_query)[perm]  # [5,15] => [75,]
            # CLEANUP: the values are already ndarrays; the original's extra
            # np.array() wrap before astype() was a redundant copy.
            yield [x_spt.astype("float32"), y_spt.astype("int64"),
                   x_qry.astype("float32"), y_qry.astype("int64")]
from __future__ import print_function
import numpy as np
from paddle.io import IterableDataset
import cv2
import os
class RecDataset(IterableDataset):
def __init__(self, file_list, config):
super(RecDataset, self).__init__()
self.file_list = file_list
self.config = config
self.n_way = 5
self.k_spt = 1
self.k_query = 15
self.imgsize = 28
np.random.seed(12345)
character_folders = [
os.path.join(family, character) for family in self.file_list
if os.path.isdir(family) for character in os.listdir(family)
]
imgs_list = []
for char_fold in character_folders:
char_list = []
for file in [
os.path.join(char_fold, f) for f in os.listdir(char_fold)
]:
img = cv2.imread(file)
img = cv2.resize(img, (28, 28))
img = np.transpose(img, (2, 0, 1))
img = img[0].astype('float32') # 只取零通道
img = img / 255.0
img = img * 2.0 - 1.0
char_list.append(img)
char_list = np.array(char_list)
imgs_list.append(char_list)
self.train_imgs = np.array(imgs_list)
self.train_imgs = self.train_imgs[:, :, np.newaxis, :, :]
#print('The shape of self.train_imgs: {}'.format(self.train_imgs.shape)) # [973,20,1,28,28]
def __iter__(self):
full_lines = []
self.data = []
for i in range(3200):
x_spt, y_spt, x_qry, y_qry = [], [], [], []
selected_cls = np.random.choice(
self.train_imgs.shape[0], self.n_way, replace=False)
for j, cur_class in enumerate(selected_cls):
selected_img = np.random.choice(
20, self.k_spt + self.k_query, replace=False)
# 构造support集和query集
x_spt.append(self.train_imgs[cur_class][
selected_img[:self.k_spt]])
x_qry.append(self.train_imgs[cur_class][selected_img[
self.k_spt:]])
y_spt.append([j for _ in range(self.k_spt)])
y_qry.append([j for _ in range(self.k_query)])
perm = np.random.permutation(self.n_way * self.k_spt)
x_spt = np.array(x_spt).reshape(
self.n_way * self.k_spt, 1, self.imgsize,
self.imgsize)[perm] # [5,1,1,28,28]=>[5,1,28,28]
y_spt = np.array(y_spt).reshape(self.n_way *
self.k_spt)[perm] # [5,1]=>[5,]
perm = np.random.permutation(self.n_way * self.k_query)
x_qry = np.array(x_qry).reshape(
self.n_way * self.k_query, 1, self.imgsize,
self.imgsize)[perm] # [5,15,1,28,28]=>[75,1,28,28]
y_qry = np.array(y_qry).reshape(
self.n_way * self.k_query)[perm] # [5,15]=>[75,]
output_list = []
output_list.append(np.array(x_spt).astype("float32"))
output_list.append(np.array(y_spt).astype("int64"))
output_list.append(np.array(x_qry).astype("float32"))
output_list.append(np.array(y_qry).astype("int64"))
yield output_list | 0.295738 | 0.132374 |
from typing import TYPE_CHECKING, Dict
import anyio.abc
from .component import Component
if TYPE_CHECKING:
from ..base import ComponentInteraction
__all__ = ('ComponentHandler',)
class ComponentHandler:
    """Handler for components, dispatching waiting components.

    Attributes:
        components:
            A dictionary of interaction IDs or message IDs to the component.
    """

    # Keys are either the interaction ID of the original response or the
    # message ID of a followup message.
    components: Dict[int, Component]

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.components = {}

    def handle_component(
        self,
        interaction: 'ComponentInteraction',
        *,
        tg: anyio.abc.TaskGroup
    ) -> None:
        """Handle the component, setting waiting events and calling callbacks.

        Lookup is attempted by message ID first and then by the originating
        interaction's ID, because the message ID of the original response is
        not known in advance.

        Parameters:
            interaction: The interaction a component should handle.
            tg: Task group to launch callbacks with.
        """
        target = self.components.get(int(interaction.message['id']))
        if not target:
            origin = interaction.message.get('interaction')
            if origin:
                target = self.components.get(int(origin['id']))
        if target is None:
            # No component registered for this interaction.
            return
        target.handle_interaction(interaction, tg=tg)

    def add_component(self, snowflake: int, component: Component) -> None:
        """Register a component to dispatch when an interaction is received.

        An existing component under the same snowflake is replaced by the
        passed component.

        Parameters:
            snowflake: An interaction ID or message ID fitting.
            component: Component to add that will be called to handle.
        """
        self.components[snowflake] = component
import anyio.abc
from .component import Component
if TYPE_CHECKING:
from ..base import ComponentInteraction
__all__ = ('ComponentHandler',)
class ComponentHandler:
"""Handler for components, dispatching waiting components.
Attributes:
components:
A dictionary of interaction IDs or message IDs to the component.
"""
# This is a dictionary with the key either being the interaction ID for
# the original response, or a message ID for followup messages
components: Dict[int, Component]
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.components = {}
def handle_component(
self,
interaction: 'ComponentInteraction',
*,
tg: anyio.abc.TaskGroup
) -> None:
"""Handle the component, setting waiting events and calling callbacks.
The lookup order here is first for message ID, then by interaction ID.
This because it is not know the ID of the message that the original
response created.
Parameters:
interaction: The interaction a component should handle.
tg: Task group to launch callbacks with.
"""
component = self.components.get(int(interaction.message['id']))
if not component:
interact = interaction.message.get('interaction')
if interact:
component = self.components.get(int(interact['id']))
if component is None:
# We know no components for this interaction
return
component.handle_interaction(interaction, tg=tg)
def add_component(self, snowflake: int, component: Component) -> None:
"""Add a component to be dispatched when an interaction is received.
If there is an existing component for the snowflake, it will be
replaced with the passed component.
Parameters:
snowflake: An interaction ID or message ID fitting.
component: Component to add that will be called to handle.
"""
self.components[snowflake] = component | 0.889852 | 0.199503 |
from msrest.serialization import Model
# NOTE: this generated class deliberately shadows the imported
# msrest.serialization.Model; the base-class reference is resolved before the
# new name is bound, so inheritance still targets the msrest base.
class Model(Model):
    """An Azure Machine Learning Model.

    :param id: The Model Id.
    :type id: str
    :param name: The Model name.
    :type name: str
    :param framework: The Model framework.
    :type framework: str
    :param framework_version: The Model framework version.
    :type framework_version: str
    :param version: The Model version assigned by Model Management Service.
    :type version: long
    :param datasets: The list of datasets associated with the model.
    :type datasets: list[~_restclient.models.DatasetReference]
    :param url: The URL of the Model. Usually a SAS URL.
    :type url: str
    :param mime_type: The MIME type of Model content. For more details about
     MIME type, please open
     https://www.iana.org/assignments/media-types/media-types.xhtml
    :type mime_type: str
    :param description: The Model description text.
    :type description: str
    :param created_time: The Model creation time (UTC).
    :type created_time: datetime
    :param modified_time: The Model last modified time (UTC).
    :type modified_time: datetime
    :param unpack: Indicates whether we need to unpack the Model during docker
     Image creation.
    :type unpack: bool
    :param parent_model_id: The Parent Model Id.
    :type parent_model_id: str
    :param run_id: The RunId that created this model.
    :type run_id: str
    :param experiment_name: The name of the experiment where this model was
     created.
    :type experiment_name: str
    :param kv_tags: The Model tag dictionary. Items are mutable.
    :type kv_tags: dict[str, str]
    :param properties: The Model property dictionary. Properties are
     immutable.
    :type properties: dict[str, str]
    :param derived_model_ids: Models derived from this model
    :type derived_model_ids: list[str]
    :param sample_input_data: Sample Input Data for the Model. A reference to
     a dataset in the workspace in the format aml://dataset/{datasetId}
    :type sample_input_data: str
    :param sample_output_data: Sample Output Data for the Model. A reference
     to a dataset in the workspace in the format aml://dataset/{datasetId}
    :type sample_output_data: str
    :param resource_requirements: Resource requirements for the model
    :type resource_requirements: ~_restclient.models.ModelResourceRequirements
    :param created_by: The User who created this entity.
    :type created_by: ~_restclient.models.ModelCreatedBy
    """

    # Fields the msrest serializer requires to be present.
    _validation = {
        'name': {'required': True},
        'url': {'required': True},
        'mime_type': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type codes.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'framework': {'key': 'framework', 'type': 'str'},
        'framework_version': {'key': 'frameworkVersion', 'type': 'str'},
        'version': {'key': 'version', 'type': 'long'},
        'datasets': {'key': 'datasets', 'type': '[DatasetReference]'},
        'url': {'key': 'url', 'type': 'str'},
        'mime_type': {'key': 'mimeType', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
        'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
        'unpack': {'key': 'unpack', 'type': 'bool'},
        'parent_model_id': {'key': 'parentModelId', 'type': 'str'},
        'run_id': {'key': 'runId', 'type': 'str'},
        'experiment_name': {'key': 'experimentName', 'type': 'str'},
        'kv_tags': {'key': 'kvTags', 'type': '{str}'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'derived_model_ids': {'key': 'derivedModelIds', 'type': '[str]'},
        'sample_input_data': {'key': 'sampleInputData', 'type': 'str'},
        'sample_output_data': {'key': 'sampleOutputData', 'type': 'str'},
        'resource_requirements': {'key': 'resourceRequirements', 'type': 'ModelResourceRequirements'},
        'created_by': {'key': 'createdBy', 'type': 'ModelCreatedBy'},
    }

    def __init__(self, name, url, mime_type, id=None, framework=None, framework_version=None, version=None, datasets=None, description=None, created_time=None, modified_time=None, unpack=None, parent_model_id=None, run_id=None, experiment_name=None, kv_tags=None, properties=None, derived_model_ids=None, sample_input_data=None, sample_output_data=None, resource_requirements=None, created_by=None):
        """Store all model attributes; see the class docstring for details."""
        super(Model, self).__init__()
        self.id = id
        self.name = name
        self.framework = framework
        self.framework_version = framework_version
        self.version = version
        self.datasets = datasets
        self.url = url
        self.mime_type = mime_type
        self.description = description
        self.created_time = created_time
        self.modified_time = modified_time
        self.unpack = unpack
        self.parent_model_id = parent_model_id
        self.run_id = run_id
        self.experiment_name = experiment_name
        self.kv_tags = kv_tags
        self.properties = properties
        self.derived_model_ids = derived_model_ids
        self.sample_input_data = sample_input_data
        self.sample_output_data = sample_output_data
        self.resource_requirements = resource_requirements
        self.created_by = created_by
from msrest.serialization import Model
# NOTE: autogenerated AutoRest/msrest client model. The class statement
# deliberately shadows the imported msrest.serialization.Model base, which is
# standard generated-code style; do not rename it by hand.
class Model(Model):
    """An Azure Machine Learning Model.
    :param id: The Model Id.
    :type id: str
    :param name: The Model name.
    :type name: str
    :param framework: The Model framework.
    :type framework: str
    :param framework_version: The Model framework version.
    :type framework_version: str
    :param version: The Model version assigned by Model Management Service.
    :type version: long
    :param datasets: The list of datasets associated with the model.
    :type datasets: list[~_restclient.models.DatasetReference]
    :param url: The URL of the Model. Usually a SAS URL.
    :type url: str
    :param mime_type: The MIME type of Model content. For more details about
     MIME type, please open
     https://www.iana.org/assignments/media-types/media-types.xhtml
    :type mime_type: str
    :param description: The Model description text.
    :type description: str
    :param created_time: The Model creation time (UTC).
    :type created_time: datetime
    :param modified_time: The Model last modified time (UTC).
    :type modified_time: datetime
    :param unpack: Indicates whether we need to unpack the Model during docker
     Image creation.
    :type unpack: bool
    :param parent_model_id: The Parent Model Id.
    :type parent_model_id: str
    :param run_id: The RunId that created this model.
    :type run_id: str
    :param experiment_name: The name of the experiment where this model was
     created.
    :type experiment_name: str
    :param kv_tags: The Model tag dictionary. Items are mutable.
    :type kv_tags: dict[str, str]
    :param properties: The Model property dictionary. Properties are
     immutable.
    :type properties: dict[str, str]
    :param derived_model_ids: Models derived from this model
    :type derived_model_ids: list[str]
    :param sample_input_data: Sample Input Data for the Model. A reference to
     a dataset in the workspace in the format aml://dataset/{datasetId}
    :type sample_input_data: str
    :param sample_output_data: Sample Output Data for the Model. A reference
     to a dataset in the workspace in the format aml://dataset/{datasetId}
    :type sample_output_data: str
    :param resource_requirements: Resource requirements for the model
    :type resource_requirements: ~_restclient.models.ModelResourceRequirements
    :param created_by: The User who created this entity.
    :type created_by: ~_restclient.models.ModelCreatedBy
    """
    # Fields msrest requires to be present when validating/serializing.
    _validation = {
        'name': {'required': True},
        'url': {'required': True},
        'mime_type': {'required': True},
    }
    # Maps python attribute name -> wire key and msrest type tag; consumed by
    # the msrest serializer/deserializer, so keys must match the REST contract.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'framework': {'key': 'framework', 'type': 'str'},
        'framework_version': {'key': 'frameworkVersion', 'type': 'str'},
        'version': {'key': 'version', 'type': 'long'},
        'datasets': {'key': 'datasets', 'type': '[DatasetReference]'},
        'url': {'key': 'url', 'type': 'str'},
        'mime_type': {'key': 'mimeType', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
        'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
        'unpack': {'key': 'unpack', 'type': 'bool'},
        'parent_model_id': {'key': 'parentModelId', 'type': 'str'},
        'run_id': {'key': 'runId', 'type': 'str'},
        'experiment_name': {'key': 'experimentName', 'type': 'str'},
        'kv_tags': {'key': 'kvTags', 'type': '{str}'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'derived_model_ids': {'key': 'derivedModelIds', 'type': '[str]'},
        'sample_input_data': {'key': 'sampleInputData', 'type': 'str'},
        'sample_output_data': {'key': 'sampleOutputData', 'type': 'str'},
        'resource_requirements': {'key': 'resourceRequirements', 'type': 'ModelResourceRequirements'},
        'created_by': {'key': 'createdBy', 'type': 'ModelCreatedBy'},
    }
    def __init__(self, name, url, mime_type, id=None, framework=None, framework_version=None, version=None, datasets=None, description=None, created_time=None, modified_time=None, unpack=None, parent_model_id=None, run_id=None, experiment_name=None, kv_tags=None, properties=None, derived_model_ids=None, sample_input_data=None, sample_output_data=None, resource_requirements=None, created_by=None):
        """Initialize the model; see the class docstring for argument meanings."""
        super(Model, self).__init__()
        self.id = id
        self.name = name
        self.framework = framework
        self.framework_version = framework_version
        self.version = version
        self.datasets = datasets
        self.url = url
        self.mime_type = mime_type
        self.description = description
        self.created_time = created_time
        self.modified_time = modified_time
        self.unpack = unpack
        self.parent_model_id = parent_model_id
        self.run_id = run_id
        self.experiment_name = experiment_name
        self.kv_tags = kv_tags
        self.properties = properties
        self.derived_model_ids = derived_model_ids
        self.sample_input_data = sample_input_data
        self.sample_output_data = sample_output_data
        self.resource_requirements = resource_requirements
        # NOTE(review): the trailing "| 0.827271 | 0.484868" below is dataset
        # extraction residue fused onto the last source line of this record.
        self.created_by = created_by | 0.827271 | 0.484868
import abc
import configparser
import datetime
import logging
from typing import Any, Dict, Union
import requests_cache
from ..consts import CACHE_PATH, CONFIG, USE_CACHE
LOGGER = logging.getLogger(__name__)
class AbstractProvider(abc.ABC):
    """
    Abstract class to indicate what other providers should provide
    """
    # Provider identifier used for hashing/caching; concrete classes assign it.
    class_id: str
    # HTTP headers attached to requests issued through this provider.
    session_header: Dict[str, str]
    # Date stamp (YYYY-MM-DD) captured once, at class-definition time.
    today_date: str = datetime.datetime.today().strftime("%Y-%m-%d")
    def __init__(self, headers: Dict[str, str]):
        super().__init__()
        self.class_id = ""
        self.session_header = headers
        # Installs the shared requests cache when caching is enabled.
        self.__install_cache()
    # Abstract Methods
    @abc.abstractmethod
    def _build_http_header(self) -> Dict[str, str]:
        """
        Construct the HTTP authorization header
        :return: Authorization header
        """
    @abc.abstractmethod
    def download(self, url: str, params: Dict[str, Union[str, int]] = None) -> Any:
        """
        Download an object from a service using appropriate authentication protocols
        :param url: URL to download content from
        :param params: Options to give to the GET request
        """
    # Class Methods
    @classmethod
    def get_class_name(cls) -> str:
        """
        Get the name of the calling class
        :return: Calling class name
        """
        return cls.__name__
    @classmethod
    def get_class_id(cls) -> str:
        """
        Grab the class ID for hashing purposes
        :return Class ID
        """
        return cls.class_id
    @staticmethod
    def get_configs() -> configparser.ConfigParser:
        """
        Parse the config for this specific setup
        :return: Parsed config file
        """
        return CONFIG
    @staticmethod
    def log_download(response: Any) -> None:
        """
        Log how the URL was acquired
        :param response: Response from Server
        """
        # `from_cache` is only meaningful on cached sessions, hence the guard.
        LOGGER.debug(
            f"Downloaded {response.url} (Cache = {response.from_cache if USE_CACHE else False})"
        )
    # Private Methods
    def __install_cache(self) -> None:
        """
        Initiate the MTGJSON cache for requests
        (Useful for development and re-running often)
        """
        if USE_CACHE:
            CACHE_PATH.mkdir(exist_ok=True)
            # One cache file per concrete provider class name.
            requests_cache.install_cache(
                str(CACHE_PATH.joinpath(self.get_class_name()))
            # NOTE(review): trailing "| mtgjson5/providers/abstract.py | import abc"
            # on the next line is dataset extraction residue fused onto the source.
            ) | mtgjson5/providers/abstract.py | import abc
import configparser
import datetime
import logging
from typing import Any, Dict, Union
import requests_cache
from ..consts import CACHE_PATH, CONFIG, USE_CACHE
LOGGER = logging.getLogger(__name__)
class AbstractProvider(abc.ABC):
    """
    Base class spelling out the contract every concrete provider implements.
    """
    class_id: str
    session_header: Dict[str, str]
    today_date: str = datetime.datetime.today().strftime("%Y-%m-%d")
    def __init__(self, headers: Dict[str, str]):
        super().__init__()
        self.class_id = ""
        self.session_header = headers
        self.__install_cache()
    # -- Contract (must be implemented by subclasses) --
    @abc.abstractmethod
    def _build_http_header(self) -> Dict[str, str]:
        """
        Construct the HTTP authorization header
        :return: Authorization header
        """
    @abc.abstractmethod
    def download(self, url: str, params: Dict[str, Union[str, int]] = None) -> Any:
        """
        Download an object from a service using appropriate authentication protocols
        :param url: URL to download content from
        :param params: Options to give to the GET request
        """
    # -- Introspection helpers --
    @classmethod
    def get_class_name(cls) -> str:
        """Name of the concrete class, taken straight from the type object."""
        return cls.__name__
    @classmethod
    def get_class_id(cls) -> str:
        """Class ID used for hashing purposes."""
        return cls.class_id
    @staticmethod
    def get_configs() -> configparser.ConfigParser:
        """Parsed configuration for this specific setup."""
        return CONFIG
    @staticmethod
    def log_download(response: Any) -> None:
        """Log how the given server response was acquired."""
        LOGGER.debug(
            f"Downloaded {response.url} (Cache = {response.from_cache if USE_CACHE else False})"
        )
    # -- Internals --
    def __install_cache(self) -> None:
        """Enable the requests cache when configured (handy while developing)."""
        if not USE_CACHE:
            return
        CACHE_PATH.mkdir(exist_ok=True)
        # One cache file per concrete provider class name.
        requests_cache.install_cache(str(CACHE_PATH.joinpath(self.get_class_name())))
from os.path import abspath, basename, join, dirname
from seisflows.tools import unix
from seisflows.tools.code import call, findpath, saveobj
from seisflows.tools.config import ParameterError, custom_import, \
SeisflowsParameters, SeisflowsPaths
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class pbs_sm(custom_import('system', 'mpi')):
    """ An interface through which to submit workflows, run tasks in serial or
      parallel, and perform other system functions.

      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      Intermediate files are written to a global scratch path PATH.SCRATCH,
      which must be accessible to all compute nodes.

      Optionally, users can provide a local scratch path PATH.LOCAL if each
      compute node has its own local filesystem.

      For important additional information, please see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-configuration
    """

    def check(self):
        """ Checks parameters and paths
        """
        super(pbs_sm, self).check()
        # check parameters; WALLTIME is in minutes, MEMORY in gigabytes
        if 'WALLTIME' not in PAR:
            setattr(PAR, 'WALLTIME', 30.)
        if 'MEMORY' not in PAR:
            setattr(PAR, 'MEMORY', 0)
        if 'NODESIZE' not in PAR:
            raise ParameterError(PAR, 'NODESIZE')
        if 'PBSARGS' not in PAR:
            setattr(PAR, 'PBSARGS', '')

    def submit(self, workflow):
        """ Submits the workflow to the PBS scheduler via `qsub`.

        Builds the `-l` resource request (walltime, memory, node/ppn layout)
        from PAR and launches the seisflows submit wrapper in PATH.OUTPUT.
        """
        unix.mkdir(PATH.OUTPUT)
        unix.cd(PATH.OUTPUT)
        # save current state so the remote wrapper can restore it
        self.checkpoint()
        # construct resource list
        resources = []
        nodes = int(PAR.NTASK / PAR.NODESIZE)
        cores = PAR.NTASK % PAR.NODESIZE
        hours = int(PAR.WALLTIME / 60)
        minutes = PAR.WALLTIME % 60
        if PAR.WALLTIME:
            resources += ['walltime=%02d:%02d:00' % (hours, minutes)]
        if PAR.MEMORY:
            resources += ['mem=%dgb' % PAR.MEMORY]
        if nodes == 0:
            # everything fits on a single (partial) node
            resources += ['nodes=1:ppn=%d' % (cores)]
        elif cores == 0:
            # an exact multiple of the node size
            resources += ['nodes=%d:ppn=%d' % (nodes, PAR.NODESIZE)]
        else:
            # full nodes plus one partial node for the remainder
            resources += ['nodes=%d:ppn=%d+1:ppn=%d' % (nodes, PAR.NODESIZE, cores)]
        # construct arguments list
        # BUG FIX: was `resources.join(',')` -- lists have no join() method,
        # which raised AttributeError at submit time; str.join is correct.
        call('qsub '
                + '%s ' % PAR.PBSARGS
                + '-N %s ' % PAR.TITLE
                + '-o %s ' % (PATH.SUBMIT + '/' + 'output.log')
                + '-l %s ' % ','.join(resources)
                + '-j %s ' % 'oe'
                + findpath('seisflows.system') + '/' + 'wrappers/submit '
                + '-F %s ' % PATH.OUTPUT)
from seisflows.tools import unix
from seisflows.tools.code import call, findpath, saveobj
from seisflows.tools.config import ParameterError, custom_import, \
SeisflowsParameters, SeisflowsPaths
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class pbs_sm(custom_import('system', 'mpi')):
    """ An interface through which to submit workflows, run tasks in serial or
      parallel, and perform other system functions.

      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      Intermediate files are written to a global scratch path PATH.SCRATCH,
      which must be accessible to all compute nodes.

      Optionally, users can provide a local scratch path PATH.LOCAL if each
      compute node has its own local filesystem.

      For important additional information, please see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-configuration
    """

    def check(self):
        """ Checks parameters and paths
        """
        super(pbs_sm, self).check()
        # check parameters; WALLTIME is in minutes, MEMORY in gigabytes
        if 'WALLTIME' not in PAR:
            setattr(PAR, 'WALLTIME', 30.)
        if 'MEMORY' not in PAR:
            setattr(PAR, 'MEMORY', 0)
        if 'NODESIZE' not in PAR:
            raise ParameterError(PAR, 'NODESIZE')
        if 'PBSARGS' not in PAR:
            setattr(PAR, 'PBSARGS', '')

    def submit(self, workflow):
        """ Submits the workflow to the PBS scheduler via `qsub`.

        Builds the `-l` resource request (walltime, memory, node/ppn layout)
        from PAR and launches the seisflows submit wrapper in PATH.OUTPUT.
        """
        unix.mkdir(PATH.OUTPUT)
        unix.cd(PATH.OUTPUT)
        # save current state so the remote wrapper can restore it
        self.checkpoint()
        # construct resource list
        resources = []
        nodes = int(PAR.NTASK / PAR.NODESIZE)
        cores = PAR.NTASK % PAR.NODESIZE
        hours = int(PAR.WALLTIME / 60)
        minutes = PAR.WALLTIME % 60
        if PAR.WALLTIME:
            resources += ['walltime=%02d:%02d:00' % (hours, minutes)]
        if PAR.MEMORY:
            resources += ['mem=%dgb' % PAR.MEMORY]
        if nodes == 0:
            # everything fits on a single (partial) node
            resources += ['nodes=1:ppn=%d' % (cores)]
        elif cores == 0:
            # an exact multiple of the node size
            resources += ['nodes=%d:ppn=%d' % (nodes, PAR.NODESIZE)]
        else:
            # full nodes plus one partial node for the remainder
            resources += ['nodes=%d:ppn=%d+1:ppn=%d' % (nodes, PAR.NODESIZE, cores)]
        # construct arguments list
        # BUG FIX: was `resources.join(',')` -- lists have no join() method,
        # which raised AttributeError at submit time; str.join is correct.
        call('qsub '
                + '%s ' % PAR.PBSARGS
                + '-N %s ' % PAR.TITLE
                + '-o %s ' % (PATH.SUBMIT + '/' + 'output.log')
                + '-l %s ' % ','.join(resources)
                + '-j %s ' % 'oe'
                + findpath('seisflows.system') + '/' + 'wrappers/submit '
                + '-F %s ' % PATH.OUTPUT)
from devsetgo_lib.file_functions import save_json
from starlette.testclient import TestClient
from src.core.gen_user import user_test_info
from src.main import app
client = TestClient(app)
directory_to__files: str = "data"
def test_users_post_error(bearer_session):
    """User creation with a malformed payload is rejected with HTTP 422.

    NOTE(review): the committed source contained redaction placeholders
    (`<PASSWORD>`, a duplicated "password" key, and a broken f-string) that
    made it syntactically invalid; the values below are a reconstruction --
    confirm against the original test data.
    """
    test_password = "testPassword1!"
    user_name = "test-user-fail"
    test_data = {
        "user_name": user_name,  # snake_case key on purpose: API expects "userName"
        "password": test_password,
        "passwordTwo": f"{test_password}-mismatch",  # mismatched confirmation keeps payload invalid
        "email": "test@example.com",
        "notes": "Gumbo beet greens corn soko endive gumbo gourd. ",
    }
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    response = client.post(url, json=test_data, headers=headers)
    assert response.status_code == 422
def test_users_post_error_email(bearer_session):
    """User creation with an invalid email address is rejected with HTTP 422.

    NOTE(review): redacted `<PASSWORD>`/`<EMAIL>` placeholders reconstructed;
    the malformed email is the intended trigger for the 422 response.
    """
    test_password = "testPassword1!"
    user_name = "test-user-fail"
    test_data = {
        "userName": user_name,
        "password": test_password,
        "passwordTwo": test_password,
        "email": "bob@not-a-valid-email",  # invalid domain part on purpose
        "notes": "Gumbo beet greens corn soko endive gumbo gourd. ",
    }
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    response = client.post(url, json=test_data, headers=headers)
    assert response.status_code == 422
def test_users_post(bearer_session):
    """Create a user successfully (201) and persist its credentials for later tests."""
    test_user = user_test_info()
    save_json("test_data_test_user.json", test_user)
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    response = client.post(url, json=test_user, headers=headers)
    assert response.status_code == 201
    data = response.json()
    user_data = {
        "id": data["id"],
        "userName": data["user_name"],
        # Reconstructed from the redacted `<PASSWORD>["password"]` -- the only
        # plausible subscripted name in scope is the response payload `data`.
        "password": data["password"],
    }
    save_json("test_data_users.json", user_data)
def test_users_post_two(bearer_session):
    """Create two more users through the API, archiving each generated payload."""
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    for p in range(2):
        payload = user_test_info()
        response = client.post(url, json=payload, headers=headers)
        save_json(f"test_user_{p}.json", payload)
        assert response.status_code == 201
from devsetgo_lib.file_functions import save_json
from starlette.testclient import TestClient
from src.core.gen_user import user_test_info
from src.main import app
client = TestClient(app)
directory_to__files: str = "data"
def test_users_post_error(bearer_session):
    """User creation with a malformed payload is rejected with HTTP 422.

    NOTE(review): the committed source contained redaction placeholders
    (`<PASSWORD>`, a duplicated "password" key, and a broken f-string) that
    made it syntactically invalid; the values below are a reconstruction --
    confirm against the original test data.
    """
    test_password = "testPassword1!"
    user_name = "test-user-fail"
    test_data = {
        "user_name": user_name,  # snake_case key on purpose: API expects "userName"
        "password": test_password,
        "passwordTwo": f"{test_password}-mismatch",  # mismatched confirmation keeps payload invalid
        "email": "test@example.com",
        "notes": "Gumbo beet greens corn soko endive gumbo gourd. ",
    }
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    response = client.post(url, json=test_data, headers=headers)
    assert response.status_code == 422
def test_users_post_error_email(bearer_session):
    """User creation with an invalid email address is rejected with HTTP 422.

    NOTE(review): redacted `<PASSWORD>`/`<EMAIL>` placeholders reconstructed;
    the malformed email is the intended trigger for the 422 response.
    """
    test_password = "testPassword1!"
    user_name = "test-user-fail"
    test_data = {
        "userName": user_name,
        "password": test_password,
        "passwordTwo": test_password,
        "email": "bob@not-a-valid-email",  # invalid domain part on purpose
        "notes": "Gumbo beet greens corn soko endive gumbo gourd. ",
    }
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    response = client.post(url, json=test_data, headers=headers)
    assert response.status_code == 422
def test_users_post(bearer_session):
    """Create a user successfully (201) and persist its credentials for later tests."""
    test_user = user_test_info()
    save_json("test_data_test_user.json", test_user)
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    response = client.post(url, json=test_user, headers=headers)
    assert response.status_code == 201
    data = response.json()
    user_data = {
        "id": data["id"],
        "userName": data["user_name"],
        # Reconstructed from the redacted `<PASSWORD>["password"]` -- the only
        # plausible subscripted name in scope is the response payload `data`.
        "password": data["password"],
    }
    save_json("test_data_users.json", user_data)
def test_users_post_two(bearer_session):
    """Create two more users through the API, archiving each generated payload."""
    url = "/api/v1/users/create"
    headers = {"Authorization": "Bearer " + bearer_session}
    for p in range(2):
        payload = user_test_info()
        response = client.post(url, json=payload, headers=headers)
        save_json(f"test_user_{p}.json", payload)
        assert response.status_code == 201
import numpy as np
from pupil.models.clustering import FaissKMeansClustering
class RepresentativeSampler:
    """
    Rank unlabeled items by how representative they are of the unlabeled
    pool compared with the existing training pool.

    Two k-means models are fit independently (one per pool). An item scores
    highly when it lies close to a centroid of the unlabeled pool but far
    from every centroid of the training pool, which yields a more diverse
    selection than representative sampling alone.
    """

    def __init__(self, n_clusters):
        # One clustering model per pool; both share the same cluster count.
        self.clustering_training_model = FaissKMeansClustering(n_clusters)
        self.clustering_unlabeld_model = FaissKMeansClustering(n_clusters)

    def _fit(self, training_data, unlabeled_data):
        # Fit each pool's model on its own data only.
        for model, data in (
            (self.clustering_training_model, training_data),
            (self.clustering_unlabeld_model, unlabeled_data),
        ):
            model.fit(data)

    def _dist_to_cluster_center(self, model, data):
        # Keep only the distance to the single nearest centroid.
        nearest_dists, _ = model.distance_to_cluster_centers(data)
        return nearest_dists[:, 0]

    def representativeness_score(self, unlabeled_data):
        """Per-item score: distance to nearest unlabeled centroid minus
        distance to nearest training centroid."""
        to_training = self._dist_to_cluster_center(
            self.clustering_training_model, unlabeled_data
        )
        to_unlabeled = self._dist_to_cluster_center(
            self.clustering_unlabeld_model, unlabeled_data
        )
        return to_unlabeled - to_training

    def fit(self, training_data, unlabeled_data):
        """Fit both clusterings and store score-sorted indices in `indices_`."""
        self._fit(training_data, unlabeled_data)
        self.indices_ = np.argsort(self.representativeness_score(unlabeled_data))
if __name__ == "__main__":
    # Ad-hoc smoke test: build two synthetic 2-D blob datasets and print the
    # resulting representativeness ordering.
    from sklearn.datasets import make_blobs
    # Training pool: two well-separated blobs.
    centers = [[2, 2], [-2, -2]]
    train, labels_true = make_blobs(  # type: ignore
        n_samples=50, centers=centers, cluster_std=0.1, random_state=42
    )
    # "Unlabeled" pool: one blob overlaps training, one does not.
    centers = [[2, 2], [-2, 2]]
    test, labels_true = make_blobs(
        n_samples=10, centers=centers, cluster_std=0.1, random_state=42
    )
    print("teeee")
    print(labels_true)
    sampler = RepresentativeSampler(n_clusters=2)
    sampler.fit(train, test)
    # NOTE(review): trailing "| pupil/sampling/representative.py | import numpy as np"
    # on the next line is dataset extraction residue fused onto the source.
    print(sampler.indices_) | pupil/sampling/representative.py | import numpy as np
from pupil.models.clustering import FaissKMeansClustering
class RepresentativeSampler:
    """
    Rank unlabeled items by how representative they are of the unlabeled
    pool compared with the existing training pool.

    Two k-means models are fit independently (one per pool). An item scores
    highly when it lies close to a centroid of the unlabeled pool but far
    from every centroid of the training pool, which yields a more diverse
    selection than representative sampling alone.
    """

    def __init__(self, n_clusters):
        # One clustering model per pool; both share the same cluster count.
        self.clustering_training_model = FaissKMeansClustering(n_clusters)
        self.clustering_unlabeld_model = FaissKMeansClustering(n_clusters)

    def _fit(self, training_data, unlabeled_data):
        # Fit each pool's model on its own data only.
        for model, data in (
            (self.clustering_training_model, training_data),
            (self.clustering_unlabeld_model, unlabeled_data),
        ):
            model.fit(data)

    def _dist_to_cluster_center(self, model, data):
        # Keep only the distance to the single nearest centroid.
        nearest_dists, _ = model.distance_to_cluster_centers(data)
        return nearest_dists[:, 0]

    def representativeness_score(self, unlabeled_data):
        """Per-item score: distance to nearest unlabeled centroid minus
        distance to nearest training centroid."""
        to_training = self._dist_to_cluster_center(
            self.clustering_training_model, unlabeled_data
        )
        to_unlabeled = self._dist_to_cluster_center(
            self.clustering_unlabeld_model, unlabeled_data
        )
        return to_unlabeled - to_training

    def fit(self, training_data, unlabeled_data):
        """Fit both clusterings and store score-sorted indices in `indices_`."""
        self._fit(training_data, unlabeled_data)
        self.indices_ = np.argsort(self.representativeness_score(unlabeled_data))
if __name__ == "__main__":
    # Ad-hoc smoke test: build two synthetic 2-D blob datasets and print the
    # resulting representativeness ordering.
    from sklearn.datasets import make_blobs
    # Training pool: two well-separated blobs.
    centers = [[2, 2], [-2, -2]]
    train, labels_true = make_blobs(  # type: ignore
        n_samples=50, centers=centers, cluster_std=0.1, random_state=42
    )
    # "Unlabeled" pool: one blob overlaps training, one does not.
    centers = [[2, 2], [-2, 2]]
    test, labels_true = make_blobs(
        n_samples=10, centers=centers, cluster_std=0.1, random_state=42
    )
    print("teeee")
    print(labels_true)
    sampler = RepresentativeSampler(n_clusters=2)
    sampler.fit(train, test)
    # NOTE(review): trailing "| 0.816113 | 0.596991" on the next line is
    # dataset extraction residue fused onto the source.
    print(sampler.indices_) | 0.816113 | 0.596991
import os

# Test fixture setup: either build entities on a cache/mock server or use
# known fixture ids on a real server.
if False:  # flip to exercise the real Shotgun registry connection
    from shotgun_api3_registry import connect
    sg = connect(use_cache=False)
else:
    from tests import Shotgun
    url = 'http://127.0.0.1:8010'
    # NOTE(review): env var is spelled with a lowercase tail
    # ('SGCACHE_SHOTGUN_SCRIPT_name') -- looks like a typo for
    # ..._SCRIPT_NAME, but kept as-is to avoid breaking existing setups.
    sg = Shotgun(url,
        os.environ.get('SGCACHE_SHOTGUN_SCRIPT_name', 'script_name'),
        os.environ.get('SGCACHE_SHOTGUN_API_KEY', 'api_key'),
    )
if sg.server_info.get('sgcache') or sg.server_info.get('sgmock'):
    # Talking to a cache/mock: build the fixtures from scratch.
    sg.clear()
    SHOT = sg.create('Shot', {'code': 'multi_entity_test'})
    USER = sg.create('HumanUser', {'first_name': 'multi_entity_user'})
    GRP1 = sg.create('Group', {'code': 'multi_entity_group1'})
    GRP2 = sg.create('Group', {'code': 'multi_entity_group2'})
    sg.create('Task', {'entity': SHOT, 'content': 'both', 'task_assignees': [USER, GRP1]})
    sg.create('Task', {'entity': SHOT, 'content': 'user', 'task_assignees': [USER]})
    sg.create('Task', {'entity': SHOT, 'content': 'group', 'task_assignees': [GRP1]})
    sg.create('Task', {'entity': SHOT, 'content': 'none', 'task_assignees': []})
else:
    # Talking to a real server: use known fixture ids.
    SHOT = {'type': 'Shot', 'id': 10891}
    AA = {'type': 'Asset', 'id': 1008}
    AB = {'type': 'Asset', 'id': 1009}
    AC = {'type': 'Asset', 'id': 1010}
    USER = {'type': 'HumanUser', 'id': 108}
    GRP1 = {'type': 'Group', 'id': 11}
    # BUG FIX: the second binding was written as GRP1 again, silently
    # shadowing the first group; it mirrors GRP2 in the branch above.
    GRP2 = {'type': 'Group', 'id': 13}
def find(filters):
    """Query Task entities constrained to SHOT, returning their `content`."""
    constrained = list(filters) + [('entity', 'is', SHOT)]
    return sg.find('Task', constrained, ['content'])
# NOTE: Python 2 `print` statements below -- this sandbox script is not
# Python 3 compatible.
def test(filters):
    # Debug helper: dump the filters, run the query, and print each match.
    print '%d filters:' % len(filters)
    for f in filters:
        print '  %r' % (f, )
    entities = find(filters)
    print '%d entities:' % (len(entities))
    for e in entities:
        print '  {id} {content}'.format(**e)
    print
def assertTasks(filters, expected, message=''):
    # Compare the sorted `content` values of matched Tasks with `expected`,
    # printing Ok./ERROR! instead of raising.
    tasks = find(filters)
    found = sorted(t['content'] for t in tasks)
    expected = sorted(expected)
    if found == expected:
        print '%s%sOk.' % (message or '', ': ' if message else '')
    else:
        print '%s%sERROR! Expected %s, found %s' % (message or '', ': ' if message else '', expected, found)
# Scratch notes kept from the original author (bare string literal, evaluated
# and discarded at import time):
'''
HOLY SHIT!
>>> sg.find_one('Task', [('sg_assets.Task_sg_assets_Connection.asset.Asset.code', 'contains', 'Dummy')])
>>> sg.find_one('Task', [('sg_assets.Asset.code', 'contains', 'Dummy')])
'''
print '=== name_CONTAINS ==='
assertTasks([
    ('task_assignees', 'name_contains', 'Mike'),
], ['both', 'user'])
assertTasks([
    ('task_assignees', 'name_contains', 'GRP1'),
], ['both', 'group'])
print '=== name_NOT_CONTAINS ==='
assertTasks([
    ('task_assignees', 'name_not_contains', 'GRP1'),
# NOTE(review): trailing "| sandbox/multi_entities.py |" on the next line is
# dataset extraction residue fused onto the source.
], ['user', 'none']) | sandbox/multi_entities.py |
import os

# Test fixture setup: either build entities on a cache/mock server or use
# known fixture ids on a real server.
if False:  # flip to exercise the real Shotgun registry connection
    from shotgun_api3_registry import connect
    sg = connect(use_cache=False)
else:
    from tests import Shotgun
    url = 'http://127.0.0.1:8010'
    # NOTE(review): env var is spelled with a lowercase tail
    # ('SGCACHE_SHOTGUN_SCRIPT_name') -- looks like a typo for
    # ..._SCRIPT_NAME, but kept as-is to avoid breaking existing setups.
    sg = Shotgun(url,
        os.environ.get('SGCACHE_SHOTGUN_SCRIPT_name', 'script_name'),
        os.environ.get('SGCACHE_SHOTGUN_API_KEY', 'api_key'),
    )
if sg.server_info.get('sgcache') or sg.server_info.get('sgmock'):
    # Talking to a cache/mock: build the fixtures from scratch.
    sg.clear()
    SHOT = sg.create('Shot', {'code': 'multi_entity_test'})
    USER = sg.create('HumanUser', {'first_name': 'multi_entity_user'})
    GRP1 = sg.create('Group', {'code': 'multi_entity_group1'})
    GRP2 = sg.create('Group', {'code': 'multi_entity_group2'})
    sg.create('Task', {'entity': SHOT, 'content': 'both', 'task_assignees': [USER, GRP1]})
    sg.create('Task', {'entity': SHOT, 'content': 'user', 'task_assignees': [USER]})
    sg.create('Task', {'entity': SHOT, 'content': 'group', 'task_assignees': [GRP1]})
    sg.create('Task', {'entity': SHOT, 'content': 'none', 'task_assignees': []})
else:
    # Talking to a real server: use known fixture ids.
    SHOT = {'type': 'Shot', 'id': 10891}
    AA = {'type': 'Asset', 'id': 1008}
    AB = {'type': 'Asset', 'id': 1009}
    AC = {'type': 'Asset', 'id': 1010}
    USER = {'type': 'HumanUser', 'id': 108}
    GRP1 = {'type': 'Group', 'id': 11}
    # BUG FIX: the second binding was written as GRP1 again, silently
    # shadowing the first group; it mirrors GRP2 in the branch above.
    GRP2 = {'type': 'Group', 'id': 13}
def find(filters):
    """Query Task entities constrained to SHOT, returning their `content`."""
    constrained = list(filters) + [('entity', 'is', SHOT)]
    return sg.find('Task', constrained, ['content'])
# NOTE: Python 2 `print` statements below -- this sandbox script is not
# Python 3 compatible.
def test(filters):
    # Debug helper: dump the filters, run the query, and print each match.
    print '%d filters:' % len(filters)
    for f in filters:
        print '  %r' % (f, )
    entities = find(filters)
    print '%d entities:' % (len(entities))
    for e in entities:
        print '  {id} {content}'.format(**e)
    print
def assertTasks(filters, expected, message=''):
    # Compare the sorted `content` values of matched Tasks with `expected`,
    # printing Ok./ERROR! instead of raising.
    tasks = find(filters)
    found = sorted(t['content'] for t in tasks)
    expected = sorted(expected)
    if found == expected:
        print '%s%sOk.' % (message or '', ': ' if message else '')
    else:
        print '%s%sERROR! Expected %s, found %s' % (message or '', ': ' if message else '', expected, found)
# Scratch notes kept from the original author (bare string literal, evaluated
# and discarded at import time):
'''
HOLY SHIT!
>>> sg.find_one('Task', [('sg_assets.Task_sg_assets_Connection.asset.Asset.code', 'contains', 'Dummy')])
>>> sg.find_one('Task', [('sg_assets.Asset.code', 'contains', 'Dummy')])
'''
print '=== name_CONTAINS ==='
assertTasks([
    ('task_assignees', 'name_contains', 'Mike'),
], ['both', 'user'])
assertTasks([
    ('task_assignees', 'name_contains', 'GRP1'),
], ['both', 'group'])
print '=== name_NOT_CONTAINS ==='
assertTasks([
    ('task_assignees', 'name_not_contains', 'GRP1'),
# NOTE(review): trailing "| 0.30013 | 0.134691" on the next line is dataset
# extraction residue fused onto the source.
], ['user', 'none']) | 0.30013 | 0.134691
import re
import pytest
import responses
from quickbuild import AsyncQBClient
TOKEN_XML = r"""<?xml version="1.0" encoding="UTF-8"?>
<list>
<com.pmease.quickbuild.model.Token>
<id>120204</id>
<value>84858611-a1fe-4f88-a49c-f600cf0ecf11</value>
<ip>192.168.1.100</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:01:09.426Z</lastUsedDate>
<lastUsedReason>Run step (configuration: root/pipelineC, build: B.50906, step: master>build)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-100</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
</list>
"""
TOKENS_XML = r"""<?xml version="1.0" encoding="UTF-8"?>
<list>
<com.pmease.quickbuild.model.Token>
<id>117554</id>
<value><PASSWORD>-<PASSWORD>-<PASSWORD>-<PASSWORD></value>
<ip>192.168.1.100</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:08:29.360Z</lastUsedDate>
<lastUsedReason>Check build condition (configuration: root/pipelineA)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-100</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
<com.pmease.quickbuild.model.Token>
<id>115672</id>
<value>27350640-d9f9-4a10-96ae-b6ec8fee998b</value>
<ip>192.168.1.101</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:01:10.175Z</lastUsedDate>
<lastUsedReason>Run step (configuration: root/pipelineA, build: B.1234, step: master>finalize)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-101</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
<com.pmease.quickbuild.model.Token>
<id>116545</id>
<value>8f604c48-b9f4-4bbe-847c-c073b2aebc81</value>
<ip>192.168.1.102</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:01:10.013Z</lastUsedDate>
<lastUsedReason>Run step (configuration: root/pipelineB, build: B.123, step: master>publish)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-102</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
</list>
"""
EMPTY_TOKEN_XML = r"""<?xml version="1.0" encoding="UTF-8"?>
<list/>
"""
@responses.activate
def test_authorize(client):
    """Token authorization returns the server-supplied id, with or without a port."""
    expected = '120123'
    responses.add(
        responses.GET,
        re.compile(r'.*/rest/tokens/authorize'),
        content_type='text/plain',
        body=expected,
        match_querystring=True,
    )
    assert client.tokens.authorize('192.168.1.100', 8811) == expected
    assert client.tokens.authorize('192.168.1.100') == expected
@responses.activate
def test_unauthorize(client):
    """Token de-authorization returns the server-supplied id, with or without a port."""
    expected = '120123'
    responses.add(
        responses.GET,
        re.compile(r'.*/rest/tokens/unauthorize'),
        content_type='text/plain',
        body=expected,
        match_querystring=True,
    )
    assert client.tokens.unauthorize('192.168.1.100', 8811) == expected
    assert client.tokens.unauthorize('192.168.1.100') == expected
@responses.activate
def test_token_and_agent_details(client):
    """Filtering tokens by agent address yields exactly the matching token."""
    responses.add(
        responses.GET,
        re.compile(r'.*/rest/tokens\?address=quickbuild-agent-192-168-1-100%3A8811'),
        content_type='application/xml',
        body=TOKEN_XML
    )
    tokens = client.tokens.get('quickbuild-agent-192-168-1-100:8811')
    assert [token['id'] for token in tokens] == [120204]
@responses.activate
def test_tokens_and_agent_details(client):
    """An unfiltered token query returns every token in the XML payload, in order."""
    responses.add(
        responses.GET,
        re.compile(r'.*/rest/tokens'),
        content_type='application/xml',
        body=TOKENS_XML,
    )
    tokens = client.tokens.get()
    assert [token['id'] for token in tokens] == [117554, 115672, 116545]
@responses.activate
def test_tokens_and_agent_details_with_unknown_address(client):
    """Querying an unknown agent address yields an empty token list."""
    responses.add(
        responses.GET,
        re.compile(r'.*/rest/tokens\?address=unknown'),
        content_type='application/xml',
        body=EMPTY_TOKEN_XML,
        match_querystring=True,
    )
    assert client.tokens.get('unknown') == []
@pytest.mark.asyncio
async def test_authorize_async(aiohttp_mock):
    """Async client resolves authorize() to the id returned by the server."""
    expected = '120123'
    client = AsyncQBClient('http://server')
    try:
        aiohttp_mock.get(
            re.compile(r'.*/rest/tokens/authorize'),
            content_type='text/plain',
            body=expected,
        )
        assert await client.tokens.authorize('192.168.1.100') == expected
    finally:
        # Always release the underlying HTTP session.
        await client.close()
@pytest.mark.asyncio
async def test_unauthorize_async(aiohttp_mock):
    """Async client resolves unauthorize() to the id returned by the server."""
    expected = '120123'
    client = AsyncQBClient('http://server')
    try:
        aiohttp_mock.get(
            re.compile(r'.*/rest/tokens/unauthorize'),
            content_type='text/plain',
            body=expected,
        )
        assert await client.tokens.unauthorize('192.168.1.100') == expected
    finally:
        # Always release the underlying HTTP session.
        await client.close()
import pytest
import responses
from quickbuild import AsyncQBClient
TOKEN_XML = r"""<?xml version="1.0" encoding="UTF-8"?>
<list>
<com.pmease.quickbuild.model.Token>
<id>120204</id>
<value>84858611-a1fe-4f88-a49c-f600cf0ecf11</value>
<ip>192.168.1.100</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:01:09.426Z</lastUsedDate>
<lastUsedReason>Run step (configuration: root/pipelineC, build: B.50906, step: master>build)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-100</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
</list>
"""
TOKENS_XML = r"""<?xml version="1.0" encoding="UTF-8"?>
<list>
<com.pmease.quickbuild.model.Token>
<id>117554</id>
<value><PASSWORD>-<PASSWORD>-<PASSWORD>-<PASSWORD></value>
<ip>192.168.1.100</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:08:29.360Z</lastUsedDate>
<lastUsedReason>Check build condition (configuration: root/pipelineA)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-100</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
<com.pmease.quickbuild.model.Token>
<id>115672</id>
<value>27350640-d9f9-4a10-96ae-b6ec8fee998b</value>
<ip>192.168.1.101</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:01:10.175Z</lastUsedDate>
<lastUsedReason>Run step (configuration: root/pipelineA, build: B.1234, step: master>finalize)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-101</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
<com.pmease.quickbuild.model.Token>
<id>116545</id>
<value>8f604c48-b9f4-4bbe-847c-c073b2aebc81</value>
<ip>192.168.1.102</ip>
<port>8811</port>
<test>false</test>
<lastUsedDate>2021-02-08T20:01:10.013Z</lastUsedDate>
<lastUsedReason>Run step (configuration: root/pipelineB, build: B.123, step: master>publish)</lastUsedReason>
<hostName>quickbuild-agent-192-168-1-102</hostName>
<offlineAlert>true</offlineAlert>
</com.pmease.quickbuild.model.Token>
</list>
"""
EMPTY_TOKEN_XML = r"""<?xml version="1.0" encoding="UTF-8"?>
<list/>
"""
@responses.activate
def test_authorize(client):
RESPONSE_DATA = '120123'
responses.add(
responses.GET,
re.compile(r'.*/rest/tokens/authorize'),
content_type='text/plain',
body=RESPONSE_DATA,
match_querystring=True,
)
response = client.tokens.authorize('192.168.1.100', 8811)
assert response == RESPONSE_DATA
response = client.tokens.authorize('192.168.1.100')
assert response == RESPONSE_DATA
@responses.activate
def test_unauthorize(client):
RESPONSE_DATA = '120123'
responses.add(
responses.GET,
re.compile(r'.*/rest/tokens/unauthorize'),
content_type='text/plain',
body=RESPONSE_DATA,
match_querystring=True,
)
response = client.tokens.unauthorize('192.168.1.100', 8811)
assert response == RESPONSE_DATA
response = client.tokens.unauthorize('192.168.1.100')
assert response == RESPONSE_DATA
@responses.activate
def test_token_and_agent_details(client):
responses.add(
responses.GET,
re.compile(r'.*/rest/tokens\?address=quickbuild-agent-192-168-1-100%3A8811'),
content_type='application/xml',
body=TOKEN_XML
)
response = client.tokens.get('quickbuild-agent-192-168-1-100:8811')
assert len(response) == 1
assert response[0]['id'] == 120204
@responses.activate
def test_tokens_and_agent_details(client):
responses.add(
responses.GET,
re.compile(r'.*/rest/tokens'),
content_type='application/xml',
body=TOKENS_XML,
)
response = client.tokens.get()
assert len(response) == 3
assert response[0]['id'] == 117554
assert response[1]['id'] == 115672
assert response[2]['id'] == 116545
@responses.activate
def test_tokens_and_agent_details_with_unknown_address(client):
responses.add(
responses.GET,
re.compile(r'.*/rest/tokens\?address=unknown'),
content_type='application/xml',
body=EMPTY_TOKEN_XML,
match_querystring=True,
)
response = client.tokens.get('unknown')
assert len(response) == 0
assert response == []
@pytest.mark.asyncio
async def test_authorize_async(aiohttp_mock):
RESPONSE_DATA = '120123'
client = AsyncQBClient('http://server')
try:
aiohttp_mock.get(
re.compile(r'.*/rest/tokens/authorize'),
content_type='text/plain',
body=RESPONSE_DATA,
)
response = await client.tokens.authorize('192.168.1.100')
assert response == RESPONSE_DATA
finally:
await client.close()
@pytest.mark.asyncio
async def test_unauthorize_async(aiohttp_mock):
RESPONSE_DATA = '120123'
client = AsyncQBClient('http://server')
try:
aiohttp_mock.get(
re.compile(r'.*/rest/tokens/unauthorize'),
content_type='text/plain',
body=RESPONSE_DATA,
)
response = await client.tokens.unauthorize('192.168.1.100')
assert response == RESPONSE_DATA
finally:
await client.close() | 0.343672 | 0.222838 |
import pytest
import ngraph as ng
from ngraph.op_graph.comm_nodes import RecvOp, ScatterRecvOp, GatherRecvOp
from ngraph.op_graph.comm_nodes import SendOp, ScatterSendOp, GatherSendOp
from ngraph.testing.hetr_utils import create_send_recv_graph, create_scatter_gather_graph
from ngraph.transformers.hetr.hetr_utils import comm_path_exists, update_comm_deps, find_recvs
pytestmark = pytest.mark.hetr_only
def test_find_recvs():
z, recv_x, recv_x_plus_one, send_x, x_plus_one, from_node, send_x_plus_one = \
create_send_recv_graph()
assert set([recv_x]) == set(find_recvs(x_plus_one))
assert set([recv_x]) == set(find_recvs(recv_x))
assert len(find_recvs(from_node)) == 0
assert set([recv_x]) == set(find_recvs(send_x_plus_one))
assert set([recv_x_plus_one, recv_x]) == set(find_recvs(recv_x_plus_one))
assert set([recv_x_plus_one, recv_x]) == set(find_recvs(z))
def test_find_recvs_scatter_gather():
scatter_send_x, scatter_recv_a, scatter_recv_b, gather_send_a, gather_send_b, \
gather_recv_x_plus_one = create_scatter_gather_graph()
assert set([scatter_recv_a]) == set(find_recvs(gather_send_a))
assert set([scatter_recv_b]) == set(find_recvs(gather_send_b))
assert len(find_recvs(scatter_send_x)) == 0
assert set([gather_recv_x_plus_one, scatter_recv_a]) == set(find_recvs(gather_recv_x_plus_one))
assert set([scatter_recv_a]) == set(find_recvs(scatter_recv_a))
def test_comm_path_exists():
axes = ng.make_axes([ng.make_axis(length=10, name='A'), ng.make_axis(length=15, name='B')])
with ng.metadata(device=None, device_id=None, transformer=None, host_transformer=None):
from_node = ng.placeholder(axes)
to_node = ng.placeholder(axes)
send_x = SendOp(from_node=from_node)
recv_x = RecvOp(to_node=to_node, send_node=send_x)
with ng.metadata(device=None, device_id=None, transformer=None, host_transformer=None):
x_plus_one = recv_x + 1
assert comm_path_exists(recv_x, send_x)
assert comm_path_exists(x_plus_one, send_x)
def test_comm_path_exists_scatter_gather():
scatter_send_x, scatter_recv_a, scatter_recv_b, gather_send_a, gather_send_b, \
gather_recv_x_plus_one = create_scatter_gather_graph()
assert comm_path_exists(scatter_recv_a, scatter_send_x)
assert comm_path_exists(gather_recv_x_plus_one, gather_send_a)
assert comm_path_exists(gather_recv_x_plus_one, scatter_send_x)
assert comm_path_exists(scatter_recv_b, scatter_send_x)
assert not comm_path_exists(gather_recv_x_plus_one, gather_send_b)
assert not comm_path_exists(gather_send_a, gather_recv_x_plus_one)
def test_update_comm_deps():
with ng.metadata(transformer='cpu0'):
z, recv_x, recv_x_plus_one, send_x, x_plus_one, from_node, send_x_plus_one = \
create_send_recv_graph()
update_comm_deps((z, send_x))
assert recv_x_plus_one in z.all_deps
def test_update_comm_deps_scatter_gather():
ax_a = ng.make_axis(length=10, name='A')
ax_b = ng.make_axis(length=15, name='B')
axes = ng.make_axes([ax_a, ax_b])
parallel_metadata = dict(parallel=ax_a, device_id=(0, 1),
transformer=None, host_transformer=None, device=None)
with ng.metadata(transformer='cpu0'):
with ng.metadata(**parallel_metadata):
from_node_a = ng.placeholder(axes)
to_node_a = ng.placeholder(axes)
scatter_send_x = ScatterSendOp(from_node=from_node_a, to_node=to_node_a)
scatter_recv_a = ScatterRecvOp(to_node=to_node_a, send_node=scatter_send_x)
with ng.metadata(**parallel_metadata):
x_plus_one_a = scatter_recv_a + 1
gather_send_x_plus_one_a = GatherSendOp(from_node=x_plus_one_a)
with ng.metadata(transformer='cpu1'):
with ng.metadata(**parallel_metadata):
to_node_b = ng.placeholder(axes)
scatter_recv_b = ScatterRecvOp(to_node=to_node_b, send_node=scatter_send_x)
with ng.metadata(**parallel_metadata):
x_plus_one_b = scatter_recv_b + 1
gather_send_x_plus_one_b = GatherSendOp(from_node=x_plus_one_b)
with ng.metadata(transformer='cpu0'):
with ng.metadata(**parallel_metadata):
gather_recv_x_plus_one_a = GatherRecvOp(from_node=from_node_a, to_node=to_node_a,
send_node=gather_send_x_plus_one_a)
z_a = gather_recv_x_plus_one_a + 1
update_comm_deps((scatter_send_x, gather_send_x_plus_one_a, z_a))
update_comm_deps((gather_send_x_plus_one_b,))
assert set([scatter_send_x]) == set(scatter_recv_a.control_deps)
assert set([scatter_send_x, gather_send_x_plus_one_a]) == \
set(gather_recv_x_plus_one_a.control_deps)
def assert_axes_eq_len(expected_axes, actual_axes):
for exp, act in zip(expected_axes, actual_axes):
assert exp.length == act.length
@pytest.mark.parametrize('config', [
{
'axes': [64],
'parallel_axis': 0,
'slices': [[slice(0, 32, 1)], [slice(32, 64, 1)]],
'device_id': (0, 1)
},
{
'axes': [64, 128],
'parallel_axis': 0,
'slices': [[slice(0, 16, 1), slice(None)],
[slice(16, 32, 1), slice(None)],
[slice(32, 48, 1), slice(None)],
[slice(48, 64, 1), slice(None)]],
'device_id': (0, 1, 2, 3)
},
{
'axes': [64, 128, 256],
'parallel_axis': 0,
'slices': [[slice(0, 16, 1), slice(None), slice(None)],
[slice(16, 32, 1), slice(None), slice(None)],
[slice(32, 48, 1), slice(None), slice(None)],
[slice(48, 64, 1), slice(None), slice(None)]],
'device_id': (0, 1, 2, 3)
},
{
'axes': [64, 128, 256],
'parallel_axis': 2,
'slices': [[slice(0, 128, 1), slice(None), slice(None)],
[slice(128, 256, 1), slice(None), slice(None)]],
'device_id': (0, 1)
}
])
def test_scatter_gather_node_axes(config):
t = config
axes = ng.make_axes([ng.make_axis(length) for length in t['axes']])
parallel_axis = axes[t['parallel_axis']]
hetr_axes = parallel_axis + (axes - parallel_axis)
with ng.metadata(device=None, device_id='0', transformer='cpu0', host_transformer=None):
from_node = ng.placeholder(axes=axes)
to_node = ng.placeholder(axes=axes)
with ng.metadata(device=None, device_id=t['device_id'], transformer=None,
parallel=parallel_axis, host_transformer=None):
par_node = ng.placeholder(axes=axes)
scatter_send_op = ScatterSendOp(from_node=from_node,
to_node=par_node)
assert hetr_axes == scatter_send_op.axes
assert t['slices'] == scatter_send_op.slices
scatter_recv_op = ScatterRecvOp(to_node=par_node,
send_node=scatter_send_op)
for sct_a, a in zip(scatter_recv_op.axes, hetr_axes):
assert sct_a.length == a.length
gather_send_op = GatherSendOp(from_node=scatter_recv_op)
assert_axes_eq_len(scatter_recv_op.axes, gather_send_op.axes)
gather_recv_op = GatherRecvOp(from_node=par_node,
to_node=to_node,
send_node=gather_send_op)
assert_axes_eq_len(hetr_axes, gather_recv_op.axes)
assert t['slices'] == gather_recv_op.slices
# TODO: Add def test_clone_graph() - Issue #1864 | tests/hetr_tests/test_hetr_utils.py | import pytest
import ngraph as ng
from ngraph.op_graph.comm_nodes import RecvOp, ScatterRecvOp, GatherRecvOp
from ngraph.op_graph.comm_nodes import SendOp, ScatterSendOp, GatherSendOp
from ngraph.testing.hetr_utils import create_send_recv_graph, create_scatter_gather_graph
from ngraph.transformers.hetr.hetr_utils import comm_path_exists, update_comm_deps, find_recvs
pytestmark = pytest.mark.hetr_only
def test_find_recvs():
z, recv_x, recv_x_plus_one, send_x, x_plus_one, from_node, send_x_plus_one = \
create_send_recv_graph()
assert set([recv_x]) == set(find_recvs(x_plus_one))
assert set([recv_x]) == set(find_recvs(recv_x))
assert len(find_recvs(from_node)) == 0
assert set([recv_x]) == set(find_recvs(send_x_plus_one))
assert set([recv_x_plus_one, recv_x]) == set(find_recvs(recv_x_plus_one))
assert set([recv_x_plus_one, recv_x]) == set(find_recvs(z))
def test_find_recvs_scatter_gather():
scatter_send_x, scatter_recv_a, scatter_recv_b, gather_send_a, gather_send_b, \
gather_recv_x_plus_one = create_scatter_gather_graph()
assert set([scatter_recv_a]) == set(find_recvs(gather_send_a))
assert set([scatter_recv_b]) == set(find_recvs(gather_send_b))
assert len(find_recvs(scatter_send_x)) == 0
assert set([gather_recv_x_plus_one, scatter_recv_a]) == set(find_recvs(gather_recv_x_plus_one))
assert set([scatter_recv_a]) == set(find_recvs(scatter_recv_a))
def test_comm_path_exists():
axes = ng.make_axes([ng.make_axis(length=10, name='A'), ng.make_axis(length=15, name='B')])
with ng.metadata(device=None, device_id=None, transformer=None, host_transformer=None):
from_node = ng.placeholder(axes)
to_node = ng.placeholder(axes)
send_x = SendOp(from_node=from_node)
recv_x = RecvOp(to_node=to_node, send_node=send_x)
with ng.metadata(device=None, device_id=None, transformer=None, host_transformer=None):
x_plus_one = recv_x + 1
assert comm_path_exists(recv_x, send_x)
assert comm_path_exists(x_plus_one, send_x)
def test_comm_path_exists_scatter_gather():
scatter_send_x, scatter_recv_a, scatter_recv_b, gather_send_a, gather_send_b, \
gather_recv_x_plus_one = create_scatter_gather_graph()
assert comm_path_exists(scatter_recv_a, scatter_send_x)
assert comm_path_exists(gather_recv_x_plus_one, gather_send_a)
assert comm_path_exists(gather_recv_x_plus_one, scatter_send_x)
assert comm_path_exists(scatter_recv_b, scatter_send_x)
assert not comm_path_exists(gather_recv_x_plus_one, gather_send_b)
assert not comm_path_exists(gather_send_a, gather_recv_x_plus_one)
def test_update_comm_deps():
with ng.metadata(transformer='cpu0'):
z, recv_x, recv_x_plus_one, send_x, x_plus_one, from_node, send_x_plus_one = \
create_send_recv_graph()
update_comm_deps((z, send_x))
assert recv_x_plus_one in z.all_deps
def test_update_comm_deps_scatter_gather():
ax_a = ng.make_axis(length=10, name='A')
ax_b = ng.make_axis(length=15, name='B')
axes = ng.make_axes([ax_a, ax_b])
parallel_metadata = dict(parallel=ax_a, device_id=(0, 1),
transformer=None, host_transformer=None, device=None)
with ng.metadata(transformer='cpu0'):
with ng.metadata(**parallel_metadata):
from_node_a = ng.placeholder(axes)
to_node_a = ng.placeholder(axes)
scatter_send_x = ScatterSendOp(from_node=from_node_a, to_node=to_node_a)
scatter_recv_a = ScatterRecvOp(to_node=to_node_a, send_node=scatter_send_x)
with ng.metadata(**parallel_metadata):
x_plus_one_a = scatter_recv_a + 1
gather_send_x_plus_one_a = GatherSendOp(from_node=x_plus_one_a)
with ng.metadata(transformer='cpu1'):
with ng.metadata(**parallel_metadata):
to_node_b = ng.placeholder(axes)
scatter_recv_b = ScatterRecvOp(to_node=to_node_b, send_node=scatter_send_x)
with ng.metadata(**parallel_metadata):
x_plus_one_b = scatter_recv_b + 1
gather_send_x_plus_one_b = GatherSendOp(from_node=x_plus_one_b)
with ng.metadata(transformer='cpu0'):
with ng.metadata(**parallel_metadata):
gather_recv_x_plus_one_a = GatherRecvOp(from_node=from_node_a, to_node=to_node_a,
send_node=gather_send_x_plus_one_a)
z_a = gather_recv_x_plus_one_a + 1
update_comm_deps((scatter_send_x, gather_send_x_plus_one_a, z_a))
update_comm_deps((gather_send_x_plus_one_b,))
assert set([scatter_send_x]) == set(scatter_recv_a.control_deps)
assert set([scatter_send_x, gather_send_x_plus_one_a]) == \
set(gather_recv_x_plus_one_a.control_deps)
def assert_axes_eq_len(expected_axes, actual_axes):
for exp, act in zip(expected_axes, actual_axes):
assert exp.length == act.length
@pytest.mark.parametrize('config', [
{
'axes': [64],
'parallel_axis': 0,
'slices': [[slice(0, 32, 1)], [slice(32, 64, 1)]],
'device_id': (0, 1)
},
{
'axes': [64, 128],
'parallel_axis': 0,
'slices': [[slice(0, 16, 1), slice(None)],
[slice(16, 32, 1), slice(None)],
[slice(32, 48, 1), slice(None)],
[slice(48, 64, 1), slice(None)]],
'device_id': (0, 1, 2, 3)
},
{
'axes': [64, 128, 256],
'parallel_axis': 0,
'slices': [[slice(0, 16, 1), slice(None), slice(None)],
[slice(16, 32, 1), slice(None), slice(None)],
[slice(32, 48, 1), slice(None), slice(None)],
[slice(48, 64, 1), slice(None), slice(None)]],
'device_id': (0, 1, 2, 3)
},
{
'axes': [64, 128, 256],
'parallel_axis': 2,
'slices': [[slice(0, 128, 1), slice(None), slice(None)],
[slice(128, 256, 1), slice(None), slice(None)]],
'device_id': (0, 1)
}
])
def test_scatter_gather_node_axes(config):
t = config
axes = ng.make_axes([ng.make_axis(length) for length in t['axes']])
parallel_axis = axes[t['parallel_axis']]
hetr_axes = parallel_axis + (axes - parallel_axis)
with ng.metadata(device=None, device_id='0', transformer='cpu0', host_transformer=None):
from_node = ng.placeholder(axes=axes)
to_node = ng.placeholder(axes=axes)
with ng.metadata(device=None, device_id=t['device_id'], transformer=None,
parallel=parallel_axis, host_transformer=None):
par_node = ng.placeholder(axes=axes)
scatter_send_op = ScatterSendOp(from_node=from_node,
to_node=par_node)
assert hetr_axes == scatter_send_op.axes
assert t['slices'] == scatter_send_op.slices
scatter_recv_op = ScatterRecvOp(to_node=par_node,
send_node=scatter_send_op)
for sct_a, a in zip(scatter_recv_op.axes, hetr_axes):
assert sct_a.length == a.length
gather_send_op = GatherSendOp(from_node=scatter_recv_op)
assert_axes_eq_len(scatter_recv_op.axes, gather_send_op.axes)
gather_recv_op = GatherRecvOp(from_node=par_node,
to_node=to_node,
send_node=gather_send_op)
assert_axes_eq_len(hetr_axes, gather_recv_op.axes)
assert t['slices'] == gather_recv_op.slices
# TODO: Add def test_clone_graph() - Issue #1864 | 0.599368 | 0.443721 |
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(468, 304)
self.logo = QtWidgets.QLabel(Dialog)
self.logo.setGeometry(QtCore.QRect(10, 10, 171, 281))
self.logo.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.IBeamCursor))
self.logo.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.logo.setText("")
self.logo.setPixmap(QtGui.QPixmap(":/logo/gui_resources/Updates.png"))
self.logo.setScaledContents(True)
self.logo.setObjectName("logo")
self.currentVersionLabel = QtWidgets.QLabel(Dialog)
self.currentVersionLabel.setGeometry(QtCore.QRect(210, 10, 121, 31))
self.currentVersionLabel.setObjectName("currentVersionLabel")
self.currentVersionBox = QtWidgets.QLabel(Dialog)
self.currentVersionBox.setGeometry(QtCore.QRect(330, 10, 91, 31))
self.currentVersionBox.setFrameShape(QtWidgets.QFrame.Shape.Panel)
self.currentVersionBox.setFrameShadow(QtWidgets.QFrame.Shadow.Plain)
self.currentVersionBox.setLineWidth(1)
self.currentVersionBox.setObjectName("currentVersionBox")
self.checkUpdatesButton = QtWidgets.QPushButton(Dialog)
self.checkUpdatesButton.setGeometry(QtCore.QRect(250, 270, 121, 23))
self.checkUpdatesButton.setObjectName("checkUpdatesButton")
self.closeButton = QtWidgets.QPushButton(Dialog)
self.closeButton.setGeometry(QtCore.QRect(380, 270, 75, 23))
self.closeButton.setObjectName("closeButton")
self.changelogBoxScrollArea = QtWidgets.QScrollArea(Dialog)
self.changelogBoxScrollArea.setGeometry(QtCore.QRect(190, 130, 271, 121))
self.changelogBoxScrollArea.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.changelogBoxScrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)
self.changelogBoxScrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAsNeeded)
self.changelogBoxScrollArea.setWidgetResizable(True)
self.changelogBoxScrollArea.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading|QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignTop)
self.changelogBoxScrollArea.setObjectName("changelogBoxScrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 252, 119))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setObjectName("verticalLayout")
self.changelogBox = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.changelogBox.setScaledContents(True)
self.changelogBox.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading|QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignTop)
self.changelogBox.setWordWrap(True)
self.changelogBox.setIndent(0)
self.changelogBox.setObjectName("changelogBox")
self.verticalLayout.addWidget(self.changelogBox)
self.changelogBoxScrollArea.setWidget(self.scrollAreaWidgetContents)
self.latestVersionLabel = QtWidgets.QLabel(Dialog)
self.latestVersionLabel.setGeometry(QtCore.QRect(210, 50, 121, 31))
self.latestVersionLabel.setObjectName("latestVersionLabel")
self.latestVersionBox = QtWidgets.QLabel(Dialog)
self.latestVersionBox.setGeometry(QtCore.QRect(330, 50, 91, 31))
self.latestVersionBox.setFrameShape(QtWidgets.QFrame.Shape.Panel)
self.latestVersionBox.setFrameShadow(QtWidgets.QFrame.Shadow.Plain)
self.latestVersionBox.setLineWidth(1)
self.latestVersionBox.setObjectName("latestVersionBox")
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(190, 105, 271, 21))
font = QtGui.QFont()
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label.setObjectName("label")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Update Manager - Mr. Worldwide"))
self.currentVersionLabel.setText(_translate("Dialog", "Current Version"))
self.currentVersionBox.setText(_translate("Dialog", "v1.0.0"))
self.checkUpdatesButton.setText(_translate("Dialog", "Check for Updates"))
self.closeButton.setText(_translate("Dialog", "Close"))
self.changelogBox.setText(_translate("Dialog", "No changelog available."))
self.latestVersionLabel.setText(_translate("Dialog", "Latest Version"))
self.latestVersionBox.setText(_translate("Dialog", "v1.0.0"))
self.label.setText(_translate("Dialog", "Latest Version Changelog")) | UpdateManagerUI.py |
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(468, 304)
self.logo = QtWidgets.QLabel(Dialog)
self.logo.setGeometry(QtCore.QRect(10, 10, 171, 281))
self.logo.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.IBeamCursor))
self.logo.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.logo.setText("")
self.logo.setPixmap(QtGui.QPixmap(":/logo/gui_resources/Updates.png"))
self.logo.setScaledContents(True)
self.logo.setObjectName("logo")
self.currentVersionLabel = QtWidgets.QLabel(Dialog)
self.currentVersionLabel.setGeometry(QtCore.QRect(210, 10, 121, 31))
self.currentVersionLabel.setObjectName("currentVersionLabel")
self.currentVersionBox = QtWidgets.QLabel(Dialog)
self.currentVersionBox.setGeometry(QtCore.QRect(330, 10, 91, 31))
self.currentVersionBox.setFrameShape(QtWidgets.QFrame.Shape.Panel)
self.currentVersionBox.setFrameShadow(QtWidgets.QFrame.Shadow.Plain)
self.currentVersionBox.setLineWidth(1)
self.currentVersionBox.setObjectName("currentVersionBox")
self.checkUpdatesButton = QtWidgets.QPushButton(Dialog)
self.checkUpdatesButton.setGeometry(QtCore.QRect(250, 270, 121, 23))
self.checkUpdatesButton.setObjectName("checkUpdatesButton")
self.closeButton = QtWidgets.QPushButton(Dialog)
self.closeButton.setGeometry(QtCore.QRect(380, 270, 75, 23))
self.closeButton.setObjectName("closeButton")
self.changelogBoxScrollArea = QtWidgets.QScrollArea(Dialog)
self.changelogBoxScrollArea.setGeometry(QtCore.QRect(190, 130, 271, 121))
self.changelogBoxScrollArea.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.changelogBoxScrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)
self.changelogBoxScrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAsNeeded)
self.changelogBoxScrollArea.setWidgetResizable(True)
self.changelogBoxScrollArea.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading|QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignTop)
self.changelogBoxScrollArea.setObjectName("changelogBoxScrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 252, 119))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setObjectName("verticalLayout")
self.changelogBox = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.changelogBox.setScaledContents(True)
self.changelogBox.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading|QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignTop)
self.changelogBox.setWordWrap(True)
self.changelogBox.setIndent(0)
self.changelogBox.setObjectName("changelogBox")
self.verticalLayout.addWidget(self.changelogBox)
self.changelogBoxScrollArea.setWidget(self.scrollAreaWidgetContents)
self.latestVersionLabel = QtWidgets.QLabel(Dialog)
self.latestVersionLabel.setGeometry(QtCore.QRect(210, 50, 121, 31))
self.latestVersionLabel.setObjectName("latestVersionLabel")
self.latestVersionBox = QtWidgets.QLabel(Dialog)
self.latestVersionBox.setGeometry(QtCore.QRect(330, 50, 91, 31))
self.latestVersionBox.setFrameShape(QtWidgets.QFrame.Shape.Panel)
self.latestVersionBox.setFrameShadow(QtWidgets.QFrame.Shadow.Plain)
self.latestVersionBox.setLineWidth(1)
self.latestVersionBox.setObjectName("latestVersionBox")
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(190, 105, 271, 21))
font = QtGui.QFont()
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label.setObjectName("label")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Update Manager - Mr. Worldwide"))
self.currentVersionLabel.setText(_translate("Dialog", "Current Version"))
self.currentVersionBox.setText(_translate("Dialog", "v1.0.0"))
self.checkUpdatesButton.setText(_translate("Dialog", "Check for Updates"))
self.closeButton.setText(_translate("Dialog", "Close"))
self.changelogBox.setText(_translate("Dialog", "No changelog available."))
self.latestVersionLabel.setText(_translate("Dialog", "Latest Version"))
self.latestVersionBox.setText(_translate("Dialog", "v1.0.0"))
self.label.setText(_translate("Dialog", "Latest Version Changelog")) | 0.446253 | 0.070848 |
import json
import os
import time
from datetime import datetime
import cherrypy
from . import pdp_client
from .config import Config
from .deploy_handler import DeployHandler, PolicyUpdateMessage
from .onap.audit import Audit, AuditHttpCode
from .policy_receiver import PolicyReceiver
from .utils import Utils
class PolicyWeb(object):
"""run http API of policy-handler on 0.0.0.0:wservice_port - any incoming address"""
DATA_NOT_FOUND_ERROR = 404
HOST_INADDR_ANY = ".".join("0"*4)
logger = Utils.get_logger(__file__)
@staticmethod
def run_forever(audit):
"""run the web-server of the policy-handler forever"""
cherrypy.config.update({"server.socket_host": PolicyWeb.HOST_INADDR_ANY,
"server.socket_port": Config.wservice_port})
protocol = "http"
tls_info = ""
if Config.tls_server_cert_file and Config.tls_private_key_file:
tm_cert = os.path.getmtime(Config.tls_server_cert_file)
tm_key = os.path.getmtime(Config.tls_private_key_file)
cherrypy.server.ssl_module = 'builtin'
cherrypy.server.ssl_certificate = Config.tls_server_cert_file
cherrypy.server.ssl_private_key = Config.tls_private_key_file
if Config.tls_server_ca_chain_file:
cherrypy.server.ssl_certificate_chain = Config.tls_server_ca_chain_file
protocol = "https"
tls_info = "cert: {} {} {}".format(Config.tls_server_cert_file,
Config.tls_private_key_file,
Config.tls_server_ca_chain_file)
cherrypy.tree.mount(_PolicyWeb(), '/')
PolicyWeb.logger.info(
"%s with config: %s", audit.info("running policy_handler as {}://{}:{} {}".format(
protocol, cherrypy.server.socket_host, cherrypy.server.socket_port, tls_info)),
json.dumps(cherrypy.config))
cherrypy.engine.start()
# If HTTPS server certificate changes, exit to let kubernetes restart us
if Config.tls_server_cert_file and Config.tls_private_key_file:
while True:
time.sleep(600)
c_tm_cert = os.path.getmtime(Config.tls_server_cert_file)
c_tm_key = os.path.getmtime(Config.tls_private_key_file)
if c_tm_cert > tm_cert or c_tm_key > tm_key:
PolicyWeb.logger.info("cert or key file updated")
cherrypy.engine.stop()
cherrypy.engine.exit()
break
class _PolicyWeb(object):
"""REST API of policy-handler"""
@staticmethod
def _get_request_info(request):
"""returns info about the http request"""
return "{0} {1}{2}".format(request.method, request.script_name, request.path_info)
@cherrypy.expose
@cherrypy.popargs('policy_id')
@cherrypy.tools.json_out()
def policy_latest(self, policy_id):
"""retireves the latest policy identified by policy_id"""
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="get_latest_policy",
req_message=req_info, headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s policy_id=%s headers=%s",
req_info, policy_id, json.dumps(cherrypy.request.headers))
latest_policy = pdp_client.PolicyRest.get_latest_policy(
(audit, policy_id, None, None)) or {}
PolicyWeb.logger.info("res %s policy_id=%s latest_policy=%s",
req_info, policy_id, json.dumps(latest_policy))
_, http_status_code, _ = audit.audit_done(result=json.dumps(latest_policy))
if http_status_code == AuditHttpCode.DATA_NOT_FOUND_OK.value:
http_status_code = PolicyWeb.DATA_NOT_FOUND_ERROR
cherrypy.response.status = http_status_code
return latest_policy
def _get_all_policies_latest(self):
"""retireves all the latest policies on GET /policies_latest"""
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="get_all_policies_latest",
req_message=req_info, headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s", req_info)
result, policies, policy_filters = DeployHandler.get_deployed_policies(audit)
if not result:
result, policy_update = pdp_client.PolicyMatcher.build_catch_up_message(
audit, policies, policy_filters)
if policy_update and isinstance(policy_update, PolicyUpdateMessage):
result["policy_update"] = policy_update.get_message()
result_str = json.dumps(result, sort_keys=True)
PolicyWeb.logger.info("result %s: %s", req_info, result_str)
_, http_status_code, _ = audit.audit_done(result=result_str)
if http_status_code == AuditHttpCode.DATA_NOT_FOUND_OK.value:
http_status_code = PolicyWeb.DATA_NOT_FOUND_ERROR
cherrypy.response.status = http_status_code
return result
@cherrypy.expose
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def policies_latest(self):
"""
on :GET: retrieves all the latest policies from policy-engine that are deployed
on :POST: expects to receive the params that mimic the /getConfig of policy-engine
and retrieves the matching policies from policy-engine and picks the latest on each policy.
sample request - policies filter
{
"configAttributes": { "key1":"value1" },
"configName": "alex_config_name",
"onapName": "DCAE",
"policyName": "DCAE_alex.Config_alex_.*",
"unique": false
}
sample response
{
"DCAE_alex.Config_alex_priority": {
"policy_body": {
"policyName": "DCAE_alex.Config_alex_priority.3.xml",
"policyConfigMessage": "Config Retrieved! ",
"responseAttributes": {},
"policyConfigStatus": "CONFIG_RETRIEVED",
"type": "JSON",
"matchingConditions": {
"priority": "10",
"key1": "value1",
"ONAPName": "DCAE",
"ConfigName": "alex_config_name"
},
"property": null,
"config": {
"foo": "bar",
"foo_updated": "2018-10-06T16:54:31.696Z"
},
"policyVersion": "3"
},
"policy_id": "DCAE_alex.Config_alex_priority"
}
}
"""
if cherrypy.request.method == "GET":
return self._get_all_policies_latest()
if Config.is_pdp_api_default():
raise cherrypy.HTTPError(404, "temporarily unsupported due to the new pdp API")
if cherrypy.request.method != "POST":
raise cherrypy.HTTPError(404, "unexpected method {0}".format(cherrypy.request.method))
policy_filter = cherrypy.request.json or {}
str_policy_filter = json.dumps(policy_filter)
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="get_latest_policies",
req_message="{0}: {1}".format(req_info, str_policy_filter),
headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s: policy_filter=%s headers=%s",
req_info, str_policy_filter, json.dumps(cherrypy.request.headers))
result = pdp_client.PolicyRest.get_latest_policies(audit, policy_filter=policy_filter) or {}
result_str = json.dumps(result, sort_keys=True)
PolicyWeb.logger.info("result %s: policy_filter=%s result=%s",
req_info, str_policy_filter, result_str)
_, http_status_code, _ = audit.audit_done(result=result_str)
if http_status_code == AuditHttpCode.DATA_NOT_FOUND_OK.value:
http_status_code = PolicyWeb.DATA_NOT_FOUND_ERROR
cherrypy.response.status = http_status_code
return result
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def catch_up(self):
        """Trigger an asynchronous catch-up with all DCAE policies.

        Returns a JSON dict with the request timestamp and the audit
        request_id; the actual catch-up runs inside PolicyReceiver.
        """
        started = str(datetime.utcnow())
        req_info = _PolicyWeb._get_request_info(cherrypy.request)
        audit = Audit(job_name="catch_up", req_message=req_info, headers=cherrypy.request.headers)
        PolicyWeb.logger.info("%s", req_info)
        # hand off to the receiver; does not wait for the catch-up to finish
        PolicyReceiver.catch_up(audit)
        res = {"catch-up requested": started, "request_id": audit.request_id}
        PolicyWeb.logger.info("requested %s: %s", req_info, json.dumps(res))
        audit.info_requested(started)
        return res
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def reconfigure(self):
        """Schedule a reconfigure of the policy-handler.

        Returns a JSON dict with the request timestamp and the audit
        request_id; the reconfigure itself runs inside PolicyReceiver.
        """
        started = str(datetime.utcnow())
        req_info = _PolicyWeb._get_request_info(cherrypy.request)
        audit = Audit(job_name="reconfigure", req_message=req_info,
                      headers=cherrypy.request.headers)
        PolicyWeb.logger.info("%s", req_info)
        # hand off to the receiver; does not wait for completion
        PolicyReceiver.reconfigure(audit)
        res = {"reconfigure requested": started, "request_id": audit.request_id}
        PolicyWeb.logger.info("requested %s: %s", req_info, json.dumps(res))
        audit.info_requested(started)
        return res
    @cherrypy.expose
    def shutdown(self):
        """Shutdown the policy-handler.

        Stops the cherrypy engine and the policy receiver, logs the final
        health snapshot, and returns a plain-text goodbye message (note:
        no json_out here, unlike the other endpoints).
        """
        req_info = _PolicyWeb._get_request_info(cherrypy.request)
        audit = Audit(job_name="shutdown", req_message=req_info, headers=cherrypy.request.headers)
        PolicyWeb.logger.info("%s: --- stopping REST API of policy-handler ---", req_info)
        # stop accepting new HTTP requests before tearing down the receiver
        cherrypy.engine.exit()
        PolicyReceiver.shutdown(audit)
        PolicyWeb.logger.info("policy_handler health: {0}"
                              .format(json.dumps(audit.health(full=True))))
        PolicyWeb.logger.info("%s: --------- the end -----------", req_info)
        res = str(datetime.utcnow())
        audit.info_requested(res)
        PolicyWeb.logger.info("process_info: %s", json.dumps(audit.process_info()))
        return "goodbye! shutdown requested {0}".format(res)
@cherrypy.expose
@cherrypy.tools.json_out()
def healthcheck(self):
"""returns the healthcheck results"""
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="healthcheck",
req_message=req_info, headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s", req_info)
res = audit.health()
PolicyWeb.logger.info("healthcheck %s: res=%s", req_info, json.dumps(res))
audit.audit_done(result=json.dumps(res))
return res | policyhandler/web_server.py | import json
import os
import time
from datetime import datetime
import cherrypy
from . import pdp_client
from .config import Config
from .deploy_handler import DeployHandler, PolicyUpdateMessage
from .onap.audit import Audit, AuditHttpCode
from .policy_receiver import PolicyReceiver
from .utils import Utils
class PolicyWeb(object):
    """run http API of policy-handler on 0.0.0.0:wservice_port - any incoming address"""
    # HTTP status returned when a policy lookup comes back empty
    DATA_NOT_FOUND_ERROR = 404
    HOST_INADDR_ANY = ".".join("0"*4)  # "0.0.0.0"
    logger = Utils.get_logger(__file__)
    @staticmethod
    def run_forever(audit):
        """run the web-server of the policy-handler forever

        Configures cherrypy (optionally with TLS), mounts _PolicyWeb at /,
        starts the engine, and — when TLS is enabled — polls the cert/key
        files so the process exits on rotation (letting the orchestrator,
        e.g. kubernetes, restart it with the new certificate).
        """
        cherrypy.config.update({"server.socket_host": PolicyWeb.HOST_INADDR_ANY,
                                "server.socket_port": Config.wservice_port})
        protocol = "http"
        tls_info = ""
        if Config.tls_server_cert_file and Config.tls_private_key_file:
            # remember cert/key mtimes to detect rotation in the loop below
            tm_cert = os.path.getmtime(Config.tls_server_cert_file)
            tm_key = os.path.getmtime(Config.tls_private_key_file)
            cherrypy.server.ssl_module = 'builtin'
            cherrypy.server.ssl_certificate = Config.tls_server_cert_file
            cherrypy.server.ssl_private_key = Config.tls_private_key_file
            if Config.tls_server_ca_chain_file:
                cherrypy.server.ssl_certificate_chain = Config.tls_server_ca_chain_file
            protocol = "https"
            tls_info = "cert: {} {} {}".format(Config.tls_server_cert_file,
                                               Config.tls_private_key_file,
                                               Config.tls_server_ca_chain_file)
        cherrypy.tree.mount(_PolicyWeb(), '/')
        PolicyWeb.logger.info(
            "%s with config: %s", audit.info("running policy_handler as {}://{}:{} {}".format(
                protocol, cherrypy.server.socket_host, cherrypy.server.socket_port, tls_info)),
            json.dumps(cherrypy.config))
        cherrypy.engine.start()
        # If HTTPS server certificate changes, exit to let kubernetes restart us
        if Config.tls_server_cert_file and Config.tls_private_key_file:
            while True:
                time.sleep(600)  # poll every 10 minutes
                c_tm_cert = os.path.getmtime(Config.tls_server_cert_file)
                c_tm_key = os.path.getmtime(Config.tls_private_key_file)
                if c_tm_cert > tm_cert or c_tm_key > tm_key:
                    PolicyWeb.logger.info("cert or key file updated")
                    cherrypy.engine.stop()
                    cherrypy.engine.exit()
                    break
class _PolicyWeb(object):
"""REST API of policy-handler"""
@staticmethod
def _get_request_info(request):
"""returns info about the http request"""
return "{0} {1}{2}".format(request.method, request.script_name, request.path_info)
@cherrypy.expose
@cherrypy.popargs('policy_id')
@cherrypy.tools.json_out()
def policy_latest(self, policy_id):
"""retireves the latest policy identified by policy_id"""
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="get_latest_policy",
req_message=req_info, headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s policy_id=%s headers=%s",
req_info, policy_id, json.dumps(cherrypy.request.headers))
latest_policy = pdp_client.PolicyRest.get_latest_policy(
(audit, policy_id, None, None)) or {}
PolicyWeb.logger.info("res %s policy_id=%s latest_policy=%s",
req_info, policy_id, json.dumps(latest_policy))
_, http_status_code, _ = audit.audit_done(result=json.dumps(latest_policy))
if http_status_code == AuditHttpCode.DATA_NOT_FOUND_OK.value:
http_status_code = PolicyWeb.DATA_NOT_FOUND_ERROR
cherrypy.response.status = http_status_code
return latest_policy
def _get_all_policies_latest(self):
"""retireves all the latest policies on GET /policies_latest"""
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="get_all_policies_latest",
req_message=req_info, headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s", req_info)
result, policies, policy_filters = DeployHandler.get_deployed_policies(audit)
if not result:
result, policy_update = pdp_client.PolicyMatcher.build_catch_up_message(
audit, policies, policy_filters)
if policy_update and isinstance(policy_update, PolicyUpdateMessage):
result["policy_update"] = policy_update.get_message()
result_str = json.dumps(result, sort_keys=True)
PolicyWeb.logger.info("result %s: %s", req_info, result_str)
_, http_status_code, _ = audit.audit_done(result=result_str)
if http_status_code == AuditHttpCode.DATA_NOT_FOUND_OK.value:
http_status_code = PolicyWeb.DATA_NOT_FOUND_ERROR
cherrypy.response.status = http_status_code
return result
    @cherrypy.expose
    @cherrypy.tools.json_out()
    @cherrypy.tools.json_in()
    def policies_latest(self):
        """
        on :GET: retrieves all the latest policies from policy-engine that are deployed
        on :POST: expects to receive the params that mimic the /getConfig of policy-engine
        and retrieves the matching policies from policy-engine and picks the latest on each policy.
        sample request - policies filter
        {
            "configAttributes": { "key1":"value1" },
            "configName": "alex_config_name",
            "onapName": "DCAE",
            "policyName": "DCAE_alex.Config_alex_.*",
            "unique": false
        }
        sample response
        {
            "DCAE_alex.Config_alex_priority": {
                "policy_body": {
                    "policyName": "DCAE_alex.Config_alex_priority.3.xml",
                    "policyConfigMessage": "Config Retrieved! ",
                    "responseAttributes": {},
                    "policyConfigStatus": "CONFIG_RETRIEVED",
                    "type": "JSON",
                    "matchingConditions": {
                        "priority": "10",
                        "key1": "value1",
                        "ONAPName": "DCAE",
                        "ConfigName": "alex_config_name"
                    },
                    "property": null,
                    "config": {
                        "foo": "bar",
                        "foo_updated": "2018-10-06T16:54:31.696Z"
                    },
                    "policyVersion": "3"
                },
                "policy_id": "DCAE_alex.Config_alex_priority"
            }
        }
        """
        # GET serves everything currently deployed; no request body expected
        if cherrypy.request.method == "GET":
            return self._get_all_policies_latest()
        # the POST (filter) flavor is only supported on the legacy pdp API
        if Config.is_pdp_api_default():
            raise cherrypy.HTTPError(404, "temporarily unsupported due to the new pdp API")
        if cherrypy.request.method != "POST":
            raise cherrypy.HTTPError(404, "unexpected method {0}".format(cherrypy.request.method))
        policy_filter = cherrypy.request.json or {}
        str_policy_filter = json.dumps(policy_filter)
        req_info = _PolicyWeb._get_request_info(cherrypy.request)
        audit = Audit(job_name="get_latest_policies",
                      req_message="{0}: {1}".format(req_info, str_policy_filter),
                      headers=cherrypy.request.headers)
        PolicyWeb.logger.info("%s: policy_filter=%s headers=%s",
                              req_info, str_policy_filter, json.dumps(cherrypy.request.headers))
        result = pdp_client.PolicyRest.get_latest_policies(audit, policy_filter=policy_filter) or {}
        result_str = json.dumps(result, sort_keys=True)
        PolicyWeb.logger.info("result %s: policy_filter=%s result=%s",
                              req_info, str_policy_filter, result_str)
        # map "nothing found but OK" onto a plain 404 for the caller
        _, http_status_code, _ = audit.audit_done(result=result_str)
        if http_status_code == AuditHttpCode.DATA_NOT_FOUND_OK.value:
            http_status_code = PolicyWeb.DATA_NOT_FOUND_ERROR
        cherrypy.response.status = http_status_code
        return result
@cherrypy.expose
@cherrypy.tools.json_out()
def catch_up(self):
"""catch up with all DCAE policies"""
started = str(datetime.utcnow())
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="catch_up", req_message=req_info, headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s", req_info)
PolicyReceiver.catch_up(audit)
res = {"catch-up requested": started, "request_id": audit.request_id}
PolicyWeb.logger.info("requested %s: %s", req_info, json.dumps(res))
audit.info_requested(started)
return res
@cherrypy.expose
@cherrypy.tools.json_out()
def reconfigure(self):
"""schedule reconfigure"""
started = str(datetime.utcnow())
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="reconfigure", req_message=req_info,
headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s", req_info)
PolicyReceiver.reconfigure(audit)
res = {"reconfigure requested": started, "request_id": audit.request_id}
PolicyWeb.logger.info("requested %s: %s", req_info, json.dumps(res))
audit.info_requested(started)
return res
    @cherrypy.expose
    def shutdown(self):
        """Shutdown the policy-handler.

        Stops the cherrypy engine and the policy receiver, logs the final
        health snapshot, and returns a plain-text goodbye message (note:
        no json_out here, unlike the other endpoints).
        """
        req_info = _PolicyWeb._get_request_info(cherrypy.request)
        audit = Audit(job_name="shutdown", req_message=req_info, headers=cherrypy.request.headers)
        PolicyWeb.logger.info("%s: --- stopping REST API of policy-handler ---", req_info)
        # stop accepting new HTTP requests before tearing down the receiver
        cherrypy.engine.exit()
        PolicyReceiver.shutdown(audit)
        PolicyWeb.logger.info("policy_handler health: {0}"
                              .format(json.dumps(audit.health(full=True))))
        PolicyWeb.logger.info("%s: --------- the end -----------", req_info)
        res = str(datetime.utcnow())
        audit.info_requested(res)
        PolicyWeb.logger.info("process_info: %s", json.dumps(audit.process_info()))
        return "goodbye! shutdown requested {0}".format(res)
@cherrypy.expose
@cherrypy.tools.json_out()
def healthcheck(self):
"""returns the healthcheck results"""
req_info = _PolicyWeb._get_request_info(cherrypy.request)
audit = Audit(job_name="healthcheck",
req_message=req_info, headers=cherrypy.request.headers)
PolicyWeb.logger.info("%s", req_info)
res = audit.health()
PolicyWeb.logger.info("healthcheck %s: res=%s", req_info, json.dumps(res))
audit.audit_done(result=json.dumps(res))
return res | 0.474388 | 0.094929 |
from torch import nn, Tensor, Size
from typing import Optional, Union, List
import torch
from . import register_norm_fn
@register_norm_fn(name="layer_norm")
class LayerNorm(nn.LayerNorm):
    r"""
    Applies `Layer Normalization <https://arxiv.org/abs/1607.06450>`_ over a input tensor

    Args:
        normalized_shape (int or list or torch.Size): input shape from an expected input
            of size

            .. math::
                [* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1]
                \times \ldots \times \text{normalized\_shape}[-1]]

            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps (Optional, float): Value added to the denominator for numerical stability. Default: 1e-5
        elementwise_affine (bool): If ``True``, use learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, *)` where :math:`N` is the batch size
        - Output: same shape as the input
    """

    # NOTE: docstring is now a raw string — previously "\times"/"\ldots" were
    # interpreted as escape sequences ("\t" became a TAB), garbling the LaTeX.
    def __init__(
        self,
        normalized_shape: Union[int, List[int], Size],
        eps: Optional[float] = 1e-5,
        elementwise_affine: Optional[bool] = True,
        *args,
        **kwargs
    ):
        # *args/**kwargs are accepted (and ignored) so the norm registry can
        # pass a uniform set of options to every norm layer
        super().__init__(
            normalized_shape=normalized_shape,
            eps=eps,
            elementwise_affine=elementwise_affine,
        )

    def profile_module(self, input: Tensor) -> (Tensor, float, float):
        """Return (unchanged input, parameter count, 0.0 MACs) for profiling."""
        # generator expression avoids building an intermediate list
        params = sum(p.numel() for p in self.parameters())
        return input, params, 0.0
@register_norm_fn(name="layer_norm_2d")
class LayerNorm2D(nn.GroupNorm):
    r"""
    Applies `Layer Normalization <https://arxiv.org/abs/1607.06450>`_ over a 4D input tensor
    (implemented via ``nn.GroupNorm`` with ``num_groups=1``)

    Args:
        num_features (int): :math:`C` from an expected input of size :math:`(N, C, H, W)`
        eps (Optional, float): Value added to the denominator for numerical stability. Default: 1e-5
        elementwise_affine (bool): If ``True``, use learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)` where :math:`N` is the batch size, :math:`C` is the number of input channels,
          :math:`H` is the input height, and :math:`W` is the input width
        - Output: same shape as the input
    """

    def __init__(
        self,
        num_features: int,
        eps: Optional[float] = 1e-5,
        elementwise_affine: Optional[bool] = True,
        *args,
        **kwargs
    ) -> None:
        super().__init__(
            num_channels=num_features, eps=eps, affine=elementwise_affine, num_groups=1
        )
        # kept for __repr__ and external inspection
        self.num_channels = num_features

    def __repr__(self):
        return "{}(num_channels={}, eps={}, affine={})".format(
            self.__class__.__name__, self.num_channels, self.eps, self.affine
        )

    def profile_module(self, input: Tensor) -> (Tensor, float, float):
        """Return (unchanged input, parameter count, 0.0 MACs) for profiling."""
        # fixed: the original final line had dataset-column residue fused onto
        # the return statement; also use a generator instead of a list in sum()
        params = sum(p.numel() for p in self.parameters())
        return input, params, 0.0
from torch import nn, Tensor, Size
from typing import Optional, Union, List
import torch
from . import register_norm_fn
@register_norm_fn(name="layer_norm")
class LayerNorm(nn.LayerNorm):
    r"""
    Applies `Layer Normalization <https://arxiv.org/abs/1607.06450>`_ over a input tensor

    Args:
        normalized_shape (int or list or torch.Size): input shape from an expected input
            of size

            .. math::
                [* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1]
                \times \ldots \times \text{normalized\_shape}[-1]]

            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps (Optional, float): Value added to the denominator for numerical stability. Default: 1e-5
        elementwise_affine (bool): If ``True``, use learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, *)` where :math:`N` is the batch size
        - Output: same shape as the input
    """

    # NOTE: docstring made a raw string so the LaTeX backslashes are not
    # interpreted as escape sequences ("\t", "\l").
    def __init__(
        self,
        normalized_shape: Union[int, List[int], Size],
        eps: Optional[float] = 1e-5,
        elementwise_affine: Optional[bool] = True,
        *args,
        **kwargs
    ):
        # *args/**kwargs are accepted (and ignored) — presumably so the norm
        # registry can pass uniform options; confirm against the registry
        super().__init__(
            normalized_shape=normalized_shape,
            eps=eps,
            elementwise_affine=elementwise_affine,
        )

    def profile_module(self, input: Tensor) -> (Tensor, float, float):
        # returns (unchanged input, parameter count, 0.0 MACs) for profiling
        params = sum([p.numel() for p in self.parameters()])
        return input, params, 0.0
@register_norm_fn(name="layer_norm_2d")
class LayerNorm2D(nn.GroupNorm):
    r"""
    Applies `Layer Normalization <https://arxiv.org/abs/1607.06450>`_ over a 4D input tensor
    (implemented via ``nn.GroupNorm`` with ``num_groups=1``)

    Args:
        num_features (int): :math:`C` from an expected input of size :math:`(N, C, H, W)`
        eps (Optional, float): Value added to the denominator for numerical stability. Default: 1e-5
        elementwise_affine (bool): If ``True``, use learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)` where :math:`N` is the batch size, :math:`C` is the number of input channels,
          :math:`H` is the input height, and :math:`W` is the input width
        - Output: same shape as the input
    """

    def __init__(
        self,
        num_features: int,
        eps: Optional[float] = 1e-5,
        elementwise_affine: Optional[bool] = True,
        *args,
        **kwargs
    ) -> None:
        super().__init__(
            num_channels=num_features, eps=eps, affine=elementwise_affine, num_groups=1
        )
        # kept for __repr__ and external inspection
        self.num_channels = num_features

    def __repr__(self):
        return "{}(num_channels={}, eps={}, affine={})".format(
            self.__class__.__name__, self.num_channels, self.eps, self.affine
        )

    def profile_module(self, input: Tensor) -> (Tensor, float, float):
        """Return (unchanged input, parameter count, 0.0 MACs) for profiling."""
        # fixed: the original final line had dataset score columns fused onto
        # the return statement
        params = sum(p.numel() for p in self.parameters())
        return input, params, 0.0
import numpy as np
from pysgpp import HashGridPoint
from pysgpp.extensions.datadriven.uq.operations import createGrid, getBasis
from pysgpp.extensions.datadriven.uq.quadrature.linearform.LinearGaussQuadratureStrategy import LinearGaussQuadratureStrategy
from pysgpp.extensions.datadriven.uq.quadrature import getIntegral
def __doMarginalize(grid, alpha, linearForm, dd, measure=None):
    """Marginalize a sparse-grid function over dimension dd.

    Builds a new (dim-1)-dimensional grid by dropping dimension dd and
    accumulates the surplus coefficients, each weighted by the 1D integral
    of its basis function in direction dd.

    NOTE(review): uses xrange, so this module targets Python 2.

    grid, alpha -- sparse grid and surplus vector to marginalize
    linearForm  -- quadrature strategy, used only when measure is given
    dd          -- dimension index to integrate out
    measure     -- optional (distributions, transformations) pair, indexed per dim
    returns (new_grid, new_alpha, err)
    """
    gs = grid.getStorage()
    dim = gs.getDimension()
    if dim < 2:
        raise AttributeError("The grid has to be at least of dimension 2")
    if dd >= dim:
        raise AttributeError("The grid has only %i dimensions, so I can't \
integrate over %i" % (dim, dd))
    # create new grid
    n_dim = dim - 1
    n_grid = createGrid(grid, n_dim)
    n_gs = n_grid.getStorage()
    # insert grid points
    n_gp = HashGridPoint(n_dim)
    for i in xrange(gs.getSize()):
        gp = gs.getPoint(i)
        # copy level/index for every dimension except dd, shifting the
        # dimensions above dd down by one
        for d in range(dim):
            if d == dd:
                # omit marginalization direction
                continue
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))
        # insert grid point
        if not n_gs.isContaining(n_gp):
            n_gs.insert(n_gp)
    n_gs.recalcLeafProperty()
    # create coefficient vector
    n_alpha = np.zeros(n_gs.getSize())
    basis = getBasis(grid)
    # set function values for n_alpha
    for i in xrange(gs.getSize()):
        gp = gs.getPoint(i)
        for d in range(dim):
            if d == dd:
                # remember the 1D level/index in the marginalized direction
                dd_level = gp.getLevel(d)
                dd_index = gp.getIndex(d)
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))
        if not n_gs.isContaining(n_gp):
            raise Exception("This should not happen!")
        # compute the integral of the given basis
        if measure is None:
            q, err = getIntegral(grid, dd_level, dd_index), 0.
        else:
            dist, trans = measure[0][dd], measure[1][dd]
            linearForm.setDistributionAndTransformation([dist], [trans])
            gpdd = HashGridPoint(1)
            gpdd.set(0, dd_level, dd_index)
            q, err = linearForm.computeLinearFormByList(gs, [gpdd], basis)
            # scale integral and error by the transformation volume
            q = q[0] * trans.vol()
            err *= trans.vol()
        # search for the corresponding index
        j = n_gs.getSequenceNumber(n_gp)
        n_alpha[j] += alpha[i] * q
    # NOTE(review): err reflects only the last processed grid point —
    # confirm whether an accumulated error is intended
    return n_grid, n_alpha, err
def doMarginalize(grid, alpha, linearForm, dd, measure=None):
    """Marginalize a sparse-grid function over one or several dimensions.

    dd may be a single dimension index (int/long) or an iterable of indices;
    iterables are processed highest-dimension-first so the remaining indices
    stay valid as the grid shrinks.
    returns (new_grid, new_alpha, err)
    """
    if isinstance(dd, (int, long)):
        # BUG FIX: measure was previously dropped for the single-dimension case
        return __doMarginalize(grid, alpha, linearForm, dd, measure=measure)
    # ROBUSTNESS: err was unbound when dd was an empty iterable
    n_grid, n_alpha, err = grid, alpha, 0.
    for d in sorted(dd, reverse=True):
        n_grid, n_alpha, err = __doMarginalize(n_grid, n_alpha, linearForm, d, measure=measure)
    return n_grid, n_alpha, err

# NOTE(review): the next module in this concatenated dump begins here; its
# fused "import numpy as np" line is restored below.
import numpy as np
from pysgpp import HashGridPoint
from pysgpp.extensions.datadriven.uq.operations import createGrid, getBasis
from pysgpp.extensions.datadriven.uq.quadrature.linearform.LinearGaussQuadratureStrategy import LinearGaussQuadratureStrategy
from pysgpp.extensions.datadriven.uq.quadrature import getIntegral
def __doMarginalize(grid, alpha, linearForm, dd, measure=None):
    """Marginalize a sparse-grid function over dimension dd.

    Drops dimension dd from every grid point (shifting higher dimensions
    down) and accumulates the surpluses weighted by the 1D basis integral
    in direction dd.  Python 2 code (xrange).
    returns (new_grid, new_alpha, err)
    """
    gs = grid.getStorage()
    dim = gs.getDimension()
    if dim < 2:
        raise AttributeError("The grid has to be at least of dimension 2")
    if dd >= dim:
        raise AttributeError("The grid has only %i dimensions, so I can't \
integrate over %i" % (dim, dd))
    # create new grid
    n_dim = dim - 1
    n_grid = createGrid(grid, n_dim)
    n_gs = n_grid.getStorage()
    # insert grid points
    n_gp = HashGridPoint(n_dim)
    for i in xrange(gs.getSize()):
        gp = gs.getPoint(i)
        for d in range(dim):
            if d == dd:
                # omit marginalization direction
                continue
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                # dimensions above dd shift down by one
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))
        # insert grid point
        if not n_gs.isContaining(n_gp):
            n_gs.insert(n_gp)
    n_gs.recalcLeafProperty()
    # create coefficient vector
    n_alpha = np.zeros(n_gs.getSize())
    basis = getBasis(grid)
    # set function values for n_alpha
    for i in xrange(gs.getSize()):
        gp = gs.getPoint(i)
        for d in range(dim):
            if d == dd:
                dd_level = gp.getLevel(d)
                dd_index = gp.getIndex(d)
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))
        if not n_gs.isContaining(n_gp):
            raise Exception("This should not happen!")
        # compute the integral of the given basis
        if measure is None:
            q, err = getIntegral(grid, dd_level, dd_index), 0.
        else:
            dist, trans = measure[0][dd], measure[1][dd]
            linearForm.setDistributionAndTransformation([dist], [trans])
            gpdd = HashGridPoint(1)
            gpdd.set(0, dd_level, dd_index)
            q, err = linearForm.computeLinearFormByList(gs, [gpdd], basis)
            q = q[0] * trans.vol()
            err *= trans.vol()
        # search for the corresponding index
        j = n_gs.getSequenceNumber(n_gp)
        n_alpha[j] += alpha[i] * q
    # NOTE(review): err is from the last processed point only — confirm intent
    return n_grid, n_alpha, err
def doMarginalize(grid, alpha, linearForm, dd, measure=None):
    """Marginalize a sparse-grid function over one or several dimensions.

    dd may be a single dimension index (int/long) or an iterable of indices;
    iterables are processed highest-dimension-first so the remaining indices
    stay valid as the grid shrinks.
    returns (new_grid, new_alpha, err)
    """
    if isinstance(dd, (int, long)):
        # BUG FIX: measure was previously dropped for the single-dimension case
        return __doMarginalize(grid, alpha, linearForm, dd, measure=measure)
    # ROBUSTNESS: err was unbound when dd was an empty iterable; also the
    # original final line carried fused dataset score columns (syntax error)
    n_grid, n_alpha, err = grid, alpha, 0.
    for d in sorted(dd, reverse=True):
        n_grid, n_alpha, err = __doMarginalize(n_grid, n_alpha, linearForm, d, measure=measure)
    return n_grid, n_alpha, err
# Import TensorFlow and other library
import tensorflow as tf
import numpy as np
import os
import time
# Download the Shakespeare dataset
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org'
'/data/shakespeare.txt')
"""### Read the data
First, look in the text:
"""
# Read, then decode for py2 compat.
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
# length of text is the number of characters in it
print('Length of text: {} characters'.format(len(text)))
# Take a look at the first 250 characters in text
print(text[:250])
# The unique characters in the file
vocab = sorted(set(text))
print('{} unique characters'.format(len(vocab)))
"""## Process the text
### Vectorize the text
Before training, we need to map strings to a numerical representation. Create two lookup tables: one mapping characters to numbers, and another for numbers to characters.
"""
# Creating a mapping from unique characters to indices
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
text_as_int = np.array([char2idx[c] for c in text])
"""Now we have an integer representation for each character. Notice that we mapped the character as indexes from 0 to
`len(unique)`. """
print('{')
for char, _ in zip(char2idx, range(20)):
print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))
print(' ...\n}')
# Show how the first 13 characters from the text are mapped to integers
print('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))
"""### The prediction task
Given a character, or a sequence of characters, what is the most probable next character? This is the task we're
training the model to perform. The input to the model will be a sequence of characters, and we train the model to
predict the output—the following character at each time step.
Since RNNs maintain an internal state that depends on the previously seen elements, given all the characters computed until this moment, what is the next character?
### Create training examples and targets
Next divide the text into example sequences. Each input sequence will contain `seq_length` characters from the text.
For each input sequence, the corresponding targets contain the same length of text, except shifted one character to the right.
So break the text into chunks of `seq_length+1`. For example, say `seq_length` is 4 and our text is "Hello". The input sequence would be "Hell", and the target sequence "ello".
To do this first use the `tf.data.Dataset.from_tensor_slices` function to convert the text vector into a stream of character indices.
"""
# The maximum length sentence we want for a single input in characters
seq_length = 100
examples_per_epoch = len(text) // seq_length
# Create training examples / targets
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
for i in char_dataset.take(5):
print(idx2char[i.numpy()])
"""The `batch` method lets us easily convert these individual characters to sequences of the desired size."""
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)
for item in sequences.take(5):
print(repr(''.join(idx2char[item.numpy()])))
"""For each sequence, duplicate and shift it to form the input and target text by using the `map` method to apply a
simple function to each batch: """
def split_input_target(chunk):
    """Split a sequence into (input, target): all-but-last and all-but-first.

    For "Hello" this yields ("Hell", "ello") — the target is the input
    shifted one step to the right.
    """
    return chunk[:-1], chunk[1:]
dataset = sequences.map(split_input_target)
"""Print the first examples input and target values:"""
for input_example, target_example in dataset.take(1):
print('Input data: ', repr(''.join(idx2char[input_example.numpy()])))
print('Target data:', repr(''.join(idx2char[target_example.numpy()])))
"""Each index of these vectors are processed as one time step. For the input at time step 0, the model receives the index for "F" and trys to predict the index for "i" as the next character. At the next timestep, it does the same thing but the `RNN` considers the previous step context in addition to the current input character."""
for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
print("Step {:4d}".format(i))
print(" input: {} ({:s})".format(input_idx, repr(idx2char[input_idx])))
print(" expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx])))
"""### Create training batches
We used `tf.data` to split the text into manageable sequences. But before feeding this data into the model, we need to shuffle the data and pack it into batches.
"""
# Batch size
BATCH_SIZE = 64
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
"""## Build The Model
Use `tf.keras.Sequential` to define the model. For this simple example three layers are used to define our model:
* `tf.keras.layers.Embedding`: The input layer. A trainable lookup table that will map the numbers of each character to a vector with `embedding_dim` dimensions;
* `tf.keras.layers.GRU`: A type of RNN with size `units=rnn_units` (You can also use a LSTM layer here.)
* `tf.keras.layers.Dense`: The output layer, with `vocab_size` outputs.
"""
# Length of the vocabulary in chars
vocab_size = len(vocab)
# The embedding dimension
embedding_dim = 256
# Number of RNN units
rnn_units = 1024
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Build the char-level model: Embedding -> LSTM -> Dense(vocab_size).

    The Dense layer outputs raw logits (no softmax); batch_size is baked
    into the input shape because the LSTM is stateful.

    NOTE(review): the surrounding prose describes a GRU layer, but the code
    uses LSTM — confirm which is intended.
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                  batch_input_shape=[batch_size, None]),
        tf.keras.layers.LSTM(rnn_units,
                             return_sequences=True,
                             stateful=True,
                             recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(vocab_size)
    ])
    return model
model = build_model(
vocab_size=len(vocab),
embedding_dim=embedding_dim,
rnn_units=rnn_units,
batch_size=BATCH_SIZE)
"""For each character the model looks up the embedding, runs the GRU one timestep with the embedding as input, and applies the dense layer to generate logits predicting the log-liklihood of the next character:

## Try the model
Now run the model to see that it behaves as expected.
First check the shape of the output:
"""
for input_example_batch, target_example_batch in dataset.take(1):
example_batch_predictions = model(input_example_batch)
print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
"""In the above example the sequence length of the input is `100` but the model can be run on inputs of any length:"""
model.summary()
"""To get actual predictions from the model we need to sample from the output distribution, to get actual character indices. This distribution is defined by the logits over the character vocabulary.
Note: It is important to _sample_ from this distribution as taking the _argmax_ of the distribution can easily get the model stuck in a loop.
Try it for the first example in the batch:
"""
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices, axis=-1).numpy()
"""This gives us, at each timestep, a prediction of the next character index:"""
"""Decode these to see the text predicted by this untrained model:"""
print("Input: \n", repr("".join(idx2char[input_example_batch[0]])))
print()
print("Next Char Predictions: \n", repr("".join(idx2char[sampled_indices])))
"""## Train the model
At this point the problem can be treated as a standard classification problem. Given the previous RNN state, and the input this time step, predict the class of the next character.
### Attach an optimizer, and a loss function
The standard `tf.keras.losses.sparse_softmax_crossentropy` loss function works in this case because it is applied across the last dimension of the predictions.
Because our model returns logits, we need to set the `from_logits` flag.
"""
def loss(labels, logits):
    """Sparse categorical cross-entropy; from_logits=True because the
    model's final Dense layer emits raw logits (no softmax)."""
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
example_batch_loss = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("scalar_loss: ", example_batch_loss.numpy().mean())
"""Configure the training procedure using the `tf.keras.Model.compile` method. We'll use `tf.keras.optimizers.Adam` with default arguments and the loss function."""
model.compile(optimizer='adam', loss=loss)
"""### Configure checkpoints
Use a `tf.keras.callbacks.ModelCheckpoint` to ensure that checkpoints are saved during training:
"""
# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True)
"""### Execute the training
To keep training time reasonable, use 10 epochs to train the model. In Colab, set the runtime to GPU for faster training.
"""
EPOCHS = 10
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
"""## Generate text
### Restore the latest checkpoint
To keep this prediction step simple, use a batch size of 1.
Because of the way the RNN state is passed from timestep to timestep, the model only accepts a fixed batch size once built.
To run the model with a different `batch_size`, we need to rebuild the model and restore the weights from the checkpoint.
"""
tf.train.latest_checkpoint(checkpoint_dir)
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))
model.summary()
"""### The prediction loop
The following code block generates the text:
* It Starts by choosing a start string, initializing the RNN state and setting the number of characters to generate.
* Get the prediction distribution of the next character using the start string and the RNN state.
* Then, use a categorical distribution to calculate the index of the predicted character. Use this predicted character as our next input to the model.
* The RNN state returned by the model is fed back into the model so that it now has more context, instead than only one word. After predicting the next word, the modified RNN states are again fed back into the model, which is how it learns as it gets more context from the previously predicted words.

Looking at the generated text, you'll see the model knows when to capitalize, make paragraphs and imitates a Shakespeare-like writing vocabulary. With the small number of training epochs, it has not yet learned to form coherent sentences.
"""
def generate_text(model, start_string):
    """Generate text from the trained model, seeded with *start_string*.

    Feeds the seed through the stateful model, then repeatedly samples the
    next character from the temperature-scaled output distribution and
    feeds it back in as the next input.
    """
    # Number of characters to generate.
    num_generate = 1000
    # Lower temperature -> more predictable text; higher -> more surprising.
    # Experiment to find the best setting.
    temperature = 1.0
    # Vectorize the seed string and add a batch dimension (batch size == 1).
    input_eval = tf.expand_dims([char2idx[ch] for ch in start_string], 0)
    generated_chars = []
    model.reset_states()
    for _ in range(num_generate):
        # Drop the batch dimension and scale the logits by temperature.
        logits = tf.squeeze(model(input_eval), 0) / temperature
        # Sample (rather than argmax) so the model doesn't get stuck in a loop.
        next_id = tf.random.categorical(logits, num_samples=1)[-1, 0].numpy()
        # The sampled character becomes the next input; the stateful RNN
        # layers carry their hidden state across iterations.
        input_eval = tf.expand_dims([next_id], 0)
        generated_chars.append(idx2char[next_id])
    return start_string + ''.join(generated_chars)
print(generate_text(model, start_string=u"ROMEO: "))
"""The easiest thing you can do to improve the results it to train it for longer (try `EPOCHS=30`).
You can also experiment with a different start string, or try adding another RNN layer to improve the model's accuracy, or adjusting the temperature parameter to generate more or less random predictions.
## Advanced: Customized Training
The above training procedure is simple, but does not give you much control.
So now that you've seen how to run the model manually let's unpack the training loop, and implement it ourselves. This gives a starting point if, for example, you want to implement _curriculum learning_ to help stabilize the model's open-loop output.
We will use `tf.GradientTape` to track the gradients. You can learn more about this approach by reading the [eager execution guide](https://www.tensorflow.org/guide/eager).
The procedure works as follows:
* First, initialize the RNN state. We do this by calling the `tf.keras.Model.reset_states` method.
* Next, iterate over the dataset (batch by batch) and calculate the *predictions* associated with each.
* Open a `tf.GradientTape`, and calculate the predictions and loss in that context.
* Calculate the gradients of the loss with respect to the model variables using the `tf.GradientTape.gradient` method.
* Finally, take a step downwards by using the optimizer's `tf.train.Optimizer.apply_gradients` method.
"""
# Rebuild the model with the training batch size for the custom loop below.
model = build_model(
    vocab_size=len(vocab),
    embedding_dim=embedding_dim,
    rnn_units=rnn_units,
    batch_size=BATCH_SIZE)
# Adam with default hyperparameters.
optimizer = tf.keras.optimizers.Adam()
@tf.function
def train_step(inp, target):
    """Run one optimization step and return the mean batch loss."""
    # Record the forward pass so gradients can be taken from it.
    with tf.GradientTape() as tape:
        logits = model(inp)
        per_char_loss = tf.keras.losses.sparse_categorical_crossentropy(
            target, logits, from_logits=True)
        loss = tf.reduce_mean(per_char_loss)
    trainable = model.trainable_variables
    grads = tape.gradient(loss, trainable)
    optimizer.apply_gradients(zip(grads, trainable))
    return loss
# Training step
EPOCHS = 10
for epoch in range(EPOCHS):
    start = time.time()
    # Reset the stateful RNN layers at the start of every epoch.
    # NOTE(review): reset_states() returns None, so `hidden` is always None;
    # the assignment has no effect beyond the reset side effect.
    hidden = model.reset_states()
    for (batch_n, (inp, target)) in enumerate(dataset):
        loss = train_step(inp, target)
        # Log progress every 100 batches.
        if batch_n % 100 == 0:
            template = 'Epoch {} Batch {} Loss {}'
            print(template.format(epoch + 1, batch_n, loss))
    # saving (checkpoint) the model every 5 epochs
    if (epoch + 1) % 5 == 0:
        model.save_weights(checkpoint_prefix.format(epoch=epoch))
    print('Epoch {} Loss {:.4f}'.format(epoch + 1, loss))
    print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
model.save_weights(checkpoint_prefix.format(epoch=epoch)) | Experts_tutorial/Text/text_generation.py | # Import TensorFlow and other library
import tensorflow as tf
import numpy as np
import os
import time
# Download the Shakespeare dataset
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org'
'/data/shakespeare.txt')
"""### Read the data
First, look in the text:
"""
# Read, then decode for py2 compat.
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
# length of text is the number of characters in it
print('Length of text: {} characters'.format(len(text)))
# Take a look at the first 250 characters in text
print(text[:250])
# The unique characters in the file
vocab = sorted(set(text))
print('{} unique characters'.format(len(vocab)))
"""## Process the text
### Vectorize the text
Before training, we need to map strings to a numerical representation. Create two lookup tables: one mapping characters to numbers, and another for numbers to characters.
"""
# Creating a mapping from unique characters to indices
char2idx = {u: i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
text_as_int = np.array([char2idx[c] for c in text])
"""Now we have an integer representation for each character. Notice that we mapped the character as indexes from 0 to
`len(unique)`. """
print('{')
for char, _ in zip(char2idx, range(20)):
print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))
print(' ...\n}')
# Show how the first 13 characters from the text are mapped to integers
print('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))
"""### The prediction task
Given a character, or a sequence of characters, what is the most probable next character? This is the task we're
training the model to perform. The input to the model will be a sequence of characters, and we train the model to
predict the output—the following character at each time step.
Since RNNs maintain an internal state that depends on the previously seen elements, given all the characters computed until this moment, what is the next character?
### Create training examples and targets
Next divide the text into example sequences. Each input sequence will contain `seq_length` characters from the text.
For each input sequence, the corresponding targets contain the same length of text, except shifted one character to the right.
So break the text into chunks of `seq_length+1`. For example, say `seq_length` is 4 and our text is "Hello". The input sequence would be "Hell", and the target sequence "ello".
To do this first use the `tf.data.Dataset.from_tensor_slices` function to convert the text vector into a stream of character indices.
"""
# The maximum length sentence we want for a single input in characters
seq_length = 100
examples_per_epoch = len(text) // seq_length
# Create training examples / targets
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
# Sanity check: decode and print the first few characters.
for i in char_dataset.take(5):
    print(idx2char[i.numpy()])
"""The `batch` method lets us easily convert these individual characters to sequences of the desired size."""
# seq_length + 1 because each chunk yields an input AND a one-step-shifted target.
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)
for item in sequences.take(5):
    print(repr(''.join(idx2char[item.numpy()])))
"""For each sequence, duplicate and shift it to form the input and target text by using the `map` method to apply a
simple function to each batch: """
def split_input_target(chunk):
    """Split a sequence into an (input, target) pair.

    The target is the same sequence shifted left by one element, so at
    each position the model learns to predict the next character.
    """
    return chunk[:-1], chunk[1:]
dataset = sequences.map(split_input_target)
"""Print the first examples input and target values:"""
for input_example, target_example in dataset.take(1):
print('Input data: ', repr(''.join(idx2char[input_example.numpy()])))
print('Target data:', repr(''.join(idx2char[target_example.numpy()])))
"""Each index of these vectors are processed as one time step. For the input at time step 0, the model receives the index for "F" and trys to predict the index for "i" as the next character. At the next timestep, it does the same thing but the `RNN` considers the previous step context in addition to the current input character."""
for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
print("Step {:4d}".format(i))
print(" input: {} ({:s})".format(input_idx, repr(idx2char[input_idx])))
print(" expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx])))
"""### Create training batches
We used `tf.data` to split the text into manageable sequences. But before feeding this data into the model, we need to shuffle the data and pack it into batches.
"""
# Batch size
BATCH_SIZE = 64
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
# drop_remainder=True keeps every batch the same shape, which the stateful
# RNN (fixed batch_input_shape) requires.
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
"""## Build The Model
Use `tf.keras.Sequential` to define the model. For this simple example three layers are used to define our model:
* `tf.keras.layers.Embedding`: The input layer. A trainable lookup table that will map the numbers of each character to a vector with `embedding_dim` dimensions;
* `tf.keras.layers.GRU`: A type of RNN with size `units=rnn_units` (You can also use a LSTM layer here.)
* `tf.keras.layers.Dense`: The output layer, with `vocab_size` outputs.
"""
# Length of the vocabulary in chars
vocab_size = len(vocab)
# The embedding dimension
embedding_dim = 256
# Number of RNN units
rnn_units = 1024
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Build the char-level language model: Embedding -> LSTM -> Dense.

    Note: the recurrent layer here is an LSTM even though the surrounding
    tutorial text refers to a GRU. `stateful=True` plus `batch_input_shape`
    fix the batch size at build time, which is why the model must be rebuilt
    with batch_size=1 for generation.
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                  batch_input_shape=[batch_size, None]),
        tf.keras.layers.LSTM(rnn_units,
                             return_sequences=True,
                             stateful=True,
                             recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(vocab_size)  # unnormalized logits over the vocab
    ])
    return model
model = build_model(
vocab_size=len(vocab),
embedding_dim=embedding_dim,
rnn_units=rnn_units,
batch_size=BATCH_SIZE)
"""For each character the model looks up the embedding, runs the GRU one timestep with the embedding as input, and applies the dense layer to generate logits predicting the log-liklihood of the next character:

## Try the model
Now run the model to see that it behaves as expected.
First check the shape of the output:
"""
for input_example_batch, target_example_batch in dataset.take(1):
example_batch_predictions = model(input_example_batch)
print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
"""In the above example the sequence length of the input is `100` but the model can be run on inputs of any length:"""
model.summary()
"""To get actual predictions from the model we need to sample from the output distribution, to get actual character indices. This distribution is defined by the logits over the character vocabulary.
Note: It is important to _sample_ from this distribution as taking the _argmax_ of the distribution can easily get the model stuck in a loop.
Try it for the first example in the batch:
"""
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices, axis=-1).numpy()
"""This gives us, at each timestep, a prediction of the next character index:"""
"""Decode these to see the text predicted by this untrained model:"""
print("Input: \n", repr("".join(idx2char[input_example_batch[0]])))
print()
print("Next Char Predictions: \n", repr("".join(idx2char[sampled_indices])))
"""## Train the model
At this point the problem can be treated as a standard classification problem. Given the previous RNN state, and the input this time step, predict the class of the next character.
### Attach an optimizer, and a loss function
The standard `tf.keras.losses.sparse_softmax_crossentropy` loss function works in this case because it is applied across the last dimension of the predictions.
Because our model returns logits, we need to set the `from_logits` flag.
"""
def loss(labels, logits):
    """Per-timestep sparse categorical cross-entropy.

    `from_logits=True` because the model's final Dense layer outputs
    unnormalized logits rather than probabilities.
    """
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
example_batch_loss = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("scalar_loss: ", example_batch_loss.numpy().mean())
"""Configure the training procedure using the `tf.keras.Model.compile` method. We'll use `tf.keras.optimizers.Adam` with default arguments and the loss function."""
model.compile(optimizer='adam', loss=loss)
"""### Configure checkpoints
Use a `tf.keras.callbacks.ModelCheckpoint` to ensure that checkpoints are saved during training:
"""
# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True)
"""### Execute the training
To keep training time reasonable, use 10 epochs to train the model. In Colab, set the runtime to GPU for faster training.
"""
EPOCHS = 10
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
"""## Generate text
### Restore the latest checkpoint
To keep this prediction step simple, use a batch size of 1.
Because of the way the RNN state is passed from timestep to timestep, the model only accepts a fixed batch size once built.
To run the model with a different `batch_size`, we need to rebuild the model and restore the weights from the checkpoint.
"""
tf.train.latest_checkpoint(checkpoint_dir)
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))
model.summary()
"""### The prediction loop
The following code block generates the text:
* It starts by choosing a start string, initializing the RNN state and setting the number of characters to generate.
* Get the prediction distribution of the next character using the start string and the RNN state.
* Then, use a categorical distribution to calculate the index of the predicted character. Use this predicted character as our next input to the model.
* The RNN state returned by the model is fed back into the model so that it now has more context, instead of only one word. After predicting the next word, the modified RNN states are again fed back into the model, which is how it learns as it gets more context from the previously predicted words.

Looking at the generated text, you'll see the model knows when to capitalize, make paragraphs and imitates a Shakespeare-like writing vocabulary. With the small number of training epochs, it has not yet learned to form coherent sentences.
"""
def generate_text(model, start_string):
    """Generate `num_generate` characters from the trained model.

    Seeds the stateful model with *start_string*, then repeatedly samples
    the next character id from the temperature-scaled output distribution
    and feeds it back in as the next input.
    """
    # Evaluation step (generating text using the learned model)
    # Number of characters to generate
    num_generate = 1000
    # Converting our start string to numbers (vectorizing)
    input_eval = [char2idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)
    # Accumulates the generated characters.
    text_generated = []
    # Low temperatures result in more predictable text;
    # higher temperatures result in more surprising text.
    # Experiment to find the best setting.
    temperature = 1.0
    # Here batch size == 1
    model.reset_states()
    for i in range(num_generate):
        predictions = model(input_eval)
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0)
        # Scale the logits by temperature before sampling.
        predictions = predictions / temperature
        # Sample (not argmax) to avoid the model getting stuck in a loop.
        predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()
        # Pass the predicted character as the next input to the model;
        # the stateful layers carry the hidden state between iterations.
        input_eval = tf.expand_dims([predicted_id], 0)
        text_generated.append(idx2char[predicted_id])
    return start_string + ''.join(text_generated)
print(generate_text(model, start_string=u"ROMEO: "))
"""The easiest thing you can do to improve the results it to train it for longer (try `EPOCHS=30`).
You can also experiment with a different start string, or try adding another RNN layer to improve the model's accuracy, or adjusting the temperature parameter to generate more or less random predictions.
## Advanced: Customized Training
The above training procedure is simple, but does not give you much control.
So now that you've seen how to run the model manually let's unpack the training loop, and implement it ourselves. This gives a starting point if, for example, you want to implement _curriculum learning_ to help stabilize the model's open-loop output.
We will use `tf.GradientTape` to track the gradients. You can learn more about this approach by reading the [eager execution guide](https://www.tensorflow.org/guide/eager).
The procedure works as follows:
* First, initialize the RNN state. We do this by calling the `tf.keras.Model.reset_states` method.
* Next, iterate over the dataset (batch by batch) and calculate the *predictions* associated with each.
* Open a `tf.GradientTape`, and calculate the predictions and loss in that context.
* Calculate the gradients of the loss with respect to the model variables using the `tf.GradientTape.gradient` method.
* Finally, take a step downwards by using the optimizer's `tf.train.Optimizer.apply_gradients` method.
"""
model = build_model(
vocab_size=len(vocab),
embedding_dim=embedding_dim,
rnn_units=rnn_units,
batch_size=BATCH_SIZE)
optimizer = tf.keras.optimizers.Adam()
@tf.function
def train_step(inp, target):
    """One optimization step; returns the mean per-character loss."""
    # Record the forward pass so gradients can be taken from it.
    with tf.GradientTape() as tape:
        predictions = model(inp)
        loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                target, predictions, from_logits=True))
    # Backpropagate and apply the update with the module-level optimizer.
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
# Training step
EPOCHS = 10
for epoch in range(EPOCHS):
    start = time.time()
    # Reset the stateful RNN layers at the start of every epoch.
    # NOTE(review): reset_states() returns None, so `hidden` is always None;
    # the assignment has no effect beyond the reset side effect.
    hidden = model.reset_states()
    for (batch_n, (inp, target)) in enumerate(dataset):
        loss = train_step(inp, target)
        # Log progress every 100 batches.
        if batch_n % 100 == 0:
            template = 'Epoch {} Batch {} Loss {}'
            print(template.format(epoch + 1, batch_n, loss))
    # saving (checkpoint) the model every 5 epochs
    if (epoch + 1) % 5 == 0:
        model.save_weights(checkpoint_prefix.format(epoch=epoch))
    print('Epoch {} Loss {:.4f}'.format(epoch + 1, loss))
    print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
model.save_weights(checkpoint_prefix.format(epoch=epoch)) | 0.863794 | 0.696449 |
from django.urls import path
from userextensions import views
from userextensions.views import ajax
from userextensions.views import action
# URL namespace: reverse these routes as 'userextensions:<name>'.
app_name = 'userextensions'
urlpatterns = [
    # list views
    path('list_recents/', views.ListRecents.as_view(), name='list_recents'),
    path('list_favorites/', views.ListFavorites.as_view(), name='list_favorites'),
    # detail views
    path('detail_user/', views.DetailUser.as_view(), name='detail_user'),
    # create views
    path('add_favorite/', views.AddFavorite.as_view(), name='add_favorite'),
    # update views
    path('set_start_page/', views.SetStartPage.as_view(), name='set_start_page'),
    # delete views
    path('delete_favorite/<int:pk>', views.DeleteFavorite.as_view(), name='delete_favorite'),
    path('delete_recent/<int:pk>', views.DeleteRecent.as_view(), name='delete_recent'),
    # custom views
    path('user_login_redirect/', views.UserLoginRedirect.as_view(), name='user_login_redirect'),
    path('manage_service_accounts/', views.ManageServiceAccounts.as_view(), name='manage_service_accounts'),
    # action views
    # NOTE(review): the action/ajax routes below omit the trailing slash used
    # by the routes above — an inconsistency, but normalizing it would change
    # live URLs, so confirm with callers before doing so.
    path('refresh_api_token', views.RefreshApiToken.as_view(), name='refresh_api_token'),
    path('refresh_srv_acct_token', views.RefreshSrvAcctApiToken.as_view(), name='refresh_srv_acct_token'),
    path('create_srv_account', action.CreateServiceAccount.as_view(), name='create_srv_account'),
    path('delete_srv_account', action.DeleteServiceAccount.as_view(), name='delete_srv_account'),
    path('enable_srv_account', action.EnableServiceAccount.as_view(), name='enable_srv_account'),
    path('disable_srv_account', action.DisableServiceAccount.as_view(), name='disable_srv_account'),
    path('edit_favorite', action.EditFavorite.as_view(), name='edit_favorite'),
    # ajax views
    path('get_users_per_group', ajax.get_users_per_group, name='get_users_per_group'),
    path('get_srv_acct_token_history', ajax.get_srv_acct_token_history, name='get_srv_acct_token_history'),
    path('show_srv_acct_token', ajax.show_srv_acct_token, name='show_srv_acct_token'),
] | userextensions/urls.py | from django.urls import path
from userextensions import views
from userextensions.views import ajax
from userextensions.views import action
# URL namespace: reverse these routes as 'userextensions:<name>'.
app_name = 'userextensions'
urlpatterns = [
    # list views
    path('list_recents/', views.ListRecents.as_view(), name='list_recents'),
    path('list_favorites/', views.ListFavorites.as_view(), name='list_favorites'),
    # detail views
    path('detail_user/', views.DetailUser.as_view(), name='detail_user'),
    # create views
    path('add_favorite/', views.AddFavorite.as_view(), name='add_favorite'),
    # update views
    path('set_start_page/', views.SetStartPage.as_view(), name='set_start_page'),
    # delete views
    path('delete_favorite/<int:pk>', views.DeleteFavorite.as_view(), name='delete_favorite'),
    path('delete_recent/<int:pk>', views.DeleteRecent.as_view(), name='delete_recent'),
    # custom views
    path('user_login_redirect/', views.UserLoginRedirect.as_view(), name='user_login_redirect'),
    path('manage_service_accounts/', views.ManageServiceAccounts.as_view(), name='manage_service_accounts'),
    # action views
    # NOTE(review): these action/ajax routes omit the trailing slash used by
    # the routes above — confirm before normalizing, since changing a URL
    # breaks existing links.
    path('refresh_api_token', views.RefreshApiToken.as_view(), name='refresh_api_token'),
    path('refresh_srv_acct_token', views.RefreshSrvAcctApiToken.as_view(), name='refresh_srv_acct_token'),
    path('create_srv_account', action.CreateServiceAccount.as_view(), name='create_srv_account'),
    path('delete_srv_account', action.DeleteServiceAccount.as_view(), name='delete_srv_account'),
    path('enable_srv_account', action.EnableServiceAccount.as_view(), name='enable_srv_account'),
    path('disable_srv_account', action.DisableServiceAccount.as_view(), name='disable_srv_account'),
    path('edit_favorite', action.EditFavorite.as_view(), name='edit_favorite'),
    # ajax views
    path('get_users_per_group', ajax.get_users_per_group, name='get_users_per_group'),
    path('get_srv_acct_token_history', ajax.get_srv_acct_token_history, name='get_srv_acct_token_history'),
    path('show_srv_acct_token', ajax.show_srv_acct_token, name='show_srv_acct_token'),
] | 0.293404 | 0.060502 |
__all__ = ('ClipboardAndroid', )
from kivy.core.clipboard import ClipboardBase
from kivy.clock import Clock
from jnius import autoclass, cast
from android.runnable import run_on_ui_thread
AndroidString = autoclass('java.lang.String')
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Context = autoclass('android.content.Context')
ClipData = autoclass('android.content.ClipData')
ClipDescription = autoclass('android.content.ClipDescription')
class ClipboardAndroid(ClipboardBase):
    """Kivy clipboard provider for Android, backed by the platform
    ClipboardManager obtained through pyjnius.

    Only plain text is currently supported; other mime types are placeholders.
    """
    def __init__(self):
        super(ClipboardAndroid, self).__init__()
        # Cached ClipboardManager; resolved lazily (see _get_clipboard_service).
        self._clipboard = None
        # Mime types advertised via get_types(); the values are unused.
        self._data = dict()
        self._data['text/plain'] = None
        self._data['application/data'] = None
        self._get_clipboard_service()
    def get(self, mimetype='text/plain'):
        # Public API: read clipboard content.
        return self._get(mimetype)
    def put(self, data, mimetype='text/plain'):
        # Public API: write clipboard content.
        self._set(data, mimetype)
    def get_types(self):
        # Mime types this provider claims to support.
        return list(self._data.keys())
    @run_on_ui_thread
    def _initialize_clipboard(self):
        # Must run on the Android UI thread; caches the system service on the
        # activity class so later lookups are cheap.
        PythonActivity._clipboard = PythonActivity.getSystemService(
            Context.CLIPBOARD_SERVICE)
    def _get_clipboard_service(self):
        # Resolve (and cache) the ClipboardManager.
        # NOTE(review): initialization runs asynchronously on the UI thread,
        # so the first call may return None and reschedule itself via Clock;
        # confirm callers (_get/_set) tolerate that window.
        if not self._clipboard:
            self._initialize_clipboard()
            try:
                self._clipboard = PythonActivity._clipboard
            except AttributeError:
                # don't know why but this happens when trying to access the
                # clipboard for the first time. Works after that
                Clock.schedule_once(lambda dt: self._get_clipboard_service())
                return
        return self._clipboard
    def _get(self, mimetype='text/plain'):
        # Only text/plain is honored regardless of the requested mimetype.
        clippy = self._get_clipboard_service()
        primary_clip = clippy.getPrimaryClip()
        if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
                ClipDescription.MIMETYPE_TEXT_PLAIN):
            data = primary_clip.getItemAt(0).getText().toString()
        else:
            # TODO: non text data types Not yet implemented
            data = ''
        return data
    def _set(self, data, mimetype):
        # mimetype is ignored: everything is stored as plain text.
        clippy = self._get_clipboard_service()
        new_clip = ClipData.newPlainText(AndroidString(""),
                                         AndroidString(data))
        # put text data onto clipboard
clippy.setPrimaryClip(new_clip) | kivy/core/clipboard/clipboard_android.py | __all__ = ('ClipboardAndroid', )
from kivy.core.clipboard import ClipboardBase
from kivy.clock import Clock
from jnius import autoclass, cast
from android.runnable import run_on_ui_thread
AndroidString = autoclass('java.lang.String')
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Context = autoclass('android.content.Context')
ClipData = autoclass('android.content.ClipData')
ClipDescription = autoclass('android.content.ClipDescription')
class ClipboardAndroid(ClipboardBase):
    """Android clipboard provider talking to CLIPBOARD_SERVICE via pyjnius.

    Only plain-text data is supported (see TODO in _get).
    """
    def __init__(self):
        super(ClipboardAndroid, self).__init__()
        # Lazily-resolved ClipboardManager instance.
        self._clipboard = None
        # Advertised mime types; values are unused placeholders.
        self._data = dict()
        self._data['text/plain'] = None
        self._data['application/data'] = None
        self._get_clipboard_service()
    def get(self, mimetype='text/plain'):
        return self._get(mimetype)
    def put(self, data, mimetype='text/plain'):
        self._set(data, mimetype)
    def get_types(self):
        return list(self._data.keys())
    @run_on_ui_thread
    def _initialize_clipboard(self):
        # UI-thread only: store the system service on the activity class.
        PythonActivity._clipboard = PythonActivity.getSystemService(
            Context.CLIPBOARD_SERVICE)
    def _get_clipboard_service(self):
        # Resolve and cache the ClipboardManager; may return None on the very
        # first call while the UI-thread initialization is still pending
        # (retried asynchronously via Clock).
        if not self._clipboard:
            self._initialize_clipboard()
            try:
                self._clipboard = PythonActivity._clipboard
            except AttributeError:
                # don't know why but this happens when trying to access the
                # clipboard for the first time. Works after that
                Clock.schedule_once(lambda dt: self._get_clipboard_service())
                return
        return self._clipboard
    def _get(self, mimetype='text/plain'):
        # Only text/plain is read, whatever mimetype was requested.
        clippy = self._get_clipboard_service()
        primary_clip = clippy.getPrimaryClip()
        if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
                ClipDescription.MIMETYPE_TEXT_PLAIN):
            data = primary_clip.getItemAt(0).getText().toString()
        else:
            # TODO: non text data types Not yet implemented
            data = ''
        return data
    def _set(self, data, mimetype):
        # mimetype is ignored: data is always stored as plain text.
        clippy = self._get_clipboard_service()
        new_clip = ClipData.newPlainText(AndroidString(""),
                                         AndroidString(data))
        # put text data onto clipboard
clippy.setPrimaryClip(new_clip) | 0.540439 | 0.10942 |
from yoti_python_sdk.doc_scan.constants import SUPPLEMENTARY_DOCUMENT
from yoti_python_sdk.utils import remove_null_values
from .required_document import RequiredDocument
class RequiredSupplementaryDocument(RequiredDocument):
    """A supplementary document requirement for a Doc Scan session."""

    def __init__(self, objective, document_types=None, country_codes=None):
        """
        :param objective: the objective for the document
        :type objective: Objective
        :param document_types: the accepted document types
        :type document_types: list[str]
        :param country_codes: the accepted country codes
        :type country_codes: list[str]
        """
        self.__objective = objective
        self.__document_types = document_types
        self.__country_codes = country_codes

    @property
    def type(self):
        """The document-requirement type discriminator string."""
        return SUPPLEMENTARY_DOCUMENT

    def to_json(self):
        """Serialize to a JSON-compatible dict, omitting fields left at None."""
        payload = {
            "type": self.type,
            "objective": self.__objective,
            "document_types": self.__document_types,
            "country_codes": self.__country_codes,
        }
        return remove_null_values(payload)
class RequiredSupplementaryDocumentBuilder(object):
    """
    Builder used to assist the creation of a required supplementary document.
    """
    def __init__(self):
        # All fields default to None; unset fields are stripped from the JSON
        # payload by RequiredSupplementaryDocument.to_json().
        self.__objective = None
        self.__document_types = None
        self.__country_codes = None
    def with_objective(self, objective):
        """
        Sets the supplementary document objective
        :param objective: the objective
        :type objective: Objective
        :return: the builder
        :rtype: RequiredSupplementaryDocumentBuilder
        """
        self.__objective = objective
        return self
    def with_document_types(self, document_types):
        """
        Sets the supplementary document types
        :param document_types: the document types
        :type document_types: list[str]
        :return: the builder
        :rtype: RequiredSupplementaryDocumentBuilder
        """
        self.__document_types = document_types
        return self
    def with_country_codes(self, country_codes):
        """
        Sets the supplementary document country codes
        :param country_codes: the country codes
        :type country_codes: list[str]
        :return: the builder
        :rtype: RequiredSupplementaryDocumentBuilder
        """
        self.__country_codes = country_codes
        return self
    def build(self):
        """
        Builds a required supplementary document, using the values supplied to the builder
        :return: the required supplementary document
        :rtype: RequiredSupplementaryDocument
        """
        return RequiredSupplementaryDocument(
            self.__objective, self.__document_types, self.__country_codes
) | yoti_python_sdk/doc_scan/session/create/filter/required_supplementary_document.py | from yoti_python_sdk.doc_scan.constants import SUPPLEMENTARY_DOCUMENT
from yoti_python_sdk.utils import remove_null_values
from .required_document import RequiredDocument
class RequiredSupplementaryDocument(RequiredDocument):
    """A supplementary-document requirement for a Doc Scan session,
    serialized via to_json()."""
    def __init__(self, objective, document_types=None, country_codes=None):
        """
        :param objective: the objective for the document
        :type objective: Objective
        :param document_types: the document types
        :type document_types: list[str]
        :param country_codes: the country codes
        :type country_codes: list[str]
        """
        self.__objective = objective
        self.__document_types = document_types
        self.__country_codes = country_codes
    @property
    def type(self):
        # Discriminator string expected by the Doc Scan API.
        return SUPPLEMENTARY_DOCUMENT
    def to_json(self):
        # remove_null_values drops keys left at None so they are omitted
        # from the request payload entirely.
        return remove_null_values(
            {
                "type": self.type,
                "objective": self.__objective,
                "document_types": self.__document_types,
                "country_codes": self.__country_codes,
            }
        )
class RequiredSupplementaryDocumentBuilder(object):
    """Fluent builder for a required supplementary document."""

    def __init__(self):
        # Unset fields stay None and are omitted from the JSON payload.
        self.__objective = None
        self.__document_types = None
        self.__country_codes = None

    def with_objective(self, objective):
        """Set the supplementary document objective.

        :param objective: the objective
        :type objective: Objective
        :return: the builder
        :rtype: RequiredSupplementaryDocumentBuilder
        """
        self.__objective = objective
        return self

    def with_document_types(self, document_types):
        """Set the accepted supplementary document types.

        :param document_types: the document types
        :type document_types: list[str]
        :return: the builder
        :rtype: RequiredSupplementaryDocumentBuilder
        """
        self.__document_types = document_types
        return self

    def with_country_codes(self, country_codes):
        """Set the accepted supplementary document country codes.

        :param country_codes: the country codes
        :type country_codes: list[str]
        :return: the builder
        :rtype: RequiredSupplementaryDocumentBuilder
        """
        self.__country_codes = country_codes
        return self

    def build(self):
        """Create the required supplementary document from the supplied values.

        :return: the required supplementary document
        :rtype: RequiredSupplementaryDocument
        """
        return RequiredSupplementaryDocument(
            self.__objective, self.__document_types, self.__country_codes
        )
import os
import copy
from box import Box
from kopf.structs.diffs import diff
import digi.util as util
from digi.util import deep_set
class ModelView:
    """
    Return all models in the current world/root view
    keyed by the namespaced name; if the nsn starts
    with default, it will be trimmed off; the original
    view is keyed by "root". Empty model without "spec"
    will be skipped.
    The __enter__ method constructs the model view from
    the root_view and __exit__ applies the changes back
    to the root_view.
    TBD: add mounts recursively but trim off each's mounts
    TBD: add trim hint to reduce the size of view
    TBD: support source views besides root
    """
    def __init__(self, root_view: dict):
        self._root_view = root_view
        # Snapshots taken on __enter__: _old is the baseline, _new is the
        # deep copy handed to the caller for mutation.
        self._old, self._new = None, None
        # Maps trimmed namespaced-name -> its group/version/resource key.
        self._nsn_gvr = dict()
    def __enter__(self):
        _view = {"root": self._root_view}
        _mount = self._root_view.get("mount", {})
        for typ, ms in _mount.items():
            for n, m in ms.items():
                # Skip empty models that carry no spec.
                if "spec" not in m:
                    continue
                # Trim the "default/" namespace prefix for readability.
                n = n.replace("default/", "")
                _view.update({n: m["spec"]})
                self._nsn_gvr[n] = typ
        self._old, self._new = _view, copy.deepcopy(_view)
        return self._new
    def __exit__(self, typ, value, traceback):
        # diff and apply: compare the caller-mutated copy against the
        # baseline and write each change back into the root view.
        _root = self._root_view
        _diffs = diff(self._old, self._new)
        for op, path, old, new in _diffs:
            nsn = path[0]
            if nsn == "root":
                # NOTE(review): joining with "." assumes no path segment
                # contains a dot — confirm against deep_set semantics.
                deep_set(_root, ".".join(path[1:]), new)
            else:
                # Map the trimmed nsn back to its mount location.
                typ = self._nsn_gvr[nsn]
                nsn = util.normalized_nsn(nsn)
                path = ["mount", typ, nsn, "spec"] + list(path[1:])
                deep_set(_root, path, new)
class TypeView:
"""
Return models group-by their gvr, if the gv is
the same as the parent's gv, it will be trimmed
to only the plural.
TBDs: ditto
"""
def __init__(self, root_view: dict, gvr_str: str = None):
self._root_view = root_view
self._old, self._new = None, None
if gvr_str is None:
assert "GROUP" in os.environ and \
"VERSION" in os.environ and \
"PLURAL" in os.environ
self._r = os.environ["PLURAL"]
self._gv_str = f"{os.environ['GROUP']}" \
f"/{os.environ['VERSION']}"
self._gvr_str = f"{self._gv_str}/{os.environ['PLURAL']}"
else:
gvr_str = util.full_gvr(gvr_str)
self._r = util.parse_gvr(gvr_str)[-1]
self._gv_str = "/".join(util.parse_gvr(gvr_str)[:-1])
self._gvr_str = gvr_str
self._typ_full_typ = dict()
def __enter__(self):
# _view = {self._r: {"root": self._root_view}}
_view = {"root": self._root_view}
_mount = self._root_view.get("mount", {})
for typ, ms in _mount.items():
_typ = typ.replace(self._gv_str + "/", "")
_view[_typ] = {}
self._typ_full_typ[_typ] = typ
for n, m in ms.items():
if "spec" not in m:
continue
n = n.replace("default/", "")
_view[_typ].update({n: m["spec"]})
self._old, self._new = _view, copy.deepcopy(_view)
return self._new
def __exit__(self, typ, value, traceback):
_root = self._root_view
_diffs = diff(self._old, self._new)
for op, path, old, new in _diffs:
typ = path[0]
if typ == "root":
deep_set(_root, ".".join(path[1:]), new)
else:
typ = self._typ_full_typ[typ]
nsn = util.normalized_nsn(path[1])
path = ["mount", typ, nsn, "spec"] + list(path[2:])
deep_set(_root, path, new)
class DotView:
"""Dot accessible models."""
_char_map = {
"-": "_",
".": "_",
"/": "_",
" ": "_",
"\\": "_",
}
def __init__(self, src_view):
self._src_view = src_view
self._dot_view = None
self._dot_view_old = None
# map between unsafe attributes
# to original ones
self._attr_map = dict()
def __enter__(self):
# box does not record nor expose a conversion
# map for the safe attributes, so we do so
# ahead of time and pass a safe dict to box;
# the self._attr_map keeps track of any conversion.
self._dot_view_old = self._to_safe_dict(self._src_view)
self._dot_view = Box(self._dot_view_old)
return self._dot_view
def __exit__(self, exc_type, exc_val, exc_tb):
_src = self._src_view
self._dot_view = self._dot_view.to_dict()
_diffs = diff(self._dot_view_old, self._dot_view)
for op, path, old, new in _diffs:
path = [self._attr_map.get(p, p) for p in path]
deep_set(_src, path, new)
def _to_safe_dict(self, d: dict) -> dict:
safe_d = dict()
for k, v in d.items():
orig_k = k
k = self._to_safe_attr(k)
self._attr_map[k] = orig_k
if isinstance(v, dict):
v = self._to_safe_dict(v)
safe_d[k] = v
return safe_d
@staticmethod
def _to_safe_attr(s: str):
for k, v in DotView._char_map.items():
s = s.replace(k, v)
return s | runtime/driver/digi/view.py | import os
import copy
from box import Box
from kopf.structs.diffs import diff
import digi.util as util
from digi.util import deep_set
class ModelView:
"""
Return all models in the current world/root view
keyed by the namespaced name; if the nsn starts
with default, it will be trimmed off; the original
view is keyed by "root". Empty model without "spec"
will be skipped.
The __enter__ method constructs the model view from
the root_view and __exit__ applies the changes back
to the root_view.
TBD: add mounts recursively but trim off each's mounts
TBD: add trim hint to reduce the size of view
TBD: support source views besides root
"""
def __init__(self, root_view: dict):
self._root_view = root_view
self._old, self._new = None, None
self._nsn_gvr = dict()
def __enter__(self):
_view = {"root": self._root_view}
_mount = self._root_view.get("mount", {})
for typ, ms in _mount.items():
for n, m in ms.items():
if "spec" not in m:
continue
n = n.replace("default/", "")
_view.update({n: m["spec"]})
self._nsn_gvr[n] = typ
self._old, self._new = _view, copy.deepcopy(_view)
return self._new
def __exit__(self, typ, value, traceback):
# diff and apply
_root = self._root_view
_diffs = diff(self._old, self._new)
for op, path, old, new in _diffs:
nsn = path[0]
if nsn == "root":
deep_set(_root, ".".join(path[1:]), new)
else:
typ = self._nsn_gvr[nsn]
nsn = util.normalized_nsn(nsn)
path = ["mount", typ, nsn, "spec"] + list(path[1:])
deep_set(_root, path, new)
class TypeView:
"""
Return models group-by their gvr, if the gv is
the same as the parent's gv, it will be trimmed
to only the plural.
TBDs: ditto
"""
def __init__(self, root_view: dict, gvr_str: str = None):
self._root_view = root_view
self._old, self._new = None, None
if gvr_str is None:
assert "GROUP" in os.environ and \
"VERSION" in os.environ and \
"PLURAL" in os.environ
self._r = os.environ["PLURAL"]
self._gv_str = f"{os.environ['GROUP']}" \
f"/{os.environ['VERSION']}"
self._gvr_str = f"{self._gv_str}/{os.environ['PLURAL']}"
else:
gvr_str = util.full_gvr(gvr_str)
self._r = util.parse_gvr(gvr_str)[-1]
self._gv_str = "/".join(util.parse_gvr(gvr_str)[:-1])
self._gvr_str = gvr_str
self._typ_full_typ = dict()
def __enter__(self):
# _view = {self._r: {"root": self._root_view}}
_view = {"root": self._root_view}
_mount = self._root_view.get("mount", {})
for typ, ms in _mount.items():
_typ = typ.replace(self._gv_str + "/", "")
_view[_typ] = {}
self._typ_full_typ[_typ] = typ
for n, m in ms.items():
if "spec" not in m:
continue
n = n.replace("default/", "")
_view[_typ].update({n: m["spec"]})
self._old, self._new = _view, copy.deepcopy(_view)
return self._new
def __exit__(self, typ, value, traceback):
_root = self._root_view
_diffs = diff(self._old, self._new)
for op, path, old, new in _diffs:
typ = path[0]
if typ == "root":
deep_set(_root, ".".join(path[1:]), new)
else:
typ = self._typ_full_typ[typ]
nsn = util.normalized_nsn(path[1])
path = ["mount", typ, nsn, "spec"] + list(path[2:])
deep_set(_root, path, new)
class DotView:
"""Dot accessible models."""
_char_map = {
"-": "_",
".": "_",
"/": "_",
" ": "_",
"\\": "_",
}
def __init__(self, src_view):
self._src_view = src_view
self._dot_view = None
self._dot_view_old = None
# map between unsafe attributes
# to original ones
self._attr_map = dict()
def __enter__(self):
# box does not record nor expose a conversion
# map for the safe attributes, so we do so
# ahead of time and pass a safe dict to box;
# the self._attr_map keeps track of any conversion.
self._dot_view_old = self._to_safe_dict(self._src_view)
self._dot_view = Box(self._dot_view_old)
return self._dot_view
def __exit__(self, exc_type, exc_val, exc_tb):
_src = self._src_view
self._dot_view = self._dot_view.to_dict()
_diffs = diff(self._dot_view_old, self._dot_view)
for op, path, old, new in _diffs:
path = [self._attr_map.get(p, p) for p in path]
deep_set(_src, path, new)
def _to_safe_dict(self, d: dict) -> dict:
safe_d = dict()
for k, v in d.items():
orig_k = k
k = self._to_safe_attr(k)
self._attr_map[k] = orig_k
if isinstance(v, dict):
v = self._to_safe_dict(v)
safe_d[k] = v
return safe_d
@staticmethod
def _to_safe_attr(s: str):
for k, v in DotView._char_map.items():
s = s.replace(k, v)
return s | 0.595022 | 0.264706 |
# pylint: disable=missing-docstring
import asyncio
import logging
import socket
import aiohttp
import async_timeout
from pycfdns.const import GET_EXT_IP_URL, NAME
from pycfdns.exceptions import (
CloudflareAuthenticationException,
CloudflareConnectionException,
CloudflareException,
)
_LOGGER = logging.getLogger(NAME)
class CFAPI:
"""Class used to call the API."""
def __init__(self, session, auth, timeout):
"""Initialize."""
self.session = session
self.auth = auth
self.timeout = timeout
async def get_json(self, url):
"""Return JSON response from the API."""
data = None
try:
async with async_timeout.timeout(self.timeout):
response = await self.session.get(url, headers=self.auth.header)
except asyncio.TimeoutError as error:
raise CloudflareConnectionException(
f"Timeout error fetching information from {url}, {error}"
) from error
except (KeyError, TypeError) as error:
raise CloudflareException(
f"Error parsing information from {url}, {error}"
) from error
except (aiohttp.ClientError, socket.gaierror) as error:
raise CloudflareConnectionException(
f"Error fetching information from {url}, {error}"
) from error
except Exception as error: # pylint: disable=broad-except
raise CloudflareException(
f"Something really wrong happend! - {error}"
) from error
else:
if response.status == 403:
raise CloudflareAuthenticationException(
"Access forbidden. Please ensure valid API Key is provided"
)
data = await response.json()
_LOGGER.debug(data)
if not data.get("success"):
for error in data.get("errors"):
raise CloudflareException(
f"[{error.get('code')}] {error.get('message')}"
)
return data
async def get_external_ip(self):
"""Return the external IP."""
data = None
try:
async with async_timeout.timeout(self.timeout):
response = await self.session.get(GET_EXT_IP_URL)
except asyncio.TimeoutError as error:
raise CloudflareConnectionException(
f"Timeout error fetching information from {GET_EXT_IP_URL}, {error}"
) from error
except (KeyError, TypeError) as error:
raise CloudflareException(
f"Error parsing information from {GET_EXT_IP_URL}, {error}"
) from error
except (aiohttp.ClientError, socket.gaierror) as error:
raise CloudflareConnectionException(
f"Error fetching information from {GET_EXT_IP_URL}, {error}"
) from error
except Exception as error: # pylint: disable=broad-except
raise CloudflareException(
f"Something really wrong happend! - {error}"
) from error
else:
data = await response.text()
_LOGGER.debug(data)
return data
async def put_json(self, url, json_data):
"""PUT JSON on the API."""
data = None
try:
async with async_timeout.timeout(self.timeout):
response = await self.session.put(
url, headers=self.auth.header, data=json_data
)
except asyncio.TimeoutError as error:
raise CloudflareConnectionException(
f"Timeout error fetching information from {url}, {error}"
) from error
except (KeyError, TypeError) as error:
raise CloudflareException(
f"Error parsing information from {url}, {error}"
) from error
except (aiohttp.ClientError, socket.gaierror) as error:
raise CloudflareConnectionException(
f"Error fetching information from {url}, {error}"
) from error
except Exception as error: # pylint: disable=broad-except
raise CloudflareException(
f"Something really wrong happend! - {error}"
) from error
else:
if response.status == 403:
raise CloudflareAuthenticationException(
"Access forbidden. Please ensure valid API Key is provided"
)
data = await response.json()
_LOGGER.debug(data)
return data
class CFAuth:
"""CF Auth."""
def __init__(self, token):
"""Initialize."""
self.token = token
@property
def header(self):
"""Return auth headers."""
return {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.token}",
}
class CFRecord:
"""CFRecord."""
def __init__(self, record):
"""Initialize."""
self.record = record
@property
def record_id(self):
return self.record.get("id")
@property
def record_type(self):
return self.record.get("type")
@property
def record_name(self):
return self.record.get("name")
@property
def record_proxied(self):
return self.record.get("proxied")
@property
def record_content(self):
return self.record.get("content") | pycfdns/models.py | # pylint: disable=missing-docstring
import asyncio
import logging
import socket
import aiohttp
import async_timeout
from pycfdns.const import GET_EXT_IP_URL, NAME
from pycfdns.exceptions import (
CloudflareAuthenticationException,
CloudflareConnectionException,
CloudflareException,
)
_LOGGER = logging.getLogger(NAME)
class CFAPI:
"""Class used to call the API."""
def __init__(self, session, auth, timeout):
"""Initialize."""
self.session = session
self.auth = auth
self.timeout = timeout
async def get_json(self, url):
"""Return JSON response from the API."""
data = None
try:
async with async_timeout.timeout(self.timeout):
response = await self.session.get(url, headers=self.auth.header)
except asyncio.TimeoutError as error:
raise CloudflareConnectionException(
f"Timeout error fetching information from {url}, {error}"
) from error
except (KeyError, TypeError) as error:
raise CloudflareException(
f"Error parsing information from {url}, {error}"
) from error
except (aiohttp.ClientError, socket.gaierror) as error:
raise CloudflareConnectionException(
f"Error fetching information from {url}, {error}"
) from error
except Exception as error: # pylint: disable=broad-except
raise CloudflareException(
f"Something really wrong happend! - {error}"
) from error
else:
if response.status == 403:
raise CloudflareAuthenticationException(
"Access forbidden. Please ensure valid API Key is provided"
)
data = await response.json()
_LOGGER.debug(data)
if not data.get("success"):
for error in data.get("errors"):
raise CloudflareException(
f"[{error.get('code')}] {error.get('message')}"
)
return data
async def get_external_ip(self):
"""Return the external IP."""
data = None
try:
async with async_timeout.timeout(self.timeout):
response = await self.session.get(GET_EXT_IP_URL)
except asyncio.TimeoutError as error:
raise CloudflareConnectionException(
f"Timeout error fetching information from {GET_EXT_IP_URL}, {error}"
) from error
except (KeyError, TypeError) as error:
raise CloudflareException(
f"Error parsing information from {GET_EXT_IP_URL}, {error}"
) from error
except (aiohttp.ClientError, socket.gaierror) as error:
raise CloudflareConnectionException(
f"Error fetching information from {GET_EXT_IP_URL}, {error}"
) from error
except Exception as error: # pylint: disable=broad-except
raise CloudflareException(
f"Something really wrong happend! - {error}"
) from error
else:
data = await response.text()
_LOGGER.debug(data)
return data
async def put_json(self, url, json_data):
"""PUT JSON on the API."""
data = None
try:
async with async_timeout.timeout(self.timeout):
response = await self.session.put(
url, headers=self.auth.header, data=json_data
)
except asyncio.TimeoutError as error:
raise CloudflareConnectionException(
f"Timeout error fetching information from {url}, {error}"
) from error
except (KeyError, TypeError) as error:
raise CloudflareException(
f"Error parsing information from {url}, {error}"
) from error
except (aiohttp.ClientError, socket.gaierror) as error:
raise CloudflareConnectionException(
f"Error fetching information from {url}, {error}"
) from error
except Exception as error: # pylint: disable=broad-except
raise CloudflareException(
f"Something really wrong happend! - {error}"
) from error
else:
if response.status == 403:
raise CloudflareAuthenticationException(
"Access forbidden. Please ensure valid API Key is provided"
)
data = await response.json()
_LOGGER.debug(data)
return data
class CFAuth:
"""CF Auth."""
def __init__(self, token):
"""Initialize."""
self.token = token
@property
def header(self):
"""Return auth headers."""
return {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.token}",
}
class CFRecord:
"""CFRecord."""
def __init__(self, record):
"""Initialize."""
self.record = record
@property
def record_id(self):
return self.record.get("id")
@property
def record_type(self):
return self.record.get("type")
@property
def record_name(self):
return self.record.get("name")
@property
def record_proxied(self):
return self.record.get("proxied")
@property
def record_content(self):
return self.record.get("content") | 0.719482 | 0.108425 |
import base64
import numpy as np
import io
from PIL import Image
import pandas as pd
from keras.models import load_model
from flask import request
from flask import jsonify
from flask import Flask
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import tensorflow as tf
import pymysql
import time
app = Flask(__name__)
graph = tf.get_default_graph()
def get_model():
global model
model = load_model("CNN_model6_2_2019.h5")
print("model loaded")
print("loading model")
get_model()
@app.route("/predict",methods=["get","post"])
def predict():
global graph
#get json from client
message = request.get_json(force=True)
encoded = message["image"]
#decode base64 image and convert it to np.array
decoded = base64.b64decode(encoded)
img = np.array(Image.open(io.BytesIO(decoded)))
#intialize parameter
size = 50
cnt = 0
thresh_pred = 0.85
h,w,c = img.shape
plt.imshow(img)
max_y = 0
while max_y+size < h:
max_x = 0
while max_x+size < w:
left = max_x
right = max_x+size
top = max_y
bottom = max_y + size
patch = img[top:bottom,left:right]
df = pd.DataFrame(np.ndarray.flatten(patch)).values.reshape(-1,50,50,3)
with graph.as_default():
prediction = model.predict(df)
if prediction > thresh_pred:
cnt+=1
plt.gca().add_patch(Rectangle((left,top),50,50,linewidth=1,edgecolor='r',facecolor='none'))
max_x += size-10
max_y += size-10
imgBytesIOBytes = io.BytesIO()
plt.savefig(imgBytesIOBytes,format = 'jpeg')
imgBytesIOBytes.seek(0)
encoded_imgBytesIOBytes = str(base64.b64encode(imgBytesIOBytes.read()))
response = {"predicted_image":encoded_imgBytesIOBytes[2:-1],"count":cnt}
return jsonify(response)
server = pymysql.connect(host = "localhost", user = "root", passwd = "password",db = 'malaria_diagno')
cur = server.cursor()
@app.route("/login",methods=["GET","POST"])
def login():
message = request.get_json(force=True)
user_id = message["user_id"]
password = message["password"]
try:
pswrd = cur.execute("SELECT password IN `login` WHERE User_ID==%s;",user_id)
server.commit()
if pswrd == password:
jsonify({"login_response":"You have loged in successfuly"})
else:
jsonify({"login_response":"Wrong password"})
except:
jsonify({"login_response":"Invalid Username."})
@app.route("/signup",methods=["GET","POST"])
def signup():
message = request.get_json(force=True)
user_id = message["user_id"]
name = message["name"]
age = message["age"]
gender= message["gender"]
email= message["email"]
password = message["password"]
try:
cur.execute("INSERT INTO `user`(User_ID,Name,Age,Gender,Email_ID,Password) VALUES (%s,%s,%s,%s,%s,%s);",(user_id,name,age,gender,email,password))
server.commit()
jsonify({"login_response":"Signed up sccessfully."})
except:
jsonify({"login_response":"User ID already exist."})
@app.route("/save_report",methods=["GET","POST"])
def save_report():
message = request.get_json(force=True)
report = message["report"]
user_id=
report_id=
date=
name=
age=
image_id=
gender=
try:
cur.execute("INSERT INTO `report`(Report_ID,User_ID,Report,Date,Name,Age,Image_ID,Gender) VALUES(%s,%s,%s,%s,%s,%s,%s,%s);",(report_id,user_id,report,date,name,age,image_id,gender))
server.commit()
jsonify({"save_report_response":"Report has been saved successfully!"})
except:
jsonify({"save_report_response":"Try again!"})
app.run() | predict.py | import base64
import numpy as np
import io
from PIL import Image
import pandas as pd
from keras.models import load_model
from flask import request
from flask import jsonify
from flask import Flask
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import tensorflow as tf
import pymysql
import time
app = Flask(__name__)
graph = tf.get_default_graph()
def get_model():
global model
model = load_model("CNN_model6_2_2019.h5")
print("model loaded")
print("loading model")
get_model()
@app.route("/predict",methods=["get","post"])
def predict():
global graph
#get json from client
message = request.get_json(force=True)
encoded = message["image"]
#decode base64 image and convert it to np.array
decoded = base64.b64decode(encoded)
img = np.array(Image.open(io.BytesIO(decoded)))
#intialize parameter
size = 50
cnt = 0
thresh_pred = 0.85
h,w,c = img.shape
plt.imshow(img)
max_y = 0
while max_y+size < h:
max_x = 0
while max_x+size < w:
left = max_x
right = max_x+size
top = max_y
bottom = max_y + size
patch = img[top:bottom,left:right]
df = pd.DataFrame(np.ndarray.flatten(patch)).values.reshape(-1,50,50,3)
with graph.as_default():
prediction = model.predict(df)
if prediction > thresh_pred:
cnt+=1
plt.gca().add_patch(Rectangle((left,top),50,50,linewidth=1,edgecolor='r',facecolor='none'))
max_x += size-10
max_y += size-10
imgBytesIOBytes = io.BytesIO()
plt.savefig(imgBytesIOBytes,format = 'jpeg')
imgBytesIOBytes.seek(0)
encoded_imgBytesIOBytes = str(base64.b64encode(imgBytesIOBytes.read()))
response = {"predicted_image":encoded_imgBytesIOBytes[2:-1],"count":cnt}
return jsonify(response)
server = pymysql.connect(host = "localhost", user = "root", passwd = "password",db = 'malaria_diagno')
cur = server.cursor()
@app.route("/login",methods=["GET","POST"])
def login():
message = request.get_json(force=True)
user_id = message["user_id"]
password = message["password"]
try:
pswrd = cur.execute("SELECT password IN `login` WHERE User_ID==%s;",user_id)
server.commit()
if pswrd == password:
jsonify({"login_response":"You have loged in successfuly"})
else:
jsonify({"login_response":"Wrong password"})
except:
jsonify({"login_response":"Invalid Username."})
@app.route("/signup",methods=["GET","POST"])
def signup():
message = request.get_json(force=True)
user_id = message["user_id"]
name = message["name"]
age = message["age"]
gender= message["gender"]
email= message["email"]
password = message["password"]
try:
cur.execute("INSERT INTO `user`(User_ID,Name,Age,Gender,Email_ID,Password) VALUES (%s,%s,%s,%s,%s,%s);",(user_id,name,age,gender,email,password))
server.commit()
jsonify({"login_response":"Signed up sccessfully."})
except:
jsonify({"login_response":"User ID already exist."})
@app.route("/save_report",methods=["GET","POST"])
def save_report():
message = request.get_json(force=True)
report = message["report"]
user_id=
report_id=
date=
name=
age=
image_id=
gender=
try:
cur.execute("INSERT INTO `report`(Report_ID,User_ID,Report,Date,Name,Age,Image_ID,Gender) VALUES(%s,%s,%s,%s,%s,%s,%s,%s);",(report_id,user_id,report,date,name,age,image_id,gender))
server.commit()
jsonify({"save_report_response":"Report has been saved successfully!"})
except:
jsonify({"save_report_response":"Try again!"})
app.run() | 0.199659 | 0.09401 |
import carla
import math
import numpy as np
from collections import deque
from agents.tools.misc import get_speed
import time
class VehiclePIDController:
    """
    Combines a longitudinal and a lateral PID controller into a single
    vehicle-level controller that emits carla.VehicleControl commands.
    """

    def __init__(self, vehicle, args_lateral=None, args_longitudinal=None):
        """
        :param vehicle: actor to apply the local planner logic onto
        :param args_lateral: dictionary of arguments for the lateral PID controller
        :param args_longitudinal: dictionary of arguments for the longitudinal PID controller
        """
        # fall back to the tuned default gain sets when none are provided
        lateral_cfg = args_lateral or {'K_P': 0.4, 'K_I': 0.2, 'K_D': 0.4,
                                       'dt': 0.05, 'control_type': 'PID'}
        longitudinal_cfg = args_longitudinal or {'K_P': 1.0, 'K_I': 0.2,
                                                 'K_D': 0.6, 'dt': 0.05}
        self._vehicle = vehicle
        self._world = vehicle.get_world()
        self._lon_controller = PIDLongitudinalController(vehicle, **longitudinal_cfg)
        self._lat_controller = PIDLateralController(vehicle, **lateral_cfg)

    def run_step(self, target_speed, waypoints, target_waypoint, current_waypoint):
        """
        Execute one control step: reach target_waypoint at target_speed.

        :param target_speed: desired vehicle speed
        :param waypoints: waypoint plan forwarded to the lateral controller
        :param target_waypoint: waypoint to steer towards
        :param current_waypoint: waypoint closest to the vehicle
        :return: carla.VehicleControl instance with throttle and steer set
        """
        accel_cmd = self._lon_controller.run_step(target_speed)
        steer_cmd = self._lat_controller.run_step(waypoints, target_waypoint,
                                                  current_waypoint)
        control = carla.VehicleControl()
        control.throttle = accel_cmd
        control.steer = steer_cmd
        control.brake = 0.0
        control.hand_brake = False
        control.manual_gear_shift = False
        return control
class PIDLongitudinalController:
    """
    PIDLongitudinalController implements longitudinal control using a PID.
    Speed longitudinal controller (Position longitudinal controller preferred)
    """

    def __init__(self, vehicle, K_P=1.0, K_D=0.5, K_I=0.5, dt=0.05):
        """
        :param vehicle: actor to apply to local planner logic onto
        :param K_P: Proportional term
        :param K_D: Differential term
        :param K_I: Integral term
        :param dt: time differential in seconds
        """
        self._vehicle = vehicle
        self._K_P = K_P
        self._K_D = K_D
        self._K_I = K_I
        self._dt = dt
        # last 30 speed errors; feeds the derivative and integral terms
        self._e_buffer = deque(maxlen=30)

    def run_step(self, target_speed, debug=False):
        """
        Execute one step of longitudinal control to reach a given target speed.

        :param target_speed: target speed in Km/h
        :param debug: when True, print the current speed
        :return: throttle control in the range [0, 1]
        """
        current_speed = get_speed(self._vehicle)
        if debug:
            print('Current speed = {}'.format(current_speed))
        return self._pid_control(target_speed, current_speed)

    def _pid_control(self, target_speed, current_speed):
        """
        Estimate the throttle of the vehicle based on the PID equations.

        :param target_speed: target speed in Km/h
        :param current_speed: current speed of the vehicle in Km/h
        :return: throttle control in the range [0, 1]
        """
        # speed error
        _e = (target_speed - current_speed)
        self._e_buffer.append(_e)

        # derivative and integral of the error; the dt factor is already
        # applied here, once
        if len(self._e_buffer) >= 2:
            _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
            _ie = sum(self._e_buffer) * self._dt
        else:
            _de = 0.0
            _ie = 0.0

        # BUGFIX: the previous implementation divided the derivative term by
        # dt and multiplied the integral term by dt a *second* time, so the
        # effective D and I gains were scaled by 1/dt and dt respectively.
        # Each gain is now applied exactly once.
        return np.clip(self._K_P * _e + self._K_D * _de + self._K_I * _ie,
                       0.0, 1.0)
class PIDLateralController:
    """
    PIDLateralController implements lateral control using a PID.
    Heading lateral controller (Stanley lateral controller preferred)
    """

    def __init__(self, vehicle, K_P=0.5, K_D=0.5, K_I=0.2, dt=0.05, control_type='PID'):
        """
        :param vehicle: actor to apply to local planner logic onto
        :param K_P: Proportional term
        :param K_D: Differential term
        :param K_I: Integral term
        :param dt: time differential in seconds
        :param control_type: 'PID' selects the heading PID; any other value
            selects the Stanley controller
        """
        self._vehicle = vehicle
        self._K_P = K_P
        self._K_D = K_D
        self._K_I = K_I
        self._dt = dt
        # last 10 heading errors; feeds the derivative and integral terms
        self._e_buffer = deque(maxlen=10)
        self._control_type = control_type

    def run_step(self, waypoints, target_waypoint, current_waypoint):
        """
        Execute one step of lateral control to steer the vehicle towards a
        certain waypoint.

        :param waypoints: full waypoint plan (unused; kept for interface
            compatibility with the planner)
        :param target_waypoint: waypoint to steer towards
        :param current_waypoint: waypoint closest to the vehicle (Stanley only)
        :return: steering control in the range [-1, 1] where:
            -1 represents maximum steering to the left
            +1 represents maximum steering to the right
        """
        if self._control_type == 'PID':
            return self._pid_control(target_waypoint, self._vehicle.get_transform())
        return self._stanley_control(target_waypoint, current_waypoint,
                                     self._vehicle.get_transform())

    def _pid_control(self, waypoint, vehicle_transform):
        """
        Estimate the steering angle of the vehicle based on the PID equations.

        :param waypoint: target waypoint
        :param vehicle_transform: current transform of the vehicle
        :return: steering control in the range [-1, 1]
        """
        # unit-ish vector along the vehicle heading
        v_begin = vehicle_transform.location
        v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
                                         y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
        v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
        # vector from the vehicle to the target waypoint
        w_vec = np.array([waypoint.transform.location.x -
                          v_begin.x, waypoint.transform.location.y -
                          v_begin.y, 0.0])

        # heading error: angle between the two vectors, signed by the cross
        # product's z component (negative -> waypoint is to the left)
        _angle = math.acos(np.clip(np.dot(w_vec, v_vec) /
                                   (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)), -1.0, 1.0))
        _cross = np.cross(v_vec, w_vec)
        if _cross[2] < 0:
            _angle *= -1.0

        # mirror angles beyond +/- pi/2 (waypoint behind the vehicle) so the
        # error stays in [-pi/2, pi/2]
        if _angle > 1.5708:
            _angle = -(math.pi - _angle)
        elif _angle < -1.5708:
            _angle = math.pi + _angle

        self._e_buffer.append(_angle)
        if len(self._e_buffer) >= 2:
            _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
            _ie = sum(self._e_buffer) * self._dt
        else:
            _de = 0.0
            _ie = 0.0

        # BUGFIX: as in the longitudinal controller, the previous code applied
        # /dt to the derivative term and *dt to the integral term a second
        # time; each gain is now applied exactly once.
        return np.clip(self._K_P * _angle + self._K_D * _de + self._K_I * _ie,
                       -1.0, 1.0)

    def _stanley_control(self, target_waypoint, current_waypoint, vehicle_transform):
        """
        Estimate the steering angle of the vehicle with a Stanley controller.

        :param target_waypoint: waypoint ahead on the path
        :param current_waypoint: waypoint closest to the vehicle
        :param vehicle_transform: current transform of the vehicle
        :return: steering angle command (clamped to [-2, 2])
        """
        # heading of the path segment between the two waypoints
        yaw_path = np.arctan2(target_waypoint.transform.location.y - current_waypoint.transform.location.y,
                              target_waypoint.transform.location.x - current_waypoint.transform.location.x)
        v_begin = vehicle_transform.location
        v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
                                         y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
        # vehicle heading vector and yaw
        v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
        yaw_vehicle = np.arctan2(v_vec[1], v_vec[0])

        # heading error, wrapped to (-pi, pi]
        yaw_diff = yaw_path - yaw_vehicle
        if yaw_diff > np.pi:
            yaw_diff -= 2 * np.pi
        if yaw_diff < - np.pi:
            yaw_diff += 2 * np.pi

        # Cross-track error.
        # NOTE(review): these are *squared* distances (no sqrt), so the
        # crosstrack term grows quadratically with lateral offset; the gains
        # below appear tuned for that — confirm before "fixing".
        cross_err_current = (v_begin.x - current_waypoint.transform.location.x) ** 2 + (v_begin.y - current_waypoint.transform.location.y) ** 2
        cross_err_target = (v_begin.x - target_waypoint.transform.location.x) ** 2 + (v_begin.y - target_waypoint.transform.location.y) ** 2
        crosstrack_error = np.min([cross_err_current, cross_err_target])

        # sign of the crosstrack error: which side of the path the vehicle is on
        yaw_cross_track = np.arctan2(v_begin.y - target_waypoint.transform.location.y,
                                     v_begin.x - target_waypoint.transform.location.x)
        yaw_path2ct = yaw_path - yaw_cross_track
        if yaw_path2ct > np.pi:
            yaw_path2ct -= 2 * np.pi
        if yaw_path2ct < - np.pi:
            yaw_path2ct += 2 * np.pi
        if yaw_path2ct > 0:
            crosstrack_error = abs(crosstrack_error)
        else:
            crosstrack_error = -abs(crosstrack_error)

        # Stanley law: heading error + arctan(k_e * e / (k_v + v))
        v = get_speed(self._vehicle)
        k_e = 3
        k_v = 1
        yaw_diff_crosstrack = np.arctan(k_e * crosstrack_error / (k_v + v))

        # NOTE(review): the clamp below is to [-2, 2], so the returned value
        # can exceed the [-1, 1] range carla.VehicleControl.steer expects —
        # verify against the caller.
        steer_expect = yaw_diff + yaw_diff_crosstrack
        steer_expect = min(2, steer_expect)
        steer_expect = max(-2, steer_expect)
        if steer_expect > np.pi:
            steer_expect -= 2 * np.pi
        if steer_expect < - np.pi:
            steer_expect += 2 * np.pi
        return steer_expect
import carla
import math
import numpy as np
from collections import deque
from agents.tools.misc import get_speed
import time
class VehiclePIDController:
    """
    Vehicle-level controller: delegates speed tracking to a longitudinal PID
    and heading tracking to a lateral PID, and packs the two commands into a
    single carla.VehicleControl.
    """

    def __init__(self, vehicle, args_lateral=None, args_longitudinal=None):
        """
        :param vehicle: actor to apply the local planner logic onto
        :param args_lateral: dictionary of arguments for the lateral PID controller
        :param args_longitudinal: dictionary of arguments for the longitudinal PID controller
        """
        if args_lateral is None or not args_lateral:
            args_lateral = {'K_P': 0.4, 'K_I': 0.2, 'K_D': 0.4,
                            'dt': 0.05, 'control_type': 'PID'}
        if args_longitudinal is None or not args_longitudinal:
            args_longitudinal = {'K_P': 1.0, 'K_I': 0.2, 'K_D': 0.6, 'dt': 0.05}
        self._vehicle = vehicle
        self._world = vehicle.get_world()
        self._lat_controller = PIDLateralController(vehicle, **args_lateral)
        self._lon_controller = PIDLongitudinalController(vehicle, **args_longitudinal)

    def run_step(self, target_speed, waypoints, target_waypoint, current_waypoint):
        """
        Execute one control step towards target_waypoint at target_speed.

        :param target_speed: desired vehicle speed
        :param waypoints: waypoint plan forwarded to the lateral controller
        :param target_waypoint: waypoint to steer towards
        :param current_waypoint: waypoint closest to the vehicle
        :return: carla.VehicleControl instance
        """
        control = carla.VehicleControl()
        control.throttle = self._lon_controller.run_step(target_speed)
        control.steer = self._lat_controller.run_step(waypoints, target_waypoint,
                                                      current_waypoint)
        control.brake = 0.0
        control.hand_brake = False
        control.manual_gear_shift = False
        return control
class PIDLongitudinalController:
    """
    PIDLongitudinalController implements longitudinal control using a PID.
    Speed longitudinal controller (Position longitudinal controller preferred)
    """

    def __init__(self, vehicle, K_P=1.0, K_D=0.5, K_I=0.5, dt=0.05):
        """
        :param vehicle: actor to apply to local planner logic onto
        :param K_P: Proportional term
        :param K_D: Differential term
        :param K_I: Integral term
        :param dt: time differential in seconds
        """
        self._vehicle = vehicle
        self._K_P = K_P
        self._K_D = K_D
        self._K_I = K_I
        self._dt = dt
        # last 30 speed errors; feeds the derivative and integral terms
        self._e_buffer = deque(maxlen=30)

    def run_step(self, target_speed, debug=False):
        """
        Execute one step of longitudinal control to reach a given target speed.

        :param target_speed: target speed in Km/h
        :param debug: when True, print the current speed
        :return: throttle control in the range [0, 1]
        """
        current_speed = get_speed(self._vehicle)
        if debug:
            print('Current speed = {}'.format(current_speed))
        return self._pid_control(target_speed, current_speed)

    def _pid_control(self, target_speed, current_speed):
        """
        Estimate the throttle of the vehicle based on the PID equations.

        :param target_speed: target speed in Km/h
        :param current_speed: current speed of the vehicle in Km/h
        :return: throttle control in the range [0, 1]
        """
        # speed error
        _e = (target_speed - current_speed)
        self._e_buffer.append(_e)

        # derivative and integral of the error; the dt factor is already
        # applied here, once
        if len(self._e_buffer) >= 2:
            _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
            _ie = sum(self._e_buffer) * self._dt
        else:
            _de = 0.0
            _ie = 0.0

        # BUGFIX: the previous implementation divided the derivative term by
        # dt and multiplied the integral term by dt a *second* time, so the
        # effective D and I gains were scaled by 1/dt and dt respectively.
        # Each gain is now applied exactly once.
        return np.clip(self._K_P * _e + self._K_D * _de + self._K_I * _ie,
                       0.0, 1.0)
class PIDLateralController:
    """
    PIDLateralController implements lateral control using a PID.
    Heading lateral controller (Stanley lateral controller preferred)
    """

    def __init__(self, vehicle, K_P=0.5, K_D=0.5, K_I=0.2, dt=0.05, control_type='PID'):
        """
        :param vehicle: actor to apply to local planner logic onto
        :param K_P: Proportional term
        :param K_D: Differential term
        :param K_I: Integral term
        :param dt: time differential in seconds
        :param control_type: 'PID' selects the heading-PID controller;
            any other value selects the Stanley controller (see run_step)
        """
        self._vehicle = vehicle
        self._K_P = K_P
        self._K_D = K_D
        self._K_I = K_I
        self._dt = dt
        # Short heading-error history for the D and I terms of the PID path.
        self._e_buffer = deque(maxlen=10)
        self._control_type = control_type
def run_step(self, waypoints, target_waypoint, current_waypoint):
"""
Execute one step of lateral control to steer the vehicle towards a certain waypoin.
:param waypoint: target waypoint
:return: steering control in the range [-1, 1] where:
-1 represent maximum steering to left
+1 maximum steering to right
"""
if self._control_type=='PID':
return self._pid_control(target_waypoint, self._vehicle.get_transform())
else:
return self._stanley_control(target_waypoint, current_waypoint, self._vehicle.get_transform())
    def _pid_control(self, waypoint, vehicle_transform):
        """
        Estimate the steering angle of the vehicle based on the PID equations
        over the signed heading error to the target waypoint.
        :param waypoint: target waypoint
        :param vehicle_transform: current transform of the vehicle
        :return: steering control in the range [-1, 1]
        """
        # Unit-length heading vector of the vehicle derived from its yaw.
        v_begin = vehicle_transform.location
        v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
                                         y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
        v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
        # Vector from the vehicle to the target waypoint.
        w_vec = np.array([waypoint.transform.location.x -
                          v_begin.x, waypoint.transform.location.y -
                          v_begin.y, 0.0])
        # Unsigned angle between heading and target direction; the ratio is
        # clipped to [-1, 1] to keep acos numerically safe.
        _dot = math.acos(np.clip(np.dot(w_vec, v_vec) /
                                 (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)), -1.0, 1.0))
        # Sign the angle via the z component of the cross product.
        _cross = np.cross(v_vec, w_vec)
        if _cross[2] < 0:
            _dot *= -1.0
        # _dot should range from -pi to pi
        # NOTE(review): 1.5708 is pi/2, so errors beyond +/-90 degrees are
        # folded towards the opposite direction — confirm this fold is
        # intentional rather than a misapplied [-pi, pi] wrap.
        if _dot > 1.5708:
            _dot = -(math.pi - _dot)
        elif _dot < -1.5708:
            _dot = math.pi + _dot

        self._e_buffer.append(_dot)
        # Derivative and windowed integral of the heading error.
        if len(self._e_buffer) >= 2:
            _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
            _ie = sum(self._e_buffer) * self._dt
        else:
            _de = 0.0
            _ie = 0.0

        # NOTE(review): dt is applied twice to the D and I terms (_de is
        # already /dt and _ie already *dt); the gains here may be tuned to
        # this scaling — confirm before changing.
        return np.clip((self._K_P * _dot) + (self._K_D * _de /
                                             self._dt) + (self._K_I * _ie * self._dt), -1.0, 1.0)
    def _stanley_control(self, target_waypoint, current_waypoint, vehicle_transform):
        """
        Estimate the steering angle using a Stanley-style controller:
        heading-error term plus a speed-scaled cross-track-error term.
        :param target_waypoint: next waypoint on the path
        :param current_waypoint: waypoint closest to the vehicle
        :param vehicle_transform: current transform of the vehicle
        :return: steering angle in radians, clamped to [-2, 2]
        """
        # Path heading: direction from the current to the target waypoint.
        yaw_path = np.arctan2(target_waypoint.transform.location.y-current_waypoint.transform.location.y, target_waypoint.transform.location.x - current_waypoint.transform.location.x)
        v_begin = vehicle_transform.location
        v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
                                         y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
        # vehicle heading vector
        v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
        yaw_vehicle = np.arctan2(v_vec[1], v_vec[0])
        # Heading error, wrapped into [-pi, pi].
        yaw_diff = yaw_path - yaw_vehicle
        # Wrapping the yaw_diff
        if yaw_diff > np.pi:
            yaw_diff -= 2 * np.pi
        if yaw_diff < - np.pi:
            yaw_diff += 2 * np.pi
        # Calculate cross-track error
        # NOTE(review): these are *squared* distances (no sqrt), so the
        # cross-track term grows quadratically with lateral offset —
        # confirm this is intentional before re-tuning k_e.
        cross_err_current = (v_begin.x - current_waypoint.transform.location.x)**2 + (v_begin.y - current_waypoint.transform.location.y)**2
        cross_err_target = (v_begin.x - target_waypoint.transform.location.x)**2 + (v_begin.y - target_waypoint.transform.location.y)**2
        crosstrack_error = np.min([cross_err_current, cross_err_target])
        # Sign the cross-track error by which side of the path the vehicle
        # lies on (angle between path direction and vehicle-to-target ray).
        yaw_cross_track = np.arctan2(v_begin.y-target_waypoint.transform.location.y, v_begin.x-target_waypoint.transform.location.x)
        yaw_path2ct = yaw_path - yaw_cross_track
        if yaw_path2ct > np.pi:
            yaw_path2ct -= 2 * np.pi
        if yaw_path2ct < - np.pi:
            yaw_path2ct += 2 * np.pi
        if yaw_path2ct > 0:
            crosstrack_error = abs(crosstrack_error)
        else:
            crosstrack_error = -abs(crosstrack_error)
        v = get_speed(self._vehicle)
        # Stanley gains: k_e scales the cross-track term; k_v softens it at
        # low speed (keeps the divisor away from zero as v -> 0).
        k_e = 3
        k_v = 1
        #print("crosstrack_error: ", crosstrack_error)
        yaw_diff_crosstrack = np.arctan(k_e * crosstrack_error / (k_v + v))
        steer_expect = yaw_diff + yaw_diff_crosstrack
        # Clamp, then wrap into [-pi, pi].
        # NOTE(review): the wrap below is unreachable after clamping to
        # [-2, 2], since 2 < pi.
        steer_expect = min(2, steer_expect)
        steer_expect = max(-2, steer_expect)
        if steer_expect > np.pi:
            steer_expect -= 2 * np.pi
        if steer_expect < - np.pi:
            steer_expect += 2 * np.pi
        #print("steer expect: ", steer_expect)
return steer_expect | 0.741955 | 0.345975 |
import torch.nn as nn
import dgl
from net.blocks import MLPReadout
from net.layer import GraphTransformerLayer
class GraphTransformerNet(nn.Module):
    """Graph Transformer for graph-level scalar regression.

    Embeds node features plus a Laplacian positional encoding, runs a stack
    of GraphTransformerLayers jointly over node and edge states, pools node
    states into a graph vector and regresses a single value through an MLP.
    """

    def __init__(self, net_params):
        """
        :param net_params: dict of hyperparameters — feature sizes
            ('num_atom_features', 'num_edge_input_dim', 'pos_enc_dim'),
            model widths ('hidden_dim', 'out_dim', 'n_heads', 'L' layers),
            dropout rates, 'type_loss' in {"MSE", "MAE"}, 'readout' in
            {"sum", "max", anything-else => mean}, normalization/residual
            flags and 'device'.
        :raises ValueError: if 'type_loss' is not "MSE" or "MAE"
        """
        super().__init__()
        num_atom_features = net_params['num_atom_features']
        num_edge_input_dim = net_params['num_edge_input_dim']
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        mlp_dropout = net_params['mlp_dropout']
        n_layers = net_params['L']
        pos_enc_dim = net_params['pos_enc_dim']
        type_loss = net_params['type_loss']
        self.readout = net_params['readout']
        self.layer_norm = net_params['layer_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.embedding_lap_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(num_atom_features, hidden_dim)
        self.embedding_e = nn.Linear(num_edge_input_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        # n_layers - 1 hidden->hidden layers, then one hidden->out_dim layer.
        self.layers = nn.ModuleList([GraphTransformerLayer(hidden_dim, hidden_dim, num_heads, dropout,
                                                           self.layer_norm, self.batch_norm, self.residual) for _ in
                                     range(n_layers - 1)])
        self.layers.append(
            GraphTransformerLayer(hidden_dim, out_dim, num_heads, dropout, self.layer_norm, self.batch_norm,
                                  self.residual))
        self.MLP_layer = MLPReadout(out_dim, 1, drop=mlp_dropout)  # 1 out dim since regression problem
        if type_loss == "MSE":
            self.func_loss = nn.MSELoss()
        elif type_loss == "MAE":
            self.func_loss = nn.L1Loss()
        else:
            # BUG FIX: an unknown 'type_loss' previously left self.func_loss
            # unset, deferring failure to the first loss() call as an opaque
            # AttributeError. Fail fast with a clear message instead.
            raise ValueError("Unsupported type_loss {!r}; expected 'MSE' or 'MAE'".format(type_loss))

    def forward(self, g, h, e, h_lap_pos_enc):
        """
        :param g: batched DGL graph
        :param h: node feature tensor
        :param e: edge feature tensor
        :param h_lap_pos_enc: Laplacian positional encoding per node
        :return: regression output from the MLP readout
        """
        # Node embedding + positional encoding.
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        h_lap_pos_enc = self.embedding_lap_pos_enc(h_lap_pos_enc.float())
        h = h + h_lap_pos_enc
        # Edge embedding.
        e = self.embedding_e(e)
        # Transformer layers update node and edge states jointly.
        for conv in self.layers:
            h, e = conv(g, h, e)
        g.ndata['h'] = h
        # Graph-level readout over node states (anything other than
        # "sum"/"max" falls back to mean).
        if self.readout == "sum":
            hg = dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            hg = dgl.max_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')
        return self.MLP_layer(hg)
def loss(self, scores, targets):
return self.func_loss(scores.float(), targets.float()) | net/model.py | import torch.nn as nn
import dgl
from net.blocks import MLPReadout
from net.layer import GraphTransformerLayer
class GraphTransformerNet(nn.Module):
def __init__(self, net_params):
super().__init__()
num_atom_features = net_params['num_atom_features']
num_edge_input_dim = net_params['num_edge_input_dim']
hidden_dim = net_params['hidden_dim']
num_heads = net_params['n_heads']
out_dim = net_params['out_dim']
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
mlp_dropout = net_params['mlp_dropout']
n_layers = net_params['L']
pos_enc_dim = net_params['pos_enc_dim']
type_loss = net_params['type_loss']
self.readout = net_params['readout']
self.layer_norm = net_params['layer_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.device = net_params['device']
self.embedding_lap_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
self.embedding_h = nn.Linear(num_atom_features, hidden_dim)
self.embedding_e = nn.Linear(num_edge_input_dim, hidden_dim)
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GraphTransformerLayer(hidden_dim, hidden_dim, num_heads, dropout,
self.layer_norm, self.batch_norm, self.residual) for _ in
range(n_layers - 1)])
self.layers.append(
GraphTransformerLayer(hidden_dim, out_dim, num_heads, dropout, self.layer_norm, self.batch_norm,
self.residual))
self.MLP_layer = MLPReadout(out_dim, 1, drop=mlp_dropout) # 1 out dim since regression problem
if type_loss == "MSE":
self.func_loss = nn.MSELoss()
elif type_loss == "MAE":
self.func_loss = nn.L1Loss()
def forward(self, g, h, e, h_lap_pos_enc):
# input embedding
# Node Embedding and Positional Encoding
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
h_lap_pos_enc = self.embedding_lap_pos_enc(h_lap_pos_enc.float())
h = h + h_lap_pos_enc
# Edge Embedding
e = self.embedding_e(e)
# convnets
for conv in self.layers:
h, e = conv(g, h, e)
g.ndata['h'] = h
if self.readout == "sum":
hg = dgl.sum_nodes(g, 'h')
elif self.readout == "max":
hg = dgl.max_nodes(g, 'h')
else:
hg = dgl.mean_nodes(g, 'h')
return self.MLP_layer(hg)
def loss(self, scores, targets):
return self.func_loss(scores.float(), targets.float()) | 0.925949 | 0.303833 |
import dataclasses
import os
from typing import Any, Dict, Optional
import yahp as hp
from composer.utils.libcloud_object_store import LibcloudObjectStore
@dataclasses.dataclass
class LibcloudObjectStoreHparams(hp.Hparams):
""":class:`~.LibcloudObjectStore` hyperparameters.
.. rubric:: Example
Here's an example on how to connect to an Amazon S3 bucket. This example assumes:
    * The container is named ``MY_CONTAINER``.
* The AWS Access Key ID is stored in an environment variable named ``AWS_ACCESS_KEY_ID``.
* The Secret Access Key is in an environmental variable named ``AWS_SECRET_ACCESS_KEY``.
.. testsetup:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.s3
import os
os.environ["AWS_ACCESS_KEY_ID"] = "key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "secret"
.. doctest:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.s3
>>> from composer.utils.libcloud_object_store_hparams import LibcloudObjectStoreHparams
>>> provider_hparams = LibcloudObjectStoreHparams(
... provider="s3",
... container="MY_CONTAINER",
... key_environ="AWS_ACCESS_KEY_ID",
... secret_environ="AWS_SECRET_ACCESS_KEY",
... )
>>> provider = provider_hparams.initialize_object()
>>> provider
<composer.utils.libcloud_object_store.LibcloudObjectStore object at ...>
Args:
provider (str): Cloud provider to use.
See :class:`LibcloudObjectStore` for documentation.
container (str): The name of the container (i.e. bucket) to use.
key_environ (str, optional): The name of an environment variable containing the API key or username
to use to connect to the provider. If no key is required, then set this field to ``None``.
(default: ``None``)
For security reasons, composer requires that the key be specified via an environment variable.
For example, if your key is an environment variable called ``OBJECT_STORE_KEY`` that is set to ``MY_KEY``,
then you should set this parameter equal to ``OBJECT_STORE_KEY``. Composer will read the key like this:
.. testsetup:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.key
import os
import functools
from composer.utils.libcloud_object_store_hparams import LibcloudObjectStoreHparams
os.environ["OBJECT_STORE_KEY"] = "MY_KEY"
LibcloudObjectStoreHparams = functools.partial(LibcloudObjectStoreHparams, provider="s3", container="container")
.. doctest:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.key
>>> import os
>>> params = LibcloudObjectStoreHparams(key_environ="OBJECT_STORE_KEY")
>>> key = os.environ[params.key_environ]
>>> key
'MY_KEY'
secret_environ (str, optional): The name of an environment variable containing the API secret or password
to use for the provider. If no secret is required, then set this field to ``None``. (default: ``None``)
For security reasons, composer requires that the secret be specified via an environment variable.
For example, if your secret is an environment variable called ``OBJECT_STORE_SECRET`` that is set to ``MY_SECRET``,
then you should set this parameter equal to ``OBJECT_STORE_SECRET``. Composer will read the secret like this:
.. testsetup:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.secret
import os
import functools
from composer.utils.libcloud_object_store_hparams import LibcloudObjectStoreHparams
original_secret = os.environ.get("OBJECT_STORE_SECRET")
os.environ["OBJECT_STORE_SECRET"] = "MY_SECRET"
LibcloudObjectStoreHparams = functools.partial(LibcloudObjectStoreHparams, provider="s3", container="container")
.. doctest:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.secret
>>> import os
>>> params = LibcloudObjectStoreHparams(secret_environ="OBJECT_STORE_SECRET")
>>> secret = os.environ[params.secret_environ]
>>> secret
'MY_SECRET'
region (str, optional): Cloud region to use for the cloud provider.
Most providers do not require the region to be specified. (default: ``None``)
host (str, optional): Override the hostname for the cloud provider. (default: ``None``)
port (int, optional): Override the port for the cloud provider. (default: ``None``)
extra_init_kwargs (Dict[str, Any], optional): Extra keyword arguments to pass into the constructor
for the specified provider. (default: ``None``, which is equivalent to an empty dictionary)
.. seealso:: :class:`libcloud.storage.base.StorageDriver`
"""
provider: str = hp.auto(LibcloudObjectStore, "provider")
container: str = hp.auto(LibcloudObjectStore, "container")
key_environ: Optional[str] = hp.optional(("The name of an environment variable containing "
"an API key or username to use to connect to the provider."),
default=None)
secret_environ: Optional[str] = hp.optional(("The name of an environment variable containing "
"an API secret or password to use to connect to the provider."),
default=None)
region: Optional[str] = hp.optional("Cloud region to use", default=None)
host: Optional[str] = hp.optional("Override hostname for connections", default=None)
port: Optional[int] = hp.optional("Override port for connections", default=None)
extra_init_kwargs: Dict[str, Any] = hp.optional(
"Extra keyword arguments to pass into the constructor for the specified provider.", default_factory=dict)
def get_provider_kwargs(self) -> Dict[str, Any]:
"""Returns the ``provider_kwargs`` argument, which is used to construct a :class:`.LibcloudObjectStore`.
Returns:
Dict[str, Any]: The ``provider_kwargs`` for use in constructing an :class:`.LibcloudObjectStore`.
"""
init_kwargs = {}
for key in ("host", "port", "region"):
kwarg = getattr(self, key)
if getattr(self, key) is not None:
init_kwargs[key] = kwarg
init_kwargs["key"] = None if self.key_environ is None else os.environ[self.key_environ]
init_kwargs["secret"] = None if self.secret_environ is None else os.environ[self.secret_environ]
init_kwargs.update(self.extra_init_kwargs)
return init_kwargs
    def initialize_object(self):
        """Returns an instance of :class:`.LibcloudObjectStore`.

        Returns:
            LibcloudObjectStore: The object_store.
        """
        # Credentials/overrides are resolved now (via get_provider_kwargs),
        # so later environment changes do not affect the returned store.
        return LibcloudObjectStore(
            provider=self.provider,
            container=self.container,
            provider_kwargs=self.get_provider_kwargs(),
) | composer/utils/libcloud_object_store_hparams.py | import dataclasses
import os
from typing import Any, Dict, Optional
import yahp as hp
from composer.utils.libcloud_object_store import LibcloudObjectStore
@dataclasses.dataclass
class LibcloudObjectStoreHparams(hp.Hparams):
""":class:`~.LibcloudObjectStore` hyperparameters.
.. rubric:: Example
Here's an example on how to connect to an Amazon S3 bucket. This example assumes:
    * The container is named ``MY_CONTAINER``.
* The AWS Access Key ID is stored in an environment variable named ``AWS_ACCESS_KEY_ID``.
* The Secret Access Key is in an environmental variable named ``AWS_SECRET_ACCESS_KEY``.
.. testsetup:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.s3
import os
os.environ["AWS_ACCESS_KEY_ID"] = "key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "secret"
.. doctest:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.s3
>>> from composer.utils.libcloud_object_store_hparams import LibcloudObjectStoreHparams
>>> provider_hparams = LibcloudObjectStoreHparams(
... provider="s3",
... container="MY_CONTAINER",
... key_environ="AWS_ACCESS_KEY_ID",
... secret_environ="AWS_SECRET_ACCESS_KEY",
... )
>>> provider = provider_hparams.initialize_object()
>>> provider
<composer.utils.libcloud_object_store.LibcloudObjectStore object at ...>
Args:
provider (str): Cloud provider to use.
See :class:`LibcloudObjectStore` for documentation.
container (str): The name of the container (i.e. bucket) to use.
key_environ (str, optional): The name of an environment variable containing the API key or username
to use to connect to the provider. If no key is required, then set this field to ``None``.
(default: ``None``)
For security reasons, composer requires that the key be specified via an environment variable.
For example, if your key is an environment variable called ``OBJECT_STORE_KEY`` that is set to ``MY_KEY``,
then you should set this parameter equal to ``OBJECT_STORE_KEY``. Composer will read the key like this:
.. testsetup:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.key
import os
import functools
from composer.utils.libcloud_object_store_hparams import LibcloudObjectStoreHparams
os.environ["OBJECT_STORE_KEY"] = "MY_KEY"
LibcloudObjectStoreHparams = functools.partial(LibcloudObjectStoreHparams, provider="s3", container="container")
.. doctest:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.key
>>> import os
>>> params = LibcloudObjectStoreHparams(key_environ="OBJECT_STORE_KEY")
>>> key = os.environ[params.key_environ]
>>> key
'MY_KEY'
secret_environ (str, optional): The name of an environment variable containing the API secret or password
to use for the provider. If no secret is required, then set this field to ``None``. (default: ``None``)
For security reasons, composer requires that the secret be specified via an environment variable.
For example, if your secret is an environment variable called ``OBJECT_STORE_SECRET`` that is set to ``MY_SECRET``,
then you should set this parameter equal to ``OBJECT_STORE_SECRET``. Composer will read the secret like this:
.. testsetup:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.secret
import os
import functools
from composer.utils.libcloud_object_store_hparams import LibcloudObjectStoreHparams
original_secret = os.environ.get("OBJECT_STORE_SECRET")
os.environ["OBJECT_STORE_SECRET"] = "MY_SECRET"
LibcloudObjectStoreHparams = functools.partial(LibcloudObjectStoreHparams, provider="s3", container="container")
.. doctest:: composer.utils.libcloud_object_store.LibcloudObjectStoreHparams.__init__.secret
>>> import os
>>> params = LibcloudObjectStoreHparams(secret_environ="OBJECT_STORE_SECRET")
>>> secret = os.environ[params.secret_environ]
>>> secret
'MY_SECRET'
region (str, optional): Cloud region to use for the cloud provider.
Most providers do not require the region to be specified. (default: ``None``)
host (str, optional): Override the hostname for the cloud provider. (default: ``None``)
port (int, optional): Override the port for the cloud provider. (default: ``None``)
extra_init_kwargs (Dict[str, Any], optional): Extra keyword arguments to pass into the constructor
for the specified provider. (default: ``None``, which is equivalent to an empty dictionary)
.. seealso:: :class:`libcloud.storage.base.StorageDriver`
"""
provider: str = hp.auto(LibcloudObjectStore, "provider")
container: str = hp.auto(LibcloudObjectStore, "container")
key_environ: Optional[str] = hp.optional(("The name of an environment variable containing "
"an API key or username to use to connect to the provider."),
default=None)
secret_environ: Optional[str] = hp.optional(("The name of an environment variable containing "
"an API secret or password to use to connect to the provider."),
default=None)
region: Optional[str] = hp.optional("Cloud region to use", default=None)
host: Optional[str] = hp.optional("Override hostname for connections", default=None)
port: Optional[int] = hp.optional("Override port for connections", default=None)
extra_init_kwargs: Dict[str, Any] = hp.optional(
"Extra keyword arguments to pass into the constructor for the specified provider.", default_factory=dict)
def get_provider_kwargs(self) -> Dict[str, Any]:
"""Returns the ``provider_kwargs`` argument, which is used to construct a :class:`.LibcloudObjectStore`.
Returns:
Dict[str, Any]: The ``provider_kwargs`` for use in constructing an :class:`.LibcloudObjectStore`.
"""
init_kwargs = {}
for key in ("host", "port", "region"):
kwarg = getattr(self, key)
if getattr(self, key) is not None:
init_kwargs[key] = kwarg
init_kwargs["key"] = None if self.key_environ is None else os.environ[self.key_environ]
init_kwargs["secret"] = None if self.secret_environ is None else os.environ[self.secret_environ]
init_kwargs.update(self.extra_init_kwargs)
return init_kwargs
def initialize_object(self):
"""Returns an instance of :class:`.LibcloudObjectStore`.
Returns:
LibcloudObjectStore: The object_store.
"""
return LibcloudObjectStore(
provider=self.provider,
container=self.container,
provider_kwargs=self.get_provider_kwargs(),
) | 0.808672 | 0.171408 |
from rest_framework import serializers
from website.models import Monster, MonsterBase, MonsterFamily, Rune, RuneSet, Artifact, SiegeRecord, DungeonRun
class RuneFullSerializer(serializers.ModelSerializer):
    """Detailed rune payload: display-name stats, substat rows and the
    full-size rune image (contrast the compact RuneSerializer)."""
    quality = serializers.CharField(source='get_quality_display')
    quality_original = serializers.CharField(
        source='get_quality_original_display')
    rune_set = serializers.StringRelatedField()
    primary = serializers.CharField(source='get_primary_display')
    innate = serializers.SerializerMethodField()
    innate_value = serializers.SerializerMethodField()
    substats = serializers.SerializerMethodField()
    image = serializers.SerializerMethodField()
    stars = serializers.SerializerMethodField()
    ancient = serializers.SerializerMethodField()

    class Meta:
        model = Rune
        fields = [
            'id', 'slot', 'quality', 'quality_original', 'stars', 'rune_set', 'upgrade_curr', 'primary',
            'primary_value', 'innate', 'innate_value', 'substats', 'efficiency', 'efficiency_max',
            'equipped', 'equipped_rta', 'locked', 'image', 'ancient'
        ]

    def get_image(self, obj):
        # Full-size rune art (RuneSerializer uses the smaller get_image()).
        return obj.get_full_image()

    def get_innate(self, obj):
        # get_innate_display() yields 0 when the rune has no innate stat;
        # expose that case as an explicit null.
        disp = obj.get_innate_display()
        return disp if disp != 0 else None

    def get_innate_value(self, obj):
        # Mirrors get_innate(): hide the value when there is no innate stat.
        disp = obj.get_innate_display()
        return obj.innate_value if disp != 0 else None

    def get_substats(self, obj):
        return obj.get_substats_row()

    def get_stars(self, obj):
        # NOTE(review): stars appears to encode ancient runes with a +10
        # offset; % 10 recovers the displayed star count — confirm against
        # the Rune model.
        return obj.stars % 10

    def get_ancient(self, obj):
        return obj.is_ancient()
class RuneSerializer(serializers.ModelSerializer):
    """Compact rune payload (small image, raw innate display) used when
    runes are nested inside monster payloads."""
    quality = serializers.CharField(source='get_quality_display')
    quality_original = serializers.CharField(
        source='get_quality_original_display')
    rune_set = serializers.StringRelatedField()
    # Expose the model's upgrade_curr under the friendlier name 'level'.
    level = serializers.IntegerField(source='upgrade_curr')
    primary = serializers.CharField(source='get_primary_display')
    innate = serializers.CharField(source='get_innate_display')
    substats = serializers.SerializerMethodField()
    image = serializers.SerializerMethodField()
    ancient = serializers.SerializerMethodField()

    class Meta:
        model = Rune
        fields = [
            'id', 'slot', 'quality', 'quality_original', 'stars', 'rune_set', 'level', 'primary',
            'primary_value', 'innate', 'innate_value', 'substats', 'efficiency', 'efficiency_max',
            'equipped', 'equipped_rta', 'locked', 'image', 'ancient'
        ]

    def get_image(self, obj):
        # Standard (small) rune image.
        return obj.get_image()

    def get_substats(self, obj):
        return obj.get_substats()

    def get_ancient(self, obj):
        return obj.is_ancient()
class ArtifactSerializer(serializers.ModelSerializer):
    """Artifact payload with display-friendly quality/primary stats and
    slot type exposed as 'rtype'."""
    rtype = serializers.SerializerMethodField()
    primary = serializers.CharField(source='get_primary_display')
    substats = serializers.SerializerMethodField()
    quality = serializers.CharField(source='get_quality_display')
    quality_original = serializers.CharField(
        source='get_quality_original_display')
    image = serializers.SerializerMethodField()

    class Meta:
        model = Artifact
        fields = [
            'id', 'rtype', 'level', 'primary', 'primary_value', 'substats', 'quality', 'quality_original',
            'efficiency', 'efficiency_max', 'equipped', 'equipped_rta', 'locked', 'image'
        ]

    def get_rtype(self, obj):
        # Artifact slot type, resolved by the model.
        return obj.get_slot_type()

    def get_substats(self, obj):
        return obj.get_substats_with_values()

    def get_image(self, obj):
        return obj.get_image()
class MonsterBaseSerializer(serializers.ModelSerializer):
    """Static monster data (family, attribute, archetype, awaken state)
    with its image; nested inside MonsterSerializer."""
    image = serializers.SerializerMethodField()
    family = serializers.StringRelatedField()
    attribute = serializers.CharField(source='get_attribute_display')
    archetype = serializers.CharField(source='get_archetype_display')
    awaken = serializers.CharField(source='get_awaken_display')

    class Meta:
        model = MonsterBase
        fields = [
            'id', 'family', 'base_class', 'name', 'attribute', 'archetype', 'max_skills', 'awaken', 'image',
        ]

    def get_image(self, obj):
        return obj.get_image()
class MonsterSerializer(serializers.ModelSerializer):
    """Full monster instance: combat stats plus nested base data, and both
    the normal and RTA rune/artifact loadouts."""
    image = serializers.SerializerMethodField()
    base_monster = MonsterBaseSerializer()
    runes = RuneSerializer(many=True)
    runes_rta = RuneSerializer(many=True)
    artifacts = ArtifactSerializer(many=True)
    artifacts_rta = ArtifactSerializer(many=True)

    class Meta:
        model = Monster
        fields = [
            'id', 'base_monster', 'level', 'stars', 'hp', 'attack', 'defense', 'speed',
            'res', 'acc', 'crit_rate', 'crit_dmg', 'avg_eff_total', 'eff_hp',
            'skills', 'runes', 'runes_rta', 'artifacts', 'artifacts_rta', 'created', 'image',
        ]

    def get_image(self, obj):
        return obj.get_image()
class MonsterImageSerializer(serializers.ModelSerializer):
    """Minimal monster payload — id and image only — for lightweight lists."""
    image = serializers.SerializerMethodField()

    class Meta:
        model = Monster
        fields = [
            'id', 'image',
        ]

    def get_image(self, obj):
        return obj.get_image()
class SiegeSerializer(serializers.ModelSerializer):
    """Siege defense record: the team, its leader and win/loss statistics."""
    monsters = MonsterSerializer(many=True)
    leader = MonsterSerializer()
    ranking = serializers.SerializerMethodField()

    class Meta:
        model = SiegeRecord
        fields = ['monsters', 'leader', 'win', 'lose', 'ratio', 'ranking']
def get_ranking(self, obj):
return obj.wizard.guild.get_siege_ranking_display() if obj.wizard.guild else "Unknown" | swstats_web/serializers.py | from rest_framework import serializers
from website.models import Monster, MonsterBase, MonsterFamily, Rune, RuneSet, Artifact, SiegeRecord, DungeonRun
class RuneFullSerializer(serializers.ModelSerializer):
quality = serializers.CharField(source='get_quality_display')
quality_original = serializers.CharField(
source='get_quality_original_display')
rune_set = serializers.StringRelatedField()
primary = serializers.CharField(source='get_primary_display')
innate = serializers.SerializerMethodField()
innate_value = serializers.SerializerMethodField()
substats = serializers.SerializerMethodField()
image = serializers.SerializerMethodField()
stars = serializers.SerializerMethodField()
ancient = serializers.SerializerMethodField()
class Meta:
model = Rune
fields = [
'id', 'slot', 'quality', 'quality_original', 'stars', 'rune_set', 'upgrade_curr', 'primary',
'primary_value', 'innate', 'innate_value', 'substats', 'efficiency', 'efficiency_max',
'equipped', 'equipped_rta', 'locked', 'image', 'ancient'
]
def get_image(self, obj):
return obj.get_full_image()
def get_innate(self, obj):
disp = obj.get_innate_display()
return disp if disp != 0 else None
def get_innate_value(self, obj):
disp = obj.get_innate_display()
return obj.innate_value if disp != 0 else None
def get_substats(self, obj):
return obj.get_substats_row()
def get_stars(self, obj):
return obj.stars % 10
def get_ancient(self, obj):
return obj.is_ancient()
class RuneSerializer(serializers.ModelSerializer):
quality = serializers.CharField(source='get_quality_display')
quality_original = serializers.CharField(
source='get_quality_original_display')
rune_set = serializers.StringRelatedField()
level = serializers.IntegerField(source='upgrade_curr')
primary = serializers.CharField(source='get_primary_display')
innate = serializers.CharField(source='get_innate_display')
substats = serializers.SerializerMethodField()
image = serializers.SerializerMethodField()
ancient = serializers.SerializerMethodField()
class Meta:
model = Rune
fields = [
'id', 'slot', 'quality', 'quality_original', 'stars', 'rune_set', 'level', 'primary',
'primary_value', 'innate', 'innate_value', 'substats', 'efficiency', 'efficiency_max',
'equipped', 'equipped_rta', 'locked', 'image', 'ancient'
]
def get_image(self, obj):
return obj.get_image()
def get_substats(self, obj):
return obj.get_substats()
def get_ancient(self, obj):
return obj.is_ancient()
class ArtifactSerializer(serializers.ModelSerializer):
rtype = serializers.SerializerMethodField()
primary = serializers.CharField(source='get_primary_display')
substats = serializers.SerializerMethodField()
quality = serializers.CharField(source='get_quality_display')
quality_original = serializers.CharField(
source='get_quality_original_display')
image = serializers.SerializerMethodField()
class Meta:
model = Artifact
fields = [
'id', 'rtype', 'level', 'primary', 'primary_value', 'substats', 'quality', 'quality_original',
'efficiency', 'efficiency_max', 'equipped', 'equipped_rta', 'locked', 'image'
]
def get_rtype(self, obj):
return obj.get_slot_type()
def get_substats(self, obj):
return obj.get_substats_with_values()
def get_image(self, obj):
return obj.get_image()
class MonsterBaseSerializer(serializers.ModelSerializer):
image = serializers.SerializerMethodField()
family = serializers.StringRelatedField()
attribute = serializers.CharField(source='get_attribute_display')
archetype = serializers.CharField(source='get_archetype_display')
awaken = serializers.CharField(source='get_awaken_display')
class Meta:
model = MonsterBase
fields = [
'id', 'family', 'base_class', 'name', 'attribute', 'archetype', 'max_skills', 'awaken', 'image',
]
def get_image(self, obj):
return obj.get_image()
class MonsterSerializer(serializers.ModelSerializer):
    """Full monster instance: stats plus nested base data, runes and artifacts.

    Separate ``*_rta`` collections carry the real-time-arena loadout.
    """
    image = serializers.SerializerMethodField()
    base_monster = MonsterBaseSerializer()
    runes = RuneSerializer(many=True)
    runes_rta = RuneSerializer(many=True)
    artifacts = ArtifactSerializer(many=True)
    artifacts_rta = ArtifactSerializer(many=True)
    class Meta:
        model = Monster
        fields = [
            'id', 'base_monster', 'level', 'stars', 'hp', 'attack', 'defense', 'speed',
            'res', 'acc', 'crit_rate', 'crit_dmg', 'avg_eff_total', 'eff_hp',
            'skills', 'runes', 'runes_rta', 'artifacts', 'artifacts_rta', 'created', 'image',
        ]
    def get_image(self, obj):
        # Icon path for this monster instance.
        return obj.get_image()
class MonsterImageSerializer(serializers.ModelSerializer):
    """Minimal monster payload: id and icon only (for lightweight lists)."""
    image = serializers.SerializerMethodField()
    class Meta:
        model = Monster
        fields = [
            'id', 'image',
        ]
    def get_image(self, obj):
        # Icon path for this monster.
        return obj.get_image()
class SiegeSerializer(serializers.ModelSerializer):
    """Siege defense record: the monsters, the leader, win/lose stats and guild ranking."""
    monsters = MonsterSerializer(many=True)
    leader = MonsterSerializer()
    ranking = serializers.SerializerMethodField()

    class Meta:
        model = SiegeRecord
        fields = ['monsters', 'leader', 'win', 'lose', 'ratio', 'ranking']

    def get_ranking(self, obj):
        # Guild may be unset (wizard without a guild); fall back to "Unknown".
        # Fix: removed dataset-dump residue ("| 0.711732 | 0.11088 |") that was
        # fused onto this return line and broke the syntax.
        return obj.wizard.guild.get_siege_ranking_display() if obj.wizard.guild else "Unknown"
import discord
from discord.ext import commands
from src.file_verification import file_verification
# Your Discord bot token:
token = "TOKEN"
# Banned words searched inside uploaded files (default: 'token'):
key = "token"
# Allowed file extensions; leave the list empty to disable the restriction:
authorized = ['py', 'txt', 'png', 'jpg', 'jpeg', 'gif', 'mp3', 'mp4', 'json', 'bat']
# Should logs be sent to you by direct message?
logs = False
# Your Discord user id, used only when logs are enabled:
user_logs = 0
# Maximum size allowed when forwarding files in logs, if logs are enabled:
# Example: 1KB = 1000 | 1MB = 1000000 | unlimited = 0
max_size = 100000
# Remember to unblock your DMs if logs are enabled!
intents = discord.Intents.all()
intents.members = True
keter = commands.Bot( command_prefix= "keter", description= "keter", intents=intents)
def content_type(file):
    """Return the extension of an attachment's filename (text after the last dot)."""
    return file.filename.rsplit('.', 1)[-1]
@keter.event
async def on_ready():
    """Set the bot presence and, when logging is on, resolve the log recipient."""
    global user_logs
    await keter.change_presence(activity=discord.Game(name='src -> github.com/billythegoat356/Keter'))
    print("Prêt!")
    # Resolve the configured user id into a User object only when DM logs are enabled.
    user_logs = keter.get_user(user_logs) if logs else user_logs
@keter.listen()
async def on_message(message):
    """Delete attachments that have a disallowed extension or suspicious content."""
    author = message.author
    channel = message.channel
    # Ignore messages from bots (including ourselves).
    if author.bot:
        return
    for file in message.attachments:
        # Extension whitelist check; skipped entirely when `authorized` is empty.
        if len(authorized) and content_type(file).lower() not in authorized:
            await message.delete()
            await channel.send(content=f"Mmmh, l'extension de ton fichier ['{content_type(file).lower()}'] ne fait pas partie de celles autorisées {authorized} {author.mention}!")
            return
        # Content scan; the 5th argument forwards the log recipient when DM logs are on.
        if await file_verification(file, author, key, max_size, user_logs) if logs else await file_verification(file, author, key, max_size):
            await message.delete()
            await channel.send(content=f"Mmmh, ton fichier m'a l'air suspect {author.mention}!")
keter.run(token)  # Start the bot (blocking). Fix: removed dataset-dump residue ("| File Verification/keter.py |") fused onto this line.
import discord
from discord.ext import commands
from src.file_verification import file_verification
# Your Discord bot token:
token = "TOKEN"
# Banned words searched inside uploaded files (default: 'token'):
key = "token"
# Allowed file extensions; leave the list empty to disable the restriction:
authorized = ['py', 'txt', 'png', 'jpg', 'jpeg', 'gif', 'mp3', 'mp4', 'json', 'bat']
# Should logs be sent to you by direct message?
logs = False
# Your Discord user id, used only when logs are enabled:
user_logs = 0
# Maximum size allowed when forwarding files in logs, if logs are enabled:
# Example: 1KB = 1000 | 1MB = 1000000 | unlimited = 0
max_size = 100000
# Remember to unblock your DMs if logs are enabled!
intents = discord.Intents.all()
intents.members = True
keter = commands.Bot( command_prefix= "keter", description= "keter", intents=intents)
def content_type(file):
    """Return the extension of an attachment's filename (text after the last dot)."""
    return file.filename.rsplit('.', 1)[-1]
@keter.event
async def on_ready():
    """Set the bot presence and, when logging is on, resolve the log recipient."""
    global user_logs
    await keter.change_presence(activity=discord.Game(name='src -> github.com/billythegoat356/Keter'))
    print("Prêt!")
    # Resolve the configured user id into a User object only when DM logs are enabled.
    user_logs = keter.get_user(user_logs) if logs else user_logs
@keter.listen()
async def on_message(message):
    """Delete attachments that have a disallowed extension or suspicious content."""
    author = message.author
    channel = message.channel
    # Ignore messages from bots (including ourselves).
    if author.bot:
        return
    for file in message.attachments:
        # Extension whitelist check; skipped entirely when `authorized` is empty.
        if len(authorized) and content_type(file).lower() not in authorized:
            await message.delete()
            await channel.send(content=f"Mmmh, l'extension de ton fichier ['{content_type(file).lower()}'] ne fait pas partie de celles autorisées {authorized} {author.mention}!")
            return
        # Content scan; the 5th argument forwards the log recipient when DM logs are on.
        if await file_verification(file, author, key, max_size, user_logs) if logs else await file_verification(file, author, key, max_size):
            await message.delete()
            await channel.send(content=f"Mmmh, ton fichier m'a l'air suspect {author.mention}!")
keter.run(token)  # Start the bot (blocking call).
import os
import re
import sys
from lxml import etree
from skilletlib import Panoply
from skilletlib.exceptions import LoginException
from skilletlib.exceptions import SkilletLoaderException
# Decide where the two configurations come from: local files ("offline")
# or a live PAN-OS device reached through skilletlib.
config_source = os.environ.get("skillet_source", "offline")
if config_source == "offline":
    # grab our two configs from the environment
    base_config_path = os.environ.get("BASE_CONFIG", "")
    latest_config_path = os.environ.get("LATEST_CONFIG", "")
    with open(base_config_path, "r") as bcf:
        base_config = bcf.read()
    with open(latest_config_path, "r") as lcf:
        latest_config = lcf.read()
    p = Panoply()
    snippets = p.generate_skillet_from_configs(base_config, latest_config)
else:
    # each variable will be present in the environ dict on the 'os' module
    username = os.environ.get("TARGET_USERNAME", "admin")
    password = os.environ.get("TARGET_PASSWORD", "")
    ip = os.environ.get("TARGET_IP", "")
    config_source = os.environ.get("CONFIG_SOURCE", "candidate")
    snippets = list()
    try:
        device = Panoply(hostname=ip, api_username=username, api_password=password, debug=False)
        # Pick the pair of configs to diff, depending on the requested source.
        if config_source == "specific":
            config_version = os.environ.get("CONFIG_VERSION", "-1")
            previous_config = device.get_configuration(config_source=config_version)
            latest_config = device.get_configuration(config_source="running")
        elif config_source == "candidate":
            previous_config = device.get_configuration(config_source="running")
            latest_config = device.get_configuration(config_source="candidate")
        else:
            # use previous config by default
            previous_config = device.get_configuration(config_source="-1")
            latest_config = device.get_configuration(config_source="running")
        snippets = device.generate_skillet_from_configs(previous_config, latest_config)
        # Exit code 2 signals "nothing to do" to the caller.
        if len(snippets) == 0 and config_source == "candidate":
            print("No Candidate Configuration can be found to use to build a skillet!")
            sys.exit(2)
        elif len(snippets) == 0:
            print(f"No changes found between {previous_config} and {latest_config}")
            sys.exit(2)
    except SkilletLoaderException as se:
        print("Error Executing Skillet")
        print(se)
        sys.exit(1)
    except LoginException as le:
        print("Error Logging into device")
        print(le)
        sys.exit(1)
# Parse the newest config and wrap every changed element in a highlight <span>.
latest_doc = etree.fromstring(latest_config)
print("#" * 80)
print(" ")
print("The following xpaths were found to be modified:")
print(" ")
print("-" * 80)
print(" ")
for s in snippets:
    name = s.get("name", "")
    snippet_xpath = s.get("xpath")
    full_xpath = s.get("full_xpath", "")
    print(f'<a href="#{name}">{full_xpath}</a>')
    # Make the absolute xpath relative to the parsed document root.
    xpath = re.sub("^/config", ".", snippet_xpath)
    # parent_element_xpath = '.' + "/".join(xpath.split('/')[:-1])
    parent_elements = latest_doc.xpath(xpath)
    if not parent_elements:
        print("something is broken here")
        continue
    parent_element = parent_elements[0]
    element_string = s.get("element", "")
    # find child element index
    index = 0
    found = False
    for child in parent_element:
        cs = etree.tostring(child).decode("UTF-8")
        cs_stripped = cs.strip()
        # Preserve trailing whitespace so pretty-printing stays intact after re-insertion.
        whitespace_match = re.search(r"(\s+)$", cs)
        if whitespace_match:
            whitespace = whitespace_match.group()
        else:
            whitespace = ""
        if element_string == cs_stripped:
            # found our child index
            found = True
            parent_element.remove(child)
            title = snippet_xpath.replace('"', "'")
            wrapped_child_element = etree.fromstring(
                f'<span id="{name}" class="text-danger" title="{title}">{element_string}{whitespace}</span>'
            )
            parent_element.insert(index, wrapped_child_element)
            break
        index = index + 1
    if not found:
        print("did not find this, odd")
def rp(match):
    """Replace a whitespace run with the same number of HTML ` ` entities.

    Fix: the original emitted the misspelled entity "&nsbp;", which browsers
    render literally instead of as a non-breaking space.
    """
    return " " * len(match.group())
# Render the annotated config: HTML-escape the XML, then un-escape only the
# <span> wrappers we injected so they survive as real markup in the preview.
# Fix: removed dataset-dump residue ("| generate_skillet_preview.py | import os")
# fused onto the final line.
latest_config_formatted = etree.tostring(latest_doc, pretty_print=True).decode("UTF-8")
latest_config_html = latest_config_formatted.replace("<", "&lt;").replace(">", "&gt;")
fixed_config_html_1 = re.sub(
    r'&lt;span id="(.*?)" class="(.*?)" title="(.*?)"&gt;', r'<span class="\2" id="\1" title="\3">', latest_config_html
)
fixed_config_html_2 = re.sub(r"&lt;/span&gt;", r"</span>", fixed_config_html_1)
print("-" * 80)
print(fixed_config_html_2)
print("-" * 80)
print("#" * 80)
# later gator
sys.exit(0)
import re
import sys
from lxml import etree
from skilletlib import Panoply
from skilletlib.exceptions import LoginException
from skilletlib.exceptions import SkilletLoaderException
# Decide where the two configurations come from: local files ("offline")
# or a live PAN-OS device reached through skilletlib.
config_source = os.environ.get("skillet_source", "offline")
if config_source == "offline":
    # grab our two configs from the environment
    base_config_path = os.environ.get("BASE_CONFIG", "")
    latest_config_path = os.environ.get("LATEST_CONFIG", "")
    with open(base_config_path, "r") as bcf:
        base_config = bcf.read()
    with open(latest_config_path, "r") as lcf:
        latest_config = lcf.read()
    p = Panoply()
    snippets = p.generate_skillet_from_configs(base_config, latest_config)
else:
    # each variable will be present in the environ dict on the 'os' module
    username = os.environ.get("TARGET_USERNAME", "admin")
    password = os.environ.get("TARGET_PASSWORD", "")
    ip = os.environ.get("TARGET_IP", "")
    config_source = os.environ.get("CONFIG_SOURCE", "candidate")
    snippets = list()
    try:
        device = Panoply(hostname=ip, api_username=username, api_password=password, debug=False)
        # Pick the pair of configs to diff, depending on the requested source.
        if config_source == "specific":
            config_version = os.environ.get("CONFIG_VERSION", "-1")
            previous_config = device.get_configuration(config_source=config_version)
            latest_config = device.get_configuration(config_source="running")
        elif config_source == "candidate":
            previous_config = device.get_configuration(config_source="running")
            latest_config = device.get_configuration(config_source="candidate")
        else:
            # use previous config by default
            previous_config = device.get_configuration(config_source="-1")
            latest_config = device.get_configuration(config_source="running")
        snippets = device.generate_skillet_from_configs(previous_config, latest_config)
        # Exit code 2 signals "nothing to do" to the caller.
        if len(snippets) == 0 and config_source == "candidate":
            print("No Candidate Configuration can be found to use to build a skillet!")
            sys.exit(2)
        elif len(snippets) == 0:
            print(f"No changes found between {previous_config} and {latest_config}")
            sys.exit(2)
    except SkilletLoaderException as se:
        print("Error Executing Skillet")
        print(se)
        sys.exit(1)
    except LoginException as le:
        print("Error Logging into device")
        print(le)
        sys.exit(1)
# Parse the newest config and wrap every changed element in a highlight <span>.
latest_doc = etree.fromstring(latest_config)
print("#" * 80)
print(" ")
print("The following xpaths were found to be modified:")
print(" ")
print("-" * 80)
print(" ")
for s in snippets:
    name = s.get("name", "")
    snippet_xpath = s.get("xpath")
    full_xpath = s.get("full_xpath", "")
    print(f'<a href="#{name}">{full_xpath}</a>')
    # Make the absolute xpath relative to the parsed document root.
    xpath = re.sub("^/config", ".", snippet_xpath)
    # parent_element_xpath = '.' + "/".join(xpath.split('/')[:-1])
    parent_elements = latest_doc.xpath(xpath)
    if not parent_elements:
        print("something is broken here")
        continue
    parent_element = parent_elements[0]
    element_string = s.get("element", "")
    # find child element index
    index = 0
    found = False
    for child in parent_element:
        cs = etree.tostring(child).decode("UTF-8")
        cs_stripped = cs.strip()
        # Preserve trailing whitespace so pretty-printing stays intact after re-insertion.
        whitespace_match = re.search(r"(\s+)$", cs)
        if whitespace_match:
            whitespace = whitespace_match.group()
        else:
            whitespace = ""
        if element_string == cs_stripped:
            # found our child index
            found = True
            parent_element.remove(child)
            title = snippet_xpath.replace('"', "'")
            wrapped_child_element = etree.fromstring(
                f'<span id="{name}" class="text-danger" title="{title}">{element_string}{whitespace}</span>'
            )
            parent_element.insert(index, wrapped_child_element)
            break
        index = index + 1
    if not found:
        print("did not find this, odd")
def rp(match):
    """Replace a whitespace run with the same number of HTML ` ` entities.

    Fix: the original emitted the misspelled entity "&nsbp;", which browsers
    render literally instead of as a non-breaking space.
    """
    return " " * len(match.group())
# Render the annotated config: HTML-escape the XML, then un-escape only the
# <span> wrappers we injected so they survive as real markup in the preview.
latest_config_formatted = etree.tostring(latest_doc, pretty_print=True).decode("UTF-8")
latest_config_html = latest_config_formatted.replace("<", "&lt;").replace(">", "&gt;")
fixed_config_html_1 = re.sub(
    r'&lt;span id="(.*?)" class="(.*?)" title="(.*?)"&gt;', r'<span class="\2" id="\1" title="\3">', latest_config_html
)
fixed_config_html_2 = re.sub(r"&lt;/span&gt;", r"</span>", fixed_config_html_1)
print("-" * 80)
print(fixed_config_html_2)
print("-" * 80)
print("#" * 80)
# later gator
sys.exit(0)
from Compartilhados.utilitarios import utilitarios
from Compartilhados.Excecoes.valoresInvalidosException import ValoresInvalidosException
from Servicos.UsuariosServico import UsuariosServico
class UsuariosControlador:
    """Controller for user CRUD; validation failures raise ValoresInvalidosException.

    Fixes: removed dataset-dump residue fused onto the final line; dropped
    needless ``f`` prefixes on strings without placeholders (same values).
    """

    def __init__(self):
        self.usuariosServico = UsuariosServico()

    def createUsuario(self, usuario):
        """Validate then persist a new user; returns the service result."""
        self.validarConsistensiaDeUsuario(usuario)
        return self.usuariosServico.createUsuario(usuario)

    def readUsuarios(self):
        """Return every user."""
        return self.usuariosServico.readUsuarios()

    def readUsuario(self, id_usuario):
        """Return one user after checking that the id is valid."""
        if not self.usuariosServico.idValido(id_usuario):
            raise ValoresInvalidosException(menssagem="O id da usuario informado não é válido!")
        return self.usuariosServico.readUsuario(id_usuario)

    def updateUsuario(self, usuario):
        """Validate then update an existing user."""
        self.possuiId(usuario.getIdUsuario())
        self.validarConsistensiaDeUsuario(usuario)
        self.usuariosServico.updateUsuario(usuario)

    def deleteUsuario(self, id_usuario):
        """Delete a user after checking that the id exists."""
        self.possuiId(id_usuario)
        self.usuariosServico.deleteUsuario(id_usuario)

    def validarConsistensiaDeUsuario(self, usuario):
        """Raise if required fields are missing or the e-mail is already taken."""
        vlrsObrgNaoPreech = self.usuariosServico.valoresObrigatoriosNaoPreenchidos(usuario)
        if len(vlrsObrgNaoPreech) > 0:
            raise ValoresInvalidosException(menssagem=f"Os valores a seguir são obrigatórios: {utilitarios.listarPorExtenso(vlrsObrgNaoPreech)}. Por favor, preencha-os.")
        if self.usuariosServico.emailJaExiste(usuario):
            raise ValoresInvalidosException(menssagem="O email informado já existe!")

    def possuiId(self, id_usuario):
        """Raise unless the given id exists."""
        if not self.usuariosServico.possuiId(id_usuario):
            raise ValoresInvalidosException(menssagem="O id da usuario informado não é válido!")

    def validarEmailSenha(self, email, senha):
        """Delegate credential validation to the service."""
        return self.usuariosServico.validarEmailSenha(email, senha)

    def validarFormatoEmail(self, email):
        """Delegate e-mail format validation to the service."""
        return self.usuariosServico.validarFormatoEmail(email)
from Compartilhados.Excecoes.valoresInvalidosException import ValoresInvalidosException
from Servicos.UsuariosServico import UsuariosServico
class UsuariosControlador:
    """Controller for user CRUD; validation failures raise ValoresInvalidosException.

    Fixes: removed dataset-dump residue ("| 0.40028 | 0.153899 |") fused onto the
    final line; dropped needless ``f`` prefixes on strings without placeholders.
    """

    def __init__(self):
        self.usuariosServico = UsuariosServico()

    def createUsuario(self, usuario):
        """Validate then persist a new user; returns the service result."""
        self.validarConsistensiaDeUsuario(usuario)
        return self.usuariosServico.createUsuario(usuario)

    def readUsuarios(self):
        """Return every user."""
        return self.usuariosServico.readUsuarios()

    def readUsuario(self, id_usuario):
        """Return one user after checking that the id is valid."""
        if not self.usuariosServico.idValido(id_usuario):
            raise ValoresInvalidosException(menssagem="O id da usuario informado não é válido!")
        return self.usuariosServico.readUsuario(id_usuario)

    def updateUsuario(self, usuario):
        """Validate then update an existing user."""
        self.possuiId(usuario.getIdUsuario())
        self.validarConsistensiaDeUsuario(usuario)
        self.usuariosServico.updateUsuario(usuario)

    def deleteUsuario(self, id_usuario):
        """Delete a user after checking that the id exists."""
        self.possuiId(id_usuario)
        self.usuariosServico.deleteUsuario(id_usuario)

    def validarConsistensiaDeUsuario(self, usuario):
        """Raise if required fields are missing or the e-mail is already taken."""
        vlrsObrgNaoPreech = self.usuariosServico.valoresObrigatoriosNaoPreenchidos(usuario)
        if len(vlrsObrgNaoPreech) > 0:
            raise ValoresInvalidosException(menssagem=f"Os valores a seguir são obrigatórios: {utilitarios.listarPorExtenso(vlrsObrgNaoPreech)}. Por favor, preencha-os.")
        if self.usuariosServico.emailJaExiste(usuario):
            raise ValoresInvalidosException(menssagem="O email informado já existe!")

    def possuiId(self, id_usuario):
        """Raise unless the given id exists."""
        if not self.usuariosServico.possuiId(id_usuario):
            raise ValoresInvalidosException(menssagem="O id da usuario informado não é válido!")

    def validarEmailSenha(self, email, senha):
        """Delegate credential validation to the service."""
        return self.usuariosServico.validarEmailSenha(email, senha)

    def validarFormatoEmail(self, email):
        """Delegate e-mail format validation to the service."""
        return self.usuariosServico.validarFormatoEmail(email)
import torch
import pdb
import torch.nn.functional as F
def mse_loss(input, target, mask=None, needSigmoid=True):
    """Mean-squared error between the (optionally sigmoided, masked) prediction and target."""
    pred = torch.sigmoid(input) if needSigmoid else input
    if mask is not None:
        # Only the prediction is masked; target masking was deliberately
        # left out in the original (commented there).
        pred = pred * mask
    return F.mse_loss(pred, target)
def nmse_loss(input, target, mask=None, needSigmoid=True):
    """Normalized MSE for 4-D batches (2D-CNN training).

    Per sample: sum over channels of squared Frobenius norms of the residual,
    divided by the same quantity for the target; the batch mean is returned.
    """
    pred = torch.sigmoid(input) if needSigmoid else input
    if mask is not None:
        pred = pred * mask
    num = torch.norm(pred - target, dim=(2, 3)) ** 2
    num = torch.sum(num, dim=1)
    den = torch.norm(target, dim=(2, 3)) ** 2
    den = torch.sum(den, dim=1)
    return torch.mean(num / den)
def nmse_loss_v2(input, target):
    """NMSE for the LISTA model, where the batch runs along axis 1.

    Columns whose target norm is zero are excluded from the mean.
    """
    err_sq = torch.norm(input - target, dim=0) ** 2
    ref_sq = torch.norm(target, dim=0) ** 2
    valid = ref_sq != 0
    return torch.mean(err_sq[valid] / ref_sq[valid])
def bce_loss(input, target, needSigmoid=True):
    """Binary cross-entropy; the logits path down-weights positives by 0.05."""
    if not needSigmoid:
        # Input is already a probability; note pos_weight is not applied here.
        return F.binary_cross_entropy(input, target)
    pos_weight = torch.Tensor([0.05]).to(input.device)
    return F.binary_cross_entropy_with_logits(input, target, pos_weight=pos_weight)
def dice_loss(input, target):
    """Soft Dice loss: 1 - mean per-sample Dice overlap of sigmoid(input) vs target."""
    probs = torch.sigmoid(input).contiguous().view(input.size()[0], -1)
    labels = target.contiguous().view(target.size()[0], -1)
    inter = torch.sum(probs * labels, 1)
    # 1e-4 smoothing keeps the ratio finite for all-zero inputs/targets.
    pred_sq = torch.sum(probs * probs, 1) + 0.0001
    true_sq = torch.sum(labels * labels, 1) + 0.0001
    dice = torch.mean((2 * inter) / (pred_sq + true_sq))
    return 1 - dice
def focal_loss(input, target, alpha=1, gamma=2, logits=True, reduce=True):
    """Binary focal loss: alpha * (1 - pt)**gamma * BCE.

    Fix: use reduction='none' instead of the deprecated `reduce=False`
    kwarg of the functional BCE calls (same behavior, no deprecation warning).
    The function's own `reduce` parameter (mean vs element-wise) is unchanged.
    """
    if logits:
        ce = F.binary_cross_entropy_with_logits(input, target, reduction="none")
    else:
        ce = F.binary_cross_entropy(input, target, reduction="none")
    # pt is the model's probability of the true class.
    pt = torch.exp(-ce)
    fl = alpha * (1 - pt) ** gamma * ce
    if reduce:
        return torch.mean(fl)
    else:
        return fl
def focal_loss_v2(input, target, alpha=0.95, gamma=2, size_average=True):
    """Class-balanced binary focal loss computed directly from probabilities.

    Fix: removed dataset-dump residue ("| python/util/loss.py | import torch")
    fused onto the return line.
    """
    epsilon = 1e-6
    input = input.contiguous().view(input.size()[0], -1)
    target = target.contiguous().view(target.size()[0], -1)
    # Clamp keeps log() finite at saturated sigmoid outputs.
    pt = torch.sigmoid(input)
    pt = torch.clamp(pt, min=epsilon, max=1 - epsilon)
    loss = - alpha * (1 - pt) ** gamma * target * torch.log(pt) - \
        (1 - alpha) * pt ** gamma * (1 - target) * torch.log(1 - pt)
    if size_average:
        loss = torch.mean(loss)
    else:
        loss = torch.sum(loss)
    return loss
import pdb
import torch.nn.functional as F
def mse_loss(input, target, mask=None, needSigmoid=True):
    """Mean-squared error; optionally sigmoid + mask applied to the prediction only."""
    if needSigmoid:
        input = torch.sigmoid(input)
    if mask is not None:
        input = input * mask
    #target = target * mask
    loss = F.mse_loss(input, target)
    return loss
def nmse_loss(input, target, mask=None, needSigmoid=True):
    '''
    train the 2D CNN based model

    Normalized MSE over 4-D batches: per-sample squared residual norm
    divided by squared target norm, averaged over the batch.
    '''
    if needSigmoid:
        input = torch.sigmoid(input)
    if mask is not None:
        input = input * mask
    res = input-target
    res_norm = torch.norm(res, dim=(2,3))
    res_norm = res_norm**2
    res_norm = torch.sum(res_norm, dim=1)
    #res_norm = torch.sqrt(res_norm)
    target_norm = torch.norm(target, dim=(2,3))
    target_norm = target_norm**2
    target_norm = torch.sum(target_norm, dim=1)
    #target_norm = torch.sqrt(target_norm)
    nmse = res_norm/target_norm
    return torch.mean(nmse)
def nmse_loss_v2(input, target):
    '''
    train the LISTA model, batch size is at axis 1

    Columns with zero target norm are excluded from the mean.
    '''
    res = input - target
    res_norm = torch.norm(res, dim=0)**2
    target_norm = torch.norm(target, dim=0)**2
    mask = target_norm != 0
    nmse = res_norm[mask] /target_norm[mask]
    return torch.mean(nmse)
def bce_loss(input, target, needSigmoid=True):
    """Binary cross-entropy; logits path applies a 0.05 positive-class weight."""
    pos_weight = torch.Tensor([0.05]).to(input.device)
    if needSigmoid:
        return F.binary_cross_entropy_with_logits(input, target,pos_weight=pos_weight)
    else:
        # NOTE: pos_weight is not applied on the probability path.
        return F.binary_cross_entropy(input, target)
def dice_loss(input, target):
    """Soft Dice loss: 1 - mean per-sample Dice overlap (1e-4 smoothing)."""
    input = torch.sigmoid(input)
    input = input.contiguous().view(input.size()[0],-1)
    target = target.contiguous().view(target.size()[0], -1)
    a = torch.sum(input * target, 1)
    b = torch.sum(input * input, 1) + 0.0001
    c = torch.sum(target * target, 1) + 0.0001
    d = (2*a) / (b+c)
    dice_loss = torch.mean(d)
    return 1 - dice_loss
def focal_loss(input, target, alpha=1, gamma=2, logits=True, reduce=True):
    """Binary focal loss: alpha * (1 - pt)**gamma * BCE.

    Fix: use reduction='none' instead of the deprecated `reduce=False`
    kwarg of the functional BCE calls (same behavior, no deprecation warning).
    """
    if logits:
        ce = F.binary_cross_entropy_with_logits(input, target, reduction="none")
    else:
        ce = F.binary_cross_entropy(input, target, reduction="none")
    # pt is the model's probability of the true class.
    pt = torch.exp(-ce)
    fl = alpha * (1 - pt) ** gamma * ce
    if reduce:
        return torch.mean(fl)
    else:
        return fl
def focal_loss_v2(input, target, alpha=0.95, gamma=2, size_average=True):
    """Class-balanced binary focal loss computed directly from probabilities.

    Fix: removed dataset-dump residue ("| 0.66061 | 0.506897 |") fused onto
    the return line.
    """
    epsilon = 1e-6
    input = input.contiguous().view(input.size()[0], -1)
    target = target.contiguous().view(target.size()[0], -1)
    # Clamp keeps log() finite at saturated sigmoid outputs.
    pt = torch.sigmoid(input)
    pt = torch.clamp(pt, min=epsilon, max=1 - epsilon)
    loss = - alpha * (1 - pt) ** gamma * target * torch.log(pt) - \
        (1 - alpha) * pt ** gamma * (1 - target) * torch.log(1 - pt)
    if size_average:
        loss = torch.mean(loss)
    else:
        loss = torch.sum(loss)
    return loss
import json
import os
from Utils.WorkspaceAdminUtils import WorkspaceAdminUtils
class MiscIndexer:
    """Builds search-index records for assorted KBase workspace object types.

    Each ``*_index`` method fetches one object by its UPA ('wsid/objid/ver')
    and returns either {'data': rec, 'schema': ...} for single documents or
    {'documents': [...], 'parent': ..., 'schema': ...} for sub-object indexes.

    Fixes: ``pairedend_index`` appended the lib2 filename to ``data['files']``
    (the raw workspace object, which has no 'files' key -> KeyError) instead of
    the record dict; dataset-dump residue removed from the final line.
    """

    def __init__(self, config):
        # Admin workspace client; index schemas are JSON files under schema-dir.
        self.ws = WorkspaceAdminUtils(config)
        self.schema_dir = config['schema-dir']

    def _tf(self, val):
        """Coerce a 0/1 workspace flag to a bool."""
        if val == 0:
            return False
        else:
            return True

    def _guid(self, upa):
        """Turn a 'wsid/objid/ver' UPA into a 'WS:wsid:objid:ver' guid."""
        (wsid, objid, ver) = upa.split('/')
        return "WS:%s:%s:%s" % (wsid, objid, ver)

    def assembly_index(self, upa):
        """Index an Assembly object (top-level stats only)."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        # NOTE(review): float(data.get('gc_content')) raises if the field is
        # absent -- assumes upstream always populates it; confirm.
        rec = {'name': data.get('name', ''),
               'dna_size': int(data['dna_size']),
               'gc_content': float(data.get('gc_content')),
               'external_source_id': data.get('external_source_id', ''),
               'contig_count': len(data['contigs']),
               'contigs': len(data['contigs'])}
        schema = self.mapping('assembly_schema.json')
        return {'data': rec, 'schema': schema}

    def assemblycontig_index(self, upa):
        """Index every contig of an Assembly as child documents."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'parent': {}}
        features_rec = []
        for _id in data['contigs']:
            feature = data['contigs'][_id]
            frec = {'contig_id': feature['contig_id'],
                    'description': feature.get('description'),
                    'gc_content': feature['gc_content'],
                    'length': feature['length'],
                    'guid': f'{self._guid(upa)}:{feature["contig_id"]}'}
            features_rec.append(frec)
        rec['documents'] = features_rec
        rec['schema'] = self.mapping('assemblycontig_schema.json')
        return rec

    def narrative_index(self, upa):
        """Index a Narrative: cell sources, app specs/inputs and job ids."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'title': data['metadata'].get('name', ''),
               'source': [],
               'code_output': [],
               'app_output': [],
               'app_info': [],
               'app_input': [],
               'job_ids': []}
        # Old narratives store cells under 'worksheets'; new ones at top level.
        if 'cells' in data:
            cells = data['cells']
        elif 'worksheets' in data and 'cells' in data['worksheets']:
            cells = data['worksheets']['cells']
        else:
            cells = []
        for cell in cells:
            rec['source'].append(cell.get('source'))
            # Skip output since it isn't used
            # - path: cells/[*]/outputs/[*]/data
            if 'metadata' in cell and 'kbase' in cell['metadata']:
                kb = cell['metadata']['kbase']
                # - path: cells/[*]/metadata/kbase/outputCell/widget/params
                # - path: cells/[*]/metadata/kbase/appCell/app/spec/info
                if 'appCell' in kb:
                    ac = kb['appCell']
                    rec['app_info'].append(ac['app']['spec']['info'])
                    rec['app_input'].append(ac['params'])
                if 'outputCell' in kb:
                    rec['job_ids'].append(kb['outputCell'].get('jobid'))
                # - path: cells/[*]/metadata/kbase/outputCell/jobId
        schema = self.mapping('narrative_schema.json')
        return {'data': rec, 'schema': schema}

    def ontologyterm_index(self, upa):
        """Index every term of an ontology dictionary as child documents."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {
            'parent': {
                'ontology_id': data.get('ontology', None),
                'ontology_name': data.get('default_namespace', None)
            }
        }
        features_rec = []
        for name in data['term_hash'].keys():
            feature = data['term_hash'][name]
            frec = {'guid': f'{self._guid(upa)}:{feature["id"]}',
                    'id': feature['id'],
                    'name': feature['name'],
                    'namespace': feature.get('namespace'),
                    'definition': feature.get('def'),
                    'synonyms': feature.get('synonym')}
            features_rec.append(frec)
        rec['documents'] = features_rec
        rec['schema'] = self.mapping('ontologyterm_schema.json')
        return rec

    def pairedend_index(self, upa):
        """Index a PairedEndLibrary reads object."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'technology': data['sequencing_tech'],
               'files': [data['lib1']['file']['file_name']],
               'phred_type': data['phred_type'],
               'read_count': int(data['read_count']),
               'read_length': int(data.get('read_length_mean')),
               'quality': float(data.get('qual_mean')),
               'gc_content': float(data.get('gc_content'))}
        if 'lib2' in data:
            # BUG FIX: the original appended to data['files'] (the raw ws
            # object, which has no 'files' key); the record holds the list.
            rec['files'].append(data['lib2']['file']['file_name'])
        if data.get('insert_size_mean') is not None:
            rec['insert_size'] = int(data.get('insert_size_mean'))
        else:
            rec['insert_size'] = None
        schema = self.mapping('pairedendlibrary_schema.json')
        return {'data': rec, 'schema': schema}

    def singleend_index(self, upa):
        """Index a SingleEndLibrary reads object."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'technology': data['sequencing_tech'],
               'phred_type': data['phred_type'],
               'read_count': int(data['read_count']),
               'read_length': int(data.get('read_length_mean')),
               'quality': float(data.get('qual_mean')),
               'gc_content': float(data.get('gc_content'))}
        # Field name differs between type versions.
        if 'lib' in data:
            rec['file'] = data['lib']['file']['file_name']
        elif 'lib1' in data:
            rec['file'] = data['lib1']['file']['file_name']
        schema = self.mapping('singleendlibrary_schema.json')
        return {'data': rec, 'schema': schema}

    def pangenome_index(self, upa):
        """Index a Pangenome object (summary counts only)."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'name': data['name'],
               'type': data['type'],
               'genomes': len(data['genome_refs']),
               'orthologs': len(data['orthologs']),
               'genome_names': []}
        schema = self.mapping('pangenome_schema.json')
        return {'data': rec, 'schema': schema}

    def pangenomeorthologyfamily_index(self, upa):
        """Index each ortholog family of a Pangenome as child documents."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'parent': {}}
        features_rec = []
        for feature in data['orthologs']:
            frec = {'guid': f'{self._guid(upa)}:{feature["id"]}',
                    'function': feature['function'],
                    'id': feature['id']}
            genes = []
            for g in feature['orthologs']:
                genes.append(g[0])
            frec['ortholog_genes'] = genes
            features_rec.append(frec)
        rec['documents'] = features_rec
        # NOTE(review): reuses the pangenome schema file -- looks copy-pasted;
        # possibly should be a family-specific schema. Confirm before changing.
        rec['schema'] = self.mapping('pangenome_schema.json')
        return rec

    def rnaseqsampleset_index(self, upa):
        """Index an RNASeqSampleSet object."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'sampleset_desc': data['sampleset_desc'],
               'num_replicates': int(data.get('num_replicates', 0)),
               'source': data['source'],
               'num_samples': int(data['num_samples'])}
        schema = self.mapping('rnaseqsampleset_schema.json')
        return {'data': rec, 'schema': schema}

    def taxon_index(self, upa):
        """Index a Taxon object."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'scientific_name': data['scientific_name'],
               'scientific_lineage': data['scientific_lineage'],
               'domain': data['domain'],
               'genetic_code': int(data['genetic_code']),
               'aliases': data['aliases']}
        schema = self.mapping('taxon_schema.json')
        return {'data': rec, 'schema': schema}

    def tree_index(self, upa):
        """Index a Tree object."""
        obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
        data = obj['data']
        rec = {'labels': data['default_node_labels'],
               'type': data['type']}
        schema = self.mapping('tree_schema.json')
        return {'data': rec, 'schema': schema}

    def mapping(self, filename):
        """Load a schema JSON file from schema_dir and return its 'schema' section."""
        with open(os.path.join(self.schema_dir, filename)) as f:
            schema = json.loads(f.read())
        return schema['schema']
import os
from Utils.WorkspaceAdminUtils import WorkspaceAdminUtils
class MiscIndexer:
def __init__(self, config):
self.ws = WorkspaceAdminUtils(config)
self.schema_dir = config['schema-dir']
def _tf(self, val):
if val == 0:
return False
else:
return True
def _guid(self, upa):
(wsid, objid, ver) = upa.split('/')
return "WS:%s:%s:%s" % (wsid, objid, ver)
def assembly_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'name': data.get('name', ''),
'dna_size': int(data['dna_size']),
'gc_content': float(data.get('gc_content')),
'external_source_id': data.get('external_source_id', ''),
'contig_count': len(data['contigs']),
'contigs': len(data['contigs'])}
schema = self.mapping('assembly_schema.json')
return {'data': rec, 'schema': schema}
def assemblycontig_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'parent': {}}
features_rec = []
for _id in data['contigs']:
feature = data['contigs'][_id]
frec = {'contig_id': feature['contig_id'],
'description': feature.get('description'),
'gc_content': feature['gc_content'],
'length': feature['length'],
'guid': f'{self._guid(upa)}:{feature["contig_id"]}'}
features_rec.append(frec)
rec['documents'] = features_rec
rec['schema'] = self.mapping('assemblycontig_schema.json')
return rec
def narrative_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'title': data['metadata'].get('name', ''),
'source': [],
'code_output': [],
'app_output': [],
'app_info': [],
'app_input': [],
'job_ids': []}
if 'cells' in data:
cells = data['cells']
elif 'worksheets' in data and 'cells' in data['worksheets']:
cells = data['worksheets']['cells']
else:
cells = []
for cell in cells:
rec['source'].append(cell.get('source'))
# Skip output since it isn't used
# - path: cells/[*]/outputs/[*]/data
if 'metadata' in cell and 'kbase' in cell['metadata']:
kb = cell['metadata']['kbase']
# - path: cells/[*]/metadata/kbase/outputCell/widget/params
# - path: cells/[*]/metadata/kbase/appCell/app/spec/info
if 'appCell' in kb:
ac = kb['appCell']
rec['app_info'].append(ac['app']['spec']['info'])
rec['app_input'].append(ac['params'])
if 'outputCell' in kb:
rec['job_ids'].append(kb['outputCell'].get('jobid'))
# - path: cells/[*]/metadata/kbase/outputCell/jobId
schema = self.mapping('narrative_schema.json')
return {'data': rec, 'schema': schema}
def ontologyterm_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {
'parent': {
'ontology_id': data.get('ontology', None),
'ontology_name': data.get('default_namespace', None)
}
}
features_rec = []
for name in data['term_hash'].keys():
feature = data['term_hash'][name]
frec = {'guid': f'{self._guid(upa)}:{feature["id"]}',
'id': feature['id'],
'name': feature['name'],
'namespace': feature.get('namespace'),
'definition': feature.get('def'),
'synonyms': feature.get('synonym')}
features_rec.append(frec)
rec['documents'] = features_rec
rec['schema'] = self.mapping('ontologyterm_schema.json')
return rec
def pairedend_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'technology': data['sequencing_tech'],
'files': [data['lib1']['file']['file_name']],
'phred_type': data['phred_type'],
'read_count': int(data['read_count']),
'read_length': int(data.get('read_length_mean')),
'quality': float(data.get('qual_mean')),
'gc_content': float(data.get('gc_content'))}
if 'lib2' in data:
data['files'].append(data['lib2']['file']['file_name'])
if data.get('insert_size_mean') is not None:
rec['insert_size'] = int(data.get('insert_size_mean'))
else:
rec['insert_size'] = None
schema = self.mapping('pairedendlibrary_schema.json')
return {'data': rec, 'schema': schema}
def singleend_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'technology': data['sequencing_tech'],
'phred_type': data['phred_type'],
'read_count': int(data['read_count']),
'read_length': int(data.get('read_length_mean')),
'quality': float(data.get('qual_mean')),
'gc_content': float(data.get('gc_content'))}
if 'lib' in data:
rec['file'] = data['lib']['file']['file_name']
elif 'lib1' in data:
rec['file'] = data['lib1']['file']['file_name']
schema = self.mapping('singleendlibrary_schema.json')
return {'data': rec, 'schema': schema}
def pangenome_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'name': data['name'],
'type': data['type'],
'genomes': len(data['genome_refs']),
'orthologs': len(data['orthologs']),
'genome_names': []}
schema = self.mapping('pangenome_schema.json')
return {'data': rec, 'schema': schema}
def pangenomeorthologyfamily_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'parent': {}}
features_rec = []
for feature in data['orthologs']:
frec = {'guid': f'{self._guid(upa)}:{feature["id"]}',
'function': feature['function'],
'id': feature['id']}
genes = []
for g in feature['orthologs']:
genes.append(g[0])
frec['ortholog_genes'] = genes
features_rec.append(frec)
rec['documents'] = features_rec
rec['schema'] = self.mapping('pangenome_schema.json')
return rec
def rnaseqsampleset_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'sampleset_desc': data['sampleset_desc'],
'num_replicates': int(data.get('num_replicates', 0)),
'source': data['source'],
'num_samples': int(data['num_samples'])}
schema = self.mapping('rnaseqsampleset_schema.json')
return {'data': rec, 'schema': schema}
def taxon_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'scientific_name': data['scientific_name'],
'scientific_lineage': data['scientific_lineage'],
'domain': data['domain'],
'genetic_code': int(data['genetic_code']),
'aliases': data['aliases']}
schema = self.mapping('taxon_schema.json')
return {'data': rec, 'schema': schema}
def tree_index(self, upa):
obj = self.ws.get_objects2({'objects': [{'ref': upa}]})['data'][0]
data = obj['data']
rec = {'labels': data['default_node_labels'],
'type': data['type']}
schema = self.mapping('tree_schema.json')
return {'data': rec, 'schema': schema}
def mapping(self, filename):
with open(os.path.join(self.schema_dir, filename)) as f:
schema = json.loads(f.read())
return schema['schema'] | 0.317109 | 0.121217 |
import uuid
from datetime import datetime
from typing import Any
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
db: Any = SQLAlchemy()
def save(instance):
db.session.add(instance)
db.session.commit()
def get_verification_email_by_email(email):
return VerificationEmail.query.filter(
VerificationEmail.email == email
).one_or_none()
def generate_uuid():
return str(uuid.uuid4())
class IDMixin:
id = db.Column(db.Integer, primary_key=True)
class VerificationEmail(IDMixin, db.Model):
email = db.Column(db.String(120), unique=True, nullable=False)
is_admin = db.Column(db.Boolean)
is_mentor = db.Column(db.Boolean)
def __str__(self):
return f'<VerificationEmail {self.id}: {self.email}>'
class PredefinedTagMixin(IDMixin):
value = db.Column(db.String(50))
class UserEditableTagMixin(IDMixin):
value = db.Column(db.String(50))
public = db.Column(db.Boolean, default=False)
class HospitalAffiliationOption(PredefinedTagMixin, db.Model):
pass
class ClinicalSpecialtyOption(UserEditableTagMixin, db.Model):
pass
class ProfessionalInterestOption(UserEditableTagMixin, db.Model):
pass
class PartsOfMeOption(UserEditableTagMixin, db.Model):
pass
class ActivityOption(UserEditableTagMixin, db.Model):
pass
class DegreeOption(UserEditableTagMixin, db.Model):
pass
class HospitalAffiliation(IDMixin, db.Model):
tag_id = db.Column(
db.Integer, db.ForeignKey(HospitalAffiliationOption.id), nullable=False
)
tag = relationship(HospitalAffiliationOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ClinicalSpecialty(IDMixin, db.Model):
tag_id = db.Column(
db.Integer, db.ForeignKey(ClinicalSpecialtyOption.id), nullable=False
)
tag = relationship(ClinicalSpecialtyOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class PartsOfMe(IDMixin, db.Model):
tag_id = db.Column(db.Integer, db.ForeignKey(PartsOfMeOption.id), nullable=False)
tag = relationship(PartsOfMeOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ProfessionalInterest(IDMixin, db.Model):
tag_id = db.Column(
db.Integer, db.ForeignKey(ProfessionalInterestOption.id), nullable=False
)
tag = relationship(ProfessionalInterestOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ProfileActivity(IDMixin, db.Model):
tag_id = db.Column(db.Integer, db.ForeignKey(ActivityOption.id), nullable=False)
tag = relationship(ActivityOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ProfileDegree(IDMixin, db.Model):
tag_id = db.Column(db.Integer, db.ForeignKey(DegreeOption.id), nullable=False)
tag = relationship(DegreeOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class Profile(db.Model):
id = db.Column(db.String, primary_key=True, default=generate_uuid)
name = db.Column(db.String(255), nullable=False)
verification_email_id = db.Column(
db.Integer, db.ForeignKey(VerificationEmail.id), nullable=False
)
verification_email = relationship(VerificationEmail, uselist=False)
contact_email = db.Column(db.String(120), unique=True, nullable=False)
profile_image_url = db.Column(db.String(255))
clinical_specialties = relationship(ClinicalSpecialty, cascade='all, delete')
affiliations = relationship(HospitalAffiliation, cascade='all, delete')
professional_interests = relationship(ProfessionalInterest, cascade='all, delete')
parts_of_me = relationship(PartsOfMe, cascade='all, delete')
activities = relationship(ProfileActivity, cascade='all, delete')
degrees = relationship(ProfileDegree, cascade='all, delete')
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
date_updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
# TODO make not nullable and remove additional_information === null workarounds
additional_information = db.Column(db.String(500), default='')
willing_shadowing = db.Column(db.Boolean, default=False)
willing_networking = db.Column(db.Boolean, default=False)
willing_goal_setting = db.Column(db.Boolean, default=False)
willing_discuss_personal = db.Column(db.Boolean, default=False)
willing_career_guidance = db.Column(db.Boolean, default=False)
willing_student_group = db.Column(db.Boolean, default=False)
cadence = db.Column(db.String(255), nullable=False)
other_cadence = db.Column(db.String(255), nullable=True)
available_for_mentoring = db.Column(db.Boolean, default=True)
def __repr__(self):
return f'<Profile id={self.id} name={self.name}>'
class VerificationToken(db.Model):
token = db.Column(db.String(36), primary_key=True)
email_id = db.Column(
db.Integer, db.ForeignKey(VerificationEmail.id), nullable=False
)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
verified = db.Column(db.Boolean, default=False)
expired = db.Column(db.Boolean, default=False)
email_log = db.Column(db.Text)
is_personal_device = db.Column(db.Boolean) | server/models.py | import uuid
from datetime import datetime
from typing import Any
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
db: Any = SQLAlchemy()
def save(instance):
db.session.add(instance)
db.session.commit()
def get_verification_email_by_email(email):
return VerificationEmail.query.filter(
VerificationEmail.email == email
).one_or_none()
def generate_uuid():
return str(uuid.uuid4())
class IDMixin:
id = db.Column(db.Integer, primary_key=True)
class VerificationEmail(IDMixin, db.Model):
email = db.Column(db.String(120), unique=True, nullable=False)
is_admin = db.Column(db.Boolean)
is_mentor = db.Column(db.Boolean)
def __str__(self):
return f'<VerificationEmail {self.id}: {self.email}>'
class PredefinedTagMixin(IDMixin):
value = db.Column(db.String(50))
class UserEditableTagMixin(IDMixin):
value = db.Column(db.String(50))
public = db.Column(db.Boolean, default=False)
class HospitalAffiliationOption(PredefinedTagMixin, db.Model):
pass
class ClinicalSpecialtyOption(UserEditableTagMixin, db.Model):
pass
class ProfessionalInterestOption(UserEditableTagMixin, db.Model):
pass
class PartsOfMeOption(UserEditableTagMixin, db.Model):
pass
class ActivityOption(UserEditableTagMixin, db.Model):
pass
class DegreeOption(UserEditableTagMixin, db.Model):
pass
class HospitalAffiliation(IDMixin, db.Model):
tag_id = db.Column(
db.Integer, db.ForeignKey(HospitalAffiliationOption.id), nullable=False
)
tag = relationship(HospitalAffiliationOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ClinicalSpecialty(IDMixin, db.Model):
tag_id = db.Column(
db.Integer, db.ForeignKey(ClinicalSpecialtyOption.id), nullable=False
)
tag = relationship(ClinicalSpecialtyOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class PartsOfMe(IDMixin, db.Model):
tag_id = db.Column(db.Integer, db.ForeignKey(PartsOfMeOption.id), nullable=False)
tag = relationship(PartsOfMeOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ProfessionalInterest(IDMixin, db.Model):
tag_id = db.Column(
db.Integer, db.ForeignKey(ProfessionalInterestOption.id), nullable=False
)
tag = relationship(ProfessionalInterestOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ProfileActivity(IDMixin, db.Model):
tag_id = db.Column(db.Integer, db.ForeignKey(ActivityOption.id), nullable=False)
tag = relationship(ActivityOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class ProfileDegree(IDMixin, db.Model):
tag_id = db.Column(db.Integer, db.ForeignKey(DegreeOption.id), nullable=False)
tag = relationship(DegreeOption)
profile_id = db.Column(db.String, db.ForeignKey('profile.id'), nullable=False)
class Profile(db.Model):
id = db.Column(db.String, primary_key=True, default=generate_uuid)
name = db.Column(db.String(255), nullable=False)
verification_email_id = db.Column(
db.Integer, db.ForeignKey(VerificationEmail.id), nullable=False
)
verification_email = relationship(VerificationEmail, uselist=False)
contact_email = db.Column(db.String(120), unique=True, nullable=False)
profile_image_url = db.Column(db.String(255))
clinical_specialties = relationship(ClinicalSpecialty, cascade='all, delete')
affiliations = relationship(HospitalAffiliation, cascade='all, delete')
professional_interests = relationship(ProfessionalInterest, cascade='all, delete')
parts_of_me = relationship(PartsOfMe, cascade='all, delete')
activities = relationship(ProfileActivity, cascade='all, delete')
degrees = relationship(ProfileDegree, cascade='all, delete')
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
date_updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
# TODO make not nullable and remove additional_information === null workarounds
additional_information = db.Column(db.String(500), default='')
willing_shadowing = db.Column(db.Boolean, default=False)
willing_networking = db.Column(db.Boolean, default=False)
willing_goal_setting = db.Column(db.Boolean, default=False)
willing_discuss_personal = db.Column(db.Boolean, default=False)
willing_career_guidance = db.Column(db.Boolean, default=False)
willing_student_group = db.Column(db.Boolean, default=False)
cadence = db.Column(db.String(255), nullable=False)
other_cadence = db.Column(db.String(255), nullable=True)
available_for_mentoring = db.Column(db.Boolean, default=True)
def __repr__(self):
return f'<Profile id={self.id} name={self.name}>'
class VerificationToken(db.Model):
token = db.Column(db.String(36), primary_key=True)
email_id = db.Column(
db.Integer, db.ForeignKey(VerificationEmail.id), nullable=False
)
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
verified = db.Column(db.Boolean, default=False)
expired = db.Column(db.Boolean, default=False)
email_log = db.Column(db.Text)
is_personal_device = db.Column(db.Boolean) | 0.439747 | 0.068725 |
from typing import Dict, List
import numpy as np
import torch as th
import torch.distributions as td
from rls.algorithms.base.sarl_on_policy import SarlOnPolicy
from rls.common.data import Data
from rls.common.decorator import iton
from rls.nn.models import (ActorCriticValueCts, ActorCriticValueDct, ActorDct,
ActorMuLogstd, CriticValue)
from rls.nn.utils import OPLR
from rls.utils.np_utils import calculate_td_error, discounted_sum
class PPO(SarlOnPolicy):
"""
Proximal Policy Optimization, https://arxiv.org/abs/1707.06347
Emergence of Locomotion Behaviours in Rich Environments, http://arxiv.org/abs/1707.02286, DPPO
"""
policy_mode = 'on-policy'
def __init__(self,
agent_spec,
ent_coef: float = 1.0e-2,
vf_coef: float = 0.5,
lr: float = 5.0e-4,
lambda_: float = 0.95,
epsilon: float = 0.2,
use_duel_clip: bool = False,
duel_epsilon: float = 0.,
use_vclip: bool = False,
value_epsilon: float = 0.2,
share_net: bool = True,
actor_lr: float = 3e-4,
critic_lr: float = 1e-3,
kl_reverse: bool = False,
kl_target: float = 0.02,
kl_target_cutoff: float = 2,
kl_target_earlystop: float = 4,
kl_beta: List[float] = [0.7, 1.3],
kl_alpha: float = 1.5,
kl_coef: float = 1.0,
extra_coef: float = 1000.0,
use_kl_loss: bool = False,
use_extra_loss: bool = False,
use_early_stop: bool = False,
network_settings: Dict = {
'share': {
'continuous': {
'condition_sigma': False,
'log_std_bound': [-20, 2],
'share': [32, 32],
'mu': [32, 32],
'v': [32, 32]
},
'discrete': {
'share': [32, 32],
'logits': [32, 32],
'v': [32, 32]
}
},
'actor_continuous': {
'hidden_units': [64, 64],
'condition_sigma': False,
'log_std_bound': [-20, 2]
},
'actor_discrete': [32, 32],
'critic': [32, 32]
},
**kwargs):
super().__init__(agent_spec=agent_spec, **kwargs)
self._ent_coef = ent_coef
self.lambda_ = lambda_
assert 0.0 <= lambda_ <= 1.0, "GAE lambda should be in [0, 1]."
self._epsilon = epsilon
self._use_vclip = use_vclip
self._value_epsilon = value_epsilon
self._share_net = share_net
self._kl_reverse = kl_reverse
self._kl_target = kl_target
self._kl_alpha = kl_alpha
self._kl_coef = kl_coef
self._extra_coef = extra_coef
self._vf_coef = vf_coef
self._use_duel_clip = use_duel_clip
self._duel_epsilon = duel_epsilon
if self._use_duel_clip:
assert - \
self._epsilon < self._duel_epsilon < self._epsilon, "duel_epsilon should be set in the range of (-epsilon, epsilon)."
self._kl_cutoff = kl_target * kl_target_cutoff
self._kl_stop = kl_target * kl_target_earlystop
self._kl_low = kl_target * kl_beta[0]
self._kl_high = kl_target * kl_beta[-1]
self._use_kl_loss = use_kl_loss
self._use_extra_loss = use_extra_loss
self._use_early_stop = use_early_stop
if self._share_net:
if self.is_continuous:
self.net = ActorCriticValueCts(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['share']['continuous']).to(self.device)
else:
self.net = ActorCriticValueDct(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['share']['discrete']).to(self.device)
self.oplr = OPLR(self.net, lr, **self._oplr_params)
self._trainer_modules.update(model=self.net,
oplr=self.oplr)
else:
if self.is_continuous:
self.actor = ActorMuLogstd(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['actor_continuous']).to(self.device)
else:
self.actor = ActorDct(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['actor_discrete']).to(self.device)
self.critic = CriticValue(self.obs_spec,
rep_net_params=self._rep_net_params,
network_settings=network_settings['critic']).to(self.device)
self.actor_oplr = OPLR(self.actor, actor_lr, **self._oplr_params)
self.critic_oplr = OPLR(self.critic, critic_lr, **self._oplr_params)
self._trainer_modules.update(actor=self.actor,
critic=self.critic,
actor_oplr=self.actor_oplr,
critic_oplr=self.critic_oplr)
@iton
def select_action(self, obs):
if self.is_continuous:
if self._share_net:
mu, log_std, value = self.net(obs, rnncs=self.rnncs) # [B, A]
self.rnncs_ = self.net.get_rnncs()
else:
mu, log_std = self.actor(obs, rnncs=self.rnncs) # [B, A]
self.rnncs_ = self.actor.get_rnncs()
value = self.critic(obs, rnncs=self.rnncs) # [B, 1]
dist = td.Independent(td.Normal(mu, log_std.exp()), 1)
action = dist.sample().clamp(-1, 1) # [B, A]
log_prob = dist.log_prob(action).unsqueeze(-1) # [B, 1]
else:
if self._share_net:
logits, value = self.net(obs, rnncs=self.rnncs) # [B, A], [B, 1]
self.rnncs_ = self.net.get_rnncs()
else:
logits = self.actor(obs, rnncs=self.rnncs) # [B, A]
self.rnncs_ = self.actor.get_rnncs()
value = self.critic(obs, rnncs=self.rnncs) # [B, 1]
norm_dist = td.Categorical(logits=logits)
action = norm_dist.sample() # [B,]
log_prob = norm_dist.log_prob(action).unsqueeze(-1) # [B, 1]
acts_info = Data(action=action,
value=value,
log_prob=log_prob + th.finfo().eps)
if self.use_rnn:
acts_info.update(rnncs=self.rnncs)
return action, acts_info
@iton
def _get_value(self, obs, rnncs=None):
if self._share_net:
if self.is_continuous:
_, _, value = self.net(obs, rnncs=rnncs) # [B, 1]
else:
_, value = self.net(obs, rnncs=rnncs) # [B, 1]
else:
value = self.critic(obs, rnncs=rnncs) # [B, 1]
return value
def _preprocess_BATCH(self, BATCH): # [T, B, *]
BATCH = super()._preprocess_BATCH(BATCH)
value = self._get_value(BATCH.obs_[-1], rnncs=self.rnncs)
BATCH.discounted_reward = discounted_sum(BATCH.reward,
self.gamma,
BATCH.done,
BATCH.begin_mask,
init_value=value)
td_error = calculate_td_error(BATCH.reward,
self.gamma,
BATCH.done,
value=BATCH.value,
next_value=np.concatenate((BATCH.value[1:], value[np.newaxis, :]), 0))
BATCH.gae_adv = discounted_sum(td_error,
self.lambda_ * self.gamma,
BATCH.done,
BATCH.begin_mask,
init_value=0.,
normalize=True)
return BATCH
def learn(self, BATCH: Data):
BATCH = self._preprocess_BATCH(BATCH) # [T, B, *]
for _ in range(self._epochs):
kls = []
for _BATCH in BATCH.sample(self._chunk_length, self.batch_size, repeat=self._sample_allow_repeat):
_BATCH = self._before_train(_BATCH)
summaries, kl = self._train(_BATCH)
kls.append(kl)
self.summaries.update(summaries)
self._after_train()
if self._use_early_stop and sum(kls) / len(kls) > self._kl_stop:
break
def _train(self, BATCH):
if self._share_net:
summaries, kl = self.train_share(BATCH)
else:
summaries = dict()
actor_summaries, kl = self.train_actor(BATCH)
critic_summaries = self.train_critic(BATCH)
summaries.update(actor_summaries)
summaries.update(critic_summaries)
if self._use_kl_loss:
# ref: https://github.com/joschu/modular_rl/blob/6970cde3da265cf2a98537250fea5e0c0d9a7639/modular_rl/ppo.py#L93
if kl > self._kl_high:
self._kl_coef *= self._kl_alpha
elif kl < self._kl_low:
self._kl_coef /= self._kl_alpha
summaries.update({
'Statistics/kl_coef': self._kl_coef
})
return summaries, kl
@iton
def train_share(self, BATCH):
if self.is_continuous:
# [T, B, A], [T, B, A], [T, B, 1]
mu, log_std, value = self.net(BATCH.obs, begin_mask=BATCH.begin_mask)
dist = td.Independent(td.Normal(mu, log_std.exp()), 1)
new_log_prob = dist.log_prob(BATCH.action).unsqueeze(-1) # [T, B, 1]
entropy = dist.entropy().unsqueeze(-1) # [T, B, 1]
else:
# [T, B, A], [T, B, 1]
logits, value = self.net(BATCH.obs, begin_mask=BATCH.begin_mask)
logp_all = logits.log_softmax(-1) # [T, B, 1]
new_log_prob = (BATCH.action * logp_all).sum(-1, keepdim=True) # [T, B, 1]
entropy = -(logp_all.exp() * logp_all).sum(-1, keepdim=True) # [T, B, 1]
ratio = (new_log_prob - BATCH.log_prob).exp() # [T, B, 1]
surrogate = ratio * BATCH.gae_adv # [T, B, 1]
clipped_surrogate = th.minimum(
surrogate,
ratio.clamp(1.0 - self._epsilon, 1.0 + self._epsilon) * BATCH.gae_adv
) # [T, B, 1]
# ref: https://github.com/thu-ml/tianshou/blob/c97aa4065ee8464bd5897bb86f1f81abd8e2cff9/tianshou/policy/modelfree/ppo.py#L159
if self._use_duel_clip:
clipped_surrogate2 = th.maximum(
clipped_surrogate,
(1.0 + self._duel_epsilon) * BATCH.gae_adv
) # [T, B, 1]
clipped_surrogate = th.where(BATCH.gae_adv < 0, clipped_surrogate2, clipped_surrogate) # [T, B, 1]
actor_loss = -(clipped_surrogate + self._ent_coef * entropy).mean() # 1
# ref: https://github.com/joschu/modular_rl/blob/6970cde3da265cf2a98537250fea5e0c0d9a7639/modular_rl/ppo.py#L40
# ref: https://github.com/hill-a/stable-baselines/blob/b3f414f4f2900403107357a2206f80868af16da3/stable_baselines/ppo2/ppo2.py#L185
if self._kl_reverse: # TODO:
kl = .5 * (new_log_prob - BATCH.log_prob).square().mean() # 1
else:
# a sample estimate for KL-divergence, easy to compute
kl = .5 * (BATCH.log_prob - new_log_prob).square().mean()
if self._use_kl_loss:
kl_loss = self._kl_coef * kl # 1
actor_loss += kl_loss
if self._use_extra_loss:
extra_loss = self._extra_coef * th.maximum(th.zeros_like(kl), kl - self._kl_cutoff).square().mean() # 1
actor_loss += extra_loss
td_error = BATCH.discounted_reward - value # [T, B, 1]
if self._use_vclip:
# ref: https://github.com/llSourcell/OpenAI_Five_vs_Dota2_Explained/blob/c5def7e57aa70785c2394ea2eeb3e5f66ad59a53/train.py#L154
# ref: https://github.com/hill-a/stable-baselines/blob/b3f414f4f2900403107357a2206f80868af16da3/stable_baselines/ppo2/ppo2.py#L172
value_clip = BATCH.value + (value - BATCH.value).clamp(-self._value_epsilon,
self._value_epsilon) # [T, B, 1]
td_error_clip = BATCH.discounted_reward - value_clip # [T, B, 1]
td_square = th.maximum(td_error.square(), td_error_clip.square()) # [T, B, 1]
else:
td_square = td_error.square() # [T, B, 1]
critic_loss = 0.5 * td_square.mean() # 1
loss = actor_loss + self._vf_coef * critic_loss # 1
self.oplr.optimize(loss)
return {
'LOSS/actor_loss': actor_loss,
'LOSS/critic_loss': critic_loss,
'Statistics/kl': kl,
'Statistics/entropy': entropy.mean(),
'LEARNING_RATE/lr': self.oplr.lr
}, kl
@iton
def train_actor(self, BATCH):
if self.is_continuous:
# [T, B, A], [T, B, A]
mu, log_std = self.actor(BATCH.obs, begin_mask=BATCH.begin_mask)
dist = td.Independent(td.Normal(mu, log_std.exp()), 1)
new_log_prob = dist.log_prob(BATCH.action).unsqueeze(-1) # [T, B, 1]
entropy = dist.entropy().unsqueeze(-1) # [T, B, 1]
else:
logits = self.actor(BATCH.obs, begin_mask=BATCH.begin_mask) # [T, B, A]
logp_all = logits.log_softmax(-1) # [T, B, A]
new_log_prob = (BATCH.action * logp_all).sum(-1, keepdim=True) # [T, B, 1]
entropy = -(logp_all.exp() * logp_all).sum(-1, keepdim=True) # [T, B, 1]
ratio = (new_log_prob - BATCH.log_prob).exp() # [T, B, 1]
kl = (BATCH.log_prob - new_log_prob).square().mean() # 1
surrogate = ratio * BATCH.gae_adv # [T, B, 1]
clipped_surrogate = th.minimum(
surrogate,
th.where(BATCH.gae_adv > 0, (1 + self._epsilon) *
BATCH.gae_adv, (1 - self._epsilon) * BATCH.gae_adv)
) # [T, B, 1]
if self._use_duel_clip:
clipped_surrogate = th.maximum(
clipped_surrogate,
(1.0 + self._duel_epsilon) * BATCH.gae_adv
) # [T, B, 1]
actor_loss = -(clipped_surrogate + self._ent_coef * entropy).mean() # 1
if self._use_kl_loss:
kl_loss = self._kl_coef * kl # 1
actor_loss += kl_loss
if self._use_extra_loss:
extra_loss = self._extra_coef * th.maximum(th.zeros_like(kl), kl - self._kl_cutoff).square().mean() # 1
actor_loss += extra_loss
self.actor_oplr.optimize(actor_loss)
return {
'LOSS/actor_loss': actor_loss,
'Statistics/kl': kl,
'Statistics/entropy': entropy.mean(),
'LEARNING_RATE/actor_lr': self.actor_oplr.lr
}, kl
@iton
def train_critic(self, BATCH):
value = self.critic(BATCH.obs, begin_mask=BATCH.begin_mask) # [T, B, 1]
td_error = BATCH.discounted_reward - value # [T, B, 1]
if self._use_vclip:
value_clip = BATCH.value + (value - BATCH.value).clamp(-self._value_epsilon,
self._value_epsilon) # [T, B, 1]
td_error_clip = BATCH.discounted_reward - value_clip # [T, B, 1]
td_square = th.maximum(td_error.square(), td_error_clip.square()) # [T, B, 1]
else:
td_square = td_error.square() # [T, B, 1]
critic_loss = 0.5 * td_square.mean() # 1
self.critic_oplr.optimize(critic_loss)
return {
'LOSS/critic_loss': critic_loss,
'LEARNING_RATE/critic_lr': self.critic_oplr.lr
} | rls/algorithms/single/ppo.py |
from typing import Dict, List
import numpy as np
import torch as th
import torch.distributions as td
from rls.algorithms.base.sarl_on_policy import SarlOnPolicy
from rls.common.data import Data
from rls.common.decorator import iton
from rls.nn.models import (ActorCriticValueCts, ActorCriticValueDct, ActorDct,
ActorMuLogstd, CriticValue)
from rls.nn.utils import OPLR
from rls.utils.np_utils import calculate_td_error, discounted_sum
class PPO(SarlOnPolicy):
"""
Proximal Policy Optimization, https://arxiv.org/abs/1707.06347
Emergence of Locomotion Behaviours in Rich Environments, http://arxiv.org/abs/1707.02286, DPPO
"""
policy_mode = 'on-policy'
def __init__(self,
agent_spec,
ent_coef: float = 1.0e-2,
vf_coef: float = 0.5,
lr: float = 5.0e-4,
lambda_: float = 0.95,
epsilon: float = 0.2,
use_duel_clip: bool = False,
duel_epsilon: float = 0.,
use_vclip: bool = False,
value_epsilon: float = 0.2,
share_net: bool = True,
actor_lr: float = 3e-4,
critic_lr: float = 1e-3,
kl_reverse: bool = False,
kl_target: float = 0.02,
kl_target_cutoff: float = 2,
kl_target_earlystop: float = 4,
kl_beta: List[float] = [0.7, 1.3],
kl_alpha: float = 1.5,
kl_coef: float = 1.0,
extra_coef: float = 1000.0,
use_kl_loss: bool = False,
use_extra_loss: bool = False,
use_early_stop: bool = False,
network_settings: Dict = {
'share': {
'continuous': {
'condition_sigma': False,
'log_std_bound': [-20, 2],
'share': [32, 32],
'mu': [32, 32],
'v': [32, 32]
},
'discrete': {
'share': [32, 32],
'logits': [32, 32],
'v': [32, 32]
}
},
'actor_continuous': {
'hidden_units': [64, 64],
'condition_sigma': False,
'log_std_bound': [-20, 2]
},
'actor_discrete': [32, 32],
'critic': [32, 32]
},
**kwargs):
super().__init__(agent_spec=agent_spec, **kwargs)
self._ent_coef = ent_coef
self.lambda_ = lambda_
assert 0.0 <= lambda_ <= 1.0, "GAE lambda should be in [0, 1]."
self._epsilon = epsilon
self._use_vclip = use_vclip
self._value_epsilon = value_epsilon
self._share_net = share_net
self._kl_reverse = kl_reverse
self._kl_target = kl_target
self._kl_alpha = kl_alpha
self._kl_coef = kl_coef
self._extra_coef = extra_coef
self._vf_coef = vf_coef
self._use_duel_clip = use_duel_clip
self._duel_epsilon = duel_epsilon
if self._use_duel_clip:
assert - \
self._epsilon < self._duel_epsilon < self._epsilon, "duel_epsilon should be set in the range of (-epsilon, epsilon)."
self._kl_cutoff = kl_target * kl_target_cutoff
self._kl_stop = kl_target * kl_target_earlystop
self._kl_low = kl_target * kl_beta[0]
self._kl_high = kl_target * kl_beta[-1]
self._use_kl_loss = use_kl_loss
self._use_extra_loss = use_extra_loss
self._use_early_stop = use_early_stop
if self._share_net:
if self.is_continuous:
self.net = ActorCriticValueCts(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['share']['continuous']).to(self.device)
else:
self.net = ActorCriticValueDct(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['share']['discrete']).to(self.device)
self.oplr = OPLR(self.net, lr, **self._oplr_params)
self._trainer_modules.update(model=self.net,
oplr=self.oplr)
else:
if self.is_continuous:
self.actor = ActorMuLogstd(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['actor_continuous']).to(self.device)
else:
self.actor = ActorDct(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings['actor_discrete']).to(self.device)
self.critic = CriticValue(self.obs_spec,
rep_net_params=self._rep_net_params,
network_settings=network_settings['critic']).to(self.device)
self.actor_oplr = OPLR(self.actor, actor_lr, **self._oplr_params)
self.critic_oplr = OPLR(self.critic, critic_lr, **self._oplr_params)
self._trainer_modules.update(actor=self.actor,
critic=self.critic,
actor_oplr=self.actor_oplr,
critic_oplr=self.critic_oplr)
@iton
def select_action(self, obs):
if self.is_continuous:
if self._share_net:
mu, log_std, value = self.net(obs, rnncs=self.rnncs) # [B, A]
self.rnncs_ = self.net.get_rnncs()
else:
mu, log_std = self.actor(obs, rnncs=self.rnncs) # [B, A]
self.rnncs_ = self.actor.get_rnncs()
value = self.critic(obs, rnncs=self.rnncs) # [B, 1]
dist = td.Independent(td.Normal(mu, log_std.exp()), 1)
action = dist.sample().clamp(-1, 1) # [B, A]
log_prob = dist.log_prob(action).unsqueeze(-1) # [B, 1]
else:
if self._share_net:
logits, value = self.net(obs, rnncs=self.rnncs) # [B, A], [B, 1]
self.rnncs_ = self.net.get_rnncs()
else:
logits = self.actor(obs, rnncs=self.rnncs) # [B, A]
self.rnncs_ = self.actor.get_rnncs()
value = self.critic(obs, rnncs=self.rnncs) # [B, 1]
norm_dist = td.Categorical(logits=logits)
action = norm_dist.sample() # [B,]
log_prob = norm_dist.log_prob(action).unsqueeze(-1) # [B, 1]
acts_info = Data(action=action,
value=value,
log_prob=log_prob + th.finfo().eps)
if self.use_rnn:
acts_info.update(rnncs=self.rnncs)
return action, acts_info
@iton
def _get_value(self, obs, rnncs=None):
if self._share_net:
if self.is_continuous:
_, _, value = self.net(obs, rnncs=rnncs) # [B, 1]
else:
_, value = self.net(obs, rnncs=rnncs) # [B, 1]
else:
value = self.critic(obs, rnncs=rnncs) # [B, 1]
return value
def _preprocess_BATCH(self, BATCH): # [T, B, *]
BATCH = super()._preprocess_BATCH(BATCH)
value = self._get_value(BATCH.obs_[-1], rnncs=self.rnncs)
BATCH.discounted_reward = discounted_sum(BATCH.reward,
self.gamma,
BATCH.done,
BATCH.begin_mask,
init_value=value)
td_error = calculate_td_error(BATCH.reward,
self.gamma,
BATCH.done,
value=BATCH.value,
next_value=np.concatenate((BATCH.value[1:], value[np.newaxis, :]), 0))
BATCH.gae_adv = discounted_sum(td_error,
self.lambda_ * self.gamma,
BATCH.done,
BATCH.begin_mask,
init_value=0.,
normalize=True)
return BATCH
def learn(self, BATCH: Data):
BATCH = self._preprocess_BATCH(BATCH) # [T, B, *]
for _ in range(self._epochs):
kls = []
for _BATCH in BATCH.sample(self._chunk_length, self.batch_size, repeat=self._sample_allow_repeat):
_BATCH = self._before_train(_BATCH)
summaries, kl = self._train(_BATCH)
kls.append(kl)
self.summaries.update(summaries)
self._after_train()
if self._use_early_stop and sum(kls) / len(kls) > self._kl_stop:
break
def _train(self, BATCH):
if self._share_net:
summaries, kl = self.train_share(BATCH)
else:
summaries = dict()
actor_summaries, kl = self.train_actor(BATCH)
critic_summaries = self.train_critic(BATCH)
summaries.update(actor_summaries)
summaries.update(critic_summaries)
if self._use_kl_loss:
# ref: https://github.com/joschu/modular_rl/blob/6970cde3da265cf2a98537250fea5e0c0d9a7639/modular_rl/ppo.py#L93
if kl > self._kl_high:
self._kl_coef *= self._kl_alpha
elif kl < self._kl_low:
self._kl_coef /= self._kl_alpha
summaries.update({
'Statistics/kl_coef': self._kl_coef
})
return summaries, kl
@iton
def train_share(self, BATCH):
    """One optimization step when actor and critic share a single network.

    Computes the clipped PPO surrogate (with optional dual clip and KL /
    extra penalties) plus the (optionally clipped) value loss, then takes
    one optimizer step on their weighted sum. Returns (summaries, kl).
    """
    if self.is_continuous:
        # [T, B, A], [T, B, A], [T, B, 1]
        mu, log_std, value = self.net(BATCH.obs, begin_mask=BATCH.begin_mask)
        pi = td.Independent(td.Normal(mu, log_std.exp()), 1)
        log_prob_new = pi.log_prob(BATCH.action).unsqueeze(-1)   # [T, B, 1]
        entropy = pi.entropy().unsqueeze(-1)                     # [T, B, 1]
    else:
        # [T, B, A], [T, B, 1]
        logits, value = self.net(BATCH.obs, begin_mask=BATCH.begin_mask)
        logp_all = logits.log_softmax(-1)
        log_prob_new = (BATCH.action * logp_all).sum(-1, keepdim=True)    # [T, B, 1]
        entropy = -(logp_all.exp() * logp_all).sum(-1, keepdim=True)      # [T, B, 1]

    # --- clipped policy surrogate ---
    ratio = (log_prob_new - BATCH.log_prob).exp()   # [T, B, 1]
    surrogate = ratio * BATCH.gae_adv               # [T, B, 1]
    clipped_surrogate = th.minimum(
        surrogate,
        ratio.clamp(1.0 - self._epsilon, 1.0 + self._epsilon) * BATCH.gae_adv
    )  # [T, B, 1]
    # ref: https://github.com/thu-ml/tianshou/blob/c97aa4065ee8464bd5897bb86f1f81abd8e2cff9/tianshou/policy/modelfree/ppo.py#L159
    if self._use_duel_clip:
        # Dual clip: bound the surrogate from below for negative advantages.
        duel = th.maximum(clipped_surrogate, (1.0 + self._duel_epsilon) * BATCH.gae_adv)
        clipped_surrogate = th.where(BATCH.gae_adv < 0, duel, clipped_surrogate)
    actor_loss = -(clipped_surrogate + self._ent_coef * entropy).mean()  # 1

    # --- KL penalty terms ---
    # ref: https://github.com/joschu/modular_rl/blob/6970cde3da265cf2a98537250fea5e0c0d9a7639/modular_rl/ppo.py#L40
    # ref: https://github.com/hill-a/stable-baselines/blob/b3f414f4f2900403107357a2206f80868af16da3/stable_baselines/ppo2/ppo2.py#L185
    if self._kl_reverse:  # TODO:
        kl = .5 * (log_prob_new - BATCH.log_prob).square().mean()  # 1
    else:
        # a sample estimate for KL-divergence, easy to compute
        kl = .5 * (BATCH.log_prob - log_prob_new).square().mean()
    if self._use_kl_loss:
        actor_loss += self._kl_coef * kl
    if self._use_extra_loss:
        actor_loss += self._extra_coef * th.maximum(th.zeros_like(kl), kl - self._kl_cutoff).square().mean()

    # --- (optionally clipped) value loss ---
    td_error = BATCH.discounted_reward - value  # [T, B, 1]
    if self._use_vclip:
        # Limit how far the new value may move from the rollout-time value.
        # ref: https://github.com/llSourcell/OpenAI_Five_vs_Dota2_Explained/blob/c5def7e57aa70785c2394ea2eeb3e5f66ad59a53/train.py#L154
        # ref: https://github.com/hill-a/stable-baselines/blob/b3f414f4f2900403107357a2206f80868af16da3/stable_baselines/ppo2/ppo2.py#L172
        value_clip = BATCH.value + (value - BATCH.value).clamp(-self._value_epsilon,
                                                               self._value_epsilon)
        td_square = th.maximum(td_error.square(),
                               (BATCH.discounted_reward - value_clip).square())
    else:
        td_square = td_error.square()
    critic_loss = 0.5 * td_square.mean()  # 1

    self.oplr.optimize(actor_loss + self._vf_coef * critic_loss)
    return {
        'LOSS/actor_loss': actor_loss,
        'LOSS/critic_loss': critic_loss,
        'Statistics/kl': kl,
        'Statistics/entropy': entropy.mean(),
        'LEARNING_RATE/lr': self.oplr.lr
    }, kl
@iton
def train_actor(self, BATCH):
    """Optimize the standalone actor with the clipped PPO objective.

    Returns (summaries, kl).
    """
    if self.is_continuous:
        # [T, B, A], [T, B, A]
        mu, log_std = self.actor(BATCH.obs, begin_mask=BATCH.begin_mask)
        pi = td.Independent(td.Normal(mu, log_std.exp()), 1)
        log_prob_new = pi.log_prob(BATCH.action).unsqueeze(-1)   # [T, B, 1]
        entropy = pi.entropy().unsqueeze(-1)                     # [T, B, 1]
    else:
        logits = self.actor(BATCH.obs, begin_mask=BATCH.begin_mask)  # [T, B, A]
        logp_all = logits.log_softmax(-1)                            # [T, B, A]
        log_prob_new = (BATCH.action * logp_all).sum(-1, keepdim=True)   # [T, B, 1]
        entropy = -(logp_all.exp() * logp_all).sum(-1, keepdim=True)     # [T, B, 1]
    ratio = (log_prob_new - BATCH.log_prob).exp()          # [T, B, 1]
    kl = (BATCH.log_prob - log_prob_new).square().mean()   # sample KL estimate, 1
    surrogate = ratio * BATCH.gae_adv                      # [T, B, 1]
    # Equivalent PPO clip: bound the surrogate by the advantage-sign-
    # dependent limit instead of clamping the ratio itself.
    clip_bound = th.where(BATCH.gae_adv > 0,
                          (1 + self._epsilon) * BATCH.gae_adv,
                          (1 - self._epsilon) * BATCH.gae_adv)
    clipped_surrogate = th.minimum(surrogate, clip_bound)  # [T, B, 1]
    if self._use_duel_clip:
        clipped_surrogate = th.maximum(clipped_surrogate,
                                       (1.0 + self._duel_epsilon) * BATCH.gae_adv)
    actor_loss = -(clipped_surrogate + self._ent_coef * entropy).mean()  # 1
    if self._use_kl_loss:
        actor_loss += self._kl_coef * kl
    if self._use_extra_loss:
        actor_loss += self._extra_coef * th.maximum(th.zeros_like(kl), kl - self._kl_cutoff).square().mean()
    self.actor_oplr.optimize(actor_loss)
    return {
        'LOSS/actor_loss': actor_loss,
        'Statistics/kl': kl,
        'Statistics/entropy': entropy.mean(),
        'LEARNING_RATE/actor_lr': self.actor_oplr.lr
    }, kl
@iton
def train_critic(self, BATCH):
    """Optimize the standalone critic on (optionally clipped) squared TD error."""
    v_pred = self.critic(BATCH.obs, begin_mask=BATCH.begin_mask)  # [T, B, 1]
    residual = BATCH.discounted_reward - v_pred                   # [T, B, 1]
    if self._use_vclip:
        # Limit how far the new value estimate may move from the rollout value.
        v_clipped = BATCH.value + (v_pred - BATCH.value).clamp(-self._value_epsilon,
                                                               self._value_epsilon)
        clipped_residual = BATCH.discounted_reward - v_clipped    # [T, B, 1]
        sq_err = th.maximum(residual.square(), clipped_residual.square())
    else:
        sq_err = residual.square()                                # [T, B, 1]
    critic_loss = 0.5 * sq_err.mean()                             # 1
    self.critic_oplr.optimize(critic_loss)
    return {
        'LOSS/critic_loss': critic_loss,
        'LEARNING_RATE/critic_lr': self.critic_oplr.lr
    }
import numpy as np
class DefaultFunctions:
    """Default linear-dynamics / quadratic-reward model for a 1-D control problem.

    All coefficients are plain attributes assigned externally after
    construction; call ``init_params()`` once T, N, K_pol and K_cus are set.
    The trailing ``+ 0 * u + 0 * x`` terms in several methods keep numpy
    broadcasting consistent when x / u are arrays.
    """

    def __init__(self):
        self.T = None              # time horizon
        self.N = None              # number of time steps
        self.K_cus = None          # number of custom basis functions
        self.K_pol = None          # number of polynomial basis functions
        # running-reward coefficients: r = c + x*x_ + u*u_ + xx*x^2 + uu*u^2 + xu*x*u
        self.coeff_rr_u = None
        self.coeff_rr_uu = None
        self.coeff_rr_xu = None
        self.coeff_rr_xx = None
        self.coeff_rr_c = None
        self.coeff_rr_x = None
        # diffusion coefficients: sigma = c + x_coef*x + u_coef*u
        self.coeff_sigma_x = None
        self.coeff_sigma_u = None
        self.coeff_sigma_c = None
        self.dt = None             # step size; derived by init_params
        # drift coefficients: mu = c + x_coef*x + u_coef*u
        self.coeff_mu_u = None
        self.coeff_mu_x = None
        self.coeff_mu_c = None
        self.M = None              # number of samples drawn per call
        self.measure_mu = None     # (location, scale) of the sampling measure
        self.K = None              # total basis size; derived by init_params

    def init_params(self):
        """Derive dt and K from the raw attributes (call after assigning them)."""
        self.dt = self.T / self.N
        self.K = self.K_pol + self.K_cus

    def generate_training_points(self, x):
        """Draw M points from the Gaussian measure (location, scale) = measure_mu.

        NOTE(review): the argument ``x`` is unused here — presumably kept for
        interface compatibility with state-dependent samplers; confirm.
        """
        return self.measure_mu[1] * np.random.randn(self.M) + self.measure_mu[0]

    def transition_function_deterministic(self, n, x, u):
        """Drift increment mu(x, u) * dt of the Euler step."""
        return (self.coeff_mu_c + self.coeff_mu_x * x + self.coeff_mu_u * u) * self.dt

    def transition_function_stochastic(self, n, x, u):
        """Diffusion magnitude sigma(x, u) * sqrt(dt) of the Euler step."""
        return np.sqrt(self.dt) * (self.coeff_sigma_c + self.coeff_sigma_x * x + self.coeff_sigma_u * u)

    def transition_function(self, n, x, u):
        """One full transition step: x + drift + diffusion + standard noise.

        NOTE(review): the Gaussian noise is *added to* rather than *multiplied
        with* the diffusion term; an Euler–Maruyama step would normally use
        ``stochastic * randn``. Behavior preserved — confirm intent.
        """
        return x + self.transition_function_deterministic(n, x, u) + \
            self.transition_function_stochastic(n, x, u) + np.random.randn(self.M)

    def running_reward(self, x, u):
        """Quadratic running reward r(x, u)."""
        return self.coeff_rr_c + self.coeff_rr_x * x + self.coeff_rr_u * u + \
            self.coeff_rr_xx * x ** 2 + self.coeff_rr_uu * u ** 2 + \
            self.coeff_rr_xu * x * u + u * 0 + x * 0

    def first_derivative(self, n, x, u):
        """First derivative of the running reward w.r.t. u."""
        return self.coeff_rr_u + 2 * self.coeff_rr_uu * u + self.coeff_rr_xu * x + u * 0

    def second_derivative(self, n, x, u):
        """Second derivative of the running reward w.r.t. u (constant)."""
        return 2 * self.coeff_rr_uu + u * 0 + x * 0

    def transition_function_deterministic_du(self, n, x, u):
        """d(drift)/du — constant for linear dynamics."""
        return self.coeff_mu_u * self.dt + 0 * u + 0 * x

    def transition_function_stochastic_du(self, n, x, u):
        """d(diffusion)/du — constant for linear dynamics."""
        return np.sqrt(self.dt) * self.coeff_sigma_u + 0 * u + 0 * x

    def transition_function_deterministic_duu(self, n, x, u):
        """d^2(drift)/du^2 — identically zero for linear dynamics."""
        return 0 * u + 0 * x

    def transition_function_stochastic_duu(self, n, x, u):
        """d^2(diffusion)/du^2 — identically zero for linear dynamics."""
        return 0 * u + 0 * x
class DefaultFunctions:
def __init__(self):
self.T = None
self.N = None
self.K_cus = None
self.K_pol = None
self.coeff_rr_u = None
self.coeff_rr_uu = None
self.coeff_rr_xu = None
self.coeff_rr_xx = None
self.coeff_rr_c = None
self.coeff_rr_x = None
self.coeff_sigma_x = None
self.coeff_sigma_u = None
self.coeff_sigma_c = None
self.dt = None
self.coeff_mu_u = None
self.coeff_mu_x = None
self.coeff_mu_c = None
self.M = None
self.measure_mu = None
self.K = None
def init_params(self):
self.dt = self.T / self.N
self.K = self.K_pol + self.K_cus
def generate_training_points(self, x):
return self.measure_mu[1] * np.random.randn(self.M) + self.measure_mu[0]
def transition_function_deterministic(self, n, x, u):
return (self.coeff_mu_c + self.coeff_mu_x * x + self.coeff_mu_u * u) * self.dt
def transition_function_stochastic(self, n, x, u):
return np.sqrt(self.dt) * (self.coeff_sigma_c + self.coeff_sigma_x * x + self.coeff_sigma_u * u)
def transition_function(self, n, x, u):
return x + self.transition_function_deterministic(n, x, u) + \
self.transition_function_stochastic(n, x, u) + np.random.randn(self.M)
def running_reward(self, x, u):
return self.coeff_rr_c + self.coeff_rr_x * x + self.coeff_rr_u * u + \
self.coeff_rr_xx * x ** 2 + self.coeff_rr_uu * u ** 2 + \
self.coeff_rr_xu * x * u + u * 0 + x * 0
def first_derivative(self, n, x, u):
return self.coeff_rr_u + 2 * self.coeff_rr_uu * u + self.coeff_rr_xu * x + u * 0
def second_derivative(self, n, x, u):
return 2 * self.coeff_rr_uu + u * 0 + x * 0
def transition_function_deterministic_du(self, n, x, u):
return self.coeff_mu_u * self.dt + 0 * u + 0 * x
def transition_function_stochastic_du(self, n, x, u):
return np.sqrt(self.dt) * self.coeff_sigma_u + 0 * u + 0 * x
def transition_function_deterministic_duu(self, n, x, u):
return 0 * u + 0 * x
def transition_function_stochastic_duu(self, n, x, u):
return 0 * u + 0 * x | 0.759582 | 0.234472 |
from .Graphics.objects.object2d import Connection, Pointer
from collections import deque
def create_bin_adj_list(Nodes, Edges, weighted=False):
    '''
    Binary Adj. List Format:
        Weighted:   [(From, To, Weight), ..., (From, None, None)]
        Unweighted: [(From, To), ..., (From, None)]
    Any pairs with a None 'To' are not connected.

    'From' and 'To' are nodes represented by their index into
    Nodes.values(). Connection edges are undirected, so both directions
    are recorded; Pointer edges are directed.

    Time Complexity: O(E+V), where E is the # of edges and V the # of
    vertices.

    Raises Exception if the edges are not all of the same class.
    return: list covering all len(Nodes) node indices
    '''
    nodes = list(Nodes.values())
    sources = set()     # node indices that appear as a 'From'
    edge_type = None
    adj_list = []
    for edge in Edges.values():
        if edge_type is None:
            edge_type = edge.__class__
        if not isinstance(edge, edge_type):
            raise Exception("All edges must be of the same class/type.")
        index1, index2 = nodes.index(edge.obj1), nodes.index(edge.obj2)
        sources.add(index1)
        # A missing weight defaults to 0 in weighted mode.
        weight_part = (((0,) if edge.weight is None else (edge.weight,)) if weighted else ())
        adj_list.append((index1, index2) + weight_part)
        if edge_type is Connection:
            # Connections are undirected: record the reverse direction too.
            adj_list.append((index2, index1) + weight_part)
    # IDIOM FIX: plain loop instead of a side-effect list comprehension.
    # Every node that never appears as a 'From' still gets an entry so all
    # node indices are represented.
    for n in range(len(nodes)):
        if n not in sources:
            adj_list.append((n, None) + ((None,) if weighted else ()))
    print('Binary Adj. List:', adj_list)
    return adj_list
def create_adj_dict(Nodes, Edges, weighted=False):
    '''
    Adj. List Format:
        Weighted:   {From: [(To, Weight), ...], ..., From: []}
        Unweighted: {From: [To, ...], ..., From: []}
    Any key with a [] value has no outgoing connections.

    Time Complexity: O(2*(E+V)), where E is the # of edges and V the #
    of vertices.

    return: dict with one key per node index (size len(Nodes))
    '''
    bin_adj_list = create_bin_adj_list(Nodes, Edges, weighted=weighted)
    # ROBUSTNESS FIX: with no nodes and no edges the list is empty and the
    # original `bin_adj_list[0]` raised IndexError.
    if not bin_adj_list:
        print('Adj. dictionary:', {})
        return {}
    # Re-derive weightedness from the tuple arity actually produced.
    weighted = len(bin_adj_list[0]) == 3
    adj_dict = {}
    for pair in bin_adj_list:
        from_, to, weight = pair + (() if weighted else (None,))
        adj_dict.setdefault(from_, [])
        if to is not None:
            adj_dict[from_].append((to, weight) if weighted else to)
    print('Adj. dictionary:', adj_dict)
    return adj_dict
def DFS(window, Nodes, Edges, weighted=False, start=None, visited_fn=None):  # Depth First Search Island Finder
    """Find connected components ('islands') via recursive depth-first search.

    If `start` is given, its component is explored first; remaining
    unvisited nodes then seed further components. `visited_fn`, when
    provided, is invoked as each node is first visited.
    return: list of tuples, one tuple of node indices per component.
    """
    # BUGFIX: the original message used `% len(Nodes)-1`, which binds as
    # `("..." % len(Nodes)) - 1` and raised TypeError whenever the assert fired.
    assert start is None or (isinstance(start, int) and 0 <= start < len(Nodes)), \
        "Starting index must be None or in range: [0, '%i']." % (len(Nodes) - 1)
    adj_dict = create_adj_dict(Nodes, Edges, weighted=weighted)
    node_count = len(adj_dict)
    visited = [False] * node_count
    islands = []

    def dfs(n=0):
        # Grow the current island from node n.
        if visited[n]:
            return
        visited[n] = True
        islands[-1] += (n,)
        if visited_fn is not None:
            visited_fn(window, n, adj_dict[n], visited, weighted)
        # IDIOM FIX: plain loop instead of a side-effect list comprehension.
        for node in adj_dict[n]:
            dfs(n=node[0] if weighted else node)

    if start is not None:
        islands.append(())
        dfs(n=start)
    for n in range(node_count):
        if not visited[n]:
            islands.append(())
            dfs(n=n)
    return islands
def BFS(window, Nodes, Edges, weighted=False, start=None, end=None, visited_fn=None):  # Breadth First Search Path Finder
    """Breadth-first search from `start` (default 0), reconstructing a path to `end`.

    `visited_fn`, when provided, is invoked as each node is discovered.
    return: list of node indices from start to end (shortest by edge
    count), or [] when `end` is None or unreachable.
    """
    # BUGFIX: `% (len(Nodes) - 1)` — the original `% len(Nodes)-1` raised
    # TypeError whenever the assert fired.
    assert start is None or (isinstance(start, int) and 0 <= start < len(Nodes)), \
        "Starting index must be None or in range: [0, '%i']." % (len(Nodes) - 1)
    adj_dict = create_adj_dict(Nodes, Edges, weighted=weighted)
    node_count = len(adj_dict)
    queue = deque()
    visited = [False] * node_count
    s = 0 if start is None else start
    queue.append(s)
    visited[s] = True
    prev = [None] * node_count   # prev[n] = node we discovered n from
    while queue:
        # BUGFIX: popleft() gives FIFO order (true breadth-first). The
        # original used pop(), which pops the right end of the deque —
        # a stack, i.e. depth-first — so `prev` did not describe
        # shortest paths.
        node = queue.popleft()
        conns = adj_dict[node]
        if visited_fn is not None and node == s:
            visited_fn(window, node, conns, visited, weighted)
        for props in conns:
            n = props[0] if weighted else props
            if not visited[n]:
                queue.append(n)
                visited[n] = True
                prev[n] = node
                if visited_fn is not None:
                    visited_fn(window, n, adj_dict[n], visited, weighted)
    if end is None:
        return []
    # Walk the predecessor chain back from `end`, then reverse it.
    path = [end]
    while path[-1] != s and path[-1] is not None:
        path.append(prev[path[-1]])
    path.reverse()
    return path if path[0] == s else []
from collections import deque
def create_bin_adj_list(Nodes, Edges, weighted=False):
'''
Binary Adj. List Format:
Weighted:
[(From, To, Weight),
...,
(From, None, None)]
Unweighted:
[(From, To),
...,
(From, None)]
Any pairs with a None 'To', are not
connected.
Time Complexity:
O(E+V)
, where E is the # of Edges,
and V is the number of vertecies
'From' and 'To' are Nodes represented
by their index, respectively to the
Nodes.values()
return list size len(Nodes)
'''
nodes = list(Nodes.values())
indecies = set()
_type = None
adj_list = []
for edge in Edges.values():
if _type is None:
_type = edge.__class__
if not isinstance(edge, _type):
raise Exception("All edges must be of the same class/type.")
index1, index2 = nodes.index(edge.obj1), nodes.index(edge.obj2)
indecies.add(index1)
adj_list.append((index1, index2)+(((0,) if edge.weight is None else (edge.weight,)) if weighted else ()))
if _type is Connection:
adj_list.append((index2, index1)+(((0,) if edge.weight is None else (edge.weight,)) if weighted else ()))
[adj_list.append((n, None)+((None,) if weighted else ())) for n in range(len(nodes)) if not n in indecies]
print('Binary Adj. List:', adj_list)
return adj_list
def create_adj_dict(Nodes, Edges, weighted=False):
'''
Adj. List Format:
Weighted: {From: [(To, Weight), ...],
...,
From: []}
Unweighted: {From: [To, ...],
...,
From: []}
Any pairs with a [] Value,
are not connected.
Time Complexity:
O(2*(E+V))
, where E is the # of Edges,
and V is the number of
vertecies.
return dict size len(Nodes)
'''
bin_adj_list = create_bin_adj_list(Nodes, Edges, weighted=weighted)
weighted = len(bin_adj_list[0]) == 3
adj_dict = {}
for pair in bin_adj_list:
from_, to, weight = pair+(() if weighted else (None,))
if not from_ in adj_dict:
adj_dict.update({from_: []})
if to is not None:
if weighted:
adj_dict[from_].append((to, weight))
else:
adj_dict[from_].append(to)
print('Adj. dictionary:', adj_dict)
return adj_dict
def DFS(window, Nodes, Edges, weighted=False, start=None, visited_fn=None): #Depth First Search Island Finder
assert start is None or (isinstance(start, int) and start >= 0 and start < len(Nodes)), "Starting index must be None or in range: [0, '%i']." % len(Nodes)-1
adj_dict = create_adj_dict(Nodes, Edges, weighted=weighted)
nodes = len(adj_dict)
visited = [False for _ in range(nodes)]
islands = []
def dfs(n=0):
if visited[n]:
return
visited[n] = True
islands[-1] += (n,)
if visited_fn is not None:
visited_fn(window, n, adj_dict[n], visited, weighted)
[dfs(n=node[0] if weighted else node) for node in adj_dict[n]]
if not start is None:
islands.append(())
dfs(n=start)
for n in range(nodes):
if not visited[n]:
islands.append(())
dfs(n=n)
return islands
def BFS(window, Nodes, Edges, weighted=False, start=None, end=None, visited_fn=None): #Breath First Search Path Finder
assert start is None or (isinstance(start, int) and start >= 0 and start < len(Nodes)), "Starting index must be None or in range: [0, '%i']." % len(Nodes)-1
adj_dict = create_adj_dict(Nodes, Edges, weighted=weighted)
nodes = len(adj_dict)
queue = deque()
visited = [False for _ in range(nodes)]
s = 0 if start is None else start
queue.append(s)
visited[s] = True
prev = [None for _ in range(nodes)]
while not len(queue) == 0:
node = queue.pop()
conns = adj_dict[node]
if visited_fn is not None and node == s:
visited_fn(window, node, adj_dict[node], visited, weighted)
for props in conns:
if weighted:
n, weight = props
else:
n = props
weight = None
if not visited[n]:
queue.append(n)
visited[n] = True
prev[n] = node
if visited_fn is not None:
visited_fn(window, n, adj_dict[n], visited, weighted)
if end is None:
return []
path = [end]
while path[-1] != s and path[-1] is not None:
path.append(prev[path[-1]])
path.reverse()
return path if path[0] == s else [] | 0.695752 | 0.414306 |
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
from hypothesis import given
from numpy.testing import assert_allclose
from mygrad import Tensor
from mygrad.nnet.activations import logsoftmax, softmax
from tests.utils.checkers import is_float_arr
from tests.custom_strategies import valid_axes
from tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory
log_largest = np.log(np.finfo(np.float64).max)
@given(
    arr=hnp.arrays(
        shape=hnp.array_shapes(min_dims=0, min_side=0, max_side=0),
        dtype=hnp.floating_dtypes() | hnp.integer_dtypes(),
        elements=dict(min_value=-10, max_value=10),
    ),
    data=st.data(),
)
def test_softmax_on_empty_arrays(arr: np.ndarray, data: st.DataObject):
    """softmax of an empty array preserves shape and promotes ints to float64."""
    axes = data.draw(valid_axes(arr.ndim))
    expected_dtype = arr.dtype if is_float_arr(arr) else np.dtype(np.float64)
    result = softmax(arr, axis=axes)
    assert result.shape == arr.shape
    assert result.dtype == expected_dtype
@given(
    hnp.arrays(
        shape=hnp.array_shapes(min_dims=0, min_side=0),
        dtype=hnp.integer_dtypes(),
        elements=dict(min_value=-10, max_value=10),
    )
)
def test_softmax_on_ints(arr: np.ndarray):
    """Integer input must match the result of converting to float64 first."""
    from_ints = softmax(arr)
    from_floats = softmax(arr.astype(np.float64))
    assert from_floats.dtype == from_ints.dtype
    assert_allclose(from_floats, from_ints, atol=1e-3, rtol=1e-3)
@given(
    x=hnp.arrays(
        shape=hnp.array_shapes(min_dims=0),
        dtype=np.float64,
        elements=st.floats(-log_largest, log_largest),
    ),
    data=st.data(),
)
def test_softmax_numerical_stability(x: np.ndarray, data: st.DataObject):
    """Even at extreme magnitudes, outputs stay in [0, 1] and sum to 1."""
    axis = data.draw(valid_axes(x.ndim), label="axis")
    probs = softmax(x, axis=axis).data
    assert np.all((0 <= probs) & (probs <= 1))
    assert_allclose(probs.sum(axis=axis), 1.0)
@given(
    x=hnp.arrays(
        shape=hnp.array_shapes(min_dims=0),
        dtype=np.float64,
        elements=st.floats(-log_largest, log_largest),
    ),
    data=st.data(),
)
def test_log_softmax_numerical_stability(x: np.ndarray, data: st.DataObject):
    """exp(logsoftmax) must behave like a proper probability distribution."""
    axis = data.draw(valid_axes(x.ndim), label="axis")
    probs = np.exp(logsoftmax(x, axis=axis).data)
    assert np.all((0 <= probs) & (probs <= 1)), probs
    assert_allclose(probs.sum(axis=axis), 1.0)
def numpy_softmax(x, axis):
    """Reference softmax: subtract the axis max for numerical stability."""
    x = np.asarray(x)
    shifted_exp = np.exp(x - x.max(axis, keepdims=True))
    return shifted_exp / shifted_exp.sum(axis, keepdims=True)
def numpy_logsoftmax(x, axis):
    """Reference log-softmax, defined via the softmax reference above."""
    return np.log(numpy_softmax(x, axis))
@fwdprop_test_factory(
    mygrad_func=softmax,
    true_func=numpy_softmax,
    num_arrays=1,
    kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
)
def test_softmax_fwd():
    """Factory-generated: softmax forward pass matches the numpy reference."""
@backprop_test_factory(
    mygrad_func=softmax,
    true_func=numpy_softmax,
    num_arrays=1,
    kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
    vary_each_element=True,
)
def test_softmax_bkwd():
    """Factory-generated: softmax backward pass matches numerical gradients."""
@fwdprop_test_factory(
    mygrad_func=logsoftmax,
    true_func=numpy_logsoftmax,
    num_arrays=1,
    kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
    index_to_bnds={0: (-10, 10)},
)
def test_logsoftmax_fwd():
    """Factory-generated: logsoftmax forward pass matches the numpy reference."""
@backprop_test_factory(
    mygrad_func=logsoftmax,
    true_func=numpy_logsoftmax,
    num_arrays=1,
    kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
    vary_each_element=True,
    index_to_bnds={0: (-10, 10)},
)
def test_logsoftmax_bkwd():
    """Factory-generated: logsoftmax backward pass matches numerical gradients."""
def test_static_softmax1d():
    """Hard-coded 1-D softmax check; values verified against theano.tensor.softmax."""
    skew = np.array([0.87566484, 0.53596079, 0.85693981, 0.09526036])
    x = Tensor(np.array([0.0, 1.0, 2.0, 3.0]))
    loss = (softmax(x, constant=False) * skew).sum()
    assert_allclose(actual=loss.data, desired=np.array(0.33911235096116465))
    loss.backward()
    expected_grad = np.array([0.01720112, 0.01715422, 0.12266443, -0.15701977])
    assert_allclose(x.grad, expected_grad, atol=1e-5, rtol=1e-5)
def test_static_softmax2d():
    """Hard-coded 2-D softmax check; values verified against theano.tensor.softmax."""
    skew = np.array(
        [
            [0.87566484, 0.53596079, 0.85693981, 0.09526036],
            [0.32024455, 0.81532148, 0.2480434, 0.85119342],
            [0.57943085, 0.33958252, 0.95864464, 0.22881712],
        ]
    )
    x = Tensor(np.array([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0]]))
    loss = (softmax(x, constant=False) * skew).sum()
    assert_allclose(actual=loss.data, desired=np.array(1.449875865467131))
    loss.backward()
    expected_grad = np.array(
        [
            [0.01720112, 0.01715422, 0.12266443, -0.15701977],
            [-0.01179518, 0.01108053, -0.10425844, 0.10497309],
            [0.00502799, -0.00723393, 0.12698131, -0.12477536],
        ]
    )
    assert_allclose(x.grad, expected_grad, atol=1e-5, rtol=1e-5)
def test_static_logsoftmax1d():
    """Hard-coded 1-D logsoftmax check; values verified against theano.tensor.softmax."""
    skew = np.array([0.87566484, 0.53596079, 0.85693981, 0.09526036])
    x = Tensor(np.array([0.0, 1.0, 2.0, 3.0]))
    loss = (logsoftmax(x, constant=False) * skew).sum()
    assert_allclose(actual=loss.data, desired=np.array(-5.596387676353177))
    loss.backward()
    expected_grad = np.array([0.79988389, 0.3299668, 0.29699009, -1.42684078])
    assert_allclose(x.grad, expected_grad, atol=1e-5, rtol=1e-5)
def test_static_logsoftmax2d():
    """Hard-coded 2-D logsoftmax check; values verified against theano.tensor.softmax."""
    skew = np.array(
        [
            [0.87566484, 0.53596079, 0.85693981, 0.09526036],
            [0.32024455, 0.81532148, 0.2480434, 0.85119342],
            [0.57943085, 0.33958252, 0.95864464, 0.22881712],
        ]
    )
    x = Tensor(np.array([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0]]))
    loss = (logsoftmax(x, constant=False) * skew).sum()
    assert_allclose(actual=loss.data, desired=np.array(-13.722895761739732))
    loss.backward()
    expected_grad = np.array(
        [
            [0.79988389, 0.3299668, 0.29699009, -1.42684078],
            [0.24859989, 0.62057111, -0.281343, -0.587828],
            [0.5119002, 0.15601518, 0.45965687, -1.12757225],
        ]
    )
    assert_allclose(x.grad, expected_grad, atol=1e-5, rtol=1e-5)
import hypothesis.strategies as st
import numpy as np
from hypothesis import given
from numpy.testing import assert_allclose
from mygrad import Tensor
from mygrad.nnet.activations import logsoftmax, softmax
from tests.utils.checkers import is_float_arr
from tests.custom_strategies import valid_axes
from tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory
log_largest = np.log(np.finfo(np.float64).max)
@given(
arr=hnp.arrays(
shape=hnp.array_shapes(min_dims=0, min_side=0, max_side=0),
dtype=hnp.floating_dtypes() | hnp.integer_dtypes(),
elements=dict(min_value=-10, max_value=10),
),
data=st.data(),
)
def test_softmax_on_empty_arrays(arr: np.ndarray, data: st.DataObject):
axes = data.draw(valid_axes(arr.ndim))
out = softmax(arr, axis=axes)
expected_dtype = arr.dtype if is_float_arr(arr) else np.dtype(np.float64)
assert out.shape == arr.shape
assert out.dtype == expected_dtype
@given(
hnp.arrays(
shape=hnp.array_shapes(min_dims=0, min_side=0),
dtype=hnp.integer_dtypes(),
elements=dict(min_value=-10, max_value=10),
)
)
def test_softmax_on_ints(arr: np.ndarray):
actual = softmax(arr)
desired = softmax(arr.astype(np.float64))
assert desired.dtype == actual.dtype
assert_allclose(desired, actual, atol=1e-3, rtol=1e-3)
@given(
x=hnp.arrays(
shape=hnp.array_shapes(min_dims=0),
dtype=np.float64,
elements=st.floats(-log_largest, log_largest),
),
data=st.data(),
)
def test_softmax_numerical_stability(x: np.ndarray, data: st.DataObject):
axis = data.draw(valid_axes(x.ndim), label="axis")
out = softmax(x, axis=axis).data
assert np.all(np.logical_and(0 <= out, out <= 1))
assert_allclose(out.sum(axis=axis), 1.0)
@given(
x=hnp.arrays(
shape=hnp.array_shapes(min_dims=0),
dtype=np.float64,
elements=st.floats(-log_largest, log_largest),
),
data=st.data(),
)
def test_log_softmax_numerical_stability(x: np.ndarray, data: st.DataObject):
axis = data.draw(valid_axes(x.ndim), label="axis")
out = np.exp(logsoftmax(x, axis=axis).data)
assert np.all(np.logical_and(0 <= out, out <= 1)), out
assert_allclose(out.sum(axis=axis), 1.0)
def numpy_softmax(x, axis):
x = np.asarray(x)
x = np.exp(x - x.max(axis, keepdims=True))
return x / x.sum(axis, keepdims=True)
def numpy_logsoftmax(x, axis):
return np.log(numpy_softmax(x, axis))
@fwdprop_test_factory(
mygrad_func=softmax,
true_func=numpy_softmax,
num_arrays=1,
kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
)
def test_softmax_fwd():
pass
@backprop_test_factory(
mygrad_func=softmax,
true_func=numpy_softmax,
num_arrays=1,
kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
vary_each_element=True,
)
def test_softmax_bkwd():
pass
@fwdprop_test_factory(
mygrad_func=logsoftmax,
true_func=numpy_logsoftmax,
num_arrays=1,
kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
index_to_bnds={0: (-10, 10)},
)
def test_logsoftmax_fwd():
pass
@backprop_test_factory(
mygrad_func=logsoftmax,
true_func=numpy_logsoftmax,
num_arrays=1,
kwargs=dict(axis=lambda arrs: valid_axes(arrs.ndim)),
vary_each_element=True,
index_to_bnds={0: (-10, 10)},
)
def test_logsoftmax_bkwd():
pass
def test_static_softmax1d():
# Verified against theano.tensor.softmax
skew = np.array([0.87566484, 0.53596079, 0.85693981, 0.09526036])
x = np.array([0.0, 1.0, 2.0, 3.0])
x = Tensor(x)
f = (softmax(x, constant=False) * skew).sum()
out = np.array(0.33911235096116465)
assert_allclose(actual=f.data, desired=out)
f.backward()
dx = np.array([0.01720112, 0.01715422, 0.12266443, -0.15701977])
assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
def test_static_softmax2d():
# Verified against theano.tensor.softmax
skew = np.array(
[
[0.87566484, 0.53596079, 0.85693981, 0.09526036],
[0.32024455, 0.81532148, 0.2480434, 0.85119342],
[0.57943085, 0.33958252, 0.95864464, 0.22881712],
]
)
x = np.array([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0]])
x = Tensor(x)
f = (softmax(x, constant=False) * skew).sum()
out = np.array(1.449875865467131)
assert_allclose(actual=f.data, desired=out)
f.backward()
dx = np.array(
[
[0.01720112, 0.01715422, 0.12266443, -0.15701977],
[-0.01179518, 0.01108053, -0.10425844, 0.10497309],
[0.00502799, -0.00723393, 0.12698131, -0.12477536],
]
)
assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
def test_static_logsoftmax1d():
# Verified against theano.tensor.softmax
skew = np.array([0.87566484, 0.53596079, 0.85693981, 0.09526036])
x = np.array([0.0, 1.0, 2.0, 3.0])
x = Tensor(x)
f = (logsoftmax(x, constant=False) * skew).sum()
out = np.array(-5.596387676353177)
assert_allclose(actual=f.data, desired=out)
f.backward()
dx = np.array([0.79988389, 0.3299668, 0.29699009, -1.42684078])
assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
def test_static_logsoftmax2d():
# Verified against theano.tensor.softmax
skew = np.array(
[
[0.87566484, 0.53596079, 0.85693981, 0.09526036],
[0.32024455, 0.81532148, 0.2480434, 0.85119342],
[0.57943085, 0.33958252, 0.95864464, 0.22881712],
]
)
x = np.array([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0]])
x = Tensor(x)
f = (logsoftmax(x, constant=False) * skew).sum()
out = np.array(-13.722895761739732)
assert_allclose(actual=f.data, desired=out)
f.backward()
dx = np.array(
[
[0.79988389, 0.3299668, 0.29699009, -1.42684078],
[0.24859989, 0.62057111, -0.281343, -0.587828],
[0.5119002, 0.15601518, 0.45965687, -1.12757225],
]
)
assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5) | 0.705278 | 0.548553 |
import idfy_rest_client.models.merchant_error
class SignResponse(object):
    """Implementation of the 'SignResponse' model.

    Attributes:
        signed_data (string): base 64 encoded signed data
        audit_log_reference (uuid|string): Reference Id to audit log
        signing_format (SigningFormat): Signing format
        error (MerchantError): Error message
        sign_certificate_base_64_string (string): Signed with certificate
        transaction_id (uuid|string): Id to look up the transaction at a later
            time
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "signed_data":'signedData',
        "audit_log_reference":'auditLogReference',
        "signing_format":'signingFormat',
        "error":'error',
        "sign_certificate_base_64_string":'signCertificateBase64String',
        "transaction_id":'transactionId'
    }

    def __init__(self,
                 signed_data=None,
                 audit_log_reference=None,
                 signing_format=None,
                 error=None,
                 sign_certificate_base_64_string=None,
                 transaction_id=None,
                 additional_properties=None):
        """Constructor for the SignResponse class.

        BUGFIX: ``additional_properties`` previously defaulted to a mutable
        ``{}`` literal, so every instance constructed without the argument
        shared one dict; a ``None`` sentinel restores per-instance state
        while remaining backward-compatible for callers passing a dict.
        """
        # Initialize members of the class
        self.signed_data = signed_data
        self.audit_log_reference = audit_log_reference
        self.signing_format = signing_format
        self.error = error
        self.sign_certificate_base_64_string = sign_certificate_base_64_string
        self.transaction_id = transaction_id
        # Add additional model properties to the instance
        self.additional_properties = {} if additional_properties is None else additional_properties

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        signed_data = dictionary.get('signedData')
        audit_log_reference = dictionary.get('auditLogReference')
        signing_format = dictionary.get('signingFormat')
        error = idfy_rest_client.models.merchant_error.MerchantError.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None
        sign_certificate_base_64_string = dictionary.get('signCertificateBase64String')
        transaction_id = dictionary.get('transactionId')

        # Clean out expected properties from dictionary
        # NOTE: mutates the caller's dict; the leftovers become
        # additional_properties (existing documented behavior, preserved).
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model
        return cls(signed_data,
                   audit_log_reference,
                   signing_format,
                   error,
                   sign_certificate_base_64_string,
                   transaction_id,
                   dictionary)
class SignResponse(object):
    """Implementation of the 'SignResponse' model.

    Represents the result of a signing operation as returned by the API.

    Attributes:
        signed_data (string): base 64 encoded signed data
        audit_log_reference (uuid|string): Reference Id to audit log
        signing_format (SigningFormat): Signing format
        error (MerchantError): Error message
        sign_certificate_base_64_string (string): Signed with certificate
        transaction_id (uuid|string): Id to look up the transaction at a later
            time
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "signed_data":'signedData',
        "audit_log_reference":'auditLogReference',
        "signing_format":'signingFormat',
        "error":'error',
        "sign_certificate_base_64_string":'signCertificateBase64String',
        "transaction_id":'transactionId'
    }

    def __init__(self,
                 signed_data=None,
                 audit_log_reference=None,
                 signing_format=None,
                 error=None,
                 sign_certificate_base_64_string=None,
                 transaction_id=None,
                 additional_properties=None):
        """Constructor for the SignResponse class

        Args:
            additional_properties (dict, optional): unrecognised API
                properties to keep alongside the modelled ones. Defaults to
                a fresh empty dict per instance (the previous mutable
                default argument ``{}`` was shared across all instances).
        """

        # Initialize members of the class
        self.signed_data = signed_data
        self.audit_log_reference = audit_log_reference
        self.signing_format = signing_format
        self.sign_certificate_base_64_string = sign_certificate_base_64_string
        self.transaction_id = transaction_id
        self.error = error

        # Add additional model properties to the instance
        self.additional_properties = {} if additional_properties is None else additional_properties

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when
            *dictionary* is None.

        Note:
            The input dictionary is mutated: every recognised key is removed
            and the remainder is kept as ``additional_properties``.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        signed_data = dictionary.get('signedData')
        audit_log_reference = dictionary.get('auditLogReference')
        signing_format = dictionary.get('signingFormat')
        error = idfy_rest_client.models.merchant_error.MerchantError.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None
        sign_certificate_base_64_string = dictionary.get('signCertificateBase64String')
        transaction_id = dictionary.get('transactionId')

        # Clean out expected properties from dictionary
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model
        return cls(signed_data,
                   audit_log_reference,
                   signing_format,
                   error,
                   sign_certificate_base_64_string,
                   transaction_id,
                   dictionary)
from datetime import datetime
from flask import (
Blueprint,
current_app,
flash,
jsonify,
redirect,
render_template,
request,
url_for
)
from flask_login import (
current_user,
login_user,
login_required,
logout_user
)
from .models import (
Campaign,
Character,
Post,
User,
Roll
)
from .shared import (
csrf,
db
)
from .util import (
create_password_reset_key,
clear_password_reset_keys,
get_password_reset_key,
is_safe_url,
is_valid_email,
pagination_pages,
roll_dice,
send_email as _send_email
)
# Main application blueprint; templates resolve from the local 'templates' folder.
blueprint = Blueprint('base', __name__, template_folder='templates')
@blueprint.route('/')
def index():
    """Render the landing page."""
    return render_template('index.jinja2')
@blueprint.route('/campaigns')
def campaigns():
    """List every campaign."""
    all_campaigns = Campaign.query.all()
    return render_template('campaigns.jinja2', campaigns=all_campaigns)
@blueprint.route('/campaigns/create', methods=['GET', 'POST'])
@login_required
def campaign_create():
    """Create a new campaign plus its pre-approved 'DM' character."""
    if request.method == 'POST':
        # TODO check if name is unique
        new_campaign = Campaign(
            creator_user_id=current_user.id,
            name=request.form['name'],
            description=request.form['description'],
            date_created=datetime.utcnow()
        )
        # Every campaign gets a dedicated DM character owned by the creator.
        new_dm = Character(
            user_id=current_user.id,
            name='DM',
            tag='Dungeon Master',
            campaign_approved=True,
        )
        new_campaign.dm_character = new_dm
        db.session.add(new_campaign)
        db.session.add(new_dm)
        db.session.commit()
        # Second commit: the campaign id only exists after the first commit,
        # so the DM's campaign_id can only be assigned afterwards.
        new_dm.campaign_id = new_campaign.id
        db.session.commit()
        current_app.logger.info(f'User {current_user.id} created new campaign with name "{new_campaign.name}"')
        flash('New campaign created')
        return redirect(url_for('.campaigns'))
    campaigns = Campaign.query.all()
    return render_template('campaigns_create.jinja2', campaigns=campaigns)
@blueprint.route('/campaign/<int:campaign_id>/posts')
@blueprint.route('/campaign/<int:campaign_id>/posts/<int:page>')
def campaign_posts(campaign_id, page=1):
    """Show one page of a campaign's posts."""
    campaign = Campaign.query.get(campaign_id)
    if campaign is None:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    query = Post.query.filter_by(campaign_id=campaign_id)
    # Logged-in users get their saved preferences; visitors get defaults.
    if current_user.is_authenticated:
        per_page = current_user.posts_per_page
        if current_user.posts_newest_first:
            query = query.order_by(Post.id.desc())
    else:
        per_page = 20
    pagination = query.paginate(page=page, per_page=per_page)
    return render_template(
        'campaign_posts.jinja2',
        campaign=campaign,
        posts=pagination.items,
        pages=pagination.pages,
        page=page,
        pagination_pages=pagination_pages
    )
@blueprint.route('/campaign/<int:campaign_id>/info')
def campaign_info(campaign_id):
    """Render the info page for a single campaign."""
    campaign = Campaign.query.get(campaign_id)
    if campaign is None:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    return render_template('campaign_info.jinja2', campaign=campaign)
@blueprint.route('/campaign/<int:campaign_id>/new_post', methods=['POST'])
@login_required
def campaign_new_post(campaign_id):
    """Create a post in a campaign and email the other members.

    Any dice rolls the posting character made since their last post are
    attached to the new post.
    """
    campaign = Campaign.query.get(campaign_id)
    if not campaign:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    character = current_user.get_character_in_campaign(campaign)
    if not character:
        flash('You are not a member of that campaign', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
    post = Post(
        character_id=character.id,
        campaign_id=campaign.id,
        date=datetime.utcnow(),
        tag=character.tag,
        content=request.form['content']
    )
    db.session.add(post)
    # Attach rolls made by this character that are not yet tied to a post.
    pending_rolls = Roll.query.filter_by(character_id=character.id, post_id=None).all()
    for roll in pending_rolls:
        roll.post = post
    db.session.commit()
    current_app.logger.info(f'User {current_user.id} made new post in campaign {campaign.id}')
    flash('New post added')
    is_dm_post = character.campaign.dm_character_id == character.id
    # The link is identical for every recipient, so build it once instead of
    # once per loop iteration.
    link = url_for('.campaign_posts', campaign_id=campaign.id, _external=True)
    for other_character in campaign.characters:
        if other_character.id == character.id:
            continue
        if is_dm_post and other_character.user.email_for_dm_post:
            current_app.logger.info(f'Send DM post notify email to {other_character.user.id} for campaign {campaign.id}')
            send_email(
                [other_character.user.email],
                f'New DM post in "{campaign.name}"',
                f'The DM has made a new post in the campaign.\n\nCampaign link: {link}'
            )
        elif not is_dm_post and other_character.user.email_for_any_post:
            current_app.logger.info(f'Send generic post notify email to {other_character.user.id} for campaign {campaign.id}')
            send_email(
                [other_character.user.email],
                f'New post in "{campaign.name}"',
                f'{character.name} has made a new post in the campaign.\n\nCampaign link: {link}'
            )
    return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
@blueprint.route('/campaign/<int:campaign_id>/roll', methods=['GET', 'POST'])
@login_required
@csrf.exempt
def campaign_rolls(campaign_id):
    """Create (POST) and list dice rolls not yet attached to a post.

    Returns the caller's pending rolls as JSON; plain-text 404/403 errors
    when the campaign or membership is missing.
    """
    campaign = Campaign.query.get(campaign_id)
    if not campaign:
        return 'Could not find campaign with that id', 404
    character = current_user.get_character_in_campaign(campaign)
    if not character:
        return 'You are not a member of that campaign', 403
    if request.method == 'POST':
        roll_str = request.json.get('roll')
        if not roll_str:
            return '', 400
        roll = roll_dice(character, roll_str)
        db.session.add(roll)
        current_app.logger.info(f'User {current_user.id} as character {character.id} rolled str "{roll_str}"')
        db.session.commit()
    # Reuse the character resolved above instead of re-querying it.
    rolls = Roll.query.filter_by(character_id=character.id, post_id=None).all()
    return jsonify([r.to_dict() for r in rolls])
@blueprint.route('/post/<int:post_id>/edit', methods=['GET', 'POST'])
@login_required
def campaign_edit_post(post_id):
    """Edit the content of one of the current user's posts."""
    post = Post.query.get(post_id)
    # Guard clauses: the post must exist, belong to the caller, and still
    # be editable.
    if not post:
        flash('Could not find a post with that id', 'error')
        return redirect(url_for('.campaigns'))
    if not post.character.user_id == current_user.id:
        flash('That isn\'t your post', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=post.campaign_id))
    if not post.can_be_edited:
        flash('This post can no longer be edited', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=post.campaign_id))
    if request.method != 'POST':
        return render_template('campaign_edit_post.jinja2', post=post)
    post.content = request.form['content']
    db.session.commit()
    flash('Content saved')
    current_app.logger.info(f'User {current_user.id} edited post {post.id}')
    return redirect(url_for('.campaign_posts', campaign_id=post.campaign_id))
@blueprint.route('/campaign/<int:campaign_id>/join', methods=['GET', 'POST'])
@login_required
def campaign_join(campaign_id):
    """Apply to join a campaign with one of the current user's characters."""
    campaign = Campaign.query.get(campaign_id)
    # Guard against an unknown campaign id (the old code dereferenced
    # campaign.characters without checking).
    if not campaign:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    if not current_user.should_show_join_link(campaign):
        return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
    if request.method == 'POST':
        character = Character.query.get(int(request.form['character']))
        # The character id is client-supplied: it must exist and be owned
        # by the current user.
        if not character or character.user_id != current_user.id:
            flash('You are not the owner of that character', 'error')
            return redirect(url_for('.campaign_join', campaign_id=campaign_id))
        if character.campaign_id:
            if not character.campaign_approved:
                flash('Your membership to that campaign is pending', 'error')
                return redirect(url_for('.campaign_join', campaign_id=campaign_id))
            flash('That character is already a member of that campaign')
            return redirect(url_for('.campaign_join', campaign_id=campaign_id))
        # Character names are unique within a campaign.
        for other_character in campaign.characters:
            if other_character.name == character.name:
                flash('There is already a character in that campaign with that name', 'error')
                return redirect(url_for('.campaign_join', campaign_id=campaign_id))
        character.campaign_id = campaign_id
        character.campaign_join_note = request.form['notes']
        db.session.commit()
        current_app.logger.info(f'User {current_user.id} as character {character.id} requested to join campaign {campaign.id}')
        flash('Membership request submitted')
        return redirect(url_for('.campaign_join', campaign_id=campaign_id))
    return render_template('campaign_join.jinja2', campaign=campaign)
@blueprint.route('/campaign/<int:campaign_id>/dm_controls', methods=['GET', 'POST'])
@login_required
def campaign_dm_controls(campaign_id):
    """DM-only page: approve/deny applicants and edit the campaign name/description."""
    campaign = Campaign.query.get(campaign_id)
    if not campaign:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    if not current_user.is_dm_to_campaign(campaign):
        flash('You are not a DM of that campaign', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
    if request.method == 'POST':
        form_type = request.form['type']
        if form_type == 'applicant':
            character = Character.query.get(request.form['character_id'])
            # The applicant must exist, belong to this campaign, and still
            # be pending approval.
            if not character:
                flash('Unknown character', 'error')
                return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
            if not character.campaign_id == campaign_id:
                flash('That character has not applied for this campaign', 'error')
                return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
            if character.campaign_approved:
                flash('That character is already approved for this campaign', 'error')
                return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
            if request.form['action'] == 'accept':
                character.campaign_approved = True
                if character.user.email_for_accepted:
                    send_email(
                        [character.user.email],
                        'Your campaign join request has been approved',
                        f'Your request to join "{campaign.name}" has been approved for your character {character.name}'
                    )
                current_app.logger.info(f'User {current_user.id} accepted {character.id} to campaign {campaign.id}')
                flash('Character accepted')
            else:
                # Anything other than "accept" denies: the pending row is deleted.
                current_app.logger.info(f'User {current_user.id} denied {character.id} to campaign {campaign.id}')
                db.session.delete(character)
                flash('Character denied')
            db.session.commit()
            return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
        elif form_type == 'name_description':
            campaign.name = request.form['name']
            campaign.description = request.form['description']
            db.session.commit()
            # Fixed typo in the user-facing message ("desciption").
            flash('Campaign name/description updated')
            current_app.logger.info(f'User {current_user.id} updated campaign {campaign.id} name or description')
            return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
        else:
            flash('Unknown form submission', 'error')
            return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
    applicants = Character.query.filter_by(campaign_id=campaign_id, campaign_approved=False).all()
    members = Character.query.filter_by(campaign_id=campaign_id, campaign_approved=True).all()
    return render_template('campaign_dm_controls.jinja2', campaign=campaign, applicants=applicants, members=members)
@blueprint.route('/help')
def help():
    """Render the static help page."""
    # NOTE: shadows the builtin help(); kept so the endpoint name
    # 'base.help' stays stable for url_for callers.
    return render_template('help.jinja2')
@blueprint.route('/profile/login', methods=['GET', 'POST'])
def profile_login():
    """Log a user in and redirect them to a safe target page."""
    if request.method != 'POST':
        return render_template('login.jinja2')
    email = request.form['email']
    password = request.form['password']
    user = User.query.filter_by(email=email).first()
    if not (user and user.check_password(password)):
        current_app.logger.warning(f'Incorrect login for "{email}"')
        flash('Login failed', 'error')
        return redirect(url_for('.profile_login'))
    flash('Login successful')
    login_user(user, remember=True)
    current_app.logger.info(f'User {current_user.id} logged in')
    # Only follow a caller-supplied "next" URL when it is safe
    # (prevents open redirects).
    next_url = request.args.get('next')
    if next_url and not is_safe_url(next_url):
        return redirect(url_for('.campaigns'))
    return redirect(next_url or url_for('.campaigns'))
@blueprint.route('/profile/register', methods=['GET', 'POST'])
def profile_register():
    """Register a new account and log it in immediately."""
    if request.method == 'POST':
        email = request.form['email']
        if User.query.filter_by(email=email).first():
            flash('Email already in use', 'error')
            return redirect(url_for('.profile_register'))
        password = request.form['password']
        # Fixed message wording (was "Email does meet basic requirements").
        if not is_valid_email(email):
            flash('Email does not meet basic requirements', 'error')
            return redirect(url_for('.profile_register'))
        if len(password) < 5:
            flash('Password must be at least 5 characters long', 'error')
            return redirect(url_for('.profile_register'))
        new_user = User(email=email, date_joined=datetime.utcnow())
        new_user.set_password(password)
        db.session.add(new_user)
        db.session.commit()
        login_user(new_user, remember=True)
        current_app.logger.info(f'User {current_user.id} registered')
        flash('Login successful')
        return redirect(url_for('.campaigns'))
    return render_template('register.jinja2')
@blueprint.route('/profile/reset_password', methods=['GET', 'POST'])
def profile_reset_password():
    """Email a password reset link to a registered address."""
    if request.method == 'POST':
        email = request.form['email']
        user = User.query.filter_by(email=email).first()
        if user:
            key = create_password_reset_key(user.email)
            link = url_for('.profile_reset_password_confirm', email=user.email, key=key, _external=True)
            send_email([user.email], 'Password reset link', link)
            # Log the target account, not current_user: this route has no
            # @login_required, so current_user may be anonymous (no .id).
            current_app.logger.info(f'Password reset link requested for user {user.id}')
        # Always claim success so the form cannot be used to probe which
        # email addresses are registered.
        flash('Password reset link sent')
        return redirect(url_for('.profile_login'))
    return render_template('reset_password.jinja2')
@blueprint.route('/profile/reset_password/<email>/<key>', methods=['GET', 'POST'])
def profile_reset_password_confirm(email, key):
    """Set a new password for *email*, authorised by reset *key*."""
    user = User.query.filter_by(email=email).first()
    if not user:
        return redirect(url_for('.profile_login'))
    actual_key = get_password_reset_key(email)
    if not key == actual_key:
        flash('Wrong reset key', 'error')
        return redirect(url_for('.profile_login'))
    if request.method == 'POST':
        if not request.form['new_password'] == request.form['new_password_confirm']:
            flash('New passwords don\'t match', 'error')
            return redirect(url_for('.profile_reset_password_confirm', email=email, key=key))
        # Length 5 is allowed, matching the flash message and the register
        # view (the old check `not len > 5` rejected 5-character passwords).
        if len(request.form['new_password']) < 5:
            flash('Password must be at least 5 characters long', 'error')
            return redirect(url_for('.profile_reset_password_confirm', email=email, key=key))
        user.set_password(request.form['new_password'])
        db.session.commit()
        clear_password_reset_keys(email)
        # Log the account being reset, not current_user: this route is
        # reachable anonymously and AnonymousUserMixin has no .id.
        current_app.logger.info(f'User {user.id} updated password via reset link')
        flash('New password saved, please log in')
        return redirect(url_for('.profile_login'))
    return render_template('reset_password_confirm.jinja2', email=email, key=key)
@blueprint.route('/profile/characters', methods=['GET', 'POST'])
@login_required
def profile_characters():
    """Create, rename, re-tag and delete the current user's characters."""
    if request.method == 'POST':
        form_field = request.form.get('field')
        new_value = request.form.get('value')
        character_id = request.form.get('character_id', 0, type=int)
        if form_field == 'new_character':
            character = Character(user_id=current_user.id, name=new_value)
            db.session.add(character)
            db.session.commit()
            flash('New character created')
            current_app.logger.info(f'User {current_user.id} created new character with name "{character.name}"')
            return redirect(url_for('.profile_characters'))
        elif form_field == 'delete':
            character = Character.query.get(character_id)
            if not character:
                flash('Unknown character', 'error')
                return redirect(url_for('.profile_characters'))
            if not character.user_id == current_user.id:
                flash('You are not the owner of that character', 'error')
                return redirect(url_for('.profile_characters'))
            if character.campaign_approved:
                flash('You cannot delete a character that\'s part of a campaign', 'error')
                return redirect(url_for('.profile_characters'))
            current_app.logger.info(f'User {current_user.id} deleted character {character.id}')
            db.session.delete(character)
            db.session.commit()
            flash('Character deleted')
            return redirect(url_for('.profile_characters'))
        else:
            character = Character.query.get(character_id)
            if not character:
                flash('Unknown character', 'error')
                return redirect(url_for('.profile_characters'))
            if not character.user_id == current_user.id:
                flash('You are not the owner of that character', 'error')
                return redirect(url_for('.profile_characters'))
            if form_field == 'name':
                if character.name == 'DM':
                    flash('You cannot rename a DM character', 'error')
                    return redirect(url_for('.profile_characters'))
                if character.campaign_id:
                    # Keep names unique within a campaign. Bug fix: the old
                    # code read `other_character.character.name`, which raised
                    # AttributeError. The character being renamed is skipped
                    # so it is not blocked by its own current name.
                    for other_character in Character.query.filter_by(campaign_id=character.campaign_id):
                        if other_character.id != character.id and other_character.name == new_value:
                            flash('A character with that name is already in the same campaign', 'error')
                            return redirect(url_for('.profile_characters'))
                current_app.logger.info(f'User {current_user.id} set character {character.id} name to "{new_value}"')
                character.name = new_value
            elif form_field == 'tag':
                current_app.logger.info(f'User {current_user.id} set character {character.id} tag to "{new_value}"')
                character.tag = new_value
            else:
                flash('An error occurred', 'error')
            db.session.commit()
            return redirect(url_for('.profile_characters'))
    characters = Character.query.filter_by(user_id=current_user.id).all()
    return render_template('profile_characters.jinja2', characters=characters)
@blueprint.route('/profile/settings', methods=['GET', 'POST'])
@login_required
def profile_settings():
    """Update the current user's post, email, password and notification settings."""
    if request.method == 'POST':
        settings_type = request.form['settings_type']
        if settings_type == 'posts':
            current_user.posts_per_page = request.form['posts_per_page']
            current_user.posts_newest_first = request.form['posts_newest_first'] == 'newest'
            db.session.commit()
            current_app.logger.info(f'User {current_user.id} updated post settings')
            flash('Post settings saved')
            return redirect(url_for('base.profile_settings'))
        elif settings_type == 'email':
            new_email = request.form['email']
            if current_user.email == new_email:
                flash('That\'s already your email', 'error')
                return redirect(url_for('base.profile_settings'))
            if User.query.filter_by(email=new_email).first():
                flash('Email already in use', 'error')
                return redirect(url_for('base.profile_settings'))
            if is_valid_email(new_email):
                current_user.email = new_email
                db.session.commit()
                current_app.logger.info(f'User {current_user.id} updated email settings')
                flash('Email settings saved')
            else:
                # Fixed message wording (was "Email does meet basic requirements").
                flash('Email does not meet basic requirements', 'error')
            return redirect(url_for('base.profile_settings'))
        elif settings_type == 'password':
            if not current_user.check_password(request.form['old_password']):
                flash('Incorrect current password', 'error')
                return redirect(url_for('base.profile_settings'))
            if not request.form['new_password'] == request.form['new_password_confirm']:
                flash('New passwords don\'t match', 'error')
                return redirect(url_for('base.profile_settings'))
            # Length 5 is allowed, matching the flash message and the
            # register view (the old check `not len > 5` rejected it).
            if len(request.form['new_password']) < 5:
                flash('Password must be at least 5 characters long', 'error')
                return redirect(url_for('base.profile_settings'))
            current_user.set_password(request.form['new_password'])
            db.session.commit()
            current_app.logger.info(f'User {current_user.id} updated password')
            flash('New password saved')
            return redirect(url_for('base.profile_settings'))
        elif settings_type == 'email_notifications':
            current_user.email_for_accepted = 'email_for_accepted' in request.form
            current_user.email_for_dm_post = 'email_for_dm_post' in request.form
            current_user.email_for_any_post = 'email_for_any_post' in request.form
            db.session.commit()
            current_app.logger.info(f'User {current_user.id} updated email notification settings')
            flash('Email settings saved')
            return redirect(url_for('base.profile_settings'))
        else:
            flash('Unknown setting value', 'error')
            return redirect(url_for('base.profile_settings'))
    return render_template('profile_settings.jinja2')
@blueprint.route('/profile/logout')
def profile_logout():
    """Log the current user out and return to the login page."""
    # Use get_id(): this route has no @login_required, so current_user can
    # be anonymous, and AnonymousUserMixin has no .id attribute.
    current_app.logger.info(f'User {current_user.get_id()} logged out')
    logout_user()
    return redirect(url_for('.profile_login'))
def send_email(recipients, subject, body):
    """Queue an email for asynchronous delivery via the _send_email task."""
    recipient_list = ', '.join(recipients)
    current_app.logger.info(f'Sending email to "{recipient_list}" with subject "{subject}"')
    config = current_app.config
    return _send_email.apply_async(args=[
        config['EMAIL_API_KEY'],
        config['EMAIL_DOMAIN'],
        config['EMAIL_FROM'],
        recipients,
        subject,
        body
    ])
from flask import (
Blueprint,
current_app,
flash,
jsonify,
redirect,
render_template,
request,
url_for
)
from flask_login import (
current_user,
login_user,
login_required,
logout_user
)
from .models import (
Campaign,
Character,
Post,
User,
Roll
)
from .shared import (
csrf,
db
)
from .util import (
create_password_reset_key,
clear_password_reset_keys,
get_password_reset_key,
is_safe_url,
is_valid_email,
pagination_pages,
roll_dice,
send_email as _send_email
)
# Main application blueprint; templates resolve from the local 'templates' folder.
blueprint = Blueprint('base', __name__, template_folder='templates')
@blueprint.route('/')
def index():
    """Render the landing page."""
    return render_template('index.jinja2')
@blueprint.route('/campaigns')
def campaigns():
    """List every campaign."""
    all_campaigns = Campaign.query.all()
    return render_template('campaigns.jinja2', campaigns=all_campaigns)
@blueprint.route('/campaigns/create', methods=['GET', 'POST'])
@login_required
def campaign_create():
    """Create a new campaign plus its pre-approved 'DM' character."""
    if request.method == 'POST':
        # TODO check if name is unique
        new_campaign = Campaign(
            creator_user_id=current_user.id,
            name=request.form['name'],
            description=request.form['description'],
            date_created=datetime.utcnow()
        )
        # Every campaign gets a dedicated DM character owned by the creator.
        new_dm = Character(
            user_id=current_user.id,
            name='DM',
            tag='Dungeon Master',
            campaign_approved=True,
        )
        new_campaign.dm_character = new_dm
        db.session.add(new_campaign)
        db.session.add(new_dm)
        db.session.commit()
        # Second commit: the campaign id only exists after the first commit,
        # so the DM's campaign_id can only be assigned afterwards.
        new_dm.campaign_id = new_campaign.id
        db.session.commit()
        current_app.logger.info(f'User {current_user.id} created new campaign with name "{new_campaign.name}"')
        flash('New campaign created')
        return redirect(url_for('.campaigns'))
    campaigns = Campaign.query.all()
    return render_template('campaigns_create.jinja2', campaigns=campaigns)
@blueprint.route('/campaign/<int:campaign_id>/posts')
@blueprint.route('/campaign/<int:campaign_id>/posts/<int:page>')
def campaign_posts(campaign_id, page=1):
    """Show one page of a campaign's posts."""
    campaign = Campaign.query.get(campaign_id)
    if campaign is None:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    query = Post.query.filter_by(campaign_id=campaign_id)
    # Logged-in users get their saved preferences; visitors get defaults.
    if current_user.is_authenticated:
        per_page = current_user.posts_per_page
        if current_user.posts_newest_first:
            query = query.order_by(Post.id.desc())
    else:
        per_page = 20
    pagination = query.paginate(page=page, per_page=per_page)
    return render_template(
        'campaign_posts.jinja2',
        campaign=campaign,
        posts=pagination.items,
        pages=pagination.pages,
        page=page,
        pagination_pages=pagination_pages
    )
@blueprint.route('/campaign/<int:campaign_id>/info')
def campaign_info(campaign_id):
    """Render the info page for a single campaign."""
    campaign = Campaign.query.get(campaign_id)
    if campaign is None:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    return render_template('campaign_info.jinja2', campaign=campaign)
@blueprint.route('/campaign/<int:campaign_id>/new_post', methods=['POST'])
@login_required
def campaign_new_post(campaign_id):
    """Create a post in a campaign and email the other members.

    Any dice rolls the posting character made since their last post are
    attached to the new post.
    """
    campaign = Campaign.query.get(campaign_id)
    if not campaign:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    character = current_user.get_character_in_campaign(campaign)
    if not character:
        flash('You are not a member of that campaign', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
    post = Post(
        character_id=character.id,
        campaign_id=campaign.id,
        date=datetime.utcnow(),
        tag=character.tag,
        content=request.form['content']
    )
    db.session.add(post)
    # Attach rolls made by this character that are not yet tied to a post.
    pending_rolls = Roll.query.filter_by(character_id=character.id, post_id=None).all()
    for roll in pending_rolls:
        roll.post = post
    db.session.commit()
    current_app.logger.info(f'User {current_user.id} made new post in campaign {campaign.id}')
    flash('New post added')
    is_dm_post = character.campaign.dm_character_id == character.id
    # The link is identical for every recipient, so build it once instead of
    # once per loop iteration.
    link = url_for('.campaign_posts', campaign_id=campaign.id, _external=True)
    for other_character in campaign.characters:
        if other_character.id == character.id:
            continue
        if is_dm_post and other_character.user.email_for_dm_post:
            current_app.logger.info(f'Send DM post notify email to {other_character.user.id} for campaign {campaign.id}')
            send_email(
                [other_character.user.email],
                f'New DM post in "{campaign.name}"',
                f'The DM has made a new post in the campaign.\n\nCampaign link: {link}'
            )
        elif not is_dm_post and other_character.user.email_for_any_post:
            current_app.logger.info(f'Send generic post notify email to {other_character.user.id} for campaign {campaign.id}')
            send_email(
                [other_character.user.email],
                f'New post in "{campaign.name}"',
                f'{character.name} has made a new post in the campaign.\n\nCampaign link: {link}'
            )
    return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
@blueprint.route('/campaign/<int:campaign_id>/roll', methods=['GET', 'POST'])
@login_required
@csrf.exempt
def campaign_rolls(campaign_id):
    """Create (POST) and list dice rolls not yet attached to a post.

    Returns the caller's pending rolls as JSON; plain-text 404/403 errors
    when the campaign or membership is missing.
    """
    campaign = Campaign.query.get(campaign_id)
    if not campaign:
        return 'Could not find campaign with that id', 404
    character = current_user.get_character_in_campaign(campaign)
    if not character:
        return 'You are not a member of that campaign', 403
    if request.method == 'POST':
        roll_str = request.json.get('roll')
        if not roll_str:
            return '', 400
        roll = roll_dice(character, roll_str)
        db.session.add(roll)
        current_app.logger.info(f'User {current_user.id} as character {character.id} rolled str "{roll_str}"')
        db.session.commit()
    # Reuse the character resolved above instead of re-querying it.
    rolls = Roll.query.filter_by(character_id=character.id, post_id=None).all()
    return jsonify([r.to_dict() for r in rolls])
@blueprint.route('/post/<int:post_id>/edit', methods=['GET', 'POST'])
@login_required
def campaign_edit_post(post_id):
    """Edit the content of one of the current user's posts."""
    post = Post.query.get(post_id)
    # Guard clauses: the post must exist, belong to the caller, and still
    # be editable.
    if not post:
        flash('Could not find a post with that id', 'error')
        return redirect(url_for('.campaigns'))
    if not post.character.user_id == current_user.id:
        flash('That isn\'t your post', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=post.campaign_id))
    if not post.can_be_edited:
        flash('This post can no longer be edited', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=post.campaign_id))
    if request.method != 'POST':
        return render_template('campaign_edit_post.jinja2', post=post)
    post.content = request.form['content']
    db.session.commit()
    flash('Content saved')
    current_app.logger.info(f'User {current_user.id} edited post {post.id}')
    return redirect(url_for('.campaign_posts', campaign_id=post.campaign_id))
@blueprint.route('/campaign/<int:campaign_id>/join', methods=['GET', 'POST'])
@login_required
def campaign_join(campaign_id):
    """Apply to join a campaign with one of the current user's characters."""
    campaign = Campaign.query.get(campaign_id)
    # Guard against an unknown campaign id (the old code dereferenced
    # campaign.characters without checking).
    if not campaign:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    if not current_user.should_show_join_link(campaign):
        return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
    if request.method == 'POST':
        character = Character.query.get(int(request.form['character']))
        # The character id is client-supplied: it must exist and be owned
        # by the current user.
        if not character or character.user_id != current_user.id:
            flash('You are not the owner of that character', 'error')
            return redirect(url_for('.campaign_join', campaign_id=campaign_id))
        if character.campaign_id:
            if not character.campaign_approved:
                flash('Your membership to that campaign is pending', 'error')
                return redirect(url_for('.campaign_join', campaign_id=campaign_id))
            flash('That character is already a member of that campaign')
            return redirect(url_for('.campaign_join', campaign_id=campaign_id))
        # Character names are unique within a campaign.
        for other_character in campaign.characters:
            if other_character.name == character.name:
                flash('There is already a character in that campaign with that name', 'error')
                return redirect(url_for('.campaign_join', campaign_id=campaign_id))
        character.campaign_id = campaign_id
        character.campaign_join_note = request.form['notes']
        db.session.commit()
        current_app.logger.info(f'User {current_user.id} as character {character.id} requested to join campaign {campaign.id}')
        flash('Membership request submitted')
        return redirect(url_for('.campaign_join', campaign_id=campaign_id))
    return render_template('campaign_join.jinja2', campaign=campaign)
@blueprint.route('/campaign/<int:campaign_id>/dm_controls', methods=['GET', 'POST'])
@login_required
def campaign_dm_controls(campaign_id):
    """DM-only page: approve/deny applicants and edit the campaign name/description."""
    campaign = Campaign.query.get(campaign_id)
    if not campaign:
        flash('Could not find campaign with that id', 'error')
        return redirect(url_for('.campaigns'))
    if not current_user.is_dm_to_campaign(campaign):
        flash('You are not a DM of that campaign', 'error')
        return redirect(url_for('.campaign_posts', campaign_id=campaign_id))
    if request.method == 'POST':
        form_type = request.form['type']
        if form_type == 'applicant':
            character = Character.query.get(request.form['character_id'])
            # The applicant must exist, belong to this campaign, and still
            # be pending approval.
            if not character:
                flash('Unknown character', 'error')
                return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
            if not character.campaign_id == campaign_id:
                flash('That character has not applied for this campaign', 'error')
                return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
            if character.campaign_approved:
                flash('That character is already approved for this campaign', 'error')
                return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
            if request.form['action'] == 'accept':
                character.campaign_approved = True
                if character.user.email_for_accepted:
                    send_email(
                        [character.user.email],
                        'Your campaign join request has been approved',
                        f'Your request to join "{campaign.name}" has been approved for your character {character.name}'
                    )
                current_app.logger.info(f'User {current_user.id} accepted {character.id} to campaign {campaign.id}')
                flash('Character accepted')
            else:
                # Anything other than "accept" denies: the pending row is deleted.
                current_app.logger.info(f'User {current_user.id} denied {character.id} to campaign {campaign.id}')
                db.session.delete(character)
                flash('Character denied')
            db.session.commit()
            return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
        elif form_type == 'name_description':
            campaign.name = request.form['name']
            campaign.description = request.form['description']
            db.session.commit()
            # Fixed typo in the user-facing message ("desciption").
            flash('Campaign name/description updated')
            current_app.logger.info(f'User {current_user.id} updated campaign {campaign.id} name or description')
            return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
        else:
            flash('Unknown form submission', 'error')
            return redirect(url_for('.campaign_dm_controls', campaign_id=campaign_id))
    applicants = Character.query.filter_by(campaign_id=campaign_id, campaign_approved=False).all()
    members = Character.query.filter_by(campaign_id=campaign_id, campaign_approved=True).all()
    return render_template('campaign_dm_controls.jinja2', campaign=campaign, applicants=applicants, members=members)
@blueprint.route('/help')
def help():
    """Render the static help page."""
    # NOTE: shadows the builtin help(); kept so the endpoint name
    # 'base.help' stays stable for url_for callers.
    return render_template('help.jinja2')
@blueprint.route('/profile/login', methods=['GET', 'POST'])
def profile_login():
    """Log a user in and redirect them to a safe target page."""
    if request.method != 'POST':
        return render_template('login.jinja2')
    email = request.form['email']
    password = request.form['password']
    user = User.query.filter_by(email=email).first()
    if not (user and user.check_password(password)):
        current_app.logger.warning(f'Incorrect login for "{email}"')
        flash('Login failed', 'error')
        return redirect(url_for('.profile_login'))
    flash('Login successful')
    login_user(user, remember=True)
    current_app.logger.info(f'User {current_user.id} logged in')
    # Only follow a caller-supplied "next" URL when it is safe
    # (prevents open redirects).
    next_url = request.args.get('next')
    if next_url and not is_safe_url(next_url):
        return redirect(url_for('.campaigns'))
    return redirect(next_url or url_for('.campaigns'))
@blueprint.route('/profile/register', methods=['GET', 'POST'])
def profile_register():
    """Register a new account and log it in immediately."""
    if request.method == 'POST':
        email = request.form['email']
        if User.query.filter_by(email=email).first():
            flash('Email already in use', 'error')
            return redirect(url_for('.profile_register'))
        password = request.form['password']
        # Fixed message wording (was "Email does meet basic requirements").
        if not is_valid_email(email):
            flash('Email does not meet basic requirements', 'error')
            return redirect(url_for('.profile_register'))
        if len(password) < 5:
            flash('Password must be at least 5 characters long', 'error')
            return redirect(url_for('.profile_register'))
        new_user = User(email=email, date_joined=datetime.utcnow())
        new_user.set_password(password)
        db.session.add(new_user)
        db.session.commit()
        login_user(new_user, remember=True)
        current_app.logger.info(f'User {current_user.id} registered')
        flash('Login successful')
        return redirect(url_for('.campaigns'))
    return render_template('register.jinja2')
@blueprint.route('/profile/reset_password', methods=['GET', 'POST'])
def profile_reset_password():
    """Email a password-reset link; responds identically for unknown emails."""
    if request.method == 'POST':
        email = request.form['email']
        user = User.query.filter_by(email=email).first()
        if not user:
            # Same message as the success path so account existence can't be probed.
            flash('Password reset link sent')
            return redirect(url_for('.profile_login'))
        key = create_password_reset_key(user.email)
        link = url_for('.profile_reset_password_confirm', email=user.email, key=key, _external=True)
        send_email([user.email], 'Password reset link', link)
        flash('Password reset link sent')
        # Bug fix: log user.id, not current_user.id — this route is
        # unauthenticated, so current_user is anonymous and has no `id`.
        current_app.logger.info(f'User {user.id} requested password reset link')
        return redirect(url_for('.profile_login'))
    return render_template('reset_password.jinja2')
@blueprint.route('/profile/reset_password/<email>/<key>', methods=['GET', 'POST'])
def profile_reset_password_confirm(email, key):
    """Validate a reset key for *email* and let the user set a new password."""
    user = User.query.filter_by(email=email).first()
    if not user:
        return redirect(url_for('.profile_login'))
    actual_key = get_password_reset_key(email)
    # NOTE(review): consider hmac.compare_digest here for a timing-safe comparison.
    if not key == actual_key:
        flash('Wrong reset key', 'error')
        return redirect(url_for('.profile_reset_password_confirm', email=email, key=key)) if False else redirect(url_for('.profile_login'))
    if request.method == 'POST':
        if not request.form['new_password'] == request.form['new_password_confirm']:
            flash('New passwords don\'t match', 'error')
            return redirect(url_for('.profile_reset_password_confirm', email=email, key=key))
        # Bug fix: `< 5` matches the flashed message and the registration rule
        # (the old `not len(...) > 5` also rejected 5-character passwords).
        if len(request.form['new_password']) < 5:
            flash('Password must be at least 5 characters long', 'error')
            return redirect(url_for('.profile_reset_password_confirm', email=email, key=key))
        user.set_password(request.form['new_password'])
        db.session.commit()
        clear_password_reset_keys(email)
        # Bug fix: log user.id — the requester is not logged in, so
        # current_user is anonymous and has no `id` attribute.
        current_app.logger.info(f'User {user.id} updated password via reset link')
        flash('New password saved, please log in')
        return redirect(url_for('.profile_login'))
    return render_template('reset_password_confirm.jinja2', email=email, key=key)
@blueprint.route('/profile/characters', methods=['GET', 'POST'])
@login_required
def profile_characters():
    """List the current user's characters and handle create/rename/tag/delete."""
    if request.method == 'POST':
        form_field = request.form.get('field')
        new_value = request.form.get('value')
        character_id = request.form.get('character_id', 0, type=int)
        if form_field == 'new_character':
            character = Character(user_id=current_user.id, name=new_value)
            db.session.add(character)
            db.session.commit()
            flash('New character created')
            current_app.logger.info(f'User {current_user.id} created new character with name "{character.name}"')
            return redirect(url_for('.profile_characters'))
        elif form_field == 'delete':
            character = Character.query.get(character_id)
            if not character:
                flash('Unknown character', 'error')
                return redirect(url_for('.profile_characters'))
            if not character.user_id == current_user.id:
                flash('You are not the owner of that character', 'error')
                return redirect(url_for('.profile_characters'))
            if character.campaign_approved:
                flash('You cannot delete a character that\'s part of a campaign', 'error')
                return redirect(url_for('.profile_characters'))
            current_app.logger.info(f'User {current_user.id} deleted character {character.id}')
            db.session.delete(character)
            db.session.commit()
            flash('Character deleted')
            return redirect(url_for('.profile_characters'))
        else:
            character = Character.query.get(character_id)
            if not character:
                flash('Unknown character', 'error')
                return redirect(url_for('.profile_characters'))
            if not character.user_id == current_user.id:
                flash('You are not the owner of that character', 'error')
                return redirect(url_for('.profile_characters'))
            if form_field == 'name':
                if character.name == 'DM':
                    flash('You cannot rename a DM character', 'error')
                    return redirect(url_for('.profile_characters'))
                if character.campaign_id:
                    for other_character in Character.query.filter_by(campaign_id=character.campaign_id):
                        # Bug fix: the original read `other_character.character.name`
                        # (AttributeError). Also skip the character being renamed so
                        # re-saving its current name is not rejected.
                        if other_character.id != character.id and other_character.name == new_value:
                            flash('A character with that name is already in the same campaign', 'error')
                            return redirect(url_for('.profile_characters'))
                current_app.logger.info(f'User {current_user.id} set character {character.id} name to "{new_value}"')
                character.name = new_value
            elif form_field == 'tag':
                current_app.logger.info(f'User {current_user.id} set character {character.id} tag to "{new_value}"')
                character.tag = new_value
            else:
                flash('An error occurred', 'error')
            db.session.commit()
            return redirect(url_for('.profile_characters'))
    characters = Character.query.filter_by(user_id=current_user.id).all()
    return render_template('profile_characters.jinja2', characters=characters)
@blueprint.route('/profile/settings', methods=['GET', 'POST'])
@login_required
def profile_settings():
    """Render and update the user's settings (posts, email, password, notifications)."""
    if request.method == 'POST':
        settings_type = request.form['settings_type']
        if settings_type == 'posts':
            current_user.posts_per_page = request.form['posts_per_page']
            current_user.posts_newest_first = request.form['posts_newest_first'] == 'newest'
            db.session.commit()
            current_app.logger.info(f'User {current_user.id} updated post settings')
            flash('Post settings saved')
            return redirect(url_for('base.profile_settings'))
        elif settings_type == 'email':
            new_email = request.form['email']
            if current_user.email == new_email:
                flash('That\'s already your email', 'error')
                return redirect(url_for('base.profile_settings'))
            if User.query.filter_by(email=new_email).first():
                flash('Email already in use', 'error')
                return redirect(url_for('base.profile_settings'))
            if is_valid_email(new_email):
                current_user.email = new_email
                db.session.commit()
                current_app.logger.info(f'User {current_user.id} updated email settings')
                flash('Email settings saved')
            else:
                # Bug fix: the message previously read "Email does meet basic requirements".
                flash('Email does not meet basic requirements', 'error')
            return redirect(url_for('base.profile_settings'))
        elif settings_type == 'password':
            if not current_user.check_password(request.form['old_password']):
                flash('Incorrect current password', 'error')
                return redirect(url_for('base.profile_settings'))
            if not request.form['new_password'] == request.form['new_password_confirm']:
                flash('New passwords don\'t match', 'error')
                return redirect(url_for('base.profile_settings'))
            # Bug fix: `< 5` matches the flashed message and the registration rule
            # (the old `not len(...) > 5` also rejected 5-character passwords).
            if len(request.form['new_password']) < 5:
                flash('Password must be at least 5 characters long', 'error')
                return redirect(url_for('base.profile_settings'))
            current_user.set_password(request.form['new_password'])
            db.session.commit()
            current_app.logger.info(f'User {current_user.id} updated password')
            flash('New password saved')
            return redirect(url_for('base.profile_settings'))
        elif settings_type == 'email_notifications':
            current_user.email_for_accepted = 'email_for_accepted' in request.form
            current_user.email_for_dm_post = 'email_for_dm_post' in request.form
            current_user.email_for_any_post = 'email_for_any_post' in request.form
            db.session.commit()
            current_app.logger.info(f'User {current_user.id} updated email notification settings')
            flash('Email settings saved')
            return redirect(url_for('base.profile_settings'))
        else:
            flash('Unknown setting value', 'error')
            return redirect(url_for('base.profile_settings'))
    return render_template('profile_settings.jinja2')
@blueprint.route('/profile/logout')
def profile_logout():
    """Log the current user out and return to the login page."""
    # Bug fix: the route is not @login_required, so guard against anonymous
    # visitors — their current_user has no `id` attribute.
    if current_user.is_authenticated:
        current_app.logger.info(f'User {current_user.id} logged out')
    logout_user()
    return redirect(url_for('.profile_login'))
def send_email(recipients, subject, body):
    """Queue an email for asynchronous delivery and return the task handle."""
    recipient_list = ', '.join(recipients)
    current_app.logger.info(f'Sending email to "{recipient_list}" with subject "{subject}"')
    config = current_app.config
    task_args = [
        config['EMAIL_API_KEY'],
        config['EMAIL_DOMAIN'],
        config['EMAIL_FROM'],
        recipients,
        subject,
        body,
    ]
    return _send_email.apply_async(args=task_args)
import json
import numpy as np
class EarlyStopping:
    """Track a validation metric and signal when training should stop.

    Called once per evaluation with a validation log and a test log; it
    remembers the best validation value (per ``is_better_fn``) and trips
    ``should_stop`` after ``patience`` evaluations without an improvement
    of at least ``min_delta``.
    """

    def __init__(self, patience, name, is_better_fn):
        self.patience = patience
        # Bug fix: the ``name`` argument was previously ignored and the
        # metric key was hard-coded to 'main_cost/avg'.
        self.name = name
        self.is_better_fn = is_better_fn
        self.metric_class_name = is_better_fn.__self__.__class__.__name__
        self.best = None  # best VALIDATION
        self.best_call_counter = 0  # best VALIDATION epoch
        self.best_chpt = None  # address to best VALIDATION checkpoint, if provided
        self.corresponding_test = None  # TEST value for the best VALIDATION
        # Bug fix: initialize here so get_status() works before any __call__.
        self.corresponding_valid = None  # VALIDATION log for the best value
        self.should_stop = False
        self.patience_counter = 0
        self.call_counter = 0
        self.anynan = False
        self.min_delta = 0.05

    def reset_patience(self):
        """Reset the no-improvement counter (called on every new best)."""
        self.patience_counter = 0

    def reduce_patience(self):
        """Count one evaluation without improvement; trip the stop flag."""
        self.patience_counter += 1
        if self.patience_counter >= self.patience:
            self.should_stop = True

    def __call__(self, vlog, tlog, chpt_str=''):
        """Update state from one evaluation.

        Args:
            vlog: validation log dict containing key ``self.name``.
            tlog: test log recorded alongside a new best validation value.
            chpt_str: optional checkpoint path for this evaluation.
        """
        if self.should_stop:
            return
        if np.isnan(vlog[self.name]):
            self.anynan = True
            self.reduce_patience()
            return
        if self.best is None:  # keep separate from next condition
            self._record_best(vlog, tlog, chpt_str)
        elif self.is_better_fn(vlog[self.name] + self.min_delta, self.best):
            self._record_best(vlog, tlog, chpt_str)
        else:
            self.reduce_patience()
        self.call_counter += 1
        print('Patience count: ', self.patience_counter)

    def _record_best(self, vlog, tlog, chpt_str):
        """Remember a new best validation value and reset patience."""
        self.best = vlog[self.name]
        self.best_call_counter = self.call_counter
        self.best_chpt = chpt_str
        self.corresponding_test = tlog
        self.corresponding_valid = vlog
        self.reset_patience()

    def save(self, _file):
        """Write the JSON status report to ``_file``."""
        with open(_file, 'w') as f:
            f.write(json.dumps(self.get_status(), indent=4))

    def get_status(self):
        """Return a JSON-serializable snapshot of the tracker state."""
        return dict(
            name=self.name,
            best=self.best,
            best_call_counter=self.best_call_counter,
            best_chpt=self.best_chpt,
            corresponding_test=self.corresponding_test,
            corresponding_valid=self.corresponding_valid,
            should_stop=self.should_stop,
            patience_counter=self.patience_counter,
            call_counter=self.call_counter,
            anynan=self.anynan,
            metric_class_name=self.metric_class_name,
        )
class EarlyStoppingVAE:
    """Early stopping over two loss streams; stop only when both stall.

    Each loss has its own patience counter; a counter resets whenever its
    loss improves by more than the matching ``min_delta``.
    """

    def __init__(self, patience=3, min_delta1=1, min_delta2=0.1):
        self.patience = patience
        self.min_delta1 = min_delta1
        self.min_delta2 = min_delta2
        self.patience_cnt1 = 0
        self.prev_loss_val1 = 200000  # sentinel: any realistic first loss improves on it
        self.patience_cnt2 = 0
        self.prev_loss_val2 = 200000

    def stop(self, loss_val1, loss_val2):
        """Record one evaluation; return True when both losses have stalled."""
        improved1 = self.prev_loss_val1 - loss_val1 > self.min_delta1
        if improved1:
            self.patience_cnt1 = 0
            self.prev_loss_val1 = loss_val1
        else:
            self.patience_cnt1 += 1
        improved2 = self.prev_loss_val2 - loss_val2 > self.min_delta2
        if improved2:
            self.patience_cnt2 = 0
            self.prev_loss_val2 = loss_val2
        else:
            self.patience_cnt2 += 1
        print('Patience count1, count2: ', self.patience_cnt1, self.patience_cnt2)
        return self.patience_cnt1 > self.patience and self.patience_cnt2 > self.patience
import numpy as np
class EarlyStopping:
    """Track a validation metric and signal when training should stop.

    Called once per evaluation with a validation log and a test log; it
    remembers the best validation value (per ``is_better_fn``) and trips
    ``should_stop`` after ``patience`` evaluations without an improvement
    of at least ``min_delta``.
    """

    def __init__(self, patience, name, is_better_fn):
        self.patience = patience
        # Bug fix: the ``name`` argument was previously ignored and the
        # metric key was hard-coded to 'main_cost/avg'.
        self.name = name
        self.is_better_fn = is_better_fn
        self.metric_class_name = is_better_fn.__self__.__class__.__name__
        self.best = None  # best VALIDATION
        self.best_call_counter = 0  # best VALIDATION epoch
        self.best_chpt = None  # address to best VALIDATION checkpoint, if provided
        self.corresponding_test = None  # TEST value for the best VALIDATION
        # Bug fix: initialize here so get_status() works before any __call__.
        self.corresponding_valid = None  # VALIDATION log for the best value
        self.should_stop = False
        self.patience_counter = 0
        self.call_counter = 0
        self.anynan = False
        self.min_delta = 0.05

    def reset_patience(self):
        """Reset the no-improvement counter (called on every new best)."""
        self.patience_counter = 0

    def reduce_patience(self):
        """Count one evaluation without improvement; trip the stop flag."""
        self.patience_counter += 1
        if self.patience_counter >= self.patience:
            self.should_stop = True

    def __call__(self, vlog, tlog, chpt_str=''):
        """Update state from one evaluation.

        Args:
            vlog: validation log dict containing key ``self.name``.
            tlog: test log recorded alongside a new best validation value.
            chpt_str: optional checkpoint path for this evaluation.
        """
        if self.should_stop:
            return
        if np.isnan(vlog[self.name]):
            self.anynan = True
            self.reduce_patience()
            return
        if self.best is None:  # keep separate from next condition
            self._record_best(vlog, tlog, chpt_str)
        elif self.is_better_fn(vlog[self.name] + self.min_delta, self.best):
            self._record_best(vlog, tlog, chpt_str)
        else:
            self.reduce_patience()
        self.call_counter += 1
        print('Patience count: ', self.patience_counter)

    def _record_best(self, vlog, tlog, chpt_str):
        """Remember a new best validation value and reset patience."""
        self.best = vlog[self.name]
        self.best_call_counter = self.call_counter
        self.best_chpt = chpt_str
        self.corresponding_test = tlog
        self.corresponding_valid = vlog
        self.reset_patience()

    def save(self, _file):
        """Write the JSON status report to ``_file``."""
        with open(_file, 'w') as f:
            f.write(json.dumps(self.get_status(), indent=4))

    def get_status(self):
        """Return a JSON-serializable snapshot of the tracker state."""
        return dict(
            name=self.name,
            best=self.best,
            best_call_counter=self.best_call_counter,
            best_chpt=self.best_chpt,
            corresponding_test=self.corresponding_test,
            corresponding_valid=self.corresponding_valid,
            should_stop=self.should_stop,
            patience_counter=self.patience_counter,
            call_counter=self.call_counter,
            anynan=self.anynan,
            metric_class_name=self.metric_class_name,
        )
class EarlyStoppingVAE:
    """Early stopping over two loss streams; stop only when both stall.

    Each loss has its own patience counter; a counter resets whenever its
    loss improves by more than the matching ``min_delta``.
    """

    def __init__(self, patience=3, min_delta1=1, min_delta2=0.1):
        self.patience = patience
        self.min_delta1 = min_delta1
        self.min_delta2 = min_delta2
        self.patience_cnt1 = 0
        self.prev_loss_val1 = 200000  # sentinel: any realistic first loss improves on it
        self.patience_cnt2 = 0
        self.prev_loss_val2 = 200000

    def stop(self, loss_val1, loss_val2):
        """Record one evaluation; return True when both losses have stalled."""
        improved1 = self.prev_loss_val1 - loss_val1 > self.min_delta1
        if improved1:
            self.patience_cnt1 = 0
            self.prev_loss_val1 = loss_val1
        else:
            self.patience_cnt1 += 1
        improved2 = self.prev_loss_val2 - loss_val2 > self.min_delta2
        if improved2:
            self.patience_cnt2 = 0
            self.prev_loss_val2 = loss_val2
        else:
            self.patience_cnt2 += 1
        print('Patience count1, count2: ', self.patience_cnt1, self.patience_cnt2)
        return self.patience_cnt1 > self.patience and self.patience_cnt2 > self.patience
import click
import json
import logging
import pandas as pd
from tqdm import tqdm
import sys
# Maps the NanoARG `origin` code on each gene hit to its display group name.
origins = {
    1: 'ARGs',
    2: 'MGEs',
    4: 'MRGs',
    3: 'Functional Genes'
}

# NCBI taxonomy ids flagged as pathogens; only key membership is tested
# downstream, the values are human-readable labels.
# Bug fix: the original literal repeated keys 1352 and 1280 — duplicate
# dict keys are silently collapsed, so each id is listed once here.
pathogens = {
    1352: 'Enterococcus faecium',
    1280: 'Staphylococcus aureus',
    573: 'Klebsiella pneumonia',
    470: 'Acinetobacter baumannii',
    287: 'Pseudomonas aeruginosa',
    42895: 'Enterobacter spp.',
    543: 'Enterobacteriaceae',
    210: 'Helicobacter pylori',
    205: 'Campylobacter sp',
    590: 'Salmonellae',
    485: 'Neisseria gonorrhoeae',
    1313: 'Streptococcus pneumoniae',
    727: 'Haemophilus influenzae',
    625: 'Shigella sp'
}
def traverse_data(data):
    """Flatten NanoARG read records into one dict per annotated gene.

    Args:
        data: list of read records, each with a 'read' entry (taxonomy)
            and a 'data' list of gene hits.

    Yields:
        dict: one gene record with read-level taxonomy merged in and
        display-only fields removed.
    """
    for read in tqdm(data):
        taxonomy = read['read'][0]
        for gene in read['data']:
            gene['gene_id'] = gene['metadata'][0]
            gene['category'] = gene['metadata'][3]
            gene['gene_name'] = gene['metadata'][4]
            gene['read'] = gene['block_id']
            gene['group'] = origins[gene['origin']]
            # MRG and functional-gene hits carry their display name in the
            # category field rather than metadata[4].
            if gene['group'] in ('MRGs', 'Functional Genes'):
                gene['gene_name'] = gene['category']
            gene['NCBI_taxa_id'] = taxonomy['taxa_id']
            gene['taxa_centrifuge_score'] = taxonomy['taxa_score']
            gene['species'] = taxonomy['taxa_species']
            # Bug fix: replaced assert inside a bare except (which also hid
            # unrelated errors) with an explicit membership test; only a
            # non-numeric taxa id falls back to "not a pathogen".
            try:
                gene['is_pathogen'] = 1 if int(gene['NCBI_taxa_id']) in pathogens else 0
            except (TypeError, ValueError):
                gene['is_pathogen'] = 0
            # Drop visualization-only fields before yielding.
            for field in ('metadata', 'block_id', 'color', 'origin',
                          'stroke_width', 'total_reads', 'value', 'score',
                          'position'):
                del gene[field]
            yield gene
@click.command()
@click.option('--input-file', default='', help='JSON file downloaded from NanoARG')
@click.option('--output-file', default='', help='file with the mapping table as shown in the genes mapped to nanopore reads')
def mapping_table(input_file, output_file):
    '''
    Generate table of genes mapped to nanopore reads

    This tool will generate the full table named "genes
    mapped to nanopore reads" under the NanoARG website.

    https://bench.cs.vt.edu/nanoarg/
    '''
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info('loading input file ' + input_file)
    # Bug fix: close the input file (json.load(open(...)) leaked the handle).
    with open(input_file) as handle:
        data = json.load(handle)
    log.info('traversing file ' + input_file)
    reads = pd.DataFrame(traverse_data(data[0]))
    # Select and order the output columns of the NanoARG mapping table.
    dataset = reads[
        [
            'read',
            'gene_id',
            'gene_name',
            'group',
            'category',
            'start',
            'end',
            'strand',
            'identity',
            'bitscore',
            'evalue',
            'NCBI_taxa_id',
            'taxa_centrifuge_score',
            'species',
            'coverage',
            'is_pathogen'
        ]
    ]
    log.info('Storing table to ' + output_file)
    dataset.to_csv(output_file, index=False)
import json
import logging
import pandas as pd
from tqdm import tqdm
import sys
# Maps the NanoARG `origin` code on each gene hit to its display group name.
origins = {
    1: 'ARGs',
    2: 'MGEs',
    4: 'MRGs',
    3: 'Functional Genes'
}

# NCBI taxonomy ids flagged as pathogens; only key membership is tested
# downstream, the values are human-readable labels.
# Bug fix: the original literal repeated keys 1352 and 1280 — duplicate
# dict keys are silently collapsed, so each id is listed once here.
pathogens = {
    1352: 'Enterococcus faecium',
    1280: 'Staphylococcus aureus',
    573: 'Klebsiella pneumonia',
    470: 'Acinetobacter baumannii',
    287: 'Pseudomonas aeruginosa',
    42895: 'Enterobacter spp.',
    543: 'Enterobacteriaceae',
    210: 'Helicobacter pylori',
    205: 'Campylobacter sp',
    590: 'Salmonellae',
    485: 'Neisseria gonorrhoeae',
    1313: 'Streptococcus pneumoniae',
    727: 'Haemophilus influenzae',
    625: 'Shigella sp'
}
def traverse_data(data):
    """Flatten NanoARG read records into one dict per annotated gene.

    Args:
        data: list of read records, each with a 'read' entry (taxonomy)
            and a 'data' list of gene hits.

    Yields:
        dict: one gene record with read-level taxonomy merged in and
        display-only fields removed.
    """
    for read in tqdm(data):
        taxonomy = read['read'][0]
        for gene in read['data']:
            gene['gene_id'] = gene['metadata'][0]
            gene['category'] = gene['metadata'][3]
            gene['gene_name'] = gene['metadata'][4]
            gene['read'] = gene['block_id']
            gene['group'] = origins[gene['origin']]
            # MRG and functional-gene hits carry their display name in the
            # category field rather than metadata[4].
            if gene['group'] in ('MRGs', 'Functional Genes'):
                gene['gene_name'] = gene['category']
            gene['NCBI_taxa_id'] = taxonomy['taxa_id']
            gene['taxa_centrifuge_score'] = taxonomy['taxa_score']
            gene['species'] = taxonomy['taxa_species']
            # Bug fix: replaced assert inside a bare except (which also hid
            # unrelated errors) with an explicit membership test; only a
            # non-numeric taxa id falls back to "not a pathogen".
            try:
                gene['is_pathogen'] = 1 if int(gene['NCBI_taxa_id']) in pathogens else 0
            except (TypeError, ValueError):
                gene['is_pathogen'] = 0
            # Drop visualization-only fields before yielding.
            for field in ('metadata', 'block_id', 'color', 'origin',
                          'stroke_width', 'total_reads', 'value', 'score',
                          'position'):
                del gene[field]
            yield gene
@click.command()
@click.option('--input-file', default='', help='JSON file downloaded from NanoARG')
@click.option('--output-file', default='', help='file with the mapping table as shown in the genes mapped to nanopore reads')
def mapping_table(input_file, output_file):
    '''
    Generate table of genes mapped to nanopore reads

    This tool will generate the full table named "genes
    mapped to nanopore reads" under the NanoARG website.

    https://bench.cs.vt.edu/nanoarg/
    '''
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(levelname)s %(asctime)s - %(message)s"
    )
    log = logging.getLogger()
    log.info('loading input file ' + input_file)
    # Bug fix: close the input file (json.load(open(...)) leaked the handle).
    with open(input_file) as handle:
        data = json.load(handle)
    log.info('traversing file ' + input_file)
    reads = pd.DataFrame(traverse_data(data[0]))
    # Select and order the output columns of the NanoARG mapping table.
    dataset = reads[
        [
            'read',
            'gene_id',
            'gene_name',
            'group',
            'category',
            'start',
            'end',
            'strand',
            'identity',
            'bitscore',
            'evalue',
            'NCBI_taxa_id',
            'taxa_centrifuge_score',
            'species',
            'coverage',
            'is_pathogen'
        ]
    ]
    log.info('Storing table to ' + output_file)
    dataset.to_csv(output_file, index=False)
from Qt_Viewer import Qt_Viewer
from pvaccess import *
from threading import Event
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QObject,pyqtSignal
import numpy as np
import sys
class PVAPYProvider(QObject):
    """Bridge between a pvAccess channel monitor and a Qt image viewer.

    pvapy callbacks arrive on a non-Qt thread; they hand their payload to
    the Qt main thread via signals and block on ``callbackDoneEvent``
    until the viewer has consumed it.
    """

    monitorCallbacksignal = pyqtSignal()
    connectCallbacksignal = pyqtSignal()

    def __init__(self):
        QObject.__init__(self)
        self.monitordata = None   # latest monitor payload awaiting delivery
        self.connectdata = None   # latest connection event awaiting delivery
        self.firstStart = True
        self.isConnected = False
        self.channelName = 'TPYqtpeakimageRecord'
        self.connectCallbacksignal.connect(self.connectionCallback)
        self.monitorCallbacksignal.connect(self.monitorCallback)
        self.callbackDoneEvent = Event()
        self.callbackDoneEvent.clear()
        self.channel = None
        self.isStarted = False

    def setChannelName(self, channelName):
        """Switch to a new channel name, stopping any active monitor first."""
        if self.channel is not None and self.isStarted:
            self.stop()
        self.channel = None
        self.firstStart = True
        self.channelName = channelName

    def putInt(self, value, request):
        """Write *value* to the channel; report an error if no channel exists."""
        if self.channel is None:
            data = dict()
            data["exception"] = "channel is None"
            self.viewerCallback(data)
            return
        self.channel.put(value, request)

    def getChannelName(self):
        return self.channelName

    def start(self):
        """Create the channel on first use and start monitoring it."""
        if self.firstStart:
            self.channel = Channel(self.channelName)
            self.firstStart = False
        self.channel.setConnectionCallback(self.pvapyconnectioncallback)
        self.channel.monitor(self.pvapymonitorcallback,
                             'field(argument{format,height,width},result.value)')
        # Bug fix: isStarted was never set, so setChannelName() could not
        # stop a running monitor.
        self.isStarted = True

    def stop(self):
        self.isStarted = False
        if self.channel is None:
            return
        self.channel.stopMonitor()

    def viewerCallback(self, arg):
        self.viewer.callback(arg)

    def pvapyconnectioncallback(self, arg):
        """pvapy thread: forward a connect/disconnect event to the Qt thread."""
        data = dict()
        if arg == True:
            data["status"] = "connected"
        elif arg == False:
            data["status"] = "disconnected"
        else:
            data["exception"] = "bad pvapy connection callback =" + str(arg)
        self.connectdata = data
        self.connectCallbacksignal.emit()
        # Block until the Qt thread has delivered the event to the viewer.
        self.callbackDoneEvent.wait()
        self.callbackDoneEvent.clear()

    def connectionCallback(self):
        """Qt thread: deliver the pending connection event to the viewer."""
        arg = self.connectdata
        self.connectdata = None
        self.viewerCallback(arg)
        self.callbackDoneEvent.set()

    def pvapymonitorcallback(self, arg):
        """pvapy thread: forward monitor data to the Qt thread."""
        data = {
            "format": arg['argument.format'],
            "height": arg['argument.height'],
            "width": arg['argument.width'],
            "value": arg['result.value'],
        }
        if self.monitordata is None:
            self.monitordata = data
            self.monitorCallbacksignal.emit()
            self.callbackDoneEvent.wait()
            self.callbackDoneEvent.clear()
        else:
            # Bug fix: the original referenced an undefined local `data`
            # here (NameError); a still-pending payload is simply replaced.
            self.monitordata = data

    def monitorCallback(self):
        """Qt thread: deliver the pending monitor payload to the viewer."""
        arg = dict()
        try:
            arg['value'] = self.monitordata
        except Exception as error:
            arg["exception"] = repr(error)
        self.viewerCallback(arg)
        self.monitordata = None
        self.callbackDoneEvent.set()
if __name__ == '__main__':
    app = QApplication(list())
    # Bug fix: use a distinct variable name — the original rebound the class
    # name PVAPYProvider to an instance, shadowing the class itself.
    provider = PVAPYProvider()
    nargs = len(sys.argv)
    if nargs >= 2:
        channelName = sys.argv[1]
        provider.setChannelName(channelName)
    provider.viewer = Qt_Viewer(provider, "PVAPY")
    sys.exit(app.exec_())
from Qt_Viewer import Qt_Viewer
from pvaccess import *
from threading import Event
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QObject,pyqtSignal
import numpy as np
import sys
class PVAPYProvider(QObject):
    """Bridge between a pvAccess channel monitor and a Qt image viewer.

    pvapy callbacks arrive on a non-Qt thread; they hand their payload to
    the Qt main thread via signals and block on ``callbackDoneEvent``
    until the viewer has consumed it.
    """

    monitorCallbacksignal = pyqtSignal()
    connectCallbacksignal = pyqtSignal()

    def __init__(self):
        QObject.__init__(self)
        self.monitordata = None   # latest monitor payload awaiting delivery
        self.connectdata = None   # latest connection event awaiting delivery
        self.firstStart = True
        self.isConnected = False
        self.channelName = 'TPYqtpeakimageRecord'
        self.connectCallbacksignal.connect(self.connectionCallback)
        self.monitorCallbacksignal.connect(self.monitorCallback)
        self.callbackDoneEvent = Event()
        self.callbackDoneEvent.clear()
        self.channel = None
        self.isStarted = False

    def setChannelName(self, channelName):
        """Switch to a new channel name, stopping any active monitor first."""
        if self.channel is not None and self.isStarted:
            self.stop()
        self.channel = None
        self.firstStart = True
        self.channelName = channelName

    def putInt(self, value, request):
        """Write *value* to the channel; report an error if no channel exists."""
        if self.channel is None:
            data = dict()
            data["exception"] = "channel is None"
            self.viewerCallback(data)
            return
        self.channel.put(value, request)

    def getChannelName(self):
        return self.channelName

    def start(self):
        """Create the channel on first use and start monitoring it."""
        if self.firstStart:
            self.channel = Channel(self.channelName)
            self.firstStart = False
        self.channel.setConnectionCallback(self.pvapyconnectioncallback)
        self.channel.monitor(self.pvapymonitorcallback,
                             'field(argument{format,height,width},result.value)')
        # Bug fix: isStarted was never set, so setChannelName() could not
        # stop a running monitor.
        self.isStarted = True

    def stop(self):
        self.isStarted = False
        if self.channel is None:
            return
        self.channel.stopMonitor()

    def viewerCallback(self, arg):
        self.viewer.callback(arg)

    def pvapyconnectioncallback(self, arg):
        """pvapy thread: forward a connect/disconnect event to the Qt thread."""
        data = dict()
        if arg == True:
            data["status"] = "connected"
        elif arg == False:
            data["status"] = "disconnected"
        else:
            data["exception"] = "bad pvapy connection callback =" + str(arg)
        self.connectdata = data
        self.connectCallbacksignal.emit()
        # Block until the Qt thread has delivered the event to the viewer.
        self.callbackDoneEvent.wait()
        self.callbackDoneEvent.clear()

    def connectionCallback(self):
        """Qt thread: deliver the pending connection event to the viewer."""
        arg = self.connectdata
        self.connectdata = None
        self.viewerCallback(arg)
        self.callbackDoneEvent.set()

    def pvapymonitorcallback(self, arg):
        """pvapy thread: forward monitor data to the Qt thread."""
        data = {
            "format": arg['argument.format'],
            "height": arg['argument.height'],
            "width": arg['argument.width'],
            "value": arg['result.value'],
        }
        if self.monitordata is None:
            self.monitordata = data
            self.monitorCallbacksignal.emit()
            self.callbackDoneEvent.wait()
            self.callbackDoneEvent.clear()
        else:
            # Bug fix: the original referenced an undefined local `data`
            # here (NameError); a still-pending payload is simply replaced.
            self.monitordata = data

    def monitorCallback(self):
        """Qt thread: deliver the pending monitor payload to the viewer."""
        arg = dict()
        try:
            arg['value'] = self.monitordata
        except Exception as error:
            arg["exception"] = repr(error)
        self.viewerCallback(arg)
        self.monitordata = None
        self.callbackDoneEvent.set()
if __name__ == '__main__':
    app = QApplication(list())
    # Bug fix: use a distinct variable name — the original rebound the class
    # name PVAPYProvider to an instance, shadowing the class itself.
    provider = PVAPYProvider()
    nargs = len(sys.argv)
    if nargs >= 2:
        channelName = sys.argv[1]
        provider.setChannelName(channelName)
    provider.viewer = Qt_Viewer(provider, "PVAPY")
    sys.exit(app.exec_())
import logging
LOGGER = logging.getLogger(__name__)
def sql_tables(bosslet_config):
    """
    List all tables in sql.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object

    Returns:
        tables (list): All rows returned by `show tables`.
    """
    query = "show tables"
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query)
        tables = cursor.fetchall()
        # Bug fix: log each table once; the original logged the entire
        # list on every loop iteration.
        for table in tables:
            LOGGER.info(table)
    return tables
def sql_list(bosslet_config, db_table):
    """
    List all the available members of a given sql table.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        db_table: Identifies which table members to list.

    Returns:
        ans(list): list of all members of sql table.

    Raises:
        Exception: if the table has no rows (treated as "table not found").
    """
    # NOTE(review): db_table is interpolated directly into the SQL (table
    # names cannot be parameterized); callers must not pass untrusted input.
    query = "SELECT * FROM {}".format(db_table)
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query)
        ans = cursor.fetchall()
        # An empty result is reported as a missing table.
        if len(ans) == 0:
            raise Exception(
                "Can't find table name: {}".format(db_table))
        else:
            for i in ans:
                LOGGER.info(i)
    return ans
def sql_resource_lookup_key(bosslet_config, resource_params):
    """
    Get the lookup key that identifies the resource from the database.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        resource_params (str): 'collection[/experiment[/channel]]' path.

    Returns:
        cuboid_str (str): Cuboid lookup key such as '1&2&3&'.

    Raises:
        Exception: if the path has more than three segments, or any segment
            does not resolve to exactly one database row.
    """
    resource = resource_params.split("/")
    # Bug fix: the original guard was `elif len(resource) > 3` chained to the
    # `> 2` branch, which made it unreachable — extra path segments were
    # silently accepted. Validate the shape up front instead.
    if len(resource) > 3:
        raise Exception("Only provide /coll/exp/chan")
    collection = resource[0]
    experiment = resource[1] if len(resource) > 1 else None
    channel = resource[2] if len(resource) > 2 else None
    coll_query = "SELECT id FROM collection WHERE name = %s"
    exp_query = "SELECT id FROM experiment WHERE name = %s"
    chan_query = "SELECT id FROM channel WHERE name = %s"
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(coll_query, (collection,))
        coll_set = cursor.fetchall()
        if len(coll_set) != 1:  # TODO: Alert the user when there are more than one results
            raise Exception(
                "Can't find collection: {}".format(collection))
        cuboid_str = "{}&".format(coll_set[0][0])
        LOGGER.info("{} collection id: {}".format(collection, coll_set[0][0]))
        if experiment is not None:
            cursor.execute(exp_query, (experiment,))
            exp_set = cursor.fetchall()
            if len(exp_set) != 1:  # TODO: Alert the user when there are more than one results
                raise Exception(
                    "Can't find experiment: {}".format(experiment))
            cuboid_str = cuboid_str + "{}&".format(exp_set[0][0])
            LOGGER.info("{} experiment id: {}".format(experiment, exp_set[0][0]))
        if channel is not None:
            cursor.execute(chan_query, (channel,))
            chan_set = cursor.fetchall()
            if len(chan_set) != 1:  # TODO: Alert the user when there are more than one results
                raise Exception(
                    "Can't find channel: {}".format(channel))
            cuboid_str = cuboid_str + "{}&".format(chan_set[0][0])
            LOGGER.info("{} channel id: {}".format(channel, chan_set[0][0]))
    LOGGER.info("Cuboid key: {} \n".format(cuboid_str))
    return cuboid_str
def sql_coordinate_frame_lookup_key(bosslet_config, coordinate_frame):
    """
    Get the lookup key that identifies the coordinate frame specified.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        coordinate_frame: Identifies coordinate frame.

    Returns:
        Coordinate Frame lookup key (first column of the matching row).
    """
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute("SELECT id FROM coordinate_frame WHERE name = %s",
                       (coordinate_frame,))
        rows = cursor.fetchall()
        # Exactly one match is required; anything else is an error.
        if len(rows) != 1:
            raise Exception(
                "Can't find coordinate frame: {}".format(coordinate_frame))
        frame_id = rows[0][0]
        LOGGER.info("{} coordinate frame id: {}".format(coordinate_frame, frame_id))
        return frame_id
def sql_channel_job_ids(bosslet_config, resource):
    """
    Get a list of ingest job ids related to a given channel.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        resource (str): 'collection/experiment/channel' path.

    Returns:
        job_ids (list): job ids, start dates and x/y/z ranges for the channel:
            (id, start_date, x_start, y_start, z_start, x_stop, y_stop, z_stop)
            ex: (2933, datetime.datetime(2019, 3, 16, 21, 33, 37, 831357), 32000, 45824, 14880, 213760, 169728, 14912)
    """
    parts = resource.split("/")
    coll, exp, chan = parts[0], parts[1], parts[2]
    # Security fix: the original interpolated the caller-supplied names into
    # the SQL string with .format(); use parameterized placeholders instead.
    query = ("SELECT id,start_date,x_start,y_start,z_start,x_stop,y_stop,z_stop "
             "FROM ingest_job WHERE collection = %s AND experiment = %s AND channel = %s")
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query, (coll, exp, chan))
        job_ids = cursor.fetchall()
        if len(job_ids) == 0:
            raise Exception(
                "Can't find resource name: {}/{}/{}".format(coll, exp, chan))
        LOGGER.info("\n Job-Ids corresponding to {}/{}/{}".format(coll, exp, chan))
        LOGGER.info("< id, start_date, x_start,y_start,z_start,x_stop, y_stop, z_stop>")
        for job in job_ids:
            LOGGER.info(job)
    return job_ids
def sql_get_names_from_lookup_keys(bosslet_config, lookup_keys):
    """
    Resolve lookup keys to (collection, experiment, channel) name tuples.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        lookup_keys (list[str]): Keys formatted as f'{col_id}&{exp_id}&{chan_id}'.

    Returns:
        list[tuple(str, str, str)]: One tuple per key, in input order; a key
        that is not found yields a tuple of empty strings.
    """
    names = []
    if len(lookup_keys) < 1:
        LOGGER.error('No lookup keys provided, aborting.')
        return names
    query = 'SELECT collection_name, experiment_name, channel_name FROM lookup WHERE lookup_key = %(key)s'
    with bosslet_config.call.connect_rds() as cursor:
        for key in lookup_keys:
            cursor.execute(query, {'key': key})
            rows = cursor.fetchall()
            # Missing keys degrade to empty names rather than raising.
            entry = (rows[0][0], rows[0][1], rows[0][2]) if len(rows) > 0 else ('', '', '')
            names.append(entry)
            LOGGER.info('key: {}, coll: {}, exp: {}, chan: {}'.format(key, entry[0], entry[1], entry[2]))
    return names
import logging
LOGGER = logging.getLogger(__name__)
def sql_tables(bosslet_config):
    """
    List all tables in sql.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object

    Returns:
        tables (list): All rows returned by `show tables`.
    """
    query = "show tables"
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query)
        tables = cursor.fetchall()
        # Bug fix: log each table once; the original logged the entire
        # list on every loop iteration.
        for table in tables:
            LOGGER.info(table)
    return tables
def sql_list(bosslet_config, db_table):
    """
    List all the available members of a given sql table.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        db_table: Identifies which table members to list.

    Returns:
        ans(list): list of all members of sql table.

    Raises:
        Exception: if the table has no rows (treated as "table not found").
    """
    # NOTE(review): db_table is interpolated directly into the SQL (table
    # names cannot be parameterized); callers must not pass untrusted input.
    query = "SELECT * FROM {}".format(db_table)
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query)
        ans = cursor.fetchall()
        # An empty result is reported as a missing table.
        if len(ans) == 0:
            raise Exception(
                "Can't find table name: {}".format(db_table))
        else:
            for i in ans:
                LOGGER.info(i)
    return ans
def sql_resource_lookup_key(bosslet_config, resource_params):
    """
    Get the lookup key that identifies the resource from the database.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        resource_params (str): '/'-separated 'collection[/experiment[/channel]]'.

    Returns:
        cuboid_str (str): Cuboid lookup key, e.g. '{coll_id}&{exp_id}&{chan_id}&'.

    Raises:
        Exception: If more than three path components are given, or a name
            does not resolve to exactly one id.
    """
    resource = resource_params.split("/")
    # str.split always returns at least one element, so only the upper
    # bound needs checking.  (The original guarded this with an
    # unreachable 'elif len(resource) > 3' nested under '> 2'.)
    if len(resource) > 3:
        raise Exception("Only provide /coll/exp/chan")

    collection = resource[0]
    experiment = resource[1] if len(resource) > 1 else None
    channel = resource[2] if len(resource) > 2 else None

    coll_query = "SELECT id FROM collection WHERE name = %s"
    exp_query = "SELECT id FROM experiment WHERE name = %s"
    chan_query = "SELECT id FROM channel WHERE name = %s"

    cuboid_str = ""
    with bosslet_config.call.connect_rds() as cursor:
        # Resolve each provided name to its id; every lookup must match
        # exactly one row.
        for name, query, label in ((collection, coll_query, 'collection'),
                                   (experiment, exp_query, 'experiment'),
                                   (channel, chan_query, 'channel')):
            if name is None:
                continue
            cursor.execute(query, (name,))
            id_set = cursor.fetchall()
            if len(id_set) != 1:  # TODO: Alert the user when there are more than one results
                raise Exception(
                    "Can't find {}: {}".format(label, name))
            cuboid_str += "{}&".format(id_set[0][0])
            LOGGER.info("{} {} id: {}".format(name, label, id_set[0][0]))

    LOGGER.info("Cuboid key: {} \n".format(cuboid_str))
    return cuboid_str
def sql_coordinate_frame_lookup_key(bosslet_config, coordinate_frame):
    """
    Get the lookup key that identifies the given coordinate frame.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        coordinate_frame (str): Name of the coordinate frame.

    Returns:
        The id of the single matching coordinate_frame row.

    Raises:
        Exception: If the name does not match exactly one row.
    """
    query = "SELECT id FROM coordinate_frame WHERE name = %s"
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query, (coordinate_frame,))
        matches = cursor.fetchall()
        if len(matches) != 1:
            raise Exception(
                "Can't find coordinate frame: {}".format(coordinate_frame))
        frame_id = matches[0][0]
        LOGGER.info("{} coordinate frame id: {}".format(coordinate_frame, frame_id))
        return frame_id
def sql_channel_job_ids(bosslet_config, resource):
    """
    Get a list of ingest job ids related to a given channel.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        resource (str): 'collection/experiment/channel'

    Returns:
        job_ids (list): job ids with start dates and x, y, z ranges.
            format: (id, start_date, x_start, y_start, z_start, x_stop, y_stop, z_stop)
            ex: (2933, datetime.datetime(2019, 3, 16, 21, 33, 37, 831357), 32000, 45824, 14880, 213760, 169728, 14912)
    """
    parts = resource.split("/")
    coll, exp, chan = parts[0], parts[1], parts[2]
    # SECURITY FIX: bind the names as query parameters instead of
    # string-formatting them into the SQL (the original was injectable).
    query = ("SELECT id,start_date,x_start,y_start,z_start,x_stop,y_stop,z_stop "
             "FROM ingest_job WHERE collection = %s AND experiment = %s AND channel = %s")
    with bosslet_config.call.connect_rds() as cursor:
        cursor.execute(query, (coll, exp, chan))
        job_ids = cursor.fetchall()
        if not job_ids:
            raise Exception(
                "Can't find resource name: {}/{}/{}".format(coll, exp, chan))
        LOGGER.info("\n Job-Ids corresponding to {}/{}/{}".format(coll, exp, chan))
        LOGGER.info("< id, start_date, x_start,y_start,z_start,x_stop, y_stop, z_stop>")
        for job in job_ids:
            LOGGER.info(job)
    return job_ids
def sql_get_names_from_lookup_keys(bosslet_config, lookup_keys):
    """
    Get collection/experiment/channel names from lookup keys.

    Args:
        bosslet_config (BossConfiguration): Bosslet configuration object
        lookup_keys (list[str]): List of lookup keys to get col/exp/chan names for.
            Expected format f'{col_id}&{exp_id}&{chan_id}'

    Returns:
        (list[tuple(str, str, str)]): List of tuples of collection/exp/chan names,
            in the same order as lookup_keys.  If a lookup key is not found,
            empty strings are returned for that key's corresponding tuple.
    """
    names = []
    if not lookup_keys:
        LOGGER.error('No lookup keys provided, aborting.')
        return names

    # Parameterized query - never interpolate the key into the SQL string.
    query = 'SELECT collection_name, experiment_name, channel_name FROM lookup WHERE lookup_key = %(key)s'
    with bosslet_config.call.connect_rds() as cursor:
        for key in lookup_keys:
            cursor.execute(query, { 'key': key })
            rows = cursor.fetchall()
            # Unknown keys yield a placeholder tuple so the output stays
            # aligned with the input key order.
            if rows:
                this_row = (rows[0][0], rows[0][1], rows[0][2])
            else:
                this_row = ('', '', '')
            names.append(this_row)
            # Lazy %-style args avoid formatting cost when INFO is disabled.
            LOGGER.info('key: %s, coll: %s, exp: %s, chan: %s',
                        key, this_row[0], this_row[1], this_row[2])
    return names
import math
from typing import Callable, Final, Sequence, Tuple
from hw1.solution1 import root_finding, secant
SEG_DEFAULT_BEG: Final = -1
SEG_DEFAULT_END: Final = 1
EPS_DEFAULT_VAL: Final = 10 ** (-20)
def _legendre(x_val, num_nodes) -> float:
"""
Calculates legendre function using recurrent formula for homogeneous polynomials
"""
if num_nodes == 0:
return 1
if num_nodes == 1:
return x_val
return (2 * num_nodes - 1) / num_nodes * _legendre(x_val, num_nodes - 1) * x_val - (
num_nodes - 1
) / num_nodes * _legendre(x_val, num_nodes - 2)
def print_node_coef_pares(pares: Sequence[Tuple[float, float]]) -> float:
    """
    Print the pairs of nodes and coefficients in "node <-> coef" format.

    The parameter annotation is corrected: the original declared
    Tuple[float, float] for what is actually a sequence of pairs.

    Returns:
        Checksum (sum) of the coefficients.
    """
    checksum = 0.0
    for node, coef in pares:
        print(f"{node:.12f} <-> {coef:.12f}")
        checksum += coef
    return checksum
def compute_gauss_node_coef_pares(num_nodes: int) -> Sequence[Tuple[float, float]]:
    """
    Build the (node, coefficient) pairs of the Gauss quadrature formula:
    nodes are the roots of the Legendre polynomial of degree num_nodes on
    [-1, 1], localized by root_finding() and refined by secant().
    """
    poly = lambda x: _legendre(x, num_nodes)
    pairs = []
    for seg in root_finding(poly, SEG_DEFAULT_BEG, SEG_DEFAULT_END):
        node = secant(poly, seg[0], seg[1], EPS_DEFAULT_VAL)
        coef = (2 * (1 - node ** 2)) / (
            num_nodes ** 2 * _legendre(node, num_nodes - 1) ** 2
        )
        pairs.append((node, coef))
    return pairs
def compute_meler_node_coef_pares(num_nodes: int) -> Sequence[Tuple[float, float]]:
    """
    Build the (node, coefficient) pairs of Mehler's quadrature formula.
    All coefficients equal pi / num_nodes; the nodes are the Chebyshev
    points cos((2k - 1) * pi / (2 * num_nodes)), k = 1..num_nodes.
    """
    weight = math.pi / num_nodes
    return [
        (math.cos((2 * k - 1) * math.pi / (2 * num_nodes)), weight)
        for k in range(1, num_nodes + 1)
    ]
def map_gauss_coef_pares(
    node_coef_pares: Sequence[Tuple[float, float]], seg_a: float, seg_b: float
) -> Sequence[Tuple[float, float]]:
    """
    Linearly map (node, coefficient) pairs from the default segment
    [SEG_DEFAULT_BEG, SEG_DEFAULT_END] = [-1, 1] onto [seg_a, seg_b].

    Nodes are shifted/scaled into the target segment; coefficients are
    scaled by the ratio of segment lengths.  (The original docstring
    stated the mapping direction backwards, and annotated the sequence
    parameter as a single tuple; both are corrected here.)
    """
    similarity_coefficient = (seg_b - seg_a) / (SEG_DEFAULT_END - SEG_DEFAULT_BEG)
    return [
        (
            seg_a + similarity_coefficient * (node - SEG_DEFAULT_BEG),
            coef * similarity_coefficient,
        )
        for node, coef in node_coef_pares
    ]
def find_gauss_integral(
    mapped_node_coef_pares: Sequence[Tuple[float, float]], func: Callable
) -> float:
    """
    Evaluate the quadrature sum  sum_k coef_k * func(node_k)  over the
    given (node, coefficient) pairs.  (Parameter annotation corrected
    from Tuple[float, float] to a sequence of pairs.)
    """
    return sum(coef * func(node) for node, coef in mapped_node_coef_pares)
def find_meler_integral(node_coef_pares: Sequence[Tuple[float, float]], func: Callable) -> float:
    """
    Approximate the integral with Mehler's formula: pi / n * sum_k func(node_k).
    Only the nodes are used here - every Mehler coefficient equals pi / n.

    (Parameter annotation corrected; the index loop is replaced by direct
    iteration over the pairs.)
    """
    num_nodes = len(node_coef_pares)
    integral_sum = sum(func(node) for node, _ in node_coef_pares)
    return math.pi * integral_sum / num_nodes
def do_accuracy_check_for(nodes_list: Sequence[int]) -> None:
    """
    Accuracy check of compute_gauss_node_coef_pares(): for each node count
    integrate a test polynomial over [-1, 1] and print exact vs. approximate.

    The original repeated the same four-line body in three branches; this
    version is table-driven over (function, exact integral, printable name).
    """
    cases = {
        3: (lambda x: 6 * x ** 5 + 2 * x + 34, 68, "6x^5 + 2x + 34"),
        4: (lambda x: 8 * x ** 7 + 3 * x ** 2, 2, "8x^7 + 3x^2"),
    }
    # Any node count other than 3 or 4 uses the degree-9 polynomial.
    default = (lambda x: 10 * x ** 9 + 5 * x ** 4 + 1, 4, "10x^9 + 5x^4 + 1")
    for num_nodes in nodes_list:
        func, exact_integral, label = cases.get(num_nodes, default)
        node_coef_pares = compute_gauss_node_coef_pares(num_nodes)
        approx_integral = find_gauss_integral(node_coef_pares, func)
        print(f"\nPolynom: {label}, exact integral = {exact_integral}")
        print(f"Approximate integral: {approx_integral:.12f}")
def do_task_1() -> None:
    """
    Run the task 1 demo: Gauss nodes/coefficients for 1 through 8 nodes.
    """
    print("\n-------------------------------\nЗадание 1.")
    for num_nodes in range(1, 9):
        pares = compute_gauss_node_coef_pares(num_nodes)
        print(f"\nЧисло узлов: {num_nodes}")
        print(f"Checksum: {print_node_coef_pares(pares):.12f}")
def do_task_2() -> None:
    """
    Run the task 2 demo: accuracy check of the Gauss formula for 3, 4
    and 5 nodes.
    """
    nodes = [3, 4, 5]
    print("\n-------------------------------\nЗадание 2.")
    print(f"Узлы: {nodes}")
    do_accuracy_check_for(nodes)
def do_task_3() -> None:
    """
    Runs task 3 output
    """
    print("\n-------------------------------\nЗадание 3.")
    # Read up to 4 node counts; extra entries are silently dropped.
    num_nodes_list = list(
        map(int, input("Введите список числа узлов (до 4 узлов): ").strip().split())
    )[:4]
    seg_a = input("Введите a (default = 0):")
    seg_b = input("Введите b (default = 1):")
    # NOTE(review): EXAMPLE_GAUSS_SEG_BEG/END, example_gauss and
    # EXAMPLE_GAUSS_EXACT_INTEGRAL are defined under the __main__ guard,
    # so this function raises NameError if the module is merely imported.
    seg_a = EXAMPLE_GAUSS_SEG_BEG if seg_a == "" else float(seg_a)
    seg_b = EXAMPLE_GAUSS_SEG_END if seg_b == "" else float(seg_b)
    for num_nodes in num_nodes_list:
        # Nodes/weights are computed on [-1, 1], then mapped to [a, b].
        node_coef_pares = compute_gauss_node_coef_pares(num_nodes)
        mapped_node_coef_pares = map_gauss_coef_pares(node_coef_pares, seg_a, seg_b)
        approx_integral = find_gauss_integral(mapped_node_coef_pares, example_gauss)
        print(f"\nN = {num_nodes}")
        print(f"Checksum: {print_node_coef_pares(mapped_node_coef_pares):.12f}")
        print(f"\nExact integral = {EXAMPLE_GAUSS_EXACT_INTEGRAL(seg_a, seg_b)}")
        print(f"Approximate integral = {approx_integral}")
        print(
            f"Error: {abs(approx_integral - EXAMPLE_GAUSS_EXACT_INTEGRAL(seg_a, seg_b))}"
        )
def do_task_4() -> None:
    """
    Runs task 4 output
    """
    print("\n-------------------------------\nЗадание 4.")
    # Read up to 3 node counts; extra entries are silently dropped.
    num_nodes_list = list(
        map(int, input("Введите список числа узлов (до 3 узлов): ").strip().split())
    )[:3]
    for num_nodes in num_nodes_list:
        print(f"\nN = {num_nodes}")
        node_coef_pares = compute_meler_node_coef_pares(num_nodes)
        # NOTE(review): example_meler is defined under the __main__ guard -
        # NameError if this is called after a plain import.
        approx_integral = find_meler_integral(node_coef_pares, example_meler)
        print(f"Checksum: {print_node_coef_pares(node_coef_pares):.12f}")
        print(f"\nApproximate integral = {approx_integral}")
if __name__ == "__main__":
    print(
        """
    Задание 5. КФ Гаусса, ее узлы и коэффициенты.
    Вычисление интегралов при помощи КФ Гаусса.
    КФ Мелера, ее узлы и коэффициенты.
    Вычисление интегралов при помощи КФ Мелера.
    """
    )
    # Variant 6: integrands and the antiderivative used for the exact value.
    example_gauss = lambda x: x * math.log(1 + x)
    example_meler = lambda x: math.cos(x) ** 2
    exact = lambda x: 1 / 4 * (2 * (x ** 2 - 1) * math.log(x + 1) - (x - 2) * x)
    EXAMPLE_GAUSS_SEG_BEG = 0
    EXAMPLE_GAUSS_SEG_END = 1
    EXAMPLE_GAUSS_EXACT_INTEGRAL = lambda a, b: exact(b) - exact(a)
    # Repeat all tasks until the user declines to continue.
    # (The trailing dataset junk fused onto the final `break` line of the
    # original is removed.)
    while True:
        do_task_1()
        do_task_2()
        do_task_3()
        do_task_4()
        if input("\nПродолжить с новыми узлами, a, b? (y, n): ") == "y":
            continue
        break
from typing import Callable, Final, Sequence, Tuple
from hw1.solution1 import root_finding, secant
SEG_DEFAULT_BEG: Final = -1
SEG_DEFAULT_END: Final = 1
EPS_DEFAULT_VAL: Final = 10 ** (-20)
def _legendre(x_val, num_nodes) -> float:
"""
Calculates legendre function using recurrent formula for homogeneous polynomials
"""
if num_nodes == 0:
return 1
if num_nodes == 1:
return x_val
return (2 * num_nodes - 1) / num_nodes * _legendre(x_val, num_nodes - 1) * x_val - (
num_nodes - 1
) / num_nodes * _legendre(x_val, num_nodes - 2)
def print_node_coef_pares(pares: Sequence[Tuple[float, float]]) -> float:
    """
    Print the pairs of nodes and coefficients in "node <-> coef" format.

    The parameter annotation is corrected: the original declared
    Tuple[float, float] for what is actually a sequence of pairs.

    Returns:
        Checksum (sum) of the coefficients.
    """
    checksum = 0.0
    for node, coef in pares:
        print(f"{node:.12f} <-> {coef:.12f}")
        checksum += coef
    return checksum
def compute_gauss_node_coef_pares(num_nodes: int) -> Sequence[Tuple[float, float]]:
    """
    Build the (node, coefficient) pairs of the Gauss quadrature formula:
    nodes are the roots of the Legendre polynomial of degree num_nodes on
    [-1, 1], localized by root_finding() and refined by secant().
    """
    poly = lambda x: _legendre(x, num_nodes)
    pairs = []
    for seg in root_finding(poly, SEG_DEFAULT_BEG, SEG_DEFAULT_END):
        node = secant(poly, seg[0], seg[1], EPS_DEFAULT_VAL)
        coef = (2 * (1 - node ** 2)) / (
            num_nodes ** 2 * _legendre(node, num_nodes - 1) ** 2
        )
        pairs.append((node, coef))
    return pairs
def compute_meler_node_coef_pares(num_nodes: int) -> Sequence[Tuple[float, float]]:
    """
    Build the (node, coefficient) pairs of Mehler's quadrature formula.
    All coefficients equal pi / num_nodes; the nodes are the Chebyshev
    points cos((2k - 1) * pi / (2 * num_nodes)), k = 1..num_nodes.
    """
    weight = math.pi / num_nodes
    return [
        (math.cos((2 * k - 1) * math.pi / (2 * num_nodes)), weight)
        for k in range(1, num_nodes + 1)
    ]
def map_gauss_coef_pares(
    node_coef_pares: Sequence[Tuple[float, float]], seg_a: float, seg_b: float
) -> Sequence[Tuple[float, float]]:
    """
    Linearly map (node, coefficient) pairs from the default segment
    [SEG_DEFAULT_BEG, SEG_DEFAULT_END] = [-1, 1] onto [seg_a, seg_b].

    Nodes are shifted/scaled into the target segment; coefficients are
    scaled by the ratio of segment lengths.  (The original docstring
    stated the mapping direction backwards, and annotated the sequence
    parameter as a single tuple; both are corrected here.)
    """
    similarity_coefficient = (seg_b - seg_a) / (SEG_DEFAULT_END - SEG_DEFAULT_BEG)
    return [
        (
            seg_a + similarity_coefficient * (node - SEG_DEFAULT_BEG),
            coef * similarity_coefficient,
        )
        for node, coef in node_coef_pares
    ]
def find_gauss_integral(
    mapped_node_coef_pares: Sequence[Tuple[float, float]], func: Callable
) -> float:
    """
    Evaluate the quadrature sum  sum_k coef_k * func(node_k)  over the
    given (node, coefficient) pairs.  (Parameter annotation corrected
    from Tuple[float, float] to a sequence of pairs.)
    """
    return sum(coef * func(node) for node, coef in mapped_node_coef_pares)
def find_meler_integral(node_coef_pares: Sequence[Tuple[float, float]], func: Callable) -> float:
    """
    Approximate the integral with Mehler's formula: pi / n * sum_k func(node_k).
    Only the nodes are used here - every Mehler coefficient equals pi / n.

    (Parameter annotation corrected; the index loop is replaced by direct
    iteration over the pairs.)
    """
    num_nodes = len(node_coef_pares)
    integral_sum = sum(func(node) for node, _ in node_coef_pares)
    return math.pi * integral_sum / num_nodes
def do_accuracy_check_for(nodes_list: Sequence[int]) -> None:
    """
    Accuracy check of compute_gauss_node_coef_pares(): for each node count
    integrate a test polynomial over [-1, 1] and print exact vs. approximate.

    The original repeated the same four-line body in three branches; this
    version is table-driven over (function, exact integral, printable name).
    """
    cases = {
        3: (lambda x: 6 * x ** 5 + 2 * x + 34, 68, "6x^5 + 2x + 34"),
        4: (lambda x: 8 * x ** 7 + 3 * x ** 2, 2, "8x^7 + 3x^2"),
    }
    # Any node count other than 3 or 4 uses the degree-9 polynomial.
    default = (lambda x: 10 * x ** 9 + 5 * x ** 4 + 1, 4, "10x^9 + 5x^4 + 1")
    for num_nodes in nodes_list:
        func, exact_integral, label = cases.get(num_nodes, default)
        node_coef_pares = compute_gauss_node_coef_pares(num_nodes)
        approx_integral = find_gauss_integral(node_coef_pares, func)
        print(f"\nPolynom: {label}, exact integral = {exact_integral}")
        print(f"Approximate integral: {approx_integral:.12f}")
def do_task_1() -> None:
    """
    Run the task 1 demo: Gauss nodes/coefficients for 1 through 8 nodes.
    """
    print("\n-------------------------------\nЗадание 1.")
    for num_nodes in range(1, 9):
        pares = compute_gauss_node_coef_pares(num_nodes)
        print(f"\nЧисло узлов: {num_nodes}")
        print(f"Checksum: {print_node_coef_pares(pares):.12f}")
def do_task_2() -> None:
    """
    Run the task 2 demo: accuracy check of the Gauss formula for 3, 4
    and 5 nodes.
    """
    nodes = [3, 4, 5]
    print("\n-------------------------------\nЗадание 2.")
    print(f"Узлы: {nodes}")
    do_accuracy_check_for(nodes)
def do_task_3() -> None:
    """
    Runs task 3 output
    """
    print("\n-------------------------------\nЗадание 3.")
    # Read up to 4 node counts; extra entries are silently dropped.
    num_nodes_list = list(
        map(int, input("Введите список числа узлов (до 4 узлов): ").strip().split())
    )[:4]
    seg_a = input("Введите a (default = 0):")
    seg_b = input("Введите b (default = 1):")
    # NOTE(review): EXAMPLE_GAUSS_SEG_BEG/END, example_gauss and
    # EXAMPLE_GAUSS_EXACT_INTEGRAL are defined under the __main__ guard,
    # so this function raises NameError if the module is merely imported.
    seg_a = EXAMPLE_GAUSS_SEG_BEG if seg_a == "" else float(seg_a)
    seg_b = EXAMPLE_GAUSS_SEG_END if seg_b == "" else float(seg_b)
    for num_nodes in num_nodes_list:
        # Nodes/weights are computed on [-1, 1], then mapped to [a, b].
        node_coef_pares = compute_gauss_node_coef_pares(num_nodes)
        mapped_node_coef_pares = map_gauss_coef_pares(node_coef_pares, seg_a, seg_b)
        approx_integral = find_gauss_integral(mapped_node_coef_pares, example_gauss)
        print(f"\nN = {num_nodes}")
        print(f"Checksum: {print_node_coef_pares(mapped_node_coef_pares):.12f}")
        print(f"\nExact integral = {EXAMPLE_GAUSS_EXACT_INTEGRAL(seg_a, seg_b)}")
        print(f"Approximate integral = {approx_integral}")
        print(
            f"Error: {abs(approx_integral - EXAMPLE_GAUSS_EXACT_INTEGRAL(seg_a, seg_b))}"
        )
def do_task_4() -> None:
    """
    Runs task 4 output
    """
    print("\n-------------------------------\nЗадание 4.")
    # Read up to 3 node counts; extra entries are silently dropped.
    num_nodes_list = list(
        map(int, input("Введите список числа узлов (до 3 узлов): ").strip().split())
    )[:3]
    for num_nodes in num_nodes_list:
        print(f"\nN = {num_nodes}")
        node_coef_pares = compute_meler_node_coef_pares(num_nodes)
        # NOTE(review): example_meler is defined under the __main__ guard -
        # NameError if this is called after a plain import.
        approx_integral = find_meler_integral(node_coef_pares, example_meler)
        print(f"Checksum: {print_node_coef_pares(node_coef_pares):.12f}")
        print(f"\nApproximate integral = {approx_integral}")
if __name__ == "__main__":
    print(
        """
    Задание 5. КФ Гаусса, ее узлы и коэффициенты.
    Вычисление интегралов при помощи КФ Гаусса.
    КФ Мелера, ее узлы и коэффициенты.
    Вычисление интегралов при помощи КФ Мелера.
    """
    )
    # Variant 6: integrands and the antiderivative used for the exact value.
    example_gauss = lambda x: x * math.log(1 + x)
    example_meler = lambda x: math.cos(x) ** 2
    exact = lambda x: 1 / 4 * (2 * (x ** 2 - 1) * math.log(x + 1) - (x - 2) * x)
    EXAMPLE_GAUSS_SEG_BEG = 0
    EXAMPLE_GAUSS_SEG_END = 1
    EXAMPLE_GAUSS_EXACT_INTEGRAL = lambda a, b: exact(b) - exact(a)
    # Repeat all tasks until the user declines to continue.
    # (The trailing dataset junk fused onto the final `break` line of the
    # original is removed.)
    while True:
        do_task_1()
        do_task_2()
        do_task_3()
        do_task_4()
        if input("\nПродолжить с новыми узлами, a, b? (y, n): ") == "y":
            continue
        break
from .lib import (
waf_detect,
infoga
)
from .utils import (
infoga_modules,
show,
log,
description,
proto,
no_proto,
waf_debug,
json_respon
)
import requests,readline,marshal,whois
from bs4 import BeautifulSoup as bs
log = log(__name__)
mod = infoga_modules
uag = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'}
def main():
    """
    Interactive footprinting shell: reads a command, dispatches to the
    matching reconnaissance module, and prints/logs the result.

    Loops until 'back' (return to caller) or 'exit' (terminate process).
    """
    while True:
        try:
            inp = input('zsf(\033[91mfootprinting\033[0m): ').lower()
            if inp == mod[0]:
                # CMS detection via whatcms.org.
                url = input('host: ')
                # Obfuscated API key, rebuilt at runtime by marshal.loads().
                auth = b'\xdaF1482f3f1d83cdacf018a60f7b44be2cae9244161a54c3909561d19160f0baf6de8575a'
                r = requests.get(
                    f'https://whatcms.org/APIEndpoint/Detect?key={marshal.loads(auth)}&url={url}'
                )
                json_respon(r.json()['result'])
            elif inp == mod[1]:
                # HTTP header / security-header analysis.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    timeout=7,
                    verify=False
                )
                log.log(50, f'Server: {infoga.server(r)}')
                log.log(50, f'X-Powered-By: {infoga.x_powered(r)}')
                if infoga.click_jacking(r) is None:
                    log.log(20, 'target might be vulnerability clickjacking')
                if infoga.xss_protect(r) is None:
                    log.log(20, 'no xss protection')
                if infoga.cors_wildcard(r) is True:
                    log.log(50, 'cors wildcard detected')
                log.log(50, f'sha256 content: {infoga.sha_content(r)}')
                print('show all result')
                json_respon(r.headers)
            elif inp == mod[2]:
                # IP geolocation lookup.
                tgt = no_proto(input('host/Ip: '))
                r = requests.get(f'http://ip-api.com/json/{tgt}')
                json_respon(r.json())
            elif inp == mod[3]:
                # Scrape e-mail addresses from the page body.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    verify=False,
                    headers=uag
                )
                # Call email_search() once (the original called it twice).
                emails = infoga.email_search(r)
                if emails is None:
                    log.log(30, 'no email found')
                else:
                    for found in emails:
                        log.log(50, f'found: {found}')
            elif inp == mod[4]:
                # Traceroute (mtr) through hackertarget.com.
                tgt = no_proto(input('host/Ip: '))
                # BUG FIX: the original URL string lacked the f prefix, so
                # the literal text '{tgt}' was sent instead of the target.
                r = requests.get(f'https://api.hackertarget.com/mtr/?q={tgt}')
                print(r.text)
            elif inp == mod[5]:
                # robots.txt retrieval.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    verify=False
                )
                if r.status_code == 200:
                    log.log(50, 'robot.txt found')
                    print(r.text)
                else:
                    log.log(30, 'robot.txt not found')
            elif inp == mod[6]:
                # Reverse-IP lookup via yougetsignal.com.
                dom = no_proto(input('domain: '))
                r = requests.post(
                    'https://domains.yougetsignal.com/domains.php',
                    data={
                        'remoteAddress': dom,
                        'key': ''
                    }
                )
                if r.json()['status'] == 'Success':
                    for domain, _ in r.json()['domainArray']:
                        log.log(10, f'{domain}')
                else:
                    # BUG FIX: the original subscripted the bound method
                    # (r.json["status"]), which raises TypeError.
                    log.log(30, f"{r.json()['status']}")
            elif inp == mod[7]:
                # WHOIS lookup.
                print(whois.whois(input('host: ')))
            elif inp == mod[8]:
                # Enumerate HTML forms on the page.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    verify=False
                )
                soup = bs(r.text, 'lxml')
                forms = infoga.html_form(soup)
                if len(forms.values()) == 0:
                    log.log(30, 'no html form found')
                else:
                    json_respon(forms)
            elif inp == mod[9]:
                # WAF detection.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    allow_redirects=True,
                    timeout=7
                )
                waf_debug(waf_detect, r).main()
            elif inp == 'back':
                break
            elif inp == 'exit':
                exit()
            elif inp == 'help':
                show(infoga_modules, description['information gathering'])
            else:
                print(f'\033[91m!\033[0m no command {inp}')
        except KeyboardInterrupt:
            exit()
        except Exception as e:
            # Keep the shell alive on module errors: report and re-prompt.
            print(e)
exit() | zeeb_src/recon_src.py | from .lib import (
waf_detect,
infoga
)
from .utils import (
infoga_modules,
show,
log,
description,
proto,
no_proto,
waf_debug,
json_respon
)
import requests,readline,marshal,whois
from bs4 import BeautifulSoup as bs
log = log(__name__)
mod = infoga_modules
uag = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'}
def main():
    """
    Interactive footprinting shell: reads a command, dispatches to the
    matching reconnaissance module, and prints/logs the result.

    Loops until 'back' (return to caller) or 'exit' (terminate process).
    """
    while True:
        try:
            inp = input('zsf(\033[91mfootprinting\033[0m): ').lower()
            if inp == mod[0]:
                # CMS detection via whatcms.org.
                url = input('host: ')
                # Obfuscated API key, rebuilt at runtime by marshal.loads().
                auth = b'\xdaF1482f3f1d83cdacf018a60f7b44be2cae9244161a54c3909561d19160f0baf6de8575a'
                r = requests.get(
                    f'https://whatcms.org/APIEndpoint/Detect?key={marshal.loads(auth)}&url={url}'
                )
                json_respon(r.json()['result'])
            elif inp == mod[1]:
                # HTTP header / security-header analysis.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    timeout=7,
                    verify=False
                )
                log.log(50, f'Server: {infoga.server(r)}')
                log.log(50, f'X-Powered-By: {infoga.x_powered(r)}')
                if infoga.click_jacking(r) is None:
                    log.log(20, 'target might be vulnerability clickjacking')
                if infoga.xss_protect(r) is None:
                    log.log(20, 'no xss protection')
                if infoga.cors_wildcard(r) is True:
                    log.log(50, 'cors wildcard detected')
                log.log(50, f'sha256 content: {infoga.sha_content(r)}')
                print('show all result')
                json_respon(r.headers)
            elif inp == mod[2]:
                # IP geolocation lookup.
                tgt = no_proto(input('host/Ip: '))
                r = requests.get(f'http://ip-api.com/json/{tgt}')
                json_respon(r.json())
            elif inp == mod[3]:
                # Scrape e-mail addresses from the page body.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    verify=False,
                    headers=uag
                )
                # Call email_search() once (the original called it twice).
                emails = infoga.email_search(r)
                if emails is None:
                    log.log(30, 'no email found')
                else:
                    for found in emails:
                        log.log(50, f'found: {found}')
            elif inp == mod[4]:
                # Traceroute (mtr) through hackertarget.com.
                tgt = no_proto(input('host/Ip: '))
                # BUG FIX: the original URL string lacked the f prefix, so
                # the literal text '{tgt}' was sent instead of the target.
                r = requests.get(f'https://api.hackertarget.com/mtr/?q={tgt}')
                print(r.text)
            elif inp == mod[5]:
                # robots.txt retrieval.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    verify=False
                )
                if r.status_code == 200:
                    log.log(50, 'robot.txt found')
                    print(r.text)
                else:
                    log.log(30, 'robot.txt not found')
            elif inp == mod[6]:
                # Reverse-IP lookup via yougetsignal.com.
                dom = no_proto(input('domain: '))
                r = requests.post(
                    'https://domains.yougetsignal.com/domains.php',
                    data={
                        'remoteAddress': dom,
                        'key': ''
                    }
                )
                if r.json()['status'] == 'Success':
                    for domain, _ in r.json()['domainArray']:
                        log.log(10, f'{domain}')
                else:
                    # BUG FIX: the original subscripted the bound method
                    # (r.json["status"]), which raises TypeError.
                    log.log(30, f"{r.json()['status']}")
            elif inp == mod[7]:
                # WHOIS lookup.
                print(whois.whois(input('host: ')))
            elif inp == mod[8]:
                # Enumerate HTML forms on the page.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    verify=False
                )
                soup = bs(r.text, 'lxml')
                forms = infoga.html_form(soup)
                if len(forms.values()) == 0:
                    log.log(30, 'no html form found')
                else:
                    json_respon(forms)
            elif inp == mod[9]:
                # WAF detection.
                url = proto(input('url: '))
                r = requests.get(
                    url,
                    headers=uag,
                    allow_redirects=True,
                    timeout=7
                )
                waf_debug(waf_detect, r).main()
            elif inp == 'back':
                break
            elif inp == 'exit':
                exit()
            elif inp == 'help':
                show(infoga_modules, description['information gathering'])
            else:
                print(f'\033[91m!\033[0m no command {inp}')
        except KeyboardInterrupt:
            exit()
        except Exception as e:
            # Keep the shell alive on module errors: report and re-prompt.
            print(e)
from django import forms
from django.contrib import admin, messages
from django.urls import reverse
from django.utils.html import mark_safe
from reversion_compare.admin import CompareVersionAdmin
from notesfrombelow.admin import editor_site
from . import models
class TagAdmin(CompareVersionAdmin):
    """Admin for tags: name, thumbnail, slug and category columns."""
    list_display = ['name', 'show_image', 'slug', 'category']
    prepopulated_fields = {'slug': ('name',)}
    search_fields = ['name']

    def show_image(self, obj):
        # Render the tag image as an inline thumbnail, or nothing if unset.
        if not obj.image:
            return ''
        return mark_safe(
            '<img src="{}" class="ui medium image" />'.format(obj.image.url)
        )
    show_image.short_description = 'Image'
class IssueAdmin(CompareVersionAdmin):
    """Admin configuration for journal issues (version-compare enabled)."""
    list_display = ['number', 'title', 'date', 'slug']
    prepopulated_fields = {'slug': ('title',)}
    search_fields = ['title']
class CategoryAdmin(CompareVersionAdmin):
    """Admin configuration for article categories."""
    list_display = ['name', 'slug', 'tag_name', 'order_on_homepage']
    prepopulated_fields = {'slug': ('name',)}
class AuthorAdmin(CompareVersionAdmin):
    """Admin configuration for authors, ordered and searchable by name."""
    list_display = ['name', 'bio', 'slug', 'twitter']
    prepopulated_fields = {'slug': ('name',)}
    ordering = ['name']
    search_fields = ['name']
class ArticleForm(forms.ModelForm):
    """Model form for Article with compact text widgets."""
    class Meta:
        model = models.Article
        fields = '__all__'
        widgets = {
            # Single-line input for the credit, two-row box for the subtitle.
            'image_credit': forms.TextInput(),
            'subtitle': forms.Textarea({'rows': 2}),
        }
def publish(modeladmin, request, queryset):
    """Admin action: mark every selected article as published."""
    queryset.update(published=True)
    note = "Published {} article(s)".format(queryset.count())
    messages.info(request, note)
def remove_tags(modeladmin, request, queryset):
    """Admin action: strip all tags from the selected articles."""
    for article in queryset:
        # BUG FIX: remove() with no arguments is a no-op; clear() actually
        # deletes all of the article's tag relations.
        article.tags.clear()
    messages.info(request, "Removed tags from {} article(s)".format(
        queryset.count())
    )
def make_add_tag_action(tag):
    """Build an admin action that attaches *tag* to every selected article."""
    def add_tag(modeladmin, request, queryset):
        for article in queryset:
            article.tags.add(tag)
        note = "Added tag '{}' to {} article(s)".format(tag.name, queryset.count())
        messages.info(request, note)
    # Unique __name__ lets many generated actions coexist in the actions dict.
    add_tag.short_description = "Add tag '{}'".format(tag.name)
    add_tag.__name__ = 'add_tag_{0}'.format(tag.pk)
    return add_tag
class ArticleAdmin(CompareVersionAdmin):
    """
    Admin configuration for articles.

    The change list is rendered with rich HTML cells (title block, image
    thumbnail, issue/category/tag summary) instead of plain columns.
    """
    list_display = ['display_title', 'date', 'show_image', 'display_tags', 'published', 'is_featured']
    list_filter = ['category', 'tags', 'issue']
    prepopulated_fields = {'slug': ('title',)}
    change_form_template = 'admin/edit_article.html'
    form = ArticleForm
    # Links are embedded in display_title, so disable the default ones.
    list_display_links = None
    search_fields = ['title']
    autocomplete_fields = ['related_1', 'related_2', 'issue', 'tags', 'authors']

    def display_title(self, obj):
        """Render title, subtitle, author list, edit link and public slug link."""
        if obj.authors.count():
            authors = ', '.join(a.name for a in obj.authors.all())
        else:
            authors = 'anonymous'
        to_return = (
            '<h3 class="ui header"><a href="{edit}">{title}</a><div class="sub header">{subtitle}</div></h3><span>by {authors}</span><br><code><a href="{view}">{slug}</a></code>'.format(
                edit=reverse('editor:journal_article_change', args=[obj.id]),
                title=obj.title,
                subtitle=obj.subtitle or '<em>No subtitle</em>',
                authors=authors,
                view=obj.get_absolute_url(),
                slug=obj.slug,
            )
        )
        return mark_safe(to_return)
    display_title.short_description = 'Article details'

    def display_tags(self, obj):
        """Render the issue link, category link and tag labels for the row."""
        html = []
        if obj.issue:
            html.append(
                '<a href="{u}">Issue {n}: {i} (#{o})</a>'.format(
                    u=reverse('editor:journal_issue_change', args=[obj.issue.id]),
                    n=obj.issue.number,
                    i=obj.issue.title,
                    o=obj.order_in_issue
                )
            )
        if obj.category:
            html.append(
                '<a href="{u}"><strong>{c}</strong></a>'.format(
                    u=reverse('editor:journal_category_change', args=[obj.category.id]),
                    c=obj.category
                )
            )
        for tag in obj.tags.all():
            html.append(
                '<div class="ui {c} label">{t}</div>'.format(
                    # highlight tags of the same category as the article.
                    # BUG FIX: also require obj.category - the original
                    # raised AttributeError for articles with no category.
                    c='red' if tag.category and obj.category and tag.category.pk == obj.category.pk else '',
                    t=tag.name
                )
            )
        return mark_safe('<br />'.join(html))
    display_tags.short_description = 'Issue, category, and tags'

    def show_image(self, obj):
        """Render the article image as an inline thumbnail."""
        # NOTE(review): unlike TagAdmin.show_image there is no guard for a
        # missing image - presumably Article.image is required; confirm.
        to_return = '<img src="{}" class="ui medium image" />'.format(
            obj.image.url,
        )
        return mark_safe(to_return)
    show_image.short_description = 'Image'

    def is_featured(self, obj):
        """True when the article has an attached FeaturedArticle entry."""
        return obj.featured is not None
    is_featured.short_description = 'Featured?'
    is_featured.boolean = True

    def get_actions(self, request):
        """Extend the default actions with publish, remove-tags, and one
        add-tag action per existing tag."""
        actions = super(ArticleAdmin, self).get_actions(request)
        actions['publish'] = (publish, 'publish', 'Publish')
        actions['remove_tags'] = (remove_tags, 'remove_tags', 'Remove all tags')
        # Make an action for adding each tag.
        for tag in models.Tag.objects.all():
            action = make_add_tag_action(tag)
            actions[action.__name__] = (action,
                                        action.__name__,
                                        action.short_description)
        return actions
class FeaturedArticleAdmin(CompareVersionAdmin):
    """Admin configuration for homepage featured-article entries."""
    list_display = ['article', 'order_on_homepage', 'is_thumb']
class ArticleTranslationAdmin(CompareVersionAdmin):
    """Admin configuration for article translations."""
    list_display = ['article', 'title', 'slug', 'language']
    prepopulated_fields = {'slug': ('title',)}
# Register all journal models on the restricted editor site and on the
# default Django admin site.  (The dataset junk fused onto the final
# registration line of the original is removed.)
editor_site.register(models.Issue, IssueAdmin)
editor_site.register(models.Article, ArticleAdmin)
editor_site.register(models.ArticleTranslation, ArticleTranslationAdmin)
editor_site.register(models.FeaturedArticle, FeaturedArticleAdmin)
editor_site.register(models.Author, AuthorAdmin)
editor_site.register(models.Category, CategoryAdmin)
editor_site.register(models.Tag, TagAdmin)
admin.site.register(models.Issue, IssueAdmin)
admin.site.register(models.Article, ArticleAdmin)
admin.site.register(models.ArticleTranslation, ArticleTranslationAdmin)
admin.site.register(models.FeaturedArticle, FeaturedArticleAdmin)
admin.site.register(models.Author, AuthorAdmin)
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Tag, TagAdmin)
from django.contrib import admin, messages
from django.urls import reverse
from django.utils.html import mark_safe
from reversion_compare.admin import CompareVersionAdmin
from notesfrombelow.admin import editor_site
from . import models
class TagAdmin(CompareVersionAdmin):
    """Admin for tags: name, thumbnail, slug and category columns."""
    list_display = ['name', 'show_image', 'slug', 'category']
    prepopulated_fields = {'slug': ('name',)}
    search_fields = ['name']

    def show_image(self, obj):
        # Render the tag image as an inline thumbnail, or nothing if unset.
        if not obj.image:
            return ''
        return mark_safe(
            '<img src="{}" class="ui medium image" />'.format(obj.image.url)
        )
    show_image.short_description = 'Image'
class IssueAdmin(CompareVersionAdmin):
    """Admin configuration for journal issues (version-compare enabled)."""
    list_display = ['number', 'title', 'date', 'slug']
    prepopulated_fields = {'slug': ('title',)}
    search_fields = ['title']
class CategoryAdmin(CompareVersionAdmin):
    """Admin configuration for article categories."""
    list_display = ['name', 'slug', 'tag_name', 'order_on_homepage']
    prepopulated_fields = {'slug': ('name',)}
class AuthorAdmin(CompareVersionAdmin):
    """Admin configuration for authors, ordered and searchable by name."""
    list_display = ['name', 'bio', 'slug', 'twitter']
    prepopulated_fields = {'slug': ('name',)}
    ordering = ['name']
    search_fields = ['name']
class ArticleForm(forms.ModelForm):
    """Model form for Article with compact text widgets."""
    class Meta:
        model = models.Article
        fields = '__all__'
        widgets = {
            # Single-line input for the credit, two-row box for the subtitle.
            'image_credit': forms.TextInput(),
            'subtitle': forms.Textarea({'rows': 2}),
        }
def publish(modeladmin, request, queryset):
queryset.update(published=True)
messages.info(request, "Published {} article(s)".format(
queryset.count())
)
def remove_tags(modeladmin, request, queryset):
for article in queryset:
article.tags.remove()
messages.info(request, "Removed tags from {} article(s)".format(
queryset.count())
)
def make_add_tag_action(tag):
def add_tag(modeladmin, request, queryset):
for article in queryset:
article.tags.add(tag)
messages.info(request, "Added tag '{}' to {} article(s)".format(
tag.name,
queryset.count())
)
add_tag.short_description = "Add tag '{}'".format(tag.name)
add_tag.__name__ = 'add_tag_{0}'.format(tag.pk)
return add_tag
class ArticleAdmin(CompareVersionAdmin):
list_display = ['display_title', 'date', 'show_image', 'display_tags', 'published', 'is_featured']
list_filter = ['category', 'tags', 'issue']
prepopulated_fields = {'slug': ('title',)}
change_form_template = 'admin/edit_article.html'
form = ArticleForm
list_display_links = None
search_fields = ['title']
autocomplete_fields = ['related_1', 'related_2', 'issue', 'tags', 'authors']
def display_title(self, obj):
if obj.authors.count():
authors = ', '.join(a.name for a in obj.authors.all())
else:
authors = 'anonymous'
to_return = (
'<h3 class="ui header"><a href="{edit}">{title}</a><div class="sub header">{subtitle}</div></h3><span>by {authors}</span><br><code><a href="{view}">{slug}</a></code>'.format(
edit=reverse('editor:journal_article_change', args=[obj.id]),
title=obj.title,
subtitle=obj.subtitle or '<em>No subtitle</em>',
authors=authors,
view=obj.get_absolute_url(),
slug=obj.slug,
)
)
return mark_safe(to_return)
display_title.short_description = 'Article details'
def display_tags(self, obj):
html = []
if obj.issue:
html.append(
'<a href="{u}">Issue {n}: {i} (#{o})</a>'.format(
u=reverse('editor:journal_issue_change', args=[obj.issue.id]),
n=obj.issue.number,
i=obj.issue.title,
o=obj.order_in_issue
)
)
if obj.category:
html.append(
'<a href="{u}"><strong>{c}</strong></a>'.format(
u=reverse('editor:journal_category_change', args=[obj.category.id]),
c=obj.category
)
)
for tag in obj.tags.all():
html.append(
'<div class="ui {c} label">{t}</div>'.format(
# highlight tags of the same category as the article
c='red' if tag.category and tag.category.pk == obj.category.pk else '',
t=tag.name
)
)
return mark_safe('<br />'.join(html))
display_tags.short_description = 'Issue, category, and tags'
def show_image(self, obj):
to_return = '<img src="{}" class="ui medium image" />'.format(
obj.image.url,
)
return mark_safe(to_return)
show_image.short_description = 'Image'
def is_featured(self, obj):
return obj.featured is not None
is_featured.short_description = 'Featured?'
is_featured.boolean = True
def get_actions(self, request):
actions = super(ArticleAdmin, self).get_actions(request)
# Make an action to clear all tags
actions['publish'] = (publish, 'publish', 'Publish')
actions['remove_tags'] = (remove_tags, 'remove_tags', 'Remove all tags')
# Make an action for adding each tag
for tag in models.Tag.objects.all():
action = make_add_tag_action(tag)
actions[action.__name__] = (action,
action.__name__,
action.short_description)
return actions
class FeaturedArticleAdmin(CompareVersionAdmin):
list_display = ['article', 'order_on_homepage', 'is_thumb']
class ArticleTranslationAdmin(CompareVersionAdmin):
list_display = ['article', 'title', 'slug', 'language']
prepopulated_fields = {'slug': ('title',)}
editor_site.register(models.Issue, IssueAdmin)
editor_site.register(models.Article, ArticleAdmin)
editor_site.register(models.ArticleTranslation, ArticleTranslationAdmin)
editor_site.register(models.FeaturedArticle, FeaturedArticleAdmin)
editor_site.register(models.Author, AuthorAdmin)
editor_site.register(models.Category, CategoryAdmin)
editor_site.register(models.Tag, TagAdmin)
admin.site.register(models.Issue, IssueAdmin)
admin.site.register(models.Article, ArticleAdmin)
admin.site.register(models.ArticleTranslation, ArticleTranslationAdmin)
admin.site.register(models.FeaturedArticle, FeaturedArticleAdmin)
admin.site.register(models.Author, AuthorAdmin)
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Tag, TagAdmin) | 0.477067 | 0.116613 |
from mealy.constants import ErrorAnalyzerConstants
from sklearn.metrics import accuracy_score, balanced_accuracy_score
import numpy as np
def compute_confidence_decision(primary_model_true_accuracy, primary_model_predicted_accuracy):
difference_true_pred_accuracy = np.abs(primary_model_true_accuracy - primary_model_predicted_accuracy)
decision = difference_true_pred_accuracy <= ErrorAnalyzerConstants.TREE_ACCURACY_TOLERANCE
fidelity = 1. - difference_true_pred_accuracy
# TODO Binomial test
return fidelity, decision
def compute_accuracy_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def compute_primary_model_accuracy(y):
n_test_samples = y.shape[0]
return float(np.count_nonzero(y == ErrorAnalyzerConstants.CORRECT_PREDICTION)) / n_test_samples
def compute_fidelity_score(y_true, y_pred):
difference_true_pred_accuracy = np.abs(compute_primary_model_accuracy(y_true) -
compute_primary_model_accuracy(y_pred))
fidelity = 1. - difference_true_pred_accuracy
return fidelity
def fidelity_balanced_accuracy_score(y_true, y_pred):
return compute_fidelity_score(y_true, y_pred) + balanced_accuracy_score(y_true, y_pred)
def error_decision_tree_report(y_true, y_pred, output_format='str'):
"""Return a report showing the main Error Decision Tree metrics.
Args:
y_true (numpy.ndarray): Ground truth values of wrong/correct predictions of the error tree primary model.
Expected values in [ErrorAnalyzerConstants.WRONG_PREDICTION, ErrorAnalyzerConstants.CORRECT_PREDICTION].
y_pred (numpy.ndarray): Estimated targets as returned by the error tree. Expected values in
[ErrorAnalyzerConstants.WRONG_PREDICTION, ErrorAnalyzerConstants.CORRECT_PREDICTION].
output_format (string): Return format used for the report. Valid values are 'dict' or 'str'.
Return:
dict or str: dictionary or string report storing different metrics regarding the Error Decision Tree.
"""
tree_accuracy_score = compute_accuracy_score(y_true, y_pred)
tree_balanced_accuracy = balanced_accuracy_score(y_true, y_pred)
primary_model_predicted_accuracy = compute_primary_model_accuracy(y_pred)
primary_model_true_accuracy = compute_primary_model_accuracy(y_true)
fidelity, confidence_decision = compute_confidence_decision(primary_model_true_accuracy,
primary_model_predicted_accuracy)
if output_format == 'dict':
report_dict = dict()
report_dict[ErrorAnalyzerConstants.TREE_ACCURACY] = tree_accuracy_score
report_dict[ErrorAnalyzerConstants.TREE_BALANCED_ACCURACY] = tree_balanced_accuracy
report_dict[ErrorAnalyzerConstants.TREE_FIDELITY] = fidelity
report_dict[ErrorAnalyzerConstants.PRIMARY_MODEL_TRUE_ACCURACY] = primary_model_true_accuracy
report_dict[ErrorAnalyzerConstants.PRIMARY_MODEL_PREDICTED_ACCURACY] = primary_model_predicted_accuracy
report_dict[ErrorAnalyzerConstants.CONFIDENCE_DECISION] = confidence_decision
return report_dict
if output_format == 'str':
report = 'The Error Decision Tree was trained with accuracy %.2f%% and balanced accuracy %.2f%%.' % (tree_accuracy_score * 100, tree_balanced_accuracy * 100)
report += '\n'
report += 'The Decision Tree estimated the primary model''s accuracy to %.2f%%.' % \
(primary_model_predicted_accuracy * 100)
report += '\n'
report += 'The true accuracy of the primary model is %.2f.%%' % (primary_model_true_accuracy * 100)
report += '\n'
report += 'The Fidelity of the error tree is %.2f%%.' % \
(fidelity * 100)
report += '\n'
if not confidence_decision:
report += 'Warning: the built tree might not be representative of the primary model performances.'
report += '\n'
report += 'The error tree predicted model accuracy is considered too different from the true model accuracy.'
report += '\n'
else:
report += 'The error tree is considered representative of the primary model performances.'
report += '\n'
return report
else:
raise ValueError("Output format should either be 'dict' or 'str'") | mealy/metrics.py | from mealy.constants import ErrorAnalyzerConstants
from sklearn.metrics import accuracy_score, balanced_accuracy_score
import numpy as np
def compute_confidence_decision(primary_model_true_accuracy, primary_model_predicted_accuracy):
difference_true_pred_accuracy = np.abs(primary_model_true_accuracy - primary_model_predicted_accuracy)
decision = difference_true_pred_accuracy <= ErrorAnalyzerConstants.TREE_ACCURACY_TOLERANCE
fidelity = 1. - difference_true_pred_accuracy
# TODO Binomial test
return fidelity, decision
def compute_accuracy_score(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def compute_primary_model_accuracy(y):
n_test_samples = y.shape[0]
return float(np.count_nonzero(y == ErrorAnalyzerConstants.CORRECT_PREDICTION)) / n_test_samples
def compute_fidelity_score(y_true, y_pred):
difference_true_pred_accuracy = np.abs(compute_primary_model_accuracy(y_true) -
compute_primary_model_accuracy(y_pred))
fidelity = 1. - difference_true_pred_accuracy
return fidelity
def fidelity_balanced_accuracy_score(y_true, y_pred):
return compute_fidelity_score(y_true, y_pred) + balanced_accuracy_score(y_true, y_pred)
def error_decision_tree_report(y_true, y_pred, output_format='str'):
"""Return a report showing the main Error Decision Tree metrics.
Args:
y_true (numpy.ndarray): Ground truth values of wrong/correct predictions of the error tree primary model.
Expected values in [ErrorAnalyzerConstants.WRONG_PREDICTION, ErrorAnalyzerConstants.CORRECT_PREDICTION].
y_pred (numpy.ndarray): Estimated targets as returned by the error tree. Expected values in
[ErrorAnalyzerConstants.WRONG_PREDICTION, ErrorAnalyzerConstants.CORRECT_PREDICTION].
output_format (string): Return format used for the report. Valid values are 'dict' or 'str'.
Return:
dict or str: dictionary or string report storing different metrics regarding the Error Decision Tree.
"""
tree_accuracy_score = compute_accuracy_score(y_true, y_pred)
tree_balanced_accuracy = balanced_accuracy_score(y_true, y_pred)
primary_model_predicted_accuracy = compute_primary_model_accuracy(y_pred)
primary_model_true_accuracy = compute_primary_model_accuracy(y_true)
fidelity, confidence_decision = compute_confidence_decision(primary_model_true_accuracy,
primary_model_predicted_accuracy)
if output_format == 'dict':
report_dict = dict()
report_dict[ErrorAnalyzerConstants.TREE_ACCURACY] = tree_accuracy_score
report_dict[ErrorAnalyzerConstants.TREE_BALANCED_ACCURACY] = tree_balanced_accuracy
report_dict[ErrorAnalyzerConstants.TREE_FIDELITY] = fidelity
report_dict[ErrorAnalyzerConstants.PRIMARY_MODEL_TRUE_ACCURACY] = primary_model_true_accuracy
report_dict[ErrorAnalyzerConstants.PRIMARY_MODEL_PREDICTED_ACCURACY] = primary_model_predicted_accuracy
report_dict[ErrorAnalyzerConstants.CONFIDENCE_DECISION] = confidence_decision
return report_dict
if output_format == 'str':
report = 'The Error Decision Tree was trained with accuracy %.2f%% and balanced accuracy %.2f%%.' % (tree_accuracy_score * 100, tree_balanced_accuracy * 100)
report += '\n'
report += 'The Decision Tree estimated the primary model''s accuracy to %.2f%%.' % \
(primary_model_predicted_accuracy * 100)
report += '\n'
report += 'The true accuracy of the primary model is %.2f.%%' % (primary_model_true_accuracy * 100)
report += '\n'
report += 'The Fidelity of the error tree is %.2f%%.' % \
(fidelity * 100)
report += '\n'
if not confidence_decision:
report += 'Warning: the built tree might not be representative of the primary model performances.'
report += '\n'
report += 'The error tree predicted model accuracy is considered too different from the true model accuracy.'
report += '\n'
else:
report += 'The error tree is considered representative of the primary model performances.'
report += '\n'
return report
else:
raise ValueError("Output format should either be 'dict' or 'str'") | 0.700383 | 0.421195 |
import copy
import re
import urlparse
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.shortcuts import get_current_site
from django.utils.translation import get_language
from .base import Menu, DEFAULT, ONCE, PER_REQUEST, POST_SELECT
from .utils import import_path, tgenerator
from . import settings as msettings
class Processor(object):
# methods that are expected to be extended if required:
# router - menuconf router
# cache_key - cache name generator for confname
# post_build_data_handler - update data after ONCE
# check_node_url_with_domain - check urls with specified domain
# compare_paths - compare two paths by length, get/hash existance, ect
registry = None
def __init__(self, registry):
self.registry = registry
self._modifiers = {}
def router(self, request):
"""
Simple router implementaion, based on url regexps.
If you need more complex validation, please update this method.
"""
if not hasattr(self, '_ROUTES'):
self._ROUTES = [(name, conf['ROUTE'])
for name, conf in msettings.MENUS.items()
if conf.get('ROUTE', None)] or None
if self._ROUTES:
for name, route in self._ROUTES:
if re.match(route, request.path):
return name
return 'default'
def cache_key(self, request=None, menuconf=None,
lang=None, site_id=None, extra=None, **kwargs):
"""
Generate cache_key by request and menuconf data, by lang and site
note: extra should be list or tuple, any item should be ascii string
"""
lang = lang or get_language()
site_id = site_id or get_current_site(None).pk
extra = '_'.join(map(str, extra)) if extra else ''
return 'nodes_%s_%s_%s%s_cache' % (menuconf['NAME'],
lang, site_id, extra)
def menuconf(self, request, name=None):
"""Get menuconf value, call router if required (once)"""
# get menu configuration data (dict value expected as valid)
# also if menuconf is dict -> request is already meta-processed
if isinstance(name, dict):
return name
# update request with meta
self.add_nodes_to_request(request)
# call router once per request
if not hasattr(request.nodes, '_menuconf'):
request.nodes._menuconf_selected = self.router(request)
request.nodes._menuconf = {}
# get menuconf and cache it
name = name or request.nodes._menuconf_selected
conf = request.nodes._menuconf.get(name, None)
if not conf:
conf = msettings.MENUS.get(name, None)
if conf is None:
raise ValueError('Menus menuconf invalid name (%s).' % name)
conf['SELECTED'] = name == request.nodes._menuconf_selected
request.nodes._menuconf[name] = conf
return conf
# Nodes processing methods
# ------------------------
def get_nodes(self, menuconf, request,
modifiers=None, init_only=False, **kwargs):
"""Generate nodes by menu confname."""
menuconf = self.menuconf(request, name=menuconf)
# cache requested menuconf nodes in request object
nodes = getattr(request.nodes, 'menus', {}).get(menuconf['NAME'], None)
if nodes is None:
request.nodes.menus = getattr(request.nodes, 'menus', {})
cache_key = self.cache_key(request=request, menuconf=menuconf)
cache_required = False
rebuild_mode = False
rebuild_countdown = 10
while rebuild_countdown:
rebuild_countdown -= 1
meta = {'rebuild_mode': rebuild_mode,}
nodes = cache.get(cache_key, None) if nodes is None else nodes
if nodes is None:
nodes = self.build_nodes(request, menuconf['MENUS'])
nodes = {'nodes': nodes, 'selected': None, 'chain': None,}
cache_required = True
# running once cached code (ONCE)
self.apply_modifiers(menuconf, nodes, request,
modify_event=ONCE, meta=meta)
self.post_build_data_handler(menuconf, nodes, request, meta)
if cache_required and not rebuild_mode:
cache.set(cache_key, nodes, menuconf['CACHE_TIMEOUT'])
# per-request cached code (PER_REQUEST)
self.apply_modifiers(menuconf, nodes, request,
modify_event=PER_REQUEST, meta=meta)
# selected node related code
# check - does menu routed (SELECTED) or requested directly
# only SELECTED menuconf mark as selected
# todo: may be add CHECK_SELECTION param to conf?
if menuconf['SELECTED']:
selected, chain = self.search_selected(request, nodes)
rebuild_mode = (
selected and not getattr(selected, 'rebuilt', None) and
selected.on_selected(menuconf, nodes, request))
if rebuild_mode:
selected.selected, selected.rebuilt = False, True
continue
nodes.update(selected=selected, chain=chain)
break
if not rebuild_countdown:
raise Exception('Nodes: too deep rebuild cycle.')
# per-request cached code (POST_SELECT)
self.apply_modifiers(menuconf, nodes, request,
modify_event=POST_SELECT)
request.nodes.menus[menuconf['NAME']] = nodes
if init_only:
return
# clone nodes and run apply_modifiers with DEFAULT modify_event
nodes = copy.deepcopy(nodes)
self.apply_modifiers(menuconf, nodes, request, modify_event=DEFAULT,
modifiers=modifiers, kwargs=kwargs)
return nodes
def apply_modifiers(self, menuconf, nodes, request, modify_event=DEFAULT,
modifiers=None, meta=None, kwargs=None):
"""
Modify nodes by modifiers, related to menu confname.modifiers.
Params:
nodes - dict with nodes and selected node value, also can
contain any other user information (by default it contains
paths for indexed search of selected node). Nodes structure
see in get_nodes method.
modify_event - event, after which modifiers called. Builtin values:
ONCE - run once, before caching nodes data between requests,
PER_REQUEST - run every request once, before any other no-ONCE,
POST_SELECT - run every request after selected node is marked,
DEFAULT - run every time get_nodes called with different args.
meta - additional dict with some runtime tips, which helps next
modifiers speed-up their processing. Builtin keys:
modify_event - event value apply_modifiers called with,
rebuild_mode - in ONCE and PER_REQUEST events means that
apply_modifiers executed second or more time,
modified_ancestors - should be set to True by modifier,
if any parent value modified
modified_descendants - should be set to True by modifier,
if any children value modified
User can provide any other keys to your own modifiers.
"""
# process arguments
menuconf, kwargs = self.menuconf(request, name=menuconf), kwargs or {}
meta = dict({
'modify_event': None, 'rebuild_mode': False,
'modified_ancestors': False, 'modified_descendants': False,
}, **dict(meta or {}, modify_event=modify_event))
# get (cached) value of modifiers by menuconf name and modifiers group
modifconf = modifiers or 'default'
modifname = '%s.%s' % (menuconf['NAME'], modifconf,)
modifiers = self._modifiers.get(modifname, None)
if not modifiers:
modifiers = [self.registry.modifiers[mod]
for mod in menuconf['MODIFIERS'][modifconf]]
self._modifiers[modifname] = modifiers
# process
for modifier in modifiers:
if modify_event & modifier.modify_event:
modifier.modify(request, nodes, meta, **kwargs)
# raw menus nodes list generator
def build_nodes(self, request, menus):
"""Build raw nodes tree"""
final, ids, ignored = [], {}, {}
# get menus from registry and sort by weight attr asc
menus = [m if isinstance(m, Menu) else self.registry.menus[m]
for m in menus]
menus = sorted(menus, key=lambda x: x.weight)
# fetch all nodes from all menus
for menu in menus:
nodes = menu.get_nodes(request)
for node in nodes:
# set namespace attr, default: menu class name
node.namespace = node.namespace or menu.namespace
ids[node.namespace] = ids.get(node.namespace, [])
ignored[node.namespace] = ignored.get(node.namespace, [])
# ignore nodes with duplicated ids
if node.id in ids[node.namespace]:
continue
# process all childs
if node.parent:
found = False
# ignore node if parent also ignored
if node.parent in ignored[node.namespace]:
ignored[node.namespace].append(node.id)
continue
# search parent
for n in nodes:
if n.namespace == node.namespace and n.id == node.parent:
node.parent, found = n, True
break
# append found node to its "brothers" or ignore
if found:
node.parent.children.append(node)
else:
ignored[node.namespace].append(node.id)
continue
# append node and it id to main list
final.append(node)
ids[node.namespace].append(node.id)
return [i for i in final if not i.parent]
def post_build_data_handler(self, menuconf, nodes, request, meta):
"""
By default updates nodes with {"paths": paths,}.
Paths using for indexed search of selected node. If you will find
faster method, you can override all behaviour, including selected node
detection.
All result data must be serializable.
"""
if not meta['rebuild_mode']:
nodes.update({'paths': self.build_paths(nodes['nodes']),})
# Selection speedup by indexed search (with paths dict)
# -----------------------------------------------------
def check_node_url_with_domain(self, domain, node):
return False
def compare_paths(self, node, prevnode):
"""
Return True, if we should replace old item by new one.
Greater weight better.
"""
return node.data.get('weight', 500) >= prevnode.data.get('weight', 500)
def get_path(self, node):
p = urlparse.urlparse(node.url_original)
if p.netloc and not self.check_node_url_with_domain(p.netloc, node):
return None
return p.path.strip('/')
def build_paths(self, nodes):
data = {}
for node in tgenerator(nodes):
path = self.get_path(node)
# ignore nodes with denied domain name and/or empty path
if not path:
continue
# check node is new or it is better match than previous
if not path in data or self.compare_paths(node, data[path]):
data[path] = node
return data
def merge_paths(self, paths, newpaths):
for path, node in newpaths.items():
# check node is new or it is better match than previous
if not path in paths or self.compare_paths(node, paths[path]):
paths[path] = node
def search_selected(self, request, data):
"""Search selected node (indexed search in paths)."""
nodes, paths, path = (data['nodes'], data['paths'],
request.path.strip('/').split('/'),)
# check existance of path starting from current path down to its first
# ancestor: on "/a/b/c/" page look for "a/b/c" or "a/b" or "a" in paths
for pkey in ('/'.join(path[:-i or None]) for i in range(0, len(path))):
selected = paths.get(pkey, None)
if selected:
# save unmodified chain up to root
chain, item = [selected], selected
while item.parent:
item = item.parent
chain.insert(0, item)
# check selected for existance in morphed by
# per_request modifiers nodes list (auth visibility, ect.)
if not chain[0] in nodes:
continue
# mark node as selected and return
selected.selected = True
return selected, chain
return None, None
# Common methods
# --------------
def add_nodes_to_request(self, request):
"""prepare request for menus processing"""
if not hasattr(request, 'nodes'):
metadata = import_path(msettings.META_DATA)
request.nodes = metadata()
def prepare_menus_settings(self):
"""Prepare menus settings and check validity"""
# get menu settings for check
MENUS = msettings.MENUS
DEFAULT_SCHEME = msettings.DEFAULT_SCHEME
# check MENUS
# todo: may be someway disable menus if improperly configured
if not isinstance(MENUS, dict) or not MENUS.has_key('default'):
raise ImproperlyConfigured('Menus "MENUS" setting value'
' is empty/incorrect or not contains'
' "default" key.')
validvalue = lambda val, chk: (set(val).__len__() == val.__len__()
and all([v in chk for v in val]))
errors = {}
for name, value in MENUS.items():
# check menus value
menus = value.get('MENUS', None)
if not menus or not validvalue(menus, self.registry.menus.keys()):
errors[name] = ('Menus "%s" MENUS value (%s)'
' is invalid.' % (name, menus))
continue
# check modifiers value
modifiers = value.get('MODIFIERS', None)
modkeys, invalid = self.registry.modifiers.keys(), False
# prepare modifiers value:
# convert list/tuple to dict with "default" key
# convert any other type to default value
# add default key, if it does not exists in dict value
if isinstance(modifiers, (list, tuple,)):
modifiers = {'default': modifiers,}
if not isinstance(modifiers, dict):
modifiers = {'default': [m for m in DEFAULT_SCHEME['MODIFIERS']
if m in modkeys],}
if 'default' not in modifiers:
modifiers['default'] = [m for m in DEFAULT_SCHEME['MODIFIERS']
if m in modkeys]
for mname, mvalue in modifiers.items():
if mvalue and not validvalue(mvalue, modkeys):
errors[name] = ('Menus "%s" MODIFIERS "%s" value (%s)'
' is invalid.' % (name, mname, mvalue,))
invalid = True
if invalid:
continue
# update conf value (also with defaults)
value.update({
'MODIFIERS': modifiers,
'NAME': name,
'CACHE_TIMEOUT': value.get('CACHE_TIMEOUT',
DEFAULT_SCHEME['CACHE_TIMEOUT']),
'SELECTED': False,
})
if errors:
raise ImproperlyConfigured('\n'.join(errors.values())) | nodes/processor.py | import copy
import re
import urlparse
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.shortcuts import get_current_site
from django.utils.translation import get_language
from .base import Menu, DEFAULT, ONCE, PER_REQUEST, POST_SELECT
from .utils import import_path, tgenerator
from . import settings as msettings
class Processor(object):
# methods that are expected to be extended if required:
# router - menuconf router
# cache_key - cache name generator for confname
# post_build_data_handler - update data after ONCE
# check_node_url_with_domain - check urls with specified domain
# compare_paths - compare two paths by length, get/hash existance, ect
registry = None
def __init__(self, registry):
self.registry = registry
self._modifiers = {}
def router(self, request):
"""
Simple router implementaion, based on url regexps.
If you need more complex validation, please update this method.
"""
if not hasattr(self, '_ROUTES'):
self._ROUTES = [(name, conf['ROUTE'])
for name, conf in msettings.MENUS.items()
if conf.get('ROUTE', None)] or None
if self._ROUTES:
for name, route in self._ROUTES:
if re.match(route, request.path):
return name
return 'default'
def cache_key(self, request=None, menuconf=None,
lang=None, site_id=None, extra=None, **kwargs):
"""
Generate cache_key by request and menuconf data, by lang and site
note: extra should be list or tuple, any item should be ascii string
"""
lang = lang or get_language()
site_id = site_id or get_current_site(None).pk
extra = '_'.join(map(str, extra)) if extra else ''
return 'nodes_%s_%s_%s%s_cache' % (menuconf['NAME'],
lang, site_id, extra)
def menuconf(self, request, name=None):
"""Get menuconf value, call router if required (once)"""
# get menu configuration data (dict value expected as valid)
# also if menuconf is dict -> request is already meta-processed
if isinstance(name, dict):
return name
# update request with meta
self.add_nodes_to_request(request)
# call router once per request
if not hasattr(request.nodes, '_menuconf'):
request.nodes._menuconf_selected = self.router(request)
request.nodes._menuconf = {}
# get menuconf and cache it
name = name or request.nodes._menuconf_selected
conf = request.nodes._menuconf.get(name, None)
if not conf:
conf = msettings.MENUS.get(name, None)
if conf is None:
raise ValueError('Menus menuconf invalid name (%s).' % name)
conf['SELECTED'] = name == request.nodes._menuconf_selected
request.nodes._menuconf[name] = conf
return conf
# Nodes processing methods
# ------------------------
def get_nodes(self, menuconf, request,
modifiers=None, init_only=False, **kwargs):
"""Generate nodes by menu confname."""
menuconf = self.menuconf(request, name=menuconf)
# cache requested menuconf nodes in request object
nodes = getattr(request.nodes, 'menus', {}).get(menuconf['NAME'], None)
if nodes is None:
request.nodes.menus = getattr(request.nodes, 'menus', {})
cache_key = self.cache_key(request=request, menuconf=menuconf)
cache_required = False
rebuild_mode = False
rebuild_countdown = 10
while rebuild_countdown:
rebuild_countdown -= 1
meta = {'rebuild_mode': rebuild_mode,}
nodes = cache.get(cache_key, None) if nodes is None else nodes
if nodes is None:
nodes = self.build_nodes(request, menuconf['MENUS'])
nodes = {'nodes': nodes, 'selected': None, 'chain': None,}
cache_required = True
# running once cached code (ONCE)
self.apply_modifiers(menuconf, nodes, request,
modify_event=ONCE, meta=meta)
self.post_build_data_handler(menuconf, nodes, request, meta)
if cache_required and not rebuild_mode:
cache.set(cache_key, nodes, menuconf['CACHE_TIMEOUT'])
# per-request cached code (PER_REQUEST)
self.apply_modifiers(menuconf, nodes, request,
modify_event=PER_REQUEST, meta=meta)
# selected node related code
# check - does menu routed (SELECTED) or requested directly
# only SELECTED menuconf mark as selected
# todo: may be add CHECK_SELECTION param to conf?
if menuconf['SELECTED']:
selected, chain = self.search_selected(request, nodes)
rebuild_mode = (
selected and not getattr(selected, 'rebuilt', None) and
selected.on_selected(menuconf, nodes, request))
if rebuild_mode:
selected.selected, selected.rebuilt = False, True
continue
nodes.update(selected=selected, chain=chain)
break
if not rebuild_countdown:
raise Exception('Nodes: too deep rebuild cycle.')
# per-request cached code (POST_SELECT)
self.apply_modifiers(menuconf, nodes, request,
modify_event=POST_SELECT)
request.nodes.menus[menuconf['NAME']] = nodes
if init_only:
return
# clone nodes and run apply_modifiers with DEFAULT modify_event
nodes = copy.deepcopy(nodes)
self.apply_modifiers(menuconf, nodes, request, modify_event=DEFAULT,
modifiers=modifiers, kwargs=kwargs)
return nodes
def apply_modifiers(self, menuconf, nodes, request, modify_event=DEFAULT,
modifiers=None, meta=None, kwargs=None):
"""
Modify nodes by modifiers, related to menu confname.modifiers.
Params:
nodes - dict with nodes and selected node value, also can
contain any other user information (by default it contains
paths for indexed search of selected node). Nodes structure
see in get_nodes method.
modify_event - event, after which modifiers called. Builtin values:
ONCE - run once, before caching nodes data between requests,
PER_REQUEST - run every request once, before any other no-ONCE,
POST_SELECT - run every request after selected node is marked,
DEFAULT - run every time get_nodes called with different args.
meta - additional dict with some runtime tips, which helps next
modifiers speed-up their processing. Builtin keys:
modify_event - event value apply_modifiers called with,
rebuild_mode - in ONCE and PER_REQUEST events means that
apply_modifiers executed second or more time,
modified_ancestors - should be set to True by modifier,
if any parent value modified
modified_descendants - should be set to True by modifier,
if any children value modified
User can provide any other keys to your own modifiers.
"""
# process arguments
menuconf, kwargs = self.menuconf(request, name=menuconf), kwargs or {}
meta = dict({
'modify_event': None, 'rebuild_mode': False,
'modified_ancestors': False, 'modified_descendants': False,
}, **dict(meta or {}, modify_event=modify_event))
# get (cached) value of modifiers by menuconf name and modifiers group
modifconf = modifiers or 'default'
modifname = '%s.%s' % (menuconf['NAME'], modifconf,)
modifiers = self._modifiers.get(modifname, None)
if not modifiers:
modifiers = [self.registry.modifiers[mod]
for mod in menuconf['MODIFIERS'][modifconf]]
self._modifiers[modifname] = modifiers
# process
for modifier in modifiers:
if modify_event & modifier.modify_event:
modifier.modify(request, nodes, meta, **kwargs)
# raw menus nodes list generator
def build_nodes(self, request, menus):
"""Build raw nodes tree"""
final, ids, ignored = [], {}, {}
# get menus from registry and sort by weight attr asc
menus = [m if isinstance(m, Menu) else self.registry.menus[m]
for m in menus]
menus = sorted(menus, key=lambda x: x.weight)
# fetch all nodes from all menus
for menu in menus:
nodes = menu.get_nodes(request)
for node in nodes:
# set namespace attr, default: menu class name
node.namespace = node.namespace or menu.namespace
ids[node.namespace] = ids.get(node.namespace, [])
ignored[node.namespace] = ignored.get(node.namespace, [])
# ignore nodes with duplicated ids
if node.id in ids[node.namespace]:
continue
# process all childs
if node.parent:
found = False
# ignore node if parent also ignored
if node.parent in ignored[node.namespace]:
ignored[node.namespace].append(node.id)
continue
# search parent
for n in nodes:
if n.namespace == node.namespace and n.id == node.parent:
node.parent, found = n, True
break
# append found node to its "brothers" or ignore
if found:
node.parent.children.append(node)
else:
ignored[node.namespace].append(node.id)
continue
# append node and it id to main list
final.append(node)
ids[node.namespace].append(node.id)
return [i for i in final if not i.parent]
def post_build_data_handler(self, menuconf, nodes, request, meta):
"""
By default updates nodes with {"paths": paths,}.
Paths using for indexed search of selected node. If you will find
faster method, you can override all behaviour, including selected node
detection.
All result data must be serializable.
"""
if not meta['rebuild_mode']:
nodes.update({'paths': self.build_paths(nodes['nodes']),})
# Selection speedup by indexed search (with paths dict)
# -----------------------------------------------------
def check_node_url_with_domain(self, domain, node):
return False
def compare_paths(self, node, prevnode):
"""
Return True, if we should replace old item by new one.
Greater weight better.
"""
return node.data.get('weight', 500) >= prevnode.data.get('weight', 500)
def get_path(self, node):
p = urlparse.urlparse(node.url_original)
if p.netloc and not self.check_node_url_with_domain(p.netloc, node):
return None
return p.path.strip('/')
def build_paths(self, nodes):
data = {}
for node in tgenerator(nodes):
path = self.get_path(node)
# ignore nodes with denied domain name and/or empty path
if not path:
continue
# check node is new or it is better match than previous
if not path in data or self.compare_paths(node, data[path]):
data[path] = node
return data
def merge_paths(self, paths, newpaths):
for path, node in newpaths.items():
# check node is new or it is better match than previous
if not path in paths or self.compare_paths(node, paths[path]):
paths[path] = node
def search_selected(self, request, data):
"""Search selected node (indexed search in paths)."""
nodes, paths, path = (data['nodes'], data['paths'],
request.path.strip('/').split('/'),)
# check existance of path starting from current path down to its first
# ancestor: on "/a/b/c/" page look for "a/b/c" or "a/b" or "a" in paths
for pkey in ('/'.join(path[:-i or None]) for i in range(0, len(path))):
selected = paths.get(pkey, None)
if selected:
# save unmodified chain up to root
chain, item = [selected], selected
while item.parent:
item = item.parent
chain.insert(0, item)
# check selected for existance in morphed by
# per_request modifiers nodes list (auth visibility, ect.)
if not chain[0] in nodes:
continue
# mark node as selected and return
selected.selected = True
return selected, chain
return None, None
# Common methods
# --------------
def add_nodes_to_request(self, request):
"""prepare request for menus processing"""
if not hasattr(request, 'nodes'):
metadata = import_path(msettings.META_DATA)
request.nodes = metadata()
def prepare_menus_settings(self):
"""Prepare menus settings and check validity"""
# get menu settings for check
MENUS = msettings.MENUS
DEFAULT_SCHEME = msettings.DEFAULT_SCHEME
# check MENUS
# todo: may be someway disable menus if improperly configured
if not isinstance(MENUS, dict) or not MENUS.has_key('default'):
raise ImproperlyConfigured('Menus "MENUS" setting value'
' is empty/incorrect or not contains'
' "default" key.')
validvalue = lambda val, chk: (set(val).__len__() == val.__len__()
and all([v in chk for v in val]))
errors = {}
for name, value in MENUS.items():
# check menus value
menus = value.get('MENUS', None)
if not menus or not validvalue(menus, self.registry.menus.keys()):
errors[name] = ('Menus "%s" MENUS value (%s)'
' is invalid.' % (name, menus))
continue
# check modifiers value
modifiers = value.get('MODIFIERS', None)
modkeys, invalid = self.registry.modifiers.keys(), False
# prepare modifiers value:
# convert list/tuple to dict with "default" key
# convert any other type to default value
# add default key, if it does not exists in dict value
if isinstance(modifiers, (list, tuple,)):
modifiers = {'default': modifiers,}
if not isinstance(modifiers, dict):
modifiers = {'default': [m for m in DEFAULT_SCHEME['MODIFIERS']
if m in modkeys],}
if 'default' not in modifiers:
modifiers['default'] = [m for m in DEFAULT_SCHEME['MODIFIERS']
if m in modkeys]
for mname, mvalue in modifiers.items():
if mvalue and not validvalue(mvalue, modkeys):
errors[name] = ('Menus "%s" MODIFIERS "%s" value (%s)'
' is invalid.' % (name, mname, mvalue,))
invalid = True
if invalid:
continue
# update conf value (also with defaults)
value.update({
'MODIFIERS': modifiers,
'NAME': name,
'CACHE_TIMEOUT': value.get('CACHE_TIMEOUT',
DEFAULT_SCHEME['CACHE_TIMEOUT']),
'SELECTED': False,
})
if errors:
raise ImproperlyConfigured('\n'.join(errors.values())) | 0.389547 | 0.081374 |
from email.utils import parseaddr
from pony.orm import *
import bcrypt
from . import custom_exceptions as PyUserExceptions
from .auth_type_enum import AUTH_TYPE
class user:
"""
A Class to manage Users in the Database
"""
def __str__(self):
if len(self.__dict__) > 0:
return str(self.__dict__)
return None
def __init__(self, config, username=None, auth_type=AUTH_TYPE.LOCAL):
"""Function to init a User Object
Parameters:
cfg (General_Config): General Config Object used for stuff like simple Parameter Verification
username (str): Username for the specified User
auth_type (AUTH_TYPE enum): Specifies the User Type specified in the AUTH_TYPE enum
"""
self.cfg = config
if username is not None:
self.verify_inputs(username=username)
self.username = str(username)
self.auth_type = auth_type
def get_users(self):
"""
Gets all users including avatars as an array filled with dictionarys
Returns:
List filled with dicts
example:
[{"username": "admin","avatar":"admin.png"},{"username": "testuser","avatar":"default.png"}]
"""
userlist = []
with db_session:
users = self.cfg.db.User.select()
for user in users:
user_dict = {
"username": user.username,
"avatar": user.avatar,
}
userlist.append(user_dict)
return userlist
@staticmethod
def hash_pw(password=None):
"""A Function to hash specified Password (or any other string)
Parameters:
password (str): a string which will get hashed
Returns:
byte: pw_salt (salt used to hash input)
byte: pw_hash (hash of input)
"""
if password is None:
return None, None
else:
pw_salt = bcrypt.gensalt()
pw_hash = bcrypt.hashpw(password.encode("utf-8"), pw_salt)
return pw_salt, pw_hash
def verify_inputs(self, **kwargs):
"""A Function to check some qualitys of parameters
Exceptions:
ValueError -> if any parameter does not match requirements written down in the passed general config (self.cfg)
"""
found_email = False
if (
"email" in kwargs
and kwargs.get("email") == parseaddr(kwargs.get("email"))[1]
):
found_email = True
# verify activated if given
if "activated" in kwargs and not isinstance(kwargs.get("activated"), bool):
raise ValueError("Activated is not bool")
# verify password if gien
if (
"password" in kwargs
and kwargs.get("password",None) is not None
and len(kwargs.get("password")) < self.cfg.password_min_len
):
raise ValueError("password to short")
# verify username if gien
if "username" in kwargs and (
kwargs.get("username") == None
or len(kwargs.get("username")) < self.cfg.username_min_len
):
raise ValueError("username to short")
if self.cfg.email_required and not found_email:
raise ValueError("Email required but no valid provided!")
def create(self, password=<PASSWORD>, **kwargs):
"""A Function to create a User in the Database
Parameters:
password (str) mandatory
self.auth_type (AUTH_TYPE) <- provided by object!
email (str) optional
avatar (str) optional (is a path to the avatar)
activated (bool) if user is already activated
Returns:
success (bool) -> Usualy true since everythign else would raise an Exception
Exceptions:
PyUserExceptions.AlreadyExistsException -> if the user already exists
ValueError -> if parameters do not pass according to verify_inputs
"""
if self.auth_type != AUTH_TYPE.AD and "@" in str(self.username):
raise ValueError("@ in username is reserved for ad Users!")
with db_session:
try:
self.cfg.db.User[self.username]
raise PyUserExceptions.AlreadyExistsException
except ObjectNotFound as err:
self.verify_inputs(**kwargs, password=password)
pw_salt, pw_hash = self.hash_pw(password)
self.cfg.db.User(
username=self.username,
password_hash=pw_hash,
auth_type=self.auth_type,
**kwargs,
)
return True
def delete(self):
"""A Function to delete a User in the Database
Returns:
success (bool) -> Usualy true since everythign else would raise an Exception
Exceptions:
PyUserExceptions.MissingUserException -> if user to delete does not exist!
"""
with db_session:
# check if user exists
requested_user = self.cfg.db.User.get(username=self.username)
if requested_user is None:
raise PyUserExceptions.MissingUserException(
"user to delete does not exist!"
)
else:
requested_user.delete()
return True
def check(self):
"""A Function to check if a user exists
Returns:
success (bool) -> true = user exists, false = user does not exist
"""
with db_session:
# check if user exists
requested_user = self.cfg.db.User.get(username=self.username)
if requested_user is None:
return False
else:
return True
def change(self, **kwargs):
"""A Function to change multiple user Attributes
Parameters: (keyword params only!)
password (str)
email (str)
avatar (str)
Exceptions
see changepw(), changeemail(), changeavatar()
"""
if "email" in kwargs:
self.changeemail(kwargs["email"])
if "password" in kwargs:
self.changepw(kwargs["password"])
if "avatar" in kwargs:
self.changeavatar(kwargs["avatar"])
def changepw(self, password):
"""A Function to change the users password
Parameters:
password (str)
Exceptions
ValueError -> if password is to short or None
"""
if password is None:
raise ValueError("password empty!")
self.verify_inputs(password=password)
with db_session:
try:
user = self.cfg.db.User[self.username]
pw_salt, pw_hash = self.hash_pw(password)
user.password_hash = pw_hash
return True
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def changeemail(self, email):
"""A Function to change the users email
Parameters:
email (str)
Exceptions
ValueError -> if email is not "valid"
"""
if email is None:
raise ValueError("email is empty!")
self.verify_inputs(email=email)
with db_session:
try:
user = self.cfg.db.User[self.username]
user.email = email
return True
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def changeavatar(self, avatar):
"""A Function to change the users avatar
Parameters:
avatar (str)
Exceptions
ValueError -> if avatar is None
"""
if avatar is None:
raise ValueError("avatar name is invalid!")
with db_session:
try:
user = self.cfg.db.User[self.username]
user.avatar = avatar
return True
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def info(self, include_email=False):
"""A Function to return a users public information
Parameters:
include_email (bool) -> if set to true the returned dictionary will include the email address of the user
return:
Dictionary with user information
example:
{"username":"admin", "avatar":"default.png", "activated":True, "email":"<EMAIL>"}
Exceptions
PyUserExceptions.MissingUserException -> if requested user is not found
"""
with db_session:
try:
user = self.cfg.db.User[self.username]
return_dict = {
"username": user.username,
"avatar": user.avatar,
"activated": user.activated,
}
if include_email:
return_dict["email"] = user.email
return return_dict
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def info_extended(self):
"""A Function to return userinfo + auth token info + perms
return:
Dictionary with user information
example:
{"username":"admin", "avatar":"default.png", "activated":True, "email":"<EMAIL>", token:{"last_login":"01.01.2022 13:37", "valid_until":"02.01.2022 13:37"....},"perms":["admin","testgroup"]}
Exceptions
PyUserExceptions.MissingUserException -> if requested user is not found
"""
with db_session:
try:
user = self.cfg.db.User[self.username]
return_dict = self.info(include_email=True)
token_dict = {}
if user.token is not None:
token_dict["last_login"] = str(user.token.last_login)
token_dict["valid_until"] = str(user.token.valid_until)
token_dict["valid_for"] = user.token.ip
token_dict["token"] = user.token.token
# add perms to dict!
perm_array = []
for perm in user.perms:
perm_array.append(perm.perm_name)
return_dict["token"] = token_dict
return_dict["perms"] = perm_array
return return_dict
except ObjectNotFound:
raise PyUserExceptions.MissingUserException | pyusermanager/user_funcs.py | from email.utils import parseaddr
from pony.orm import *
import bcrypt
from . import custom_exceptions as PyUserExceptions
from .auth_type_enum import AUTH_TYPE
class user:
"""
A Class to manage Users in the Database
"""
def __str__(self):
if len(self.__dict__) > 0:
return str(self.__dict__)
return None
def __init__(self, config, username=None, auth_type=AUTH_TYPE.LOCAL):
"""Function to init a User Object
Parameters:
cfg (General_Config): General Config Object used for stuff like simple Parameter Verification
username (str): Username for the specified User
auth_type (AUTH_TYPE enum): Specifies the User Type specified in the AUTH_TYPE enum
"""
self.cfg = config
if username is not None:
self.verify_inputs(username=username)
self.username = str(username)
self.auth_type = auth_type
def get_users(self):
"""
Gets all users including avatars as an array filled with dictionarys
Returns:
List filled with dicts
example:
[{"username": "admin","avatar":"admin.png"},{"username": "testuser","avatar":"default.png"}]
"""
userlist = []
with db_session:
users = self.cfg.db.User.select()
for user in users:
user_dict = {
"username": user.username,
"avatar": user.avatar,
}
userlist.append(user_dict)
return userlist
@staticmethod
def hash_pw(password=None):
"""A Function to hash specified Password (or any other string)
Parameters:
password (str): a string which will get hashed
Returns:
byte: pw_salt (salt used to hash input)
byte: pw_hash (hash of input)
"""
if password is None:
return None, None
else:
pw_salt = bcrypt.gensalt()
pw_hash = bcrypt.hashpw(password.encode("utf-8"), pw_salt)
return pw_salt, pw_hash
def verify_inputs(self, **kwargs):
"""A Function to check some qualitys of parameters
Exceptions:
ValueError -> if any parameter does not match requirements written down in the passed general config (self.cfg)
"""
found_email = False
if (
"email" in kwargs
and kwargs.get("email") == parseaddr(kwargs.get("email"))[1]
):
found_email = True
# verify activated if given
if "activated" in kwargs and not isinstance(kwargs.get("activated"), bool):
raise ValueError("Activated is not bool")
# verify password if gien
if (
"password" in kwargs
and kwargs.get("password",None) is not None
and len(kwargs.get("password")) < self.cfg.password_min_len
):
raise ValueError("password to short")
# verify username if gien
if "username" in kwargs and (
kwargs.get("username") == None
or len(kwargs.get("username")) < self.cfg.username_min_len
):
raise ValueError("username to short")
if self.cfg.email_required and not found_email:
raise ValueError("Email required but no valid provided!")
def create(self, password=<PASSWORD>, **kwargs):
"""A Function to create a User in the Database
Parameters:
password (str) mandatory
self.auth_type (AUTH_TYPE) <- provided by object!
email (str) optional
avatar (str) optional (is a path to the avatar)
activated (bool) if user is already activated
Returns:
success (bool) -> Usualy true since everythign else would raise an Exception
Exceptions:
PyUserExceptions.AlreadyExistsException -> if the user already exists
ValueError -> if parameters do not pass according to verify_inputs
"""
if self.auth_type != AUTH_TYPE.AD and "@" in str(self.username):
raise ValueError("@ in username is reserved for ad Users!")
with db_session:
try:
self.cfg.db.User[self.username]
raise PyUserExceptions.AlreadyExistsException
except ObjectNotFound as err:
self.verify_inputs(**kwargs, password=password)
pw_salt, pw_hash = self.hash_pw(password)
self.cfg.db.User(
username=self.username,
password_hash=pw_hash,
auth_type=self.auth_type,
**kwargs,
)
return True
def delete(self):
"""A Function to delete a User in the Database
Returns:
success (bool) -> Usualy true since everythign else would raise an Exception
Exceptions:
PyUserExceptions.MissingUserException -> if user to delete does not exist!
"""
with db_session:
# check if user exists
requested_user = self.cfg.db.User.get(username=self.username)
if requested_user is None:
raise PyUserExceptions.MissingUserException(
"user to delete does not exist!"
)
else:
requested_user.delete()
return True
def check(self):
"""A Function to check if a user exists
Returns:
success (bool) -> true = user exists, false = user does not exist
"""
with db_session:
# check if user exists
requested_user = self.cfg.db.User.get(username=self.username)
if requested_user is None:
return False
else:
return True
def change(self, **kwargs):
"""A Function to change multiple user Attributes
Parameters: (keyword params only!)
password (str)
email (str)
avatar (str)
Exceptions
see changepw(), changeemail(), changeavatar()
"""
if "email" in kwargs:
self.changeemail(kwargs["email"])
if "password" in kwargs:
self.changepw(kwargs["password"])
if "avatar" in kwargs:
self.changeavatar(kwargs["avatar"])
def changepw(self, password):
"""A Function to change the users password
Parameters:
password (str)
Exceptions
ValueError -> if password is to short or None
"""
if password is None:
raise ValueError("password empty!")
self.verify_inputs(password=password)
with db_session:
try:
user = self.cfg.db.User[self.username]
pw_salt, pw_hash = self.hash_pw(password)
user.password_hash = pw_hash
return True
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def changeemail(self, email):
"""A Function to change the users email
Parameters:
email (str)
Exceptions
ValueError -> if email is not "valid"
"""
if email is None:
raise ValueError("email is empty!")
self.verify_inputs(email=email)
with db_session:
try:
user = self.cfg.db.User[self.username]
user.email = email
return True
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def changeavatar(self, avatar):
"""A Function to change the users avatar
Parameters:
avatar (str)
Exceptions
ValueError -> if avatar is None
"""
if avatar is None:
raise ValueError("avatar name is invalid!")
with db_session:
try:
user = self.cfg.db.User[self.username]
user.avatar = avatar
return True
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def info(self, include_email=False):
"""A Function to return a users public information
Parameters:
include_email (bool) -> if set to true the returned dictionary will include the email address of the user
return:
Dictionary with user information
example:
{"username":"admin", "avatar":"default.png", "activated":True, "email":"<EMAIL>"}
Exceptions
PyUserExceptions.MissingUserException -> if requested user is not found
"""
with db_session:
try:
user = self.cfg.db.User[self.username]
return_dict = {
"username": user.username,
"avatar": user.avatar,
"activated": user.activated,
}
if include_email:
return_dict["email"] = user.email
return return_dict
except ObjectNotFound:
raise PyUserExceptions.MissingUserException
def info_extended(self):
"""A Function to return userinfo + auth token info + perms
return:
Dictionary with user information
example:
{"username":"admin", "avatar":"default.png", "activated":True, "email":"<EMAIL>", token:{"last_login":"01.01.2022 13:37", "valid_until":"02.01.2022 13:37"....},"perms":["admin","testgroup"]}
Exceptions
PyUserExceptions.MissingUserException -> if requested user is not found
"""
with db_session:
try:
user = self.cfg.db.User[self.username]
return_dict = self.info(include_email=True)
token_dict = {}
if user.token is not None:
token_dict["last_login"] = str(user.token.last_login)
token_dict["valid_until"] = str(user.token.valid_until)
token_dict["valid_for"] = user.token.ip
token_dict["token"] = user.token.token
# add perms to dict!
perm_array = []
for perm in user.perms:
perm_array.append(perm.perm_name)
return_dict["token"] = token_dict
return_dict["perms"] = perm_array
return return_dict
except ObjectNotFound:
raise PyUserExceptions.MissingUserException | 0.721645 | 0.143038 |
jossa data on esitetty vertailuarvoina"""
#Version: 3.9.5
#API:n osoitteet
def getAPIkeys():
with open("keyfile.txt", 'r') as keyfile:
keys = keyfile.read()
keys = keys.split(" ")
return keys
keys=getAPIkeys()
city = "Tampere"
provider1 = "openweathermap"
requestProvider1 = "https://api.openweathermap.org/data/2.5/weather?q={}&units=metric&mode=xml&{}".format(city,keys[0])
provider2 = "HERE"
requestProvider2 = "https://weather.cc.api.here.com/weather/1.0/report.xml?oneobservation=true&product=observation&{}&product=observation&name={}".format(keys[1],city)
outputfile = "comparison.json"
#Koodi
import requests
import xml.etree.ElementTree as ET
import time
import datetime
import json
def getTimeZone(): #Määritä kuinka monta tuntia edellä suomi on GMT:stä (määritä onko Suomi kesä vai talviajassa)
offset = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone
hours = offset / 60 / 60 * -1
hours = int(hours)
return hours
def adjustTime(time): #Muuta GMT -aika Suomen aikaan ISO-standardin mukaisesti
f = time.replace("T", " ")
dif = getTimeZone()
convertedTime = datetime.datetime.strptime(f, '%Y-%m-%d %H:%M:%S')
adjustedTime = convertedTime + datetime.timedelta(hours = dif)
adjustedTime = str(adjustedTime).replace(" ", "T")
adjustedTime = ''.join((adjustedTime,"+0{}:00".format(dif)))
return adjustedTime
def requestAPI(address): #Pyydä API:ltä XML-dataa
attempts = 0 #Tietty määrä yrityksiä vastata ennen kuin ajo keskeytetään
while attempts < 3:
try:
resp = requests.get(address)
attempts = 3
return resp
except requests.exceptions.InvalidSchema as HE:
attempts += 1
print("{}, kelvoton osoite.".format(HE))
except requests.ConnectionError as CE:
attempts += 1
print("{}, ei saada yhteyttä ohjelmointirajapintaan.".format(CE))
else:
input("Paina ENTER lopettaaksesi ")
exit()
def findChildren(response): #Etsi XML-puusta tietyt arvot
varList = []
xml_string = response.content
tree = ET.fromstring(xml_string) #Tee API:n vastauksesta xml-puu
for root in tree.iter(): #Etsi xml:n kaikki keyt
#Lisää säähavaintojen aika
for children in root.findall("lastupdate"):
if children == None:
pass
else:
temp = children.attrib["value"]
temp = adjustTime(temp)
varList.append(temp)
for children in root.findall("observation"):
temp = children.attrib["utcTime"]
varList.append(temp)
#Etsi molemmista puista lämpötilaa vastaavat arvot
for children in root.findall("temperature"):
if children.text == None:
temp = children.attrib["value"]
varList.append(temp)
else:
varList.append(children.text)
#Etsi molemmista puista kosteus% vastaavat arvot
for children in root.findall("humidity"):
if children.text == None:
temp = children.attrib["value"]
varList.append(temp)
else:
varList.append(children.text)
#Etsi molemmista puista taivasta kuvaavat merkinnät.
for children in root.findall("weather"):
if children == None:
pass
else:
temp = children.attrib["value"]
varList.append(temp)
for children in root.findall("skyDescription"):
if children == None:
continue
else:
varList.append(children.text)
return varList
def createJSON(val1, val2): #Luo JSON vertailuarvoille
data = {}
data['Comparison'] = []
data['Comparison'].append({
'Provider': provider1,
'City': city,
'Time': val1[0],
'Temperature': val1[1],
'Humidity': val1[2],
'SkyDesc': val1[3]
})
data['Comparison'].append({
'Provider': provider2,
'City': city,
'Time': val2[0],
'Temperature': val2[1],
'Humidity': val2[2],
'SkyDesc': val2[3]
})
with open(outputfile, 'w') as outfile: #Luo JSON ja kirjoita siihen ylempänä olevat määritteet
json.dump(data, outfile, indent=4)
def ready():
print("Säätiedot kirjoitettu tiedostoon {}".format(outputfile))
input("Paina ENTER lopettaaksesi ")
exit()
if __name__ == "__main__": #Suorita funktiot
resp1 = requestAPI(requestProvider1)
resp2 = requestAPI(requestProvider2)
values1 = findChildren(resp1)
values2 = findChildren(resp2)
createJSON(values1, values2)
ready() | main.py | jossa data on esitetty vertailuarvoina"""
#Version: 3.9.5
#API:n osoitteet
def getAPIkeys():
with open("keyfile.txt", 'r') as keyfile:
keys = keyfile.read()
keys = keys.split(" ")
return keys
keys=getAPIkeys()
city = "Tampere"
provider1 = "openweathermap"
requestProvider1 = "https://api.openweathermap.org/data/2.5/weather?q={}&units=metric&mode=xml&{}".format(city,keys[0])
provider2 = "HERE"
requestProvider2 = "https://weather.cc.api.here.com/weather/1.0/report.xml?oneobservation=true&product=observation&{}&product=observation&name={}".format(keys[1],city)
outputfile = "comparison.json"
#Koodi
import requests
import xml.etree.ElementTree as ET
import time
import datetime
import json
def getTimeZone(): #Määritä kuinka monta tuntia edellä suomi on GMT:stä (määritä onko Suomi kesä vai talviajassa)
offset = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone
hours = offset / 60 / 60 * -1
hours = int(hours)
return hours
def adjustTime(time): #Muuta GMT -aika Suomen aikaan ISO-standardin mukaisesti
f = time.replace("T", " ")
dif = getTimeZone()
convertedTime = datetime.datetime.strptime(f, '%Y-%m-%d %H:%M:%S')
adjustedTime = convertedTime + datetime.timedelta(hours = dif)
adjustedTime = str(adjustedTime).replace(" ", "T")
adjustedTime = ''.join((adjustedTime,"+0{}:00".format(dif)))
return adjustedTime
def requestAPI(address): #Pyydä API:ltä XML-dataa
attempts = 0 #Tietty määrä yrityksiä vastata ennen kuin ajo keskeytetään
while attempts < 3:
try:
resp = requests.get(address)
attempts = 3
return resp
except requests.exceptions.InvalidSchema as HE:
attempts += 1
print("{}, kelvoton osoite.".format(HE))
except requests.ConnectionError as CE:
attempts += 1
print("{}, ei saada yhteyttä ohjelmointirajapintaan.".format(CE))
else:
input("Paina ENTER lopettaaksesi ")
exit()
def findChildren(response): #Etsi XML-puusta tietyt arvot
varList = []
xml_string = response.content
tree = ET.fromstring(xml_string) #Tee API:n vastauksesta xml-puu
for root in tree.iter(): #Etsi xml:n kaikki keyt
#Lisää säähavaintojen aika
for children in root.findall("lastupdate"):
if children == None:
pass
else:
temp = children.attrib["value"]
temp = adjustTime(temp)
varList.append(temp)
for children in root.findall("observation"):
temp = children.attrib["utcTime"]
varList.append(temp)
#Etsi molemmista puista lämpötilaa vastaavat arvot
for children in root.findall("temperature"):
if children.text == None:
temp = children.attrib["value"]
varList.append(temp)
else:
varList.append(children.text)
#Etsi molemmista puista kosteus% vastaavat arvot
for children in root.findall("humidity"):
if children.text == None:
temp = children.attrib["value"]
varList.append(temp)
else:
varList.append(children.text)
#Etsi molemmista puista taivasta kuvaavat merkinnät.
for children in root.findall("weather"):
if children == None:
pass
else:
temp = children.attrib["value"]
varList.append(temp)
for children in root.findall("skyDescription"):
if children == None:
continue
else:
varList.append(children.text)
return varList
def createJSON(val1, val2): #Luo JSON vertailuarvoille
data = {}
data['Comparison'] = []
data['Comparison'].append({
'Provider': provider1,
'City': city,
'Time': val1[0],
'Temperature': val1[1],
'Humidity': val1[2],
'SkyDesc': val1[3]
})
data['Comparison'].append({
'Provider': provider2,
'City': city,
'Time': val2[0],
'Temperature': val2[1],
'Humidity': val2[2],
'SkyDesc': val2[3]
})
with open(outputfile, 'w') as outfile: #Luo JSON ja kirjoita siihen ylempänä olevat määritteet
json.dump(data, outfile, indent=4)
def ready():
print("Säätiedot kirjoitettu tiedostoon {}".format(outputfile))
input("Paina ENTER lopettaaksesi ")
exit()
if __name__ == "__main__": #Suorita funktiot
resp1 = requestAPI(requestProvider1)
resp2 = requestAPI(requestProvider2)
values1 = findChildren(resp1)
values2 = findChildren(resp2)
createJSON(values1, values2)
ready() | 0.208824 | 0.195998 |
from os import getenv
import numpy
from dotenv import load_dotenv
from shapely import geometry
from cu_pass.dpa_calculator.population_retriever.population_retriever import PopulationRetriever
from reference_models.geo.utils import GridPolygon
from reference_models.geo.zones import GetUsBorder
from src.lib.geo import geo_utils
from src.lib.usgs_pop.usgs_pop_driver import UsgsPopDriver
load_dotenv()
POPULATION_DIRECTORY_CENSUS = getenv('POPULATION_DIRECTORY_CENSUS')
POPULATION_RESOLUTION_IN_ARCSECONDS = 100
def ComputeSensorNeighborhood(latitude, longitude, radius_km, res_arcsec):
"""
from src.studies.esc_impact_pop.esc_pop_impact
"""
us_border = GetUsBorder()
sensor_nbor = geo_utils.Buffer(geometry.Point(longitude, latitude), radius_km)
sensor_nbor = sensor_nbor.intersection(us_border)
longitudes, latitudes = list(zip(*GridPolygon(sensor_nbor, res_arcsec)))
return latitudes, longitudes, sensor_nbor
class PopulationRetrieverCensus(PopulationRetriever):
_resolution_in_arcseconds = POPULATION_RESOLUTION_IN_ARCSECONDS
def retrieve(self) -> int:
if not self._area.radius_in_kilometers:
return 0
popper = UsgsPopDriver(pop_directory=POPULATION_DIRECTORY_CENSUS, lazy_load=True)
lats, lons, _ = ComputeSensorNeighborhood(latitude=self._area.center_coordinates.latitude,
longitude=self._area.center_coordinates.longitude,
radius_km=self._area.radius_in_kilometers,
res_arcsec=self._resolution_in_arcseconds)
lats, lons = numpy.array(lats), numpy.array(lons)
idxs = numpy.arange(len(lats))
# Compute the standalone population impact for that sensor.
return round(
geo_utils.AreaPlateCarreePixel(res_arcsec=self._resolution_in_arcseconds,
ref_latitude=self._area.center_coordinates.latitude) *
numpy.sum(popper.GetPopulationDensity(lats[idxs], lons[idxs]))) | src/harness/cu_pass/dpa_calculator/population_retriever/population_retriever_census.py | from os import getenv
import numpy
from dotenv import load_dotenv
from shapely import geometry
from cu_pass.dpa_calculator.population_retriever.population_retriever import PopulationRetriever
from reference_models.geo.utils import GridPolygon
from reference_models.geo.zones import GetUsBorder
from src.lib.geo import geo_utils
from src.lib.usgs_pop.usgs_pop_driver import UsgsPopDriver
load_dotenv()
POPULATION_DIRECTORY_CENSUS = getenv('POPULATION_DIRECTORY_CENSUS')
POPULATION_RESOLUTION_IN_ARCSECONDS = 100
def ComputeSensorNeighborhood(latitude, longitude, radius_km, res_arcsec):
"""
from src.studies.esc_impact_pop.esc_pop_impact
"""
us_border = GetUsBorder()
sensor_nbor = geo_utils.Buffer(geometry.Point(longitude, latitude), radius_km)
sensor_nbor = sensor_nbor.intersection(us_border)
longitudes, latitudes = list(zip(*GridPolygon(sensor_nbor, res_arcsec)))
return latitudes, longitudes, sensor_nbor
class PopulationRetrieverCensus(PopulationRetriever):
_resolution_in_arcseconds = POPULATION_RESOLUTION_IN_ARCSECONDS
def retrieve(self) -> int:
if not self._area.radius_in_kilometers:
return 0
popper = UsgsPopDriver(pop_directory=POPULATION_DIRECTORY_CENSUS, lazy_load=True)
lats, lons, _ = ComputeSensorNeighborhood(latitude=self._area.center_coordinates.latitude,
longitude=self._area.center_coordinates.longitude,
radius_km=self._area.radius_in_kilometers,
res_arcsec=self._resolution_in_arcseconds)
lats, lons = numpy.array(lats), numpy.array(lons)
idxs = numpy.arange(len(lats))
# Compute the standalone population impact for that sensor.
return round(
geo_utils.AreaPlateCarreePixel(res_arcsec=self._resolution_in_arcseconds,
ref_latitude=self._area.center_coordinates.latitude) *
numpy.sum(popper.GetPopulationDensity(lats[idxs], lons[idxs]))) | 0.679391 | 0.368747 |
import logging
import mimetypes
import os
import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
# Logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Handler
log_path = '' # path to log file
fh = logging.FileHandler('{}deliveries.log'.format(log_path))
fh.setLevel(logging.INFO)
# Formatter
formatter = logging.Formatter(
'%(asctime)s : %(name)s : %(levelname)s : %(message)s'
)
fh.setFormatter(formatter)
logger.addHandler(fh)
def _build_attachment(path):
    """Return a MIME part for the file at *path*, typed by file extension.

    Unknown or compressed files are attached as application/octet-stream.
    """
    ctype, encoding = mimetypes.guess_type(path)
    if ctype is None or encoding is not None:
        # No guess, or the file is compressed: treat as opaque binary.
        ctype = 'application/octet-stream'
    maintype, subtype = ctype.split('/', 1)
    if maintype == 'text':
        with open(path) as fp:
            att = MIMEText(fp.read(), _subtype=subtype)
    else:
        with open(path, 'rb') as fp:
            payload = fp.read()
        if maintype == 'application':
            att = MIMEApplication(payload, _subtype=subtype)
        elif maintype == 'image':
            att = MIMEImage(payload, _subtype=subtype)
        elif maintype == 'audio':
            att = MIMEAudio(payload, _subtype=subtype)
        else:
            # Generic fallback: raw payload, base64-encoded for transport.
            att = MIMEBase(maintype, subtype)
            att.set_payload(payload)
            encoders.encode_base64(att)
    att.add_header('content-disposition', 'attachment',
                   filename=os.path.basename(path))
    return att


def send_gmail(
    message,
    subject,
    email_to,
    email_from,
    password,
    reply_to='NoReply',
    file_to_send=None
):
    '''Sends email with provided file(s) as attachment from email_from to email_to.

    Username and password are for the gmail account that sends the email.
    Only works with a gmail account.

    PARAMS
    ---------------
    message : message to be included in the email
    subject : email subject
    email_to : email or list of emails of intended recipient(s)
    email_from : email that will appear as the sender. Also used to log in
        to the email account using the password provided
    password : password associated with email_from. Used to log in to the
        email_from account in order to create and send the email
    reply_to : email address to which all replies will be addressed
    file_to_send : attachment path, or a list of attachment paths
    '''
    msg = MIMEMultipart()
    msg['From'] = email_from
    if isinstance(email_to, list):
        msg['To'] = ', '.join(email_to)
    else:
        msg['To'] = email_to
    msg['Reply-To'] = reply_to
    msg['Subject'] = subject
    msg.attach(MIMEText(message))

    # Normalize to a list so single and multiple attachments share one code
    # path (the original duplicated ~30 lines between the two branches).
    if file_to_send is not None:
        attachments = file_to_send if isinstance(file_to_send, list) else [file_to_send]
        for path in attachments:
            msg.attach(_build_attachment(path))

    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.ehlo()
        server.starttls()
        server.ehlo()
        server.login(email_from, password)
        server.sendmail(email_from, email_to, msg.as_string())
    finally:
        # Always close the connection, even when login/send fails.
        server.quit()
if __name__ == '__main__':
    # Smoke test: fill in real addresses/credentials before running.
    # (Removed dataset residue "| src/sendemail.py |" fused onto the
    # closing parenthesis line.)
    message = 'This is a test email'
    subject = 'Testing send_gmail'
    email_to = ''
    email_from = ''
    password = ''
    send_gmail(
        message,
        subject,
        email_to,
        email_from,
        password
    )
import logging
import mimetypes
import os
import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.image import MIMEImage
from email.mime.text import MIMEText

# Logging: INFO and above is appended to deliveries.log via a plain
# FileHandler (note: RotatingFileHandler is imported above but not used here).
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Handler
log_path = ''  # directory prefix for the log file; '' = current directory
fh = logging.FileHandler('{}deliveries.log'.format(log_path))
fh.setLevel(logging.INFO)
# Formatter
formatter = logging.Formatter(
    '%(asctime)s : %(name)s : %(levelname)s : %(message)s'
)
fh.setFormatter(formatter)
logger.addHandler(fh)
def _build_attachment(path):
    """Return a MIME part for the file at *path*, typed by file extension.

    Unknown or compressed files are attached as application/octet-stream.
    """
    ctype, encoding = mimetypes.guess_type(path)
    if ctype is None or encoding is not None:
        # No guess, or the file is compressed: treat as opaque binary.
        ctype = 'application/octet-stream'
    maintype, subtype = ctype.split('/', 1)
    if maintype == 'text':
        with open(path) as fp:
            att = MIMEText(fp.read(), _subtype=subtype)
    else:
        with open(path, 'rb') as fp:
            payload = fp.read()
        if maintype == 'application':
            att = MIMEApplication(payload, _subtype=subtype)
        elif maintype == 'image':
            att = MIMEImage(payload, _subtype=subtype)
        elif maintype == 'audio':
            att = MIMEAudio(payload, _subtype=subtype)
        else:
            # Generic fallback: raw payload, base64-encoded for transport.
            att = MIMEBase(maintype, subtype)
            att.set_payload(payload)
            encoders.encode_base64(att)
    att.add_header('content-disposition', 'attachment',
                   filename=os.path.basename(path))
    return att


def send_gmail(
    message,
    subject,
    email_to,
    email_from,
    password,
    reply_to='NoReply',
    file_to_send=None
):
    '''Sends email with provided file(s) as attachment from email_from to email_to.

    Username and password are for the gmail account that sends the email.
    Only works with a gmail account.

    PARAMS
    ---------------
    message : message to be included in the email
    subject : email subject
    email_to : email or list of emails of intended recipient(s)
    email_from : email that will appear as the sender. Also used to log in
        to the email account using the password provided
    password : password associated with email_from. Used to log in to the
        email_from account in order to create and send the email
    reply_to : email address to which all replies will be addressed
    file_to_send : attachment path, or a list of attachment paths
    '''
    msg = MIMEMultipart()
    msg['From'] = email_from
    if isinstance(email_to, list):
        msg['To'] = ', '.join(email_to)
    else:
        msg['To'] = email_to
    msg['Reply-To'] = reply_to
    msg['Subject'] = subject
    msg.attach(MIMEText(message))

    # Normalize to a list so single and multiple attachments share one code
    # path (the original duplicated ~30 lines between the two branches).
    if file_to_send is not None:
        attachments = file_to_send if isinstance(file_to_send, list) else [file_to_send]
        for path in attachments:
            msg.attach(_build_attachment(path))

    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.ehlo()
        server.starttls()
        server.ehlo()
        server.login(email_from, password)
        server.sendmail(email_from, email_to, msg.as_string())
    finally:
        # Always close the connection, even when login/send fails.
        server.quit()
if __name__ == '__main__':
    # Smoke test: fill in real addresses/credentials before running.
    # (Removed dataset residue "| 0.206974 | 0.082734 |" fused onto the
    # closing parenthesis line.)
    message = 'This is a test email'
    subject = 'Testing send_gmail'
    email_to = ''
    email_from = ''
    password = ''
    send_gmail(
        message,
        subject,
        email_to,
        email_from,
        password
    )
from dataclasses import dataclass
from functools import partial
from itertools import zip_longest
from pprint import pformat
from typing import Any, Optional

# Public API of this module: only the Task record is exported.
__all__ = [
    "Task",
]
def parse_task_set(task_list_str: Optional[str]) -> frozenset[int]:
    """Parse whitespace-separated "task-<id>" tokens into a set of ids.

    ``None`` or an empty string yields the empty set; a trailing comma on
    a token is tolerated.
    """
    tokens = (tok.strip(",") for tok in (task_list_str or "").split())
    return frozenset(int(tok.split("-")[1]) for tok in tokens if tok)
def parse_int(value: str) -> int:
    """Parse an int cell; empty/falsy input maps to 0."""
    if not value:
        return 0
    return int(value)


def parse_opt_int(value: str) -> Optional[int]:
    """Parse an int cell; empty/falsy input maps to None."""
    return int(value) if value else None


def parse_opt_float(value: str) -> Optional[float]:
    """Parse a float cell; empty/falsy input maps to None."""
    if not value:
        return None
    return float(value)
def parse_enum(values: list[Any], value: Any) -> Any:
    """Return *value* after checking it is one of *values*.

    Raises:
        AssertionError: if *value* is not a member of *values*.  Raised
            explicitly (not via ``assert``) so validation still runs under
            ``python -O``; Task.from_row relies on catching AssertionError.
    """
    if value not in values:
        raise AssertionError(f"Value {value} not in {values}!")
    return value
def value_str(value: Any) -> str:
    """Render a field for a spreadsheet cell.

    Sets of task ids become a sorted, comma-separated "task-N" list;
    everything else is stringified as-is.
    """
    if not isinstance(value, (frozenset, set)):
        return str(value)
    return ", ".join(f"task-{tid}" for tid in sorted(value))
@dataclass(eq=True, frozen=True)
class Task:
    """One row of the task-tracking spreadsheet, parsed into typed fields."""

    task_id: int
    task_type: str
    role: Optional[str]
    desc: str
    status: str
    value: int
    effort: int
    depends_on: frozenset[int]
    best_value: int
    best_effort: int
    priority: float
    best_followups: frozenset[int]
    blocked_by: frozenset[int]

    # Allowed status strings, e.g. "1 - NEXT IN LINE".
    statuses = [
        f"{i+1} - {status}"
        for i, status in enumerate(
            ["NEXT IN LINE", "WIP", "BLOCKED", "DONE", "NOPE", "IDEA"]
        )
    ]

    # Spreadsheet column order paired with the parser for each column.
    ordered_column_parsing_funcs = [
        ("task_id", int),
        ("task_type", partial(parse_enum, ["Story", "Task"])),
        ("role", str),
        ("desc", str),
        ("status", partial(parse_enum, statuses)),
        ("value", parse_int),
        ("effort", parse_int),
        ("depends_on", parse_task_set),
        ("best_value", parse_opt_int),
        ("best_effort", parse_opt_int),
        ("priority", parse_opt_float),
        ("best_followups", parse_task_set),
        ("blocked_by", parse_task_set),
    ]

    @classmethod
    def from_row(cls, row: list[Any]) -> "Task":
        """Build a Task from a raw spreadsheet row.

        Raises:
            ValueError: if any cell fails validation; the message includes
                the raw row, pretty-printed, for debugging.
        """
        try:
            return cls(
                **{
                    key: parse_value_func(value)
                    for (value, (key, parse_value_func)) in zip_longest(
                        row,
                        Task.ordered_column_parsing_funcs,
                    )
                }
            )
        except AssertionError as e:
            task = {
                key: value
                for (value, (key, _)) in zip_longest(
                    row,
                    Task.ordered_column_parsing_funcs,
                )
            }
            # Chain the assertion so the root cause stays in the traceback.
            # (Also removed dataset residue fused onto the final line.)
            raise ValueError(f"Could not parse task {pformat(task)}:\n{e}") from e

    def row(self) -> list[Any]:
        """Serialize this Task back into a spreadsheet row of strings."""
        return [
            value_str(getattr(self, key))
            for (key, _) in Task.ordered_column_parsing_funcs
        ]
from functools import partial
from itertools import zip_longest
from pprint import pformat
from typing import Any, Optional
__all__ = [
"Task",
]
def parse_task_set(task_list_str: Optional[str]) -> frozenset[int]:
result = []
for task in (task_list_str or "").split():
task = task.strip(",")
if not task:
continue
task_id = int(task.split("-")[1])
result.append(task_id)
return frozenset(result)
def parse_int(value: str) -> int:
return int(value) if value else 0
def parse_opt_int(value: str) -> Optional[int]:
if value:
return int(value)
else:
return None
def parse_opt_float(value: str) -> Optional[float]:
return float(value) if value else None
def parse_enum(values: list[Any], value: Any) -> Any:
assert value in values, f"Value {value} not in {values}!"
return value
def value_str(value: Any) -> str:
if isinstance(value, (frozenset, set)):
return ", ".join(f"task-{subtask}" for subtask in sorted(value))
else:
return str(value)
@dataclass(eq=True, frozen=True)
class Task:
task_id: int
task_type: str
role: Optional[str]
desc: str
status: str
value: int
effort: int
depends_on: frozenset[int]
best_value: int
best_effort: int
priority: float
best_followups: frozenset[int]
blocked_by: frozenset[int]
statuses = [
f"{i+1} - {status}"
for i, status in enumerate(
["NEXT IN LINE", "WIP", "BLOCKED", "DONE", "NOPE", "IDEA"]
)
]
ordered_column_parsing_funcs = [
("task_id", int),
("task_type", partial(parse_enum, ["Story", "Task"])),
("role", str),
("desc", str),
("status", partial(parse_enum, statuses)),
("value", parse_int),
("effort", parse_int),
("depends_on", parse_task_set),
("best_value", parse_opt_int),
("best_effort", parse_opt_int),
("priority", parse_opt_float),
("best_followups", parse_task_set),
("blocked_by", parse_task_set),
]
@classmethod
def from_row(cls, row: list[Any]) -> "Task":
try:
return cls(
**{
key: parse_value_func(value)
for (value, (key, parse_value_func)) in zip_longest(
row,
Task.ordered_column_parsing_funcs,
)
}
)
except AssertionError as e:
task = {
key: value
for (value, (key, _)) in zip_longest(
row,
Task.ordered_column_parsing_funcs,
)
}
raise ValueError(f"Could not parse task {pformat(task)}:\n{e}")
def row(self) -> list[Any]:
return [
value_str(getattr(self, key))
for (key, _) in Task.ordered_column_parsing_funcs
] | 0.822403 | 0.308028 |
import unittest
import os
import numpy as np
from openmdao.api import Problem, Group
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from pycycle.mp_cycle import Cycle
from pycycle.thermo.cea.species_data import janaf
from pycycle.elements.duct import Duct
from pycycle.elements.flow_start import FlowStart
from pycycle.constants import AIR_ELEMENTS
from pycycle import constants

# Regression data: duct.csv holds reference flow-station states; each row
# is one operating point (presumably produced by a trusted earlier run —
# TODO confirm provenance).
fpath = os.path.dirname(os.path.realpath(__file__))
ref_data = np.loadtxt(fpath + "/reg_data/duct.csv", delimiter=",", skiprows=1)

# Column names of duct.csv, in file order.
header = [
    'dPqP',
    'Qin',
    'Fl_I.W',
    'Fl_I.V',
    'Fl_I.MN',
    'Fl_I.s',
    'Fl_I.Pt',
    'Fl_I.Tt',
    'Fl_I.ht',
    'Fl_I.rhot',
    'Fl_I.gamt',
    'Fl_O.MN',
    'Fl_O.s',
    'Fl_O.Pt',
    'Fl_O.Tt',
    'Fl_O.ht',
    'Fl_O.rhot',
    'Fl_O.gamt',
    'Fl_O.Ps',
    'Fl_O.Ts',
    'Fl_O.hs',
    'Fl_O.rhos',
    'Fl_O.gams']

# Map column name -> column index for readable row lookups below.
h_map = dict(((v_name, i) for i, v_name in enumerate(header)))

# Fail loudly on any floating-point warning (overflow, invalid op, etc.).
np.seterr(all="raise")
class DuctTestCase(unittest.TestCase):
    """Regression tests for the Duct element against reg_data/duct.csv."""

    def test_case1(self):
        # Design-mode duct: FlowStart feeding a single Duct; MN/P/T promoted.
        self.prob = Problem()
        cycle = self.prob.model = Cycle()
        cycle.add_subsystem('flow_start', FlowStart(thermo_data=janaf,
                            elements=AIR_ELEMENTS), promotes=['MN', 'P', 'T'])
        cycle.add_subsystem('duct', Duct(elements=AIR_ELEMENTS), promotes=['MN'])
        cycle.pyc_connect_flow('flow_start.Fl_O', 'duct.Fl_I')
        cycle.set_input_defaults('MN', 0.5)
        cycle.set_input_defaults('duct.dPqP', 0.0)
        cycle.set_input_defaults('P', 17., units='psi')
        cycle.set_input_defaults('T', 500., units='degR')
        cycle.set_input_defaults('flow_start.W', 500., units='lbm/s')
        self.prob.setup(check=False, force_alloc_complex=True)
        self.prob.set_solver_print(level=-1)
        # 6 cases to check against
        for i, data in enumerate(ref_data):
            self.prob['duct.dPqP'] = data[h_map['dPqP']]
            # input flowstation
            self.prob['P'] = data[h_map['Fl_I.Pt']]
            self.prob['T'] = data[h_map['Fl_I.Tt']]
            self.prob['MN'] = data[h_map['Fl_O.MN']]
            self.prob['flow_start.W'] = data[h_map['Fl_I.W']]
            self.prob['duct.Fl_I:stat:V'] = data[h_map['Fl_I.V']]
            # give a decent initial guess for Ps
            print(i, self.prob['P'], self.prob['T'], self.prob['MN'])
            self.prob.run_model()
            # check outputs against the reference row
            pt, ht, ps, ts = data[h_map['Fl_O.Pt']], data[
                h_map['Fl_O.ht']], data[h_map['Fl_O.Ps']], data[h_map['Fl_O.Ts']]
            pt_computed = self.prob['duct.Fl_O:tot:P']
            ht_computed = self.prob['duct.Fl_O:tot:h']
            ps_computed = self.prob['duct.Fl_O:stat:P']
            ts_computed = self.prob['duct.Fl_O:stat:T']
            tol = 2.0e-2
            assert_near_equal(pt_computed, pt, tol)
            assert_near_equal(ht_computed, ht, tol)
            assert_near_equal(ps_computed, ps, tol)
            assert_near_equal(ts_computed, ts, tol)
        # Verify analytic derivatives against complex step.
        partial_data = self.prob.check_partials(out_stream=None, method='cs',
                                                includes=['duct.*'], excludes=['*.base_thermo.*',])
        assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)

    def test_case_with_dPqP_MN(self):
        # Design duct sizes itself; the off-design duct reuses its loss
        # scalar (s_dPqP) and exit area via the explicit connections below.
        self.prob = Problem()
        cycle = self.prob.model = Cycle()
        cycle.add_subsystem('flow_start', FlowStart(thermo_data=janaf,
                            elements=AIR_ELEMENTS), promotes=['P', 'T', 'MN', 'W'])
        cycle.add_subsystem('flow_start_OD', FlowStart(thermo_data=janaf,
                            elements=AIR_ELEMENTS), promotes=['P', 'T', 'W'])
        expMN = 1.0
        cycle.add_subsystem('duct_des', Duct(elements=AIR_ELEMENTS, expMN=expMN), promotes=['MN'])
        cycle.add_subsystem('duct_OD', Duct(elements=AIR_ELEMENTS, expMN=expMN, design=False))
        cycle.pyc_connect_flow('flow_start.Fl_O', 'duct_des.Fl_I')
        cycle.pyc_connect_flow('flow_start_OD.Fl_O', 'duct_OD.Fl_I')
        cycle.set_input_defaults('P', 17., units='psi')
        cycle.set_input_defaults('T', 500., units='degR')
        cycle.set_input_defaults('MN', 0.5)
        cycle.set_input_defaults('flow_start_OD.MN', 0.25)
        cycle.set_input_defaults('duct_des.dPqP', 0.0)
        cycle.set_input_defaults('W', 500., units='lbm/s')
        cycle.connect("duct_des.s_dPqP", "duct_OD.s_dPqP")
        cycle.connect("duct_des.Fl_O:stat:area", "duct_OD.area")
        self.prob.setup(check=False, force_alloc_complex=True)
        self.prob.set_solver_print(level=-1)
        data = ref_data[0]
        self.prob['duct_des.dPqP'] = data[h_map['dPqP']]
        # input flowstation
        self.prob['P'] = data[h_map['Fl_I.Pt']]
        self.prob['T'] = data[h_map['Fl_I.Tt']]
        self.prob['MN'] = data[h_map['Fl_O.MN']]
        self.prob['W'] = data[h_map['Fl_I.W']]
        self.prob['duct_des.Fl_I:stat:V'] = data[h_map['Fl_I.V']]
        # give a decent initial guess for Ps
        print(self.prob['P'], self.prob['T'], self.prob['MN'])
        self.prob.run_model()
        # check outputs; Pt/Ps compare against hard-coded off-design values
        pt, ht, ps, ts = data[h_map['Fl_O.Pt']], data[
            h_map['Fl_O.ht']], data[h_map['Fl_O.Ps']], data[h_map['Fl_O.Ts']]
        pt_computed = self.prob['duct_OD.Fl_O:tot:P']
        ht_computed = self.prob['duct_OD.Fl_O:tot:h']
        ps_computed = self.prob['duct_OD.Fl_O:stat:P']
        ts_computed = self.prob['duct_OD.Fl_O:stat:T']
        tol = 1.0e-4
        assert_near_equal(pt_computed, 8.84073152, tol)
        assert_near_equal(ht_computed, ht, tol)
        assert_near_equal(ps_computed, 8.26348914, tol)
        assert_near_equal(ts_computed, ts, tol)
        partial_data = self.prob.check_partials(out_stream=None, method='cs',
                                                includes=['duct_OD.*'], excludes=['*.base_thermo.*',])
        assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)
if __name__ == "__main__":
    # Removed dataset residue fused onto the unittest.main() line.
    unittest.main()
import unittest
import os
import numpy as np
from openmdao.api import Problem, Group
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from pycycle.mp_cycle import Cycle
from pycycle.thermo.cea.species_data import janaf
from pycycle.elements.duct import Duct
from pycycle.elements.flow_start import FlowStart
from pycycle.constants import AIR_ELEMENTS
from pycycle import constants

# Regression data: duct.csv holds reference flow-station states; each row
# is one operating point (presumably produced by a trusted earlier run —
# TODO confirm provenance).
fpath = os.path.dirname(os.path.realpath(__file__))
ref_data = np.loadtxt(fpath + "/reg_data/duct.csv", delimiter=",", skiprows=1)

# Column names of duct.csv, in file order.
header = [
    'dPqP',
    'Qin',
    'Fl_I.W',
    'Fl_I.V',
    'Fl_I.MN',
    'Fl_I.s',
    'Fl_I.Pt',
    'Fl_I.Tt',
    'Fl_I.ht',
    'Fl_I.rhot',
    'Fl_I.gamt',
    'Fl_O.MN',
    'Fl_O.s',
    'Fl_O.Pt',
    'Fl_O.Tt',
    'Fl_O.ht',
    'Fl_O.rhot',
    'Fl_O.gamt',
    'Fl_O.Ps',
    'Fl_O.Ts',
    'Fl_O.hs',
    'Fl_O.rhos',
    'Fl_O.gams']

# Map column name -> column index for readable row lookups below.
h_map = dict(((v_name, i) for i, v_name in enumerate(header)))

# Fail loudly on any floating-point warning (overflow, invalid op, etc.).
np.seterr(all="raise")
class DuctTestCase(unittest.TestCase):
    """Regression tests for the Duct element against reg_data/duct.csv."""

    def test_case1(self):
        # Design-mode duct: FlowStart feeding a single Duct; MN/P/T promoted.
        self.prob = Problem()
        cycle = self.prob.model = Cycle()
        cycle.add_subsystem('flow_start', FlowStart(thermo_data=janaf,
                            elements=AIR_ELEMENTS), promotes=['MN', 'P', 'T'])
        cycle.add_subsystem('duct', Duct(elements=AIR_ELEMENTS), promotes=['MN'])
        cycle.pyc_connect_flow('flow_start.Fl_O', 'duct.Fl_I')
        cycle.set_input_defaults('MN', 0.5)
        cycle.set_input_defaults('duct.dPqP', 0.0)
        cycle.set_input_defaults('P', 17., units='psi')
        cycle.set_input_defaults('T', 500., units='degR')
        cycle.set_input_defaults('flow_start.W', 500., units='lbm/s')
        self.prob.setup(check=False, force_alloc_complex=True)
        self.prob.set_solver_print(level=-1)
        # 6 cases to check against
        for i, data in enumerate(ref_data):
            self.prob['duct.dPqP'] = data[h_map['dPqP']]
            # input flowstation
            self.prob['P'] = data[h_map['Fl_I.Pt']]
            self.prob['T'] = data[h_map['Fl_I.Tt']]
            self.prob['MN'] = data[h_map['Fl_O.MN']]
            self.prob['flow_start.W'] = data[h_map['Fl_I.W']]
            self.prob['duct.Fl_I:stat:V'] = data[h_map['Fl_I.V']]
            # give a decent initial guess for Ps
            print(i, self.prob['P'], self.prob['T'], self.prob['MN'])
            self.prob.run_model()
            # check outputs against the reference row
            pt, ht, ps, ts = data[h_map['Fl_O.Pt']], data[
                h_map['Fl_O.ht']], data[h_map['Fl_O.Ps']], data[h_map['Fl_O.Ts']]
            pt_computed = self.prob['duct.Fl_O:tot:P']
            ht_computed = self.prob['duct.Fl_O:tot:h']
            ps_computed = self.prob['duct.Fl_O:stat:P']
            ts_computed = self.prob['duct.Fl_O:stat:T']
            tol = 2.0e-2
            assert_near_equal(pt_computed, pt, tol)
            assert_near_equal(ht_computed, ht, tol)
            assert_near_equal(ps_computed, ps, tol)
            assert_near_equal(ts_computed, ts, tol)
        # Verify analytic derivatives against complex step.
        partial_data = self.prob.check_partials(out_stream=None, method='cs',
                                                includes=['duct.*'], excludes=['*.base_thermo.*',])
        assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)

    def test_case_with_dPqP_MN(self):
        # Design duct sizes itself; the off-design duct reuses its loss
        # scalar (s_dPqP) and exit area via the explicit connections below.
        self.prob = Problem()
        cycle = self.prob.model = Cycle()
        cycle.add_subsystem('flow_start', FlowStart(thermo_data=janaf,
                            elements=AIR_ELEMENTS), promotes=['P', 'T', 'MN', 'W'])
        cycle.add_subsystem('flow_start_OD', FlowStart(thermo_data=janaf,
                            elements=AIR_ELEMENTS), promotes=['P', 'T', 'W'])
        expMN = 1.0
        cycle.add_subsystem('duct_des', Duct(elements=AIR_ELEMENTS, expMN=expMN), promotes=['MN'])
        cycle.add_subsystem('duct_OD', Duct(elements=AIR_ELEMENTS, expMN=expMN, design=False))
        cycle.pyc_connect_flow('flow_start.Fl_O', 'duct_des.Fl_I')
        cycle.pyc_connect_flow('flow_start_OD.Fl_O', 'duct_OD.Fl_I')
        cycle.set_input_defaults('P', 17., units='psi')
        cycle.set_input_defaults('T', 500., units='degR')
        cycle.set_input_defaults('MN', 0.5)
        cycle.set_input_defaults('flow_start_OD.MN', 0.25)
        cycle.set_input_defaults('duct_des.dPqP', 0.0)
        cycle.set_input_defaults('W', 500., units='lbm/s')
        cycle.connect("duct_des.s_dPqP", "duct_OD.s_dPqP")
        cycle.connect("duct_des.Fl_O:stat:area", "duct_OD.area")
        self.prob.setup(check=False, force_alloc_complex=True)
        self.prob.set_solver_print(level=-1)
        data = ref_data[0]
        self.prob['duct_des.dPqP'] = data[h_map['dPqP']]
        # input flowstation
        self.prob['P'] = data[h_map['Fl_I.Pt']]
        self.prob['T'] = data[h_map['Fl_I.Tt']]
        self.prob['MN'] = data[h_map['Fl_O.MN']]
        self.prob['W'] = data[h_map['Fl_I.W']]
        self.prob['duct_des.Fl_I:stat:V'] = data[h_map['Fl_I.V']]
        # give a decent initial guess for Ps
        print(self.prob['P'], self.prob['T'], self.prob['MN'])
        self.prob.run_model()
        # check outputs; Pt/Ps compare against hard-coded off-design values
        pt, ht, ps, ts = data[h_map['Fl_O.Pt']], data[
            h_map['Fl_O.ht']], data[h_map['Fl_O.Ps']], data[h_map['Fl_O.Ts']]
        pt_computed = self.prob['duct_OD.Fl_O:tot:P']
        ht_computed = self.prob['duct_OD.Fl_O:tot:h']
        ps_computed = self.prob['duct_OD.Fl_O:stat:P']
        ts_computed = self.prob['duct_OD.Fl_O:stat:T']
        tol = 1.0e-4
        assert_near_equal(pt_computed, 8.84073152, tol)
        assert_near_equal(ht_computed, ht, tol)
        assert_near_equal(ps_computed, 8.26348914, tol)
        assert_near_equal(ts_computed, ts, tol)
        partial_data = self.prob.check_partials(out_stream=None, method='cs',
                                                includes=['duct_OD.*'], excludes=['*.base_thermo.*',])
        assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)
if __name__ == "__main__":
    # Removed dataset residue ("| 0.305801 | 0.459925 |") fused onto this line.
    unittest.main()
import configparser
import logging
import os
import time
import json
import requests
import pygame
from logging.handlers import RotatingFileHandler
from datetime import datetime
import sys

# Device configuration lives in config.ini next to this script.
config = configparser.ConfigParser()
config.read('config.ini')
auth_key = config['general'].get('auth_key')
device_uid = config['general'].get('device_uid')
force_playback_only = config['general'].getboolean('force_playback_only')
interval = config['device'].getint('interval')          # playback period, minutes
sample_file = config['device'].get('sampleFile')        # path of audio sample

# Rotating log: 10 MB per file, 10 backups, DEBUG level (10).
logging.basicConfig(
    handlers=[RotatingFileHandler(filename='liarbird.log', mode='a', maxBytes=10000000, backupCount=10)],
    level=10,
    format='%(asctime)s %(levelname)-6s %(lineno)d %(name)-6s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
logging.debug(auth_key)
logging.debug(device_uid)
logging.debug(interval)
logging.debug(sample_file)


def _save_config():
    """Persist the in-memory config back to config.ini, closing the file.

    The original passed an unclosed ``open(...)`` handle to config.write().
    """
    with open('config.ini', 'w') as cfg_file:
        config.write(cfg_file)


if __name__ == '__main__':
    # Probe connectivity first: with internet we sync configuration from the
    # backend; without it we fall straight into offline playback.
    internet_connected = True
    try:
        logging.info('testing internet connectivity')
        requests.get("http://google.com", timeout=5)  # result unused; only reachability matters
    except (requests.ConnectionError, requests.Timeout):
        internet_connected = False
    try:
        if internet_connected and not force_playback_only:
            logging.info("internet connection found. running in configuration mode")
            if not device_uid:
                logging.info('no device identifier set - registering device')
                response = requests.post(
                    "https://us-central1-liarbird-1df1e.cloudfunctions.net/registerDevice",
                    data={"authKey": auth_key})
                if response.status_code != 200:
                    logging.error(response)
                else:
                    logging.debug(response.text)
                    json_response = json.loads(response.text)
                    config.set('general', 'device_uid', json_response['uid'])
                    device_uid = json_response['uid']
                    logging.info('updating config.ini')
                    _save_config()
            if device_uid:
                logging.info('fetching config')
                response = requests.post(
                    "https://us-central1-liarbird-1df1e.cloudfunctions.net/getConfiguration",
                    data={"authKey": auth_key, "uid": device_uid})
                if response.status_code != 200:
                    # failed request
                    logging.error(response)
                else:
                    logging.info('config retrieved from server')
                    logging.debug(response.text)
                    response_data = json.loads(response.text)
                    if 'playbackFrequency' in response_data:
                        # configparser stores strings only; coerce defensively.
                        config.set('device', 'interval', str(response_data['playbackFrequency']))
                        _save_config()
                    if 'sampleFile' in response_data:
                        config.set('device', 'sampleFile', str(response_data['sampleFile']))
                        _save_config()
                    if 'sampleUri' in response_data:
                        logging.info('fetching sample')
                        response = requests.get(response_data["sampleUri"])
                        _save_config()
                        logging.info('writing sample to disk')
                        with open(response_data["sampleFile"], 'wb') as sample_out:
                            sample_out.write(response.content)
        else:
            logging.info("NO internet connection found. running in playback mode")
        # Playback loop runs in both modes once configuration (if any) is done
        # — NOTE(review): the original's indentation was mangled; confirm the
        # playback block was not nested under the offline branch.
        if not sample_file:
            logging.error("missing sample file!")
        elif not interval:
            logging.error("missing interval!")
        else:
            logging.info("running as normal")
            pygame.mixer.init()
            while True:
                logging.info("starting playback of sample_file")
                pygame.mixer.music.load(sample_file)
                pygame.mixer.music.play()
                time.sleep(interval * 60)
    except (IOError, SystemExit):
        logging.error('IOError or SystemExit')
        raise
    except KeyboardInterrupt:
        logging.error('Ctrl+C Interrupt')
        print("Ctrl+C Pressed. Shutting down.")  # fixed "Crtl+C" typo
# NOTE: the leading "import configparser" was lost to dataset-residue fusion
# on the previous line; restored here.
import configparser
import logging
import os
import time
import json
import requests
import pygame
from logging.handlers import RotatingFileHandler
from datetime import datetime
import sys

# Device configuration lives in config.ini next to this script.
config = configparser.ConfigParser()
config.read('config.ini')
auth_key = config['general'].get('auth_key')
device_uid = config['general'].get('device_uid')
force_playback_only = config['general'].getboolean('force_playback_only')
interval = config['device'].getint('interval')          # playback period, minutes
sample_file = config['device'].get('sampleFile')        # path of audio sample

# Rotating log: 10 MB per file, 10 backups, DEBUG level (10).
logging.basicConfig(
    handlers=[RotatingFileHandler(filename='liarbird.log', mode='a', maxBytes=10000000, backupCount=10)],
    level=10,
    format='%(asctime)s %(levelname)-6s %(lineno)d %(name)-6s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
logging.debug(auth_key)
logging.debug(device_uid)
logging.debug(interval)
logging.debug(sample_file)


def _save_config():
    """Persist the in-memory config back to config.ini, closing the file.

    The original passed an unclosed ``open(...)`` handle to config.write().
    """
    with open('config.ini', 'w') as cfg_file:
        config.write(cfg_file)


if __name__ == '__main__':
    # Probe connectivity first: with internet we sync configuration from the
    # backend; without it we fall straight into offline playback.
    internet_connected = True
    try:
        logging.info('testing internet connectivity')
        requests.get("http://google.com", timeout=5)  # result unused; only reachability matters
    except (requests.ConnectionError, requests.Timeout):
        internet_connected = False
    try:
        if internet_connected and not force_playback_only:
            logging.info("internet connection found. running in configuration mode")
            if not device_uid:
                logging.info('no device identifier set - registering device')
                response = requests.post(
                    "https://us-central1-liarbird-1df1e.cloudfunctions.net/registerDevice",
                    data={"authKey": auth_key})
                if response.status_code != 200:
                    logging.error(response)
                else:
                    logging.debug(response.text)
                    json_response = json.loads(response.text)
                    config.set('general', 'device_uid', json_response['uid'])
                    device_uid = json_response['uid']
                    logging.info('updating config.ini')
                    _save_config()
            if device_uid:
                logging.info('fetching config')
                response = requests.post(
                    "https://us-central1-liarbird-1df1e.cloudfunctions.net/getConfiguration",
                    data={"authKey": auth_key, "uid": device_uid})
                if response.status_code != 200:
                    # failed request
                    logging.error(response)
                else:
                    logging.info('config retrieved from server')
                    logging.debug(response.text)
                    response_data = json.loads(response.text)
                    if 'playbackFrequency' in response_data:
                        # configparser stores strings only; coerce defensively.
                        config.set('device', 'interval', str(response_data['playbackFrequency']))
                        _save_config()
                    if 'sampleFile' in response_data:
                        config.set('device', 'sampleFile', str(response_data['sampleFile']))
                        _save_config()
                    if 'sampleUri' in response_data:
                        logging.info('fetching sample')
                        response = requests.get(response_data["sampleUri"])
                        _save_config()
                        logging.info('writing sample to disk')
                        with open(response_data["sampleFile"], 'wb') as sample_out:
                            sample_out.write(response.content)
        else:
            logging.info("NO internet connection found. running in playback mode")
        # Playback loop runs in both modes once configuration (if any) is done
        # — NOTE(review): the original's indentation was mangled; confirm the
        # playback block was not nested under the offline branch.
        if not sample_file:
            logging.error("missing sample file!")
        elif not interval:
            logging.error("missing interval!")
        else:
            logging.info("running as normal")
            pygame.mixer.init()
            while True:
                logging.info("starting playback of sample_file")
                pygame.mixer.music.load(sample_file)
                pygame.mixer.music.play()
                time.sleep(interval * 60)
    except (IOError, SystemExit):
        logging.error('IOError or SystemExit')
        raise
    except KeyboardInterrupt:
        logging.error('Ctrl+C Interrupt')
        print("Ctrl+C Pressed. Shutting down.")  # fixed "Crtl+C" typo
from typing import Optional, Tuple

import pandas as pd
from sklearn.model_selection import train_test_split
# Public Dropbox links (dl=1 forces a direct download) for the fixed
# train/test splits used by this assignment.
TRAINING_DOWNLOAD_URL = 'https://www.dropbox.com/s/newxt7ifuipiezp/train.csv?dl=1'
TEST_DOWNLOAD_URL = 'https://www.dropbox.com/s/dhqm40csvi0mhhz/test.csv?dl=1'
# Name of the label column in the training CSV.
TARGET = 'Choice'
def get_training_data(validation: bool = False, validation_size: float = 0.2
                      ) -> Tuple[pd.DataFrame, pd.Series,
                                 Optional[pd.DataFrame], Optional[pd.Series]]:
    """Download the training CSV and split it into features and target.

    The file has (1 target 'Choice' + 22 feature) columns * 5500 rows; the
    features are the A_*/B_* twitter-activity counts (follower/following/
    listed counts, mentions/retweets sent and received, posts, and three
    network features for each of users A and B).

    The original return annotation used a bare tuple with ``pd.DataFrame or
    None`` (which evaluates to just ``pd.DataFrame``); replaced with a
    proper ``Tuple[...]``. Note y_train/y_val are Series, not DataFrames.

    :param validation: if True, additionally split off a validation set.
    :param validation_size: fraction of rows reserved for validation;
        must be strictly between 0 and 1 when validation is True.
    :return: (x_train, y_train, x_val, y_val); x_val and y_val are None
        when validation is False.
    :raises ValueError: if validation is True and validation_size is not
        in the open interval (0, 1).
    """
    if validation and not 0 < validation_size < 1:
        raise ValueError('validation_size should be bigger than 0 and smaller than 1.')
    training_dataframe = pd.read_csv(TRAINING_DOWNLOAD_URL)
    x_train = training_dataframe.loc[:, training_dataframe.columns != TARGET]
    y_train = training_dataframe.loc[:, TARGET]
    x_val = None
    y_val = None
    if validation:
        # random_state fixed so the split is reproducible across runs.
        x_train, x_val, y_train, y_val = train_test_split(
            x_train, y_train, test_size=validation_size, random_state=1)
    return x_train, y_train, x_val, y_val
def get_test_data():
    """Download and return the test feature matrix (22 columns, 5952 rows).

    Columns are the same A_*/B_* twitter-activity features as the training
    set (see get_training_data); there is no target column.
    """
    return pd.read_csv(TEST_DOWNLOAD_URL)
# Usage examples (removed dataset residue fused onto the final line).
if __name__ == '__main__':
    # Do not use validation set.
    x_train, y_train, _, _ = get_training_data()
    print('len(x_train) is 5500.')
    print(x_train.head())
    print('-' * 70)
    print('len(y_train) is 5500.')
    print(y_train.head())
    print('-' * 70)

    # Use validation set.
    x_train, y_train, x_val, y_val = get_training_data(validation=True)
    print('len(x_train) is (5500 * (1 - 0.2)) = 4400.')
    print(x_train.head())
    print('-' * 70)
    print('len(y_train) is (5500 * (1 - 0.2)) = 4400.')
    print(y_train.head())
    print('-' * 70)
    print('len(x_val) is (5500 * 0.2) = 1100.')
    print(x_val.head())
    print('-' * 70)
    print('len(y_val) is (5500 * 0.2) = 1100.')
    print(y_val.head())
    print('-' * 70)

    # Use test set.
    x_test = get_test_data()
    print('len(x_test) is 5952.')
    print(x_test.head())
    print('-' * 70)
from sklearn.model_selection import train_test_split
TRAINING_DOWNLOAD_URL = 'https://www.dropbox.com/s/newxt7ifuipiezp/train.csv?dl=1'
TEST_DOWNLOAD_URL = 'https://www.dropbox.com/s/dhqm40csvi0mhhz/test.csv?dl=1'
TARGET = 'Choice'
def get_training_data(validation: bool=False, validation_size: float=0.2) \
        -> (pd.DataFrame, pd.DataFrame, pd.DataFrame or None, pd.DataFrame or None):
    """Download the training CSV and optionally split off a validation set.

    The CSV has (1 target 'Choice' + 22 feature) columns * 5500 rows; the
    features are the paired A_*/B_* Twitter-account statistics
    (follower/following/listed counts as int; mentions/retweets
    sent/received, posts, network_feature_2/3 as float;
    network_feature_1 as int).

    :param validation: (bool) if True, split the data and also return a
        validation set.
    :param validation_size: (float) held-out fraction; must satisfy
        0 < validation_size < 1 when validation is True.
    :return x_train: (DataFrame) features, (5500 * (1 - validation_size)) rows
    :return y_train: (Series[int]) target 'Choice' values for x_train
    :return x_val: (DataFrame or None) held-out features; None when
        validation is False
    :return y_val: (Series[int] or None) held-out targets; None when
        validation is False

    NOTE(review): the return annotation is a tuple *expression*, not
    typing.Tuple -- it evaluates harmlessly but conveys no checkable type.
    """
    if validation and (validation_size <= 0 or validation_size >= 1):
        raise ValueError('validation_size should be bigger than 0 and smaller than 1.')
    # Downloads from Dropbox on every call -- requires network access.
    training_dataframe = pd.read_csv(TRAINING_DOWNLOAD_URL)
    x_train = training_dataframe.loc[:, training_dataframe.columns != TARGET]
    y_train = training_dataframe.loc[:, TARGET]
    x_val = None
    y_val = None
    if validation:
        # Fixed random_state keeps the split reproducible across runs.
        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=validation_size, random_state=1)
    return x_train, y_train, x_val, y_val
def get_test_data():
    """Download and return the test set: 22 feature columns * 5952 rows.

    Columns are the same paired A_*/B_* account statistics as in
    get_training_data (follower/following/listed counts as int;
    mentions/retweets sent/received, posts, network_feature_2/3 as float;
    network_feature_1 as int) -- without the 'Choice' target.

    (The original docstring said 5500 rows for x_test; its own header and
    the downloaded file have 5952.)

    :return x_test: (DataFrame)
    """
    # Downloads from Dropbox on every call -- requires network access.
    x_test = pd.read_csv(TEST_DOWNLOAD_URL)
    return x_test
# Usage examples
# Smoke-test: exercises all three loaders; downloads every CSV over the
# network, so it needs connectivity and may take a while.
if __name__ == '__main__':
    # Do not use validation set.
    x_train, y_train, _, _ = get_training_data()
    print('len(x_train) is 5500.')
    print(x_train.head())
    print('-' * 70)
    print('len(y_train) is 5500.')
    print(y_train.head())
    print('-' * 70)
    # Use validation set.
    x_train, y_train, x_val, y_val = get_training_data(validation=True)
    print('len(x_train) is (5500 * (1 - 0.2)) = 4400.')
    print(x_train.head())
    print('-' * 70)
    print('len(y_train) is (5500 * (1 - 0.2)) = 4400.')
    print(y_train.head())
    print('-' * 70)
    print('len(x_val) is (5500 * 0.2) = 1100.')
    print(x_val.head())
    print('-' * 70)
    print('len(y_val) is (5500 * 0.2) = 1100.')
    print(y_val.head())
    print('-' * 70)
    # Use test set.
    x_test = get_test_data()
    print('len(x_test) is 5952.')
    print(x_test.head())
print('-' * 70) | 0.724188 | 0.439627 |
import os, time
from flask import request, jsonify, g, send_from_directory
from . import api
from authentication import auth
from .. import db
from ..models import User, Comment, News, Group
from errors import not_found, forbidden, bad_request
from datetime import datetime
# Attachment store: <repo>/file/, two levels above the blueprint package.
UPLOAD_FOLDER = os.path.join(api.root_path, '../../file/')
# Extensions accepted for profile pictures (raster image formats only).
ALLOWED_PIC_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp'}
# Extensions accepted for generic attachments: the image formats plus common
# document/spreadsheet/presentation formats (incl. Korean hwp/cell/show).
ALLOWED_FILE_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp',
                           'show', 'cell', 'xls', 'xlsm', 'xlsx', 'csv', 'ppt',
                           'pptx', 'doc', 'docx', 'hwp', 'pdf', 'txt'}


def allowed_file(filename):
    """Return True if *filename* has an attachment extension we accept.

    Callers pass a lower-cased name, so matching is effectively
    case-insensitive.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1] in ALLOWED_FILE_EXTENSIONS


def allowed_picture(filename):
    """Return True if *filename* has an image extension we accept."""
    return '.' in filename and \
        filename.rsplit('.', 1)[1] in ALLOWED_PIC_EXTENSIONS


def addTimestamp(filename):
    """Insert a `_YYYYMMDD_HHMMSS` local-time stamp before the extension.

    Used to derive a unique on-disk name from the user-supplied one.
    The caller must guarantee the name contains a dot (see allowed_file),
    otherwise rsplit yields a single part and this raises ValueError.
    """
    stem, ext = filename.rsplit('.', 1)
    # strftime produces the same zero-padded fields as the original
    # hand-built "%04d%02d%02d_%02d%02d%02d" over time.localtime().
    return "%s_%s.%s" % (stem, time.strftime("%Y%m%d_%H%M%S"), ext)
@api.route('/news/<int:id>/file', methods=['GET'])  # fetch a news post's attached file
@auth.login_required
def get_news_file(id):
    # Serve the file attached to news post *id*; group-restricted posts are
    # only readable by members or the creator.
    news = News.query.get(id)
    if news is None:
        return not_found('News does not exist')
    # NOTE(review): the guard tests `news.group` but then reads
    # `news.house.users` / `news.house.create_user` -- confirm whether
    # `group` and `house` name the same relationship or this is a typo.
    if news.group is not None and g.current_user not in news.house.users\
            and g.current_user.id != news.house.create_user:
        return forbidden('User does not in this group')
    filelocate = news.filelocate
    if filelocate is None:
        return not_found('File does not exist')
    return send_from_directory(UPLOAD_FOLDER, filelocate)
@api.route('/news/<int:id>/file', methods=['POST'])  # attach a file to a news post
@auth.login_required
def post_news_file(id):
    """Attach an uploaded file to news post *id* (author only).

    Returns the updated news JSON on success, or a 4xx error response.
    """
    news = News.query.get(id)
    if news is None:
        return not_found('News does not exist')
    if g.current_user.id != news.author_id:
        return forbidden('Cannot Upload File')
    # .get() avoids a KeyError (-> 500) when the multipart field is missing.
    file = request.files.get('file')
    if file is None:
        return bad_request('File Request in invaild')
    if not (file and allowed_file(file.filename.lower())):
        # BUG FIX: this path previously fell through to `news.filename =
        # filename` and raised NameError; reject the upload explicitly.
        return bad_request('File have abnormal extension')
    filename = file.filename
    # Exactly one dot allowed; the original used `is not 2`, which compares
    # by identity and only works by CPython's small-int interning.
    if len(filename.rsplit('.')) != 2:
        return bad_request('File have abnormal extension')
    filelocate = addTimestamp(filename)
    file.save(os.path.join(UPLOAD_FOLDER, filelocate))
    news.filename = filename
    news.filelocate = filelocate
    db.session.commit()
    return jsonify(news.to_json())
@api.route('/news/<int:id>/file', methods=['PUT'])  # replace a news post's file
@auth.login_required
def put_news_file(id):
    """Replace the file attached to news post *id* with a new upload."""
    news = News.query.get(id)
    if news is None:
        return not_found('News does not exist')
    if g.current_user.id != news.author_id:
        return forbidden('Cannot Upload File')
    # .get() avoids a KeyError (-> 500) when the multipart field is missing.
    file = request.files.get('file')
    if file is None:
        return bad_request('File Request in invaild')
    if not (file and allowed_file(file.filename.lower())):
        # BUG FIX: previously fell through and raised NameError on `filename`.
        return bad_request('File have abnormal extension')
    filename = file.filename
    if len(filename.rsplit('.')) != 2:  # exactly one dot; was `is not 2`
        return bad_request('File have abnormal extension')
    # Only delete the old file once the new upload has been validated; the
    # original removed it first and could leave the post file-less, and
    # crashed (TypeError) when filelocate was None.
    if news.filelocate:
        old_path = os.path.join(UPLOAD_FOLDER, news.filelocate)
        if os.path.exists(old_path):
            os.remove(old_path)
    filelocate = addTimestamp(filename)
    file.save(os.path.join(UPLOAD_FOLDER, filelocate))
    news.filename = filename
    news.filelocate = filelocate
    db.session.commit()
    return jsonify(news.to_json())
@api.route('/news/<int:id>/file', methods=['DELETE'])  # delete a news post's file
@auth.login_required
def delete_news_file(id):
    """Remove the file attached to news post *id* (author only); 204."""
    news = News.query.get(id)
    if news is None:
        return not_found('News does not exist')
    if g.current_user.id != news.author_id:
        return forbidden('Cannot Delete File')
    # Guard: filelocate may be None (no attachment) or already gone on disk;
    # a bare os.remove raised TypeError/FileNotFoundError -> 500.
    if news.filelocate:
        path = os.path.join(UPLOAD_FOLDER, news.filelocate)
        if os.path.exists(path):
            os.remove(path)
    news.filename = None
    news.filelocate = None
    db.session.commit()
    return '', 204
@api.route('/comments/<int:comment_id>/file', methods=['GET'])  # fetch a comment's attached file
@auth.login_required
def get_comment_file(comment_id):
    # Serve the file attached to the given comment, or 404 if the comment
    # or its attachment does not exist.
    comment = Comment.query.get(comment_id)
    if comment is None:
        return not_found('Comment does not exist')
    filelocate = comment.filelocate
    if filelocate is None:
        return not_found('File does not exist')
    # NOTE(review): unlike get_news_file there is no group-membership check
    # here -- confirm comment attachments are meant to be readable by any
    # logged-in user.
    return send_from_directory(UPLOAD_FOLDER, filelocate)
@api.route('/comments/<int:comment_id>/file', methods=['POST'])  # attach a file to a comment
@auth.login_required
def post_comment_file(comment_id):
    """Attach an uploaded file to comment *comment_id* (author only)."""
    comment = Comment.query.get(comment_id)
    if comment is None:
        return not_found('Comment does not exist')
    if g.current_user.id != comment.author_id:
        return forbidden('Cannot Upload File')
    # .get() avoids a KeyError (-> 500) when the multipart field is missing.
    file = request.files.get('file')
    if file is None:
        return bad_request('File Request in invaild')
    if not (file and allowed_file(file.filename.lower())):
        # BUG FIX: previously fell through and raised NameError on `filename`.
        return bad_request('File have abnormal extension')
    filename = file.filename
    if len(filename.rsplit('.')) != 2:  # exactly one dot; was `is not 2`
        return bad_request('File have abnormal extension')
    filelocate = addTimestamp(filename)
    file.save(os.path.join(UPLOAD_FOLDER, filelocate))
    comment.filename = filename
    comment.filelocate = filelocate
    db.session.commit()
    return jsonify(comment.to_json())
@api.route('/comments/<int:comment_id>/file', methods=['PUT'])  # replace a comment's file
@auth.login_required
def put_comment_file(comment_id):
    """Replace the file attached to comment *comment_id* (author only)."""
    # BUG FIX: the original called `Comment.query.gt(...)` -- a typo for
    # .get() that raised AttributeError on every request.
    comment = Comment.query.get(comment_id)
    if comment is None:
        return not_found('Comment does not exist')
    if g.current_user.id != comment.author_id:
        return forbidden('Cannot Upload File')
    file = request.files.get('file')  # .get() avoids KeyError when absent
    if file is None:
        return bad_request('File Request in invaild')
    if not (file and allowed_file(file.filename.lower())):
        # BUG FIX: previously fell through and raised NameError on `filename`.
        return bad_request('File have abnormal extension')
    filename = file.filename
    if len(filename.rsplit('.')) != 2:  # exactly one dot; was `is not 2`
        return bad_request('File have abnormal extension')
    # Delete the old file only after the new upload validated; the original
    # removed it first and crashed (TypeError) when filelocate was None.
    if comment.filelocate:
        old_path = os.path.join(UPLOAD_FOLDER, comment.filelocate)
        if os.path.exists(old_path):
            os.remove(old_path)
    filelocate = addTimestamp(filename)
    file.save(os.path.join(UPLOAD_FOLDER, filelocate))
    comment.filename = filename
    comment.filelocate = filelocate
    db.session.commit()
    return jsonify(comment.to_json())
@api.route('/comments/<int:comment_id>/file', methods=['DELETE'])  # delete a comment's file
@auth.login_required
def delete_comment_file(comment_id):
    """Remove the file attached to comment *comment_id* (author only); 204."""
    comment = Comment.query.get(comment_id)
    if comment is None:
        return not_found('Comment does not exist')
    if g.current_user.id != comment.author_id:
        return forbidden('Cannot Delete File')
    # Guard: filelocate may be None or the file already gone on disk.
    if comment.filelocate:
        path = os.path.join(UPLOAD_FOLDER, comment.filelocate)
        if os.path.exists(path):
            os.remove(path)
    comment.filename = None
    comment.filelocate = None
    db.session.commit()
    return '', 204
@api.route('/users/<user_id>/picture', methods=['GET'])  # fetch a user's profile picture
@auth.login_required
def get_user_picture(user_id):
    """Serve the profile picture of the user named *user_id*, or 404."""
    user = User.query.filter_by(username=user_id).first()
    if user is None:
        return not_found('User does not exist')
    pictureLocate = user.pictureLocate
    # BUG FIX: the original tested the undefined name `picture`, raising
    # NameError (-> 500); the on-disk locator is the value that matters.
    if pictureLocate is None:
        return not_found('Picture does not exist')
    return send_from_directory(UPLOAD_FOLDER, pictureLocate)
@api.route('/users/<user_id>/picture', methods=['POST'])  # upload a profile picture
@auth.login_required
def post_user_picture(user_id):
    """Attach an uploaded picture to the user's own profile."""
    user = User.query.filter_by(username=user_id).first()
    if user is None:
        return not_found('User does not exist')
    # NOTE(review): identity (`is`) comparison assumes the session returns
    # the same instance for the same row -- confirm this holds here.
    if g.current_user is not user:
        return forbidden('Cannot modify other user')
    # .get() avoids a KeyError (-> 500) when the multipart field is missing.
    file = request.files.get('file')
    if file is None:
        return bad_request('File Request in invaild')
    if not (file and allowed_picture(file.filename.lower())):
        # BUG FIX: previously fell through and raised NameError on
        # `pictureName`; reject the upload explicitly.
        return bad_request('File have abnormal extension')
    pictureName = file.filename
    pictureLocate = addTimestamp(pictureName)
    file.save(os.path.join(UPLOAD_FOLDER, pictureLocate))
    user.pictureName = pictureName
    user.pictureLocate = pictureLocate
    db.session.commit()
    return jsonify(user.to_json()), 200
@api.route('/users/<user_id>/picture', methods=['PUT'])  # replace a profile picture
@auth.login_required
def put_user_picture(user_id):
    """Replace the user's own profile picture with a new upload."""
    user = User.query.filter_by(username=user_id).first()
    if user is None:
        return not_found('User does not exist')
    if g.current_user is not user:
        return forbidden('Cannot modify other user')
    file = request.files.get('file')  # .get() avoids KeyError when absent
    if file is None:
        return bad_request('File Request in invaild')
    if not (file and allowed_picture(file.filename.lower())):
        # BUG FIX: previously fell through and raised NameError.
        return bad_request('File have abnormal extension')
    # Delete the old picture only after the new upload validated; the
    # original removed it first and crashed (TypeError) when
    # pictureLocate was None.
    if user.pictureLocate:
        old_path = os.path.join(UPLOAD_FOLDER, user.pictureLocate)
        if os.path.exists(old_path):
            os.remove(old_path)
    pictureName = file.filename
    pictureLocate = addTimestamp(pictureName)
    file.save(os.path.join(UPLOAD_FOLDER, pictureLocate))
    user.pictureName = pictureName
    user.pictureLocate = pictureLocate
    db.session.commit()
    return jsonify(user.to_json())
@api.route('/users/<user_id>/picture', methods=['DELETE'])  # delete a profile picture
@auth.login_required
def delete_user_picture(user_id):
    """Remove the user's own profile picture; respond 204."""
    # BUG FIX: the parameter was named `id` while the route variable is
    # `user_id`, so Flask's keyword dispatch raised TypeError on every call
    # (and the body referenced the undefined `user_id`).
    user = User.query.filter_by(username=user_id).first()
    if user is None:
        return not_found('User does not exist')
    if g.current_user is not user:
        return forbidden('Cannot modify other user')
    # Guard: pictureLocate may be None or the file already gone on disk.
    if user.pictureLocate:
        path = os.path.join(UPLOAD_FOLDER, user.pictureLocate)
        if os.path.exists(path):
            os.remove(path)
    user.pictureName = None
    user.pictureLocate = None
    db.session.commit()
return '', 204 | app/api_1_0/files.py | import os, time
from flask import request, jsonify, g, send_from_directory
from . import api
from authentication import auth
from .. import db
from ..models import User, Comment, News, Group
from errors import not_found, forbidden, bad_request
from datetime import datetime
UPLOAD_FOLDER = os.path.join(api.root_path, '../../file/')
ALLOWED_PIC_EXTENSIONS = set(['png','jpg','jpeg','gif', 'bmp'])
ALLOWED_FILE_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif', 'bmp',\
'show', 'cell', 'xls', 'xlsm', 'xlsx', 'csv', 'ppt',\
'pptx', 'doc', 'docx', 'hwp', 'pdf', 'txt'])
def allowed_file(filename):
return '.' in filename and\
filename.rsplit('.', 1)[1] in ALLOWED_FILE_EXTENSIONS
def allowed_picture(filename):
return '.' in filename and\
filename.rsplit('.', 1)[1] in ALLOWED_PIC_EXTENSIONS
def addTimestamp(filename):
now = time.localtime()
timestamp = "_%04d%02d%02d_%02d%02d%02d" %\
(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
return filename.rsplit('.', 1)[0] + timestamp + "." + filename.rsplit('.', 1)[1]
@api.route('/news/<int:id>/file', methods=['GET']) # 특정 신송 파일 요청
@auth.login_required
def get_news_file(id):
news = News.query.get(id)
if news is None:
return not_found('News does not exist')
if news.group is not None and g.current_user not in news.house.users\
and g.current_user.id != news.house.create_user:
return forbidden('User does not in this group')
filelocate = news.filelocate
if filelocate is None:
return not_found('File does not exist')
return send_from_directory(UPLOAD_FOLDER, filelocate)
@api.route('/news/<int:id>/file', methods=['POST']) # 특정 신송 파일 추가
@auth.login_required
def post_news_file(id):
news = News.query.get(id)
if news is None:
return not_found('News does not exist')
if g.current_user.id != news.author_id:
return forbidden('Cannot Upload File')
if request.files['file'] is None:
return bad_request('File Request in invaild')
file = request.files['file']
if file and allowed_file(file.filename.lower()):
filename = file.filename
filelocate = addTimestamp(filename)
if len(filename.rsplit('.')) is not 2:
return bad_request('File have abnormal extension')
file.save(os.path.join(UPLOAD_FOLDER, filelocate))
news.filename = filename
news.filelocate = filelocate
db.session.commit()
return jsonify(news.to_json())
@api.route('/news/<int:id>/file', methods=['PUT']) # 특정 신송 파일 수정
@auth.login_required
def put_news_file(id):
news = News.query.get(id)
if news is None:
return not_found('News does not exist')
if g.current_user.id != news.author_id:
return forbidden('Cannot Upload File')
if request.files['file'] is None:
return bad_request('File Request in invaild')
os.remove(os.path.join(UPLOAD_FOLDER, news.filelocate))
file = request.files['file']
if file and allowed_file(file.filename.lower()):
filename = file.filename
filelocate = addTimestamp(filename)
if len(filename.rsplit('.')) is not 2:
return bad_request('File have abnormal extension')
file.save(os.path.join(UPLOAD_FOLDER, filelocate))
news.filename = filename
news.filelocate = filelocate
db.session.commit()
return jsonify(news.to_json())
@api.route('/news/<int:id>/file', methods=['DELETE']) # 특정 신송 파일 삭제
@auth.login_required
def delete_news_file(id):
news = News.query.get(id)
if news is None:
return not_found('News does not exist')
if g.current_user.id != news.author_id:
return forbidden('Cannot Delete File')
os.remove(os.path.join(UPLOAD_FOLDER, news.filelocate))
news.filename = None
news.filelocate = None
db.session.commit()
return '', 204
@api.route('/comments/<int:comment_id>/file', methods=['GET']) # 특정 덧글 파일 요청
@auth.login_required
def get_comment_file(comment_id):
comment = Comment.query.get(comment_id)
if comment is None:
return not_found('Comment does not exist')
filelocate = comment.filelocate
if filelocate is None:
return not_found('File does not exist')
return send_from_directory(UPLOAD_FOLDER, filelocate)
@api.route('/comments/<int:comment_id>/file', methods=['POST']) # 특정 덧글 파일 추가
@auth.login_required
def post_comment_file(comment_id):
comment = Comment.query.get(comment_id)
if comment is None:
return not_found('Comment does not exist')
if g.current_user.id != comment.author_id:
return forbidden('Cannot Upload File')
if request.files['file'] is None:
return bad_request('File Request in invaild')
file = request.files['file']
if file and allowed_file(file.filename.lower()):
filename = file.filename
filelocate = addTimestamp(filename)
if len(filename.rsplit('.')) is not 2:
return bad_request('File have abnormal extension')
file.save(os.path.join(UPLOAD_FOLDER, filelocate))
comment.filename = filename
comment.filelocate = filelocate
db.session.commit()
return jsonify(comment.to_json())
@api.route('/comments/<int:comment_id>/file', methods=['PUT']) # 특정 덧글 파일 수정
@auth.login_required
def put_comment_file(comment_id):
comment = Comment.query.gt(comment_id)
if comment is None:
return not_found('Comment does not exist')
if g.current_user.id != comment.author_id:
return forbidden('Cannot Upload File')
if request.files['file'] is None:
return bad_request('File Request in invaild')
os.remove(os.path.join(UPLOAD_FOLDER, comment.filelocate))
file = request.files['file']
if file and allowed_file(file.filename.lower()):
filename = file.filename
filelocate = addTimestamp(filename)
if len(filename.rsplit('.')) is not 2:
return bad_request('File have abnormal extension')
file.save(os.path.join(UPLOAD_FOLDER, filelocate))
comment.filename = filename
comment.filelocate = filelocate
db.session.commit()
return jsonify(comment.to_json())
@api.route('/comments/<int:comment_id>/file', methods=['DELETE']) # 특정 덧글 파일 삭제
@auth.login_required
def delete_comment_file(comment_id):
comment = Comment.query.get(comment_id)
if comment is None:
return not_found('Comment does not exist')
if g.current_user.id != comment.author_id:
return forbidden('Cannot Delete File')
os.remove(os.path.join(UPLOAD_FOLDER, comment.filelocate))
comment.filename = None
comment.filelocate = None
db.session.commit()
return '', 204
@api.route('/users/<user_id>/picture', methods=['GET']) # 유저 프로필 사진 요청
@auth.login_required
def get_user_picture(user_id):
user = User.query.filter_by(username=user_id).first()
if user is None:
return not_found('User does not exist')
pictureName = user.pictureName
pictureLocate = user.pictureLocate
if picture is None:
return not_found('Picture does not exist')
return send_from_directory(UPLOAD_FOLDER, pictureLocate)
@api.route('/users/<user_id>/picture', methods=['POST']) # 유저 프로필 사진 추가
@auth.login_required
def post_user_picture(user_id):
user = User.query.filter_by(username=user_id).first()
if user is None:
return not_found('User does not exist')
if g.current_user is not user:
return forbidden('Cannot modify other user')
if request.files['file'] is None:
return bad_request('File Request in invaild')
file = request.files['file']
if file and allowed_picture(file.filename.lower()):
pictureName = file.filename
pictureLocate = addTimestamp(pictureName)
file.save(os.path.join(UPLOAD_FOLDER, pictureLocate))
user.pictureName = pictureName
user.pictureLocate = pictureLocate
db.session.commit()
return jsonify(user.to_json()), 200
@api.route('/users/<user_id>/picture', methods=['PUT']) # 유저 프로필 사진 수정
@auth.login_required
def put_user_picture(user_id):
user = User.query.filter_by(username=user_id).first()
if user is None:
return not_found('User does not exist')
if g.current_user is not user:
return forbidden('Cannot modify other user')
if request.files['file'] is None:
return bad_request('File Request in invaild')
os.remove(os.path.join(UPLOAD_FOLDER, user.pictureLocate))
file = request.files['file']
if file and allowed_picture(file.filename.lower()):
pictureName = file.filename
pictureLocate = addTimestamp(pictureName)
file.save(os.path.join(UPLOAD_FOLDER, pictureLocate))
user.pictureName = pictureName
user.pictureLocate = pictureLocate
db.session.commit()
return jsonify(user.to_json())
@api.route('/users/<user_id>/picture', methods=['DELETE']) # 유저 프로필 사진 삭제
@auth.login_required
def delete_user_picture(id):
user = User.query.filter_by(username=user_id).first()
if user is None:
return not_found('User does not exist')
if g.current_user is not user:
return forbidden('Cannot modify other user')
os.remove(os.path.join(UPLOAD_FOLDER, user.pictureLocate))
user.pictureName = None
user.pictureLocate = None
db.session.commit()
return '', 204 | 0.228845 | 0.061171 |
import math
from dataclasses import dataclass
from typing import Optional, List, Dict
from rlbot.agents.base_agent import SimpleControllerState
from rlbot.utils.game_state_util import GameState, CarState, Vector3, Physics, Rotator
from choreography.drone import Drone
from util.vec import Vec3
@dataclass
class StateAndControls:
state: GameState
controls: SimpleControllerState
class MotionTrack:
    """Constant-speed straight-line motion from `start` to `end`.

    Precomputes the displacement vector, a velocity vector of magnitude
    `speed`, and the total traversal time.
    """
    def __init__(self, start: Vec3, end: Vec3, speed: float):
        self.start = start
        self.end = end
        self.speed = speed
        self.to_end = (end - start)
        if self.to_end.is_zero():
            # Degenerate segment: no direction to rescale, so stand still.
            self.velocity = Vec3()
        else:
            self.velocity = self.to_end.rescale(speed)
        # Zero for a degenerate segment (length 0 / speed).
        self.total_time = self.to_end.length() / speed
@dataclass
class Instruction:
    """One CNC step: an optional per-drone action and/or a motion segment.

    NOTE(review): `drone_action` carries no annotation, so @dataclass
    treats it as a plain class attribute rather than a field.
    """
    drone_action = None  # callable(Drone) applied to every drone, or None
    motion_track: MotionTrack = None  # line segment to traverse, or None
@dataclass
class InstructionResult:
    """Outcome of one manipulate_drones tick."""
    finished: bool  # True once the whole instruction list is consumed
    car_states: Dict[int, CarState]  # per-drone state overrides, or None
class BoostOn(Instruction):
    """Instruction: switch boost on for every drone ("nozzle" on)."""
    @staticmethod
    def boost_on(drone: Drone):
        drone.ctrl.boost = True
    # Class-level reference invoked via step.drone_action in manipulate_drones.
    drone_action = boost_on
class BoostOff(Instruction):
    """Instruction: switch boost off for every drone ("nozzle" off)."""
    @staticmethod
    def boost_off(drone: Drone):
        drone.ctrl.boost = False
    # Class-level reference invoked via step.drone_action in manipulate_drones.
    drone_action = boost_off
class Move(Instruction):
    """Instruction: traverse a straight segment at the given speed."""
    def __init__(self, start: Vec3, end: Vec3, speed: float):
        self.motion_track = MotionTrack(start, end, speed)
class BotCnc:
    """Builds a CNC-style instruction program over a scaled 2D plane."""
    def __init__(self, origin: Vec3, normal: Vec3, scale: float, speed: float):
        self.origin = origin
        self.normal = normal  # currently unused; see TODO in move_to_position
        self.scale = scale
        self.speed = speed
        # Segments chain from wherever the previous move ended.
        self.previous_position = origin
        self.list: List[Instruction] = []

    def activate_nozzle(self):
        # "Nozzle" == the drones' boost flame used as the extruder.
        self.list.append(BoostOn())

    def deactivate_nozzle(self):
        self.list.append(BoostOff())

    def move_to_position(self, x: float, y: float):
        # Map plane coordinates (x, y) into world space relative to origin.
        end = self.origin + Vec3(x, y) * self.scale
        # TODO: incorporate self.normal by doing some kind of rotation transform.
        self.list.append(Move(self.previous_position, end, self.speed))
        self.previous_position = end
@dataclass
class CncExtruder:
    """Drives a squad of drones along a BotCnc instruction program.

    NOTE(review): @dataclass is vestigial here (no annotated fields and a
    hand-written __init__) but is kept so the generated dunder methods are
    unchanged.
    """

    def __init__(self, drones: List[Drone], bot_cnc: BotCnc):
        self.drones = drones
        self.step_index: int = 0
        # Game time at which the current motion step began; None = not started.
        self.step_start_time: float = None
        self.bot_cnc = bot_cnc

    def is_finished(self) -> bool:
        """True once every instruction has been consumed."""
        return self.step_index >= len(self.bot_cnc.list)

    def arrange_drones(self, extruder_position: Vec3, velocity: Vec3, game_time: float) -> Dict[int, CarState]:
        """Place the drones in a row, 100uu apart on x, at the extruder tip."""
        car_states: Dict[int, CarState] = {}
        for i, drone in enumerate(self.drones):
            x_offset = i * 100
            car_state = CarState(physics=Physics())
            car_state.physics.velocity = velocity.to_setter()
            car_state.physics.location = Vector3(
                extruder_position.x + x_offset,
                extruder_position.y,
                extruder_position.z)
            # Pitch pi/2: nose straight up, boost exhaust pointing down.
            car_state.physics.rotation = Rotator(math.pi / 2, 0, 0)
            car_states[drone.index] = car_state
        return car_states

    def manipulate_drones(self, game_time: float) -> InstructionResult:
        """Execute the current instruction for one tick.

        Applies any per-drone action, advances motion segments (rolling
        directly onto the next segment when time has overshot), and returns
        the resulting car-state overrides.
        """
        step = self.bot_cnc.list[self.step_index]
        step_finished = True
        car_states = None
        if step.drone_action:
            for drone in self.drones:
                step.drone_action(drone)
        if step.motion_track:
            # BUG FIX: was `if self.step_start_time:`, which treats a valid
            # start time of exactly 0.0 as "not started" and restarts the
            # segment every tick when game time begins at zero.
            if self.step_start_time is not None:
                elapsed = game_time - self.step_start_time
                progression = elapsed / (step.motion_track.total_time + .00001)  # Avoid division by zero
                if progression < 1:
                    # Normal case: mid-way through drawing this segment.
                    loc = step.motion_track.start + step.motion_track.to_end * progression
                    vel = step.motion_track.velocity
                else:
                    # Time says this segment should already be finished.
                    if self.step_index + 1 < len(self.bot_cnc.list) and self.bot_cnc.list[self.step_index + 1].motion_track:
                        # Next step is also a segment: roll straight onto it,
                        # crediting this segment's nominal duration.
                        self.step_start_time = self.step_start_time + step.motion_track.total_time
                        self.step_index += 1
                        next_step = self.bot_cnc.list[self.step_index]
                        elapsed = game_time - self.step_start_time
                        progression = elapsed / (next_step.motion_track.total_time + .00001)  # Avoid division by zero
                        loc = next_step.motion_track.start + next_step.motion_track.to_end * progression
                        vel = next_step.motion_track.velocity
                    else:
                        # Next step is not a segment: halt at this one's end.
                        loc = step.motion_track.end
                        vel = Vec3()
            else:
                # First tick on this segment: initialize and start at its
                # beginning.
                loc = step.motion_track.start
                vel = step.motion_track.velocity
                progression = 0
                self.step_start_time = game_time
            car_states = self.arrange_drones(loc, vel, game_time)
            if progression < 1:
                step_finished = False
        if step_finished:
            self.step_index += 1
            self.step_start_time = None
return InstructionResult(self.is_finished(), car_states) | ChoreographyHive/cnc/cnc_instructions.py | import math
from dataclasses import dataclass
from typing import Optional, List, Dict
from rlbot.agents.base_agent import SimpleControllerState
from rlbot.utils.game_state_util import GameState, CarState, Vector3, Physics, Rotator
from choreography.drone import Drone
from util.vec import Vec3
@dataclass
class StateAndControls:
state: GameState
controls: SimpleControllerState
class MotionTrack:
def __init__(self, start: Vec3, end: Vec3, speed: float):
self.start = start
self.end = end
self.speed = speed
self.to_end = (end - start)
if self.to_end.is_zero():
self.velocity = Vec3()
else:
self.velocity = self.to_end.rescale(speed)
self.total_time = self.to_end.length() / speed
@dataclass
class Instruction:
drone_action = None
motion_track: MotionTrack = None
@dataclass
class InstructionResult:
finished: bool
car_states: Dict[int, CarState]
class BoostOn(Instruction):
@staticmethod
def boost_on(drone: Drone):
drone.ctrl.boost = True
drone_action = boost_on
class BoostOff(Instruction):
@staticmethod
def boost_off(drone: Drone):
drone.ctrl.boost = False
drone_action = boost_off
class Move(Instruction):
def __init__(self, start: Vec3, end: Vec3, speed: float):
self.motion_track = MotionTrack(start, end, speed)
class BotCnc:
def __init__(self, origin: Vec3, normal: Vec3, scale: float, speed: float):
self.origin = origin
self.normal = normal
self.scale = scale
self.speed = speed
self.previous_position = origin
self.list: List[Instruction] = []
def activate_nozzle(self):
self.list.append(BoostOn())
def deactivate_nozzle(self):
self.list.append(BoostOff())
def move_to_position(self, x: float, y: float):
end = self.origin + Vec3(x, y) * self.scale
# TODO: incorporate self.normal by doing some kind of rotation transform.
self.list.append(Move(self.previous_position, end, self.speed))
self.previous_position = end
@dataclass
class CncExtruder:
def __init__(self, drones: List[Drone], bot_cnc: BotCnc):
self.drones = drones
self.step_index: int = 0
self.step_start_time: float = None
self.bot_cnc = bot_cnc
def is_finished(self):
return self.step_index >= len(self.bot_cnc.list)
def arrange_drones(self, extruder_position: Vec3, velocity: Vec3, game_time: float) -> Dict[int, CarState]:
car_states: Dict[int, CarState] = {}
for i, drone in enumerate(self.drones):
x_offset = i * 100
car_state = CarState(physics=Physics())
car_state.physics.velocity = velocity.to_setter()
car_state.physics.location = Vector3(
extruder_position.x + x_offset,
extruder_position.y,
extruder_position.z)
car_state.physics.rotation = Rotator(math.pi / 2, 0, 0)
car_states[drone.index] = car_state
return car_states
def manipulate_drones(self, game_time: float) -> InstructionResult:
step = self.bot_cnc.list[self.step_index]
step_finished = True
car_states = None
if step.drone_action:
for drone in self.drones:
step.drone_action(drone)
if step.motion_track:
if self.step_start_time:
elapsed = game_time - self.step_start_time
progression = elapsed / (step.motion_track.total_time + .00001) # Avoid division by zero
if progression < 1:
# This is the normal case where we're in the middle of drawing a segment
loc = step.motion_track.start + step.motion_track.to_end * progression
vel = step.motion_track.velocity
else:
# Time has progressed to the point where we should already be done with this line segment.
if self.step_index + 1 < len(self.bot_cnc.list) and self.bot_cnc.list[self.step_index + 1].motion_track:
# The next step is also a line segment, so continue motion onto it
self.step_start_time = self.step_start_time + step.motion_track.total_time
self.step_index += 1
next_step = self.bot_cnc.list[self.step_index]
elapsed = game_time - self.step_start_time
progression = elapsed / (next_step.motion_track.total_time + .00001) # Avoid division by zero
loc = next_step.motion_track.start + next_step.motion_track.to_end * progression
vel = next_step.motion_track.velocity
else:
# The next step is not a line segment, so halt at the end of this one.
loc = step.motion_track.end
vel = Vec3()
else:
# This is the first time we've arrived at this line segment,
# initialize things and start at the beginning.
loc = step.motion_track.start
vel = step.motion_track.velocity
progression = 0
self.step_start_time = game_time
car_states = self.arrange_drones(loc, vel, game_time)
if progression < 1:
step_finished = False
if step_finished:
self.step_index += 1
self.step_start_time = None
return InstructionResult(self.is_finished(), car_states) | 0.792263 | 0.395455 |
from flask import Blueprint, render_template, request, send_file
from flask import current_app as app
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired, Length
import datetime, io
from types import SimpleNamespace
from wz_core.configuration import Dates
from wz_core.pupils import Pupils
from wz_compat.config import sortingName
from wz_text.coversheet import makeSheets, pupilFields, makeOneSheet
#TODO: school year should be the latest one by default (?), but can be
# stored in the session data to allow access to other years.
_schoolyear = 2020
#TODO: the date should be saved with the year ...
_date = '2020-07-15'
class DateForm(FlaskForm):
    """Single-field form: the date of issue printed on a cover sheet."""
    dateofissue = DateField('Ausgabedatum',
            # Default comes from the module-level _date constant.
            default=datetime.date.fromisoformat(_date),
            validators=[InputRequired()])
# Set up Blueprint
bp = Blueprint('bp_text_cover', # internal name of the Blueprint
__name__, # allows the current package to be found
template_folder='templates') # package-local templates
@bp.route('/', methods=['GET','POST'])
#@admin_required
def textCover():
    # Entry page: list the classes eligible for text-report cover sheets.
    p = Pupils(_schoolyear)
    # String comparison keeps classes '01'..'12', including suffixed names
    # such as '01K' (lexical ordering on the class name).
    klasses = [k for k in p.classes() if k >= '01' and k < '13']
    #TODO: Maybe a validity test for text report classes?
    #TODO: dateofissue
    return render_template('text_cover_entry.html',
                            schoolyear=str(_schoolyear),
                            dateofissue=Dates.dateConv(_date),
                            klasses=klasses) #['01', '01K', '02', '02K', '03', '03K']
#TODO: backlink to klasses list (entry page)?
@bp.route('/klass/<klass>', methods=['GET','POST'])
#@admin_required
def klassview(klass):
    # GET: show the pupil list for <klass> together with a date form.
    # POST: build cover-sheet PDFs for the selected pupils and send them
    # as a single download.
    form = DateForm()
    if form.validate_on_submit():
        # POST
        _d = form.dateofissue.data.isoformat()
        pdfBytes = makeSheets (_schoolyear, _d, klass,
                #TODO check list not empty ...
                pids=request.form.getlist('Pupil'))
        return send_file(
            io.BytesIO(pdfBytes),
            attachment_filename='Mantel_%s.pdf' % klass,
            mimetype='application/pdf',
            as_attachment=True
        )
    # GET
    p = Pupils(_schoolyear)
    pdlist = p.classPupils(klass)
    klasses = [k for k in p.classes() if k >= '01' and k < '13']
    return render_template('text_cover_klass.html', form=form,
            schoolyear=str(_schoolyear),
            klass=klass,
            klasses=klasses,
            pupils=[(pd['PID'], pd.name()) for pd in pdlist])
#TODO: The form has the school-year.
# There might be a checkbox/switch for print/pdf, but print might not
# be available on all hosts.
# It might be helpful to a a little javascript to implement a pupil-
# selection toggle (all/none).
@bp.route('/pupil/<klass>/<pid>', methods=['GET','POST'])
#@admin_required
def pupilview(klass, pid):
fields = pupilFields(klass)
form = DateForm()
if form.validate_on_submit():
# POST
_d = form.dateofissue.data.isoformat()
pupil = SimpleNamespace (**{f: request.form[f] for f, _ in fields})
pdfBytes = makeOneSheet(_schoolyear, _d, klass, pupil)
return send_file(
io.BytesIO(pdfBytes),
attachment_filename='Mantel_%s.pdf' % sortingName(
pupil.FIRSTNAMES, pupil.LASTNAME),
mimetype='application/pdf',
as_attachment=True
)
# GET
p = Pupils(_schoolyear)
pdlist = p.classPupils(klass)
pupils = []
for pdata in pdlist:
_pid = pdata['PID']
pupils.append((_pid, pdata.name()))
if _pid == pid:
pupil = {f: (fname, pdata[f]) for f, fname in fields}
return render_template('text_cover_pupil.html', form=form,
schoolyear=str(_schoolyear),
klass=klass,
pupil=pupil,
pupils=pupils) | zeugs/flask_app/text_cover/text_cover0.py | from flask import Blueprint, render_template, request, send_file
from flask import current_app as app
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired, Length
import datetime, io
from types import SimpleNamespace
from wz_core.configuration import Dates
from wz_core.pupils import Pupils
from wz_compat.config import sortingName
from wz_text.coversheet import makeSheets, pupilFields, makeOneSheet
#TODO: school year should be the latest one by default (?), but can be
# stored in the session data to allow access to other years.
_schoolyear = 2020
#TODO: the date should be saved with the year ...
_date = '2020-07-15'
class DateForm(FlaskForm):
dateofissue = DateField('Ausgabedatum',
default=datetime.date.fromisoformat(_date),
validators=[InputRequired()])
# Set up Blueprint
bp = Blueprint('bp_text_cover', # internal name of the Blueprint
__name__, # allows the current package to be found
template_folder='templates') # package-local templates
@bp.route('/', methods=['GET','POST'])
#@admin_required
def textCover():
p = Pupils(_schoolyear)
klasses = [k for k in p.classes() if k >= '01' and k < '13']
#TODO: Maybe a validity test for text report classes?
#TODO: dateofissue
return render_template('text_cover_entry.html',
schoolyear=str(_schoolyear),
dateofissue=Dates.dateConv(_date),
klasses=klasses) #['01', '01K', '02', '02K', '03', '03K']
#TODO: backlink to klasses list (entry page)?
@bp.route('/klass/<klass>', methods=['GET','POST'])
#@admin_required
def klassview(klass):
form = DateForm()
if form.validate_on_submit():
# POST
_d = form.dateofissue.data.isoformat()
pdfBytes = makeSheets (_schoolyear, _d, klass,
#TODO check list not empty ...
pids=request.form.getlist('Pupil'))
return send_file(
io.BytesIO(pdfBytes),
attachment_filename='Mantel_%s.pdf' % klass,
mimetype='application/pdf',
as_attachment=True
)
# GET
p = Pupils(_schoolyear)
pdlist = p.classPupils(klass)
klasses = [k for k in p.classes() if k >= '01' and k < '13']
return render_template('text_cover_klass.html', form=form,
schoolyear=str(_schoolyear),
klass=klass,
klasses=klasses,
pupils=[(pd['PID'], pd.name()) for pd in pdlist])
#TODO: The form has the school-year.
# There might be a checkbox/switch for print/pdf, but print might not
# be available on all hosts.
# It might be helpful to a a little javascript to implement a pupil-
# selection toggle (all/none).
@bp.route('/pupil/<klass>/<pid>', methods=['GET','POST'])
#@admin_required
def pupilview(klass, pid):
fields = pupilFields(klass)
form = DateForm()
if form.validate_on_submit():
# POST
_d = form.dateofissue.data.isoformat()
pupil = SimpleNamespace (**{f: request.form[f] for f, _ in fields})
pdfBytes = makeOneSheet(_schoolyear, _d, klass, pupil)
return send_file(
io.BytesIO(pdfBytes),
attachment_filename='Mantel_%s.pdf' % sortingName(
pupil.FIRSTNAMES, pupil.LASTNAME),
mimetype='application/pdf',
as_attachment=True
)
# GET
p = Pupils(_schoolyear)
pdlist = p.classPupils(klass)
pupils = []
for pdata in pdlist:
_pid = pdata['PID']
pupils.append((_pid, pdata.name()))
if _pid == pid:
pupil = {f: (fname, pdata[f]) for f, fname in fields}
return render_template('text_cover_pupil.html', form=form,
schoolyear=str(_schoolyear),
klass=klass,
pupil=pupil,
pupils=pupils) | 0.244453 | 0.118947 |
from sfini.execution import _execution as tscr
import pytest
from unittest import mock
import sfini
import datetime
import json
from sfini.execution import history
@pytest.fixture
def session():
    """AWS session mock, specced against ``sfini.AWSSession``.

    Previously this was ``mock.MagicMock(autospec=sfini.AWSSession)``.
    ``autospec`` is not a ``MagicMock`` keyword (it belongs to
    ``mock.patch``/``mock.create_autospec``); as a stray keyword it was
    passed to ``configure_mock`` and merely set an ``autospec`` attribute
    on an entirely unspecced mock. ``create_autospec`` realises the
    original intent: attribute/signature access is checked against the
    real class.
    """
    # instance=True: the tests treat this as an AWSSession *instance*
    # (e.g. ``session.sfn.describe_execution``), not the class itself.
    return mock.create_autospec(sfini.AWSSession, instance=True)
class TestExecution:
    """Test ``sfini.execution._execution.Execution``.

    The ``execution`` fixture builds an ``Execution`` wired to the mocked
    AWS ``session``; nested classes group tests per property/behaviour.
    """

    @pytest.fixture
    def eg_input(self):
        """Example execution input."""
        return {"a": 42, "b": "bla", "c": {"foo": [1, 2], "bar": None}}

    @pytest.fixture
    def execution(self, session, eg_input):
        """An example Execution instance."""
        return tscr.Execution(
            "spam",
            "bla-sm:arn",
            eg_input,
            arn="spam:arn",
            session=session)

    def test_init(self, execution, session, eg_input):
        """Execution initialisation."""
        assert execution.name == "spam"
        assert execution.state_machine_arn == "bla-sm:arn"
        assert execution.execution_input == eg_input
        assert execution.session is session

    class TestStr:
        """Execution stringification."""

        def test_no_status(self, execution):
            """Execution status is unknown."""
            res = str(execution)
            assert "spam" in res

        def test_with_status(self, execution):
            """Execution status is known."""
            execution._status = "SUCCEEDED"
            res = str(execution)
            assert "spam" in res
            assert "SUCCEEDED" in res

    class TestRepr:
        """Execution string representation."""

        def test_with_arn_container_input(self, execution, session):
            """ARN provided and execution input is a container."""
            execution.execution_input = {"a": 42, "b": "bla", "c": [1, 2] * 20}
            exp_pref = "Execution("
            exp_pos = "'spam', 'bla-sm:arn', len(execution_input)=3"
            # Keyword ordering in the repr is not pinned down: accept both.
            exp_kw_a = ", arn='spam:arn', session=%r)" % session
            exp_kw_b = ", session=%r, arn='spam:arn')" % session
            exp_a = exp_pref + exp_pos + exp_kw_a
            exp_b = exp_pref + exp_pos + exp_kw_b
            res = repr(execution)
            assert res in (exp_a, exp_b)

        def test_no_arn_container_input(self, execution, session):
            """No ARN provided and execution input is a container."""
            execution.execution_input = {"a": 42, "b": "bla", "c": [1, 2] * 20}
            execution.arn = None
            exp_pref = "Execution("
            exp_pos = "'spam', 'bla-sm:arn', len(execution_input)=3"
            exp_kw = ", session=%r)" % session
            exp = exp_pref + exp_pos + exp_kw
            res = repr(execution)
            assert res == exp

        def test_with_arn_scalar_input(self, execution, session):
            """ARN provided and execution input is a scalar."""
            execution.execution_input = 42
            exp_pref = "Execution("
            exp_pos = "'spam', 'bla-sm:arn'"
            exp_kw_1 = "execution_input=42"
            exp_kw_2 = "arn='spam:arn'"
            exp_kw_3 = "session=%r" % session
            # All six orderings of the three keyword arguments are accepted.
            exp_kws = [
                ", " + exp_kw_1 + ", " + exp_kw_2 + ", " + exp_kw_3 + ")",
                ", " + exp_kw_1 + ", " + exp_kw_3 + ", " + exp_kw_2 + ")",
                ", " + exp_kw_2 + ", " + exp_kw_1 + ", " + exp_kw_3 + ")",
                ", " + exp_kw_2 + ", " + exp_kw_3 + ", " + exp_kw_1 + ")",
                ", " + exp_kw_3 + ", " + exp_kw_1 + ", " + exp_kw_2 + ")",
                ", " + exp_kw_3 + ", " + exp_kw_2 + ", " + exp_kw_1 + ")"]
            exps = [exp_pref + exp_pos + exp_kw for exp_kw in exp_kws]
            res = repr(execution)
            assert res in exps

    def test_from_arn(self, session):
        """Construction of Execution by querying AWS."""
        # Setup environment
        now = datetime.datetime.now()
        input_ = {"a": 42, "b": "bla", "c": {"foo": [1, 2], "bar": None}}
        output = {"foo": [1, 2], "bar": None}
        resp = {
            "executionArn": "spam:arn",
            "stateMachineArn": "bla-sm:arn",
            "name": "spam",
            "status": "SUCCEEDED",
            "startDate": now - datetime.timedelta(hours=1),
            "stopDate": now - datetime.timedelta(minutes=50),
            "input": json.dumps(input_),
            "output": json.dumps(output)}
        session.sfn.describe_execution.return_value = resp
        # Build input
        arn = "spam:arn"
        # Run function
        res = tscr.Execution.from_arn(arn, session=session)
        # Check result
        assert isinstance(res, tscr.Execution)
        assert res.name == "spam"
        assert res.state_machine_arn == "bla-sm:arn"
        assert res.execution_input == input_
        assert res.arn == "spam:arn"
        assert res.session is session
        assert res._status == "SUCCEEDED"
        assert res._start_date == now - datetime.timedelta(hours=1)
        assert res._stop_date == now - datetime.timedelta(minutes=50)
        assert res._output == {"foo": [1, 2], "bar": None}
        session.sfn.describe_execution.assert_called_once_with(
            executionArn="spam:arn")

    def test_from_list_item(self, session):
        """Construction of Execution from a list-executions item."""
        now = datetime.datetime.now()
        item = {
            "executionArn": "spam:arn",
            "stateMachineArn": "bla-sm:arn",
            "name": "spam",
            "status": "SUCCEEDED",
            "startDate": now - datetime.timedelta(hours=1),
            "stopDate": now - datetime.timedelta(minutes=50)}
        # Run function
        res = tscr.Execution.from_list_item(item, session=session)
        # Check result
        assert isinstance(res, tscr.Execution)
        assert res.name == "spam"
        assert res.state_machine_arn == "bla-sm:arn"
        # The item carries no "input" key, so the sentinel must be kept.
        assert res.execution_input is res._not_provided
        assert res.arn == "spam:arn"
        assert res.session is session
        assert res._status == "SUCCEEDED"
        assert res._start_date == now - datetime.timedelta(hours=1)
        assert res._stop_date == now - datetime.timedelta(minutes=50)

    class TestStatus:
        """Execution status provided by AWS."""

        @pytest.mark.parametrize("status", [None, "RUNNING"])
        def test_unknown(self, execution, status):
            """Execution status is not currently known."""
            # The mocked update simulates AWS reporting a terminal status.
            def _update():
                execution._status = "TIMED_OUT"
            execution._update = mock.Mock(side_effect=_update)
            execution._status = status
            res = execution.status
            assert res == "TIMED_OUT"
            execution._update.assert_called_once_with()

        @pytest.mark.parametrize(
            "status",
            ["SUCCEEDED", "FAILED", "ABORTED", "TIMED_OUT"])
        def test_known(self, execution, status):
            """Execution status is known."""
            execution._update = mock.Mock()
            execution._status = status
            res = execution.status
            assert res == status
            execution._update.assert_not_called()

    class TestStartTime:
        """Execution start-time provided by AWS."""

        def test_unknown(self, execution):
            """Execution start-time is not already known."""
            # ``now`` is bound after the closure definition; late binding
            # means the closure sees it once it runs.
            def _update():
                execution._start_date = now - datetime.timedelta(minutes=10)
            now = datetime.datetime.now()
            execution._update = mock.Mock(side_effect=_update)
            execution._start_date = None
            res = execution.start_date
            assert res == now - datetime.timedelta(minutes=10)
            execution._update.assert_called_once_with()

        def test_known(self, execution):
            """Execution start-time is known."""
            now = datetime.datetime.now()
            execution._update = mock.Mock()
            execution._start_date = now - datetime.timedelta(minutes=10)
            res = execution.start_date
            assert res == now - datetime.timedelta(minutes=10)
            execution._update.assert_not_called()

    class TestStopTime:
        """Execution stop-time provided by AWS."""

        def test_unknown(self, execution):
            """Execution stop-time is not already known."""
            def _update():
                execution._stop_date = now - datetime.timedelta(minutes=10)
            now = datetime.datetime.now()
            execution._update = mock.Mock(side_effect=_update)
            execution._raise_unfinished = mock.Mock()
            execution._stop_date = None
            res = execution.stop_date
            assert res == now - datetime.timedelta(minutes=10)
            execution._update.assert_called_once_with()
            execution._raise_unfinished.assert_called_once_with()

        def test_known(self, execution):
            """Execution stop-time is known."""
            now = datetime.datetime.now()
            execution._update = mock.Mock()
            execution._raise_unfinished = mock.Mock()
            execution._stop_date = now - datetime.timedelta(minutes=10)
            res = execution.stop_date
            assert res == now - datetime.timedelta(minutes=10)
            execution._update.assert_not_called()
            execution._raise_unfinished.assert_not_called()

    class TestOutput:
        """Execution output provided by AWS."""

        def test_unknown(self, execution):
            """Execution output is not already known."""
            def _update():
                execution._output = {"foo": [1, 2], "bar": None}
            execution._update = mock.Mock(side_effect=_update)
            execution._raise_unfinished = mock.Mock()
            execution._raise_on_failure = mock.Mock()
            # ``tscr._default`` is the "not yet known" sentinel.
            execution._output = tscr._default
            res = execution.output
            assert res == {"foo": [1, 2], "bar": None}
            execution._update.assert_called_once_with()
            execution._raise_unfinished.assert_called_once_with()
            execution._raise_on_failure.assert_called_once_with()

        def test_known(self, execution):
            """Execution output is known."""
            execution._update = mock.Mock()
            execution._raise_unfinished = mock.Mock()
            execution._raise_on_failure = mock.Mock()
            execution._output = {"foo": [1, 2], "bar": None}
            res = execution.output
            assert res == {"foo": [1, 2], "bar": None}
            execution._update.assert_not_called()
            execution._raise_unfinished.assert_not_called()
            execution._raise_on_failure.assert_not_called()

    class TestUpdate:
        """Execution details updating by querying AWS."""

        @pytest.mark.parametrize(
            ("status", "input_"),
            [
                (None, tscr._default),
                ("RUNNING", tscr._default),
                (None, {"a": 42, "c": {"foo": [1, 2], "bar": None}}),
                ("SUCCEEDED", tscr._default)])
        def test_query(self, execution, session, status, input_):
            """A query of AWS is performed."""
            # Setup environment
            now = datetime.datetime.now()
            rinput_ = {"a": 42, "c": {"foo": [1, 2], "bar": None}}
            output = {"foo": [1, 2], "bar": None}
            resp = {
                "executionArn": "spam:arn",
                "stateMachineArn": "bla-sm:arn",
                "name": "spam",
                "status": "SUCCEEDED",
                "startDate": now - datetime.timedelta(hours=1),
                "stopDate": now - datetime.timedelta(minutes=50),
                "input": json.dumps(rinput_),
                "output": json.dumps(output)}
            session.sfn.describe_execution.return_value = resp
            execution._raise_no_arn = mock.Mock()
            execution._status = status
            execution.execution_input = input_
            # Run function
            execution._update()
            # Check result
            assert execution._status == "SUCCEEDED"
            assert execution._start_date == now - datetime.timedelta(hours=1)
            assert execution._stop_date == now - datetime.timedelta(minutes=50)
            assert execution._output == {"foo": [1, 2], "bar": None}
            session.sfn.describe_execution.assert_called_once_with(
                executionArn="spam:arn")
            execution._raise_no_arn.assert_called_once_with()

        def test_finished(self, execution, session):
            """No query of AWS is performed."""
            execution._raise_no_arn = mock.Mock()
            execution._status = "SUCCEEDED"
            execution._update()
            session.sfn.describe_execution.assert_not_called()
            execution._raise_no_arn.assert_not_called()

    class TestRaiseOnFailure:
        """Raising on execution failure."""

        @pytest.mark.parametrize("status", ["FAILED", "ABORTED", "TIMED_OUT"])
        def test_failure(self, execution, status):
            """Execution has failed."""
            execution._status = status
            with pytest.raises(RuntimeError) as e:
                execution._raise_on_failure()
            assert "spam" in str(e.value)
            assert status in str(e.value)

        @pytest.mark.parametrize("status", ["RUNNING", "SUCCEEDED"])
        def test_not_failure(self, execution, status):
            """Execution has not failed."""
            execution._status = status
            execution._raise_on_failure()

    class TestRaiseUnfinished:
        """Raising when execution is unfinished."""

        def test_unfinished(self, execution):
            """Execution hasn't finished."""
            execution._status = "RUNNING"
            with pytest.raises(RuntimeError) as e:
                execution._raise_unfinished()
            assert "spam" in str(e.value)
            assert "finish" in str(e.value)

        @pytest.mark.parametrize(
            "status",
            ["FAILED", "ABORTED", "TIMED_OUT", "SUCCEEDED"])
        def test_finished(self, execution, status):
            """Execution has finished."""
            execution._status = status
            execution._raise_unfinished()

    class TestRaiseNoArn:
        """Raising when no ARN is provided to execution."""

        def test_no_arn(self, execution):
            """Execution has no associated ARN."""
            execution.arn = None
            with pytest.raises(RuntimeError) as e:
                execution._raise_no_arn()
            assert "ARN" in str(e.value)
            assert "spam" in str(e.value)

        def test_finished(self, execution):
            """Execution has an associated ARN, so no raise."""
            execution._raise_no_arn()

    def test_start(self, execution, session, eg_input):
        """Execution starting."""
        # Setup environment
        now = datetime.datetime.now()
        resp = {"executionArn": "spam:arn", "startDate": now}
        session.sfn.start_execution.return_value = resp
        execution.arn = None
        # Run function
        execution.start()
        # Check result
        assert execution.arn == "spam:arn"
        assert execution._start_date == now
        assert execution._status == "RUNNING"
        session.sfn.start_execution.assert_called_once_with(
            stateMachineArn="bla-sm:arn",
            name="spam",
            input=mock.ANY)
        # JSON key ordering isn't pinned: compare the parsed input instead.
        res_se_call = session.sfn.start_execution.call_args_list[0]
        res_input_str = res_se_call[1]["input"]
        assert json.loads(res_input_str) == eg_input

    def test_start_default_input(self, execution, session):
        """Execution starting with no execution input provided."""
        # Setup environment
        now = datetime.datetime.now()
        resp = {"executionArn": "spam:arn", "startDate": now}
        session.sfn.start_execution.return_value = resp
        execution.arn = None
        execution.execution_input = tscr._default
        # Run function
        execution.start()
        # Check result
        assert execution.arn == "spam:arn"
        assert execution._start_date == now
        assert execution._status == "RUNNING"
        session.sfn.start_execution.assert_called_once_with(
            stateMachineArn="bla-sm:arn",
            name="spam",
            input="{}")
        assert execution.execution_input == {}

    class TestWait:
        """Waiting on execution to finish."""

        @pytest.mark.timeout(1.0)
        def test_running(self, execution):
            """Execution is running."""
            # Setup environment
            # Report RUNNING four times, then FAILED: five updates total.
            _shared = {"j": 0}
            def _update():
                if _shared["j"] > 3:
                    execution._status = "FAILED"
                    return
                execution._status = "RUNNING"
                _shared["j"] += 1
            execution._update = mock.Mock(side_effect=_update)
            execution._raise_on_failure = mock.Mock()
            execution._wait_sleep_time = 0.01
            # Build expectation
            exp_ud_calls = [mock.call() for _ in range(5)]
            # Run function
            execution.wait()
            # Check result
            assert execution._update.call_args_list == exp_ud_calls
            execution._raise_on_failure.assert_called_once_with()

        @pytest.mark.timeout(1.0)
        def test_no_raise_on_failure(self, execution):
            """Execution is running, then doesn't raise on failure."""
            # Setup environment
            _shared = {"j": 0}
            def _update():
                if _shared["j"] > 3:
                    execution._status = "FAILED"
                    return
                execution._status = "RUNNING"
                _shared["j"] += 1
            execution._update = mock.Mock(side_effect=_update)
            execution._raise_on_failure = mock.Mock()
            execution._wait_sleep_time = 0.01
            # Build expectation
            exp_ud_calls = [mock.call() for _ in range(5)]
            # Run function
            execution.wait(raise_on_failure=False)
            # Check result
            assert execution._update.call_args_list == exp_ud_calls
            execution._raise_on_failure.assert_not_called()

        @pytest.mark.timeout(1.0)
        def test_timeout(self, execution):
            """Execution is running, and doesn't finish before time-out."""
            # Setup environment
            _shared = {"j": 0}
            def _update():
                if _shared["j"] > 3:
                    execution._status = "FAILED"
                    return
                execution._status = "RUNNING"
                _shared["j"] += 1
            execution._update = mock.Mock(side_effect=_update)
            execution._raise_on_failure = mock.Mock()
            execution._wait_sleep_time = 0.01
            # Build expectation
            # NOTE(review): the 3-call expectation depends on the
            # timeout/sleep-time ratio (0.02 / 0.01) — timing-sensitive.
            exp_ud_calls = [mock.call() for _ in range(3)]
            # Run function
            with pytest.raises(RuntimeError) as e:
                execution.wait(timeout=0.02)
            assert "imeout" in str(e.value) or "ime-out" in str(e.value)
            assert "spam" in str(e.value)
            # Check result
            assert execution._update.call_args_list == exp_ud_calls
            execution._raise_on_failure.assert_not_called()

        @pytest.mark.timeout(1.0)
        def test_finished(self, execution):
            """Execution is finished, then doesn't raise on failure."""
            # Setup environment
            execution._update = mock.Mock()
            execution._raise_on_failure = mock.Mock()
            execution._wait_sleep_time = 0.01
            execution._status = "SUCCEEDED"
            # Run function
            execution.wait(raise_on_failure=False)
            # Check result
            execution._update.assert_called_once_with()
            execution._raise_on_failure.assert_not_called()

    @pytest.mark.parametrize(
        ("kwargs", "exp_kwargs"),
        [
            ({}, {}),
            ({"error_code": "SpamError"}, {"error": "SpamError"}),
            ({"details": "A spam occured"}, {"cause": "A spam occured"}),
            (
                {"error_code": "SpamError", "details": "A spam occured"},
                {"error": "SpamError", "cause": "A spam occured"})])
    def test_stop(self, execution, session, kwargs, exp_kwargs):
        """Execution stopping."""
        # Setup environment
        now = datetime.datetime.now()
        resp = {"stopDate": now}
        session.sfn.stop_execution.return_value = resp
        execution._raise_no_arn = mock.Mock()
        # Run function
        execution.stop(**kwargs)
        # Check result
        assert execution._stop_date == now
        session.sfn.stop_execution.assert_called_once_with(
            executionArn="spam:arn",
            **exp_kwargs)
        execution._raise_no_arn.assert_called_once_with()

    def test_get_history(self, execution, session):
        """Execution history querying."""
        # Setup environment
        resp = {"events": [{"id": j} for j in range(4)]}
        session.sfn.get_execution_history.return_value = resp
        events = [mock.Mock(spec=history.Event) for _ in range(4)]
        ph_mock = mock.Mock(return_value=events)
        execution._raise_no_arn = mock.Mock()
        # Run function
        with mock.patch.object(history, "parse_history", ph_mock):
            res = execution.get_history()
        # Check result
        assert res == events
        ph_mock.assert_called_once_with([{"id": j} for j in range(4)])
        session.sfn.get_execution_history.assert_called_once_with(
            executionArn="spam:arn")
        execution._raise_no_arn.assert_called_once_with()

    @pytest.mark.parametrize(
        ("output", "exp_suff"),
        [
            (
                {"foo": [1, 2], "bar": None},
                "\nOutput: {\"foo\": [1, 2], \"bar\": null}"),
            (tscr._default, "")])
    def test_format_history(self, execution, output, exp_suff):
        """Execution history formatting."""
        # Setup environment
        # Minimal stand-in for history.Event: only the attributes the
        # formatter reads (``details_str`` and ``str(event)``).
        class Event:
            def __init__(self, name, details_str):
                self.name = name
                self.details_str = details_str
            def __str__(self):
                return self.name
        events = [
            Event("ev0", "Event details 0"),
            Event("ev1", ""),
            Event("ev2", "Event details 2"),
            Event("ev3", "Event details 3"),
            Event("ev4", "")]
        execution.get_history = mock.Mock(return_value=events)
        execution._update = mock.Mock()
        execution._output = output
        # Build expectation
        # Events with details get a trailing colon and an indented line.
        exp = (
            "ev0:\n"
            "  Event details 0\n"
            "ev1\n"
            "ev2:\n"
            "  Event details 2\n"
            "ev3:\n"
            "  Event details 3\n"
            "ev4")
        exp += exp_suff
        # Test function
        res = execution.format_history()
        # Check result
        assert res == exp
        execution.get_history.assert_called_once_with()
        execution._update.assert_called_once_with()
from sfini.execution import _execution as tscr
import pytest
from unittest import mock
import sfini
import datetime
import json
from sfini.execution import history
@pytest.fixture
def session():
    """AWS session mock, specced against ``sfini.AWSSession``.

    Previously this was ``mock.MagicMock(autospec=sfini.AWSSession)``.
    ``autospec`` is not a ``MagicMock`` keyword (it belongs to
    ``mock.patch``/``mock.create_autospec``); as a stray keyword it was
    passed to ``configure_mock`` and merely set an ``autospec`` attribute
    on an unspecced mock. ``create_autospec`` realises the original
    intent: attribute/signature access is checked against the real class.
    """
    # instance=True: the tests use this as an AWSSession *instance*
    # (e.g. ``session.sfn.describe_execution``), not the class itself.
    return mock.create_autospec(sfini.AWSSession, instance=True)
class TestExecution:
"""Test ``sfini.execution._execution.Execution``."""
@pytest.fixture
def eg_input(self):
"""Example execution input."""
return {"a": 42, "b": "bla", "c": {"foo": [1, 2], "bar": None}}
@pytest.fixture
def execution(self, session, eg_input):
"""An example Execution instance."""
return tscr.Execution(
"spam",
"bla-sm:arn",
eg_input,
arn="spam:arn",
session=session)
def test_init(self, execution, session, eg_input):
"""Execution initialisation."""
assert execution.name == "spam"
assert execution.state_machine_arn == "bla-sm:arn"
assert execution.execution_input == eg_input
assert execution.session is session
class TestStr:
"""Execution stringification."""
def test_no_status(self, execution):
"""Execution status is unknown."""
res = str(execution)
assert "spam" in res
def test_with_status(self, execution):
"""Execution status is known."""
execution._status = "SUCCEEDED"
res = str(execution)
assert "spam" in res
assert "SUCCEEDED" in res
class TestRepr:
"""Execution string representation."""
def test_with_arn_container_input(self, execution, session):
"""ARN provided and execution input is a container."""
execution.execution_input = {"a": 42, "b": "bla", "c": [1, 2] * 20}
exp_pref = "Execution("
exp_pos = "'spam', 'bla-sm:arn', len(execution_input)=3"
exp_kw_a = ", arn='spam:arn', session=%r)" % session
exp_kw_b = ", session=%r, arn='spam:arn')" % session
exp_a = exp_pref + exp_pos + exp_kw_a
exp_b = exp_pref + exp_pos + exp_kw_b
res = repr(execution)
assert res in (exp_a, exp_b)
def test_no_arn_container_input(self, execution, session):
"""ARN provided and execution input is a container."""
execution.execution_input = {"a": 42, "b": "bla", "c": [1, 2] * 20}
execution.arn = None
exp_pref = "Execution("
exp_pos = "'spam', 'bla-sm:arn', len(execution_input)=3"
exp_kw = ", session=%r)" % session
exp = exp_pref + exp_pos + exp_kw
res = repr(execution)
assert res == exp
def test_with_arn_scalar_input(self, execution, session):
"""ARN provided and execution input is a scalar."""
execution.execution_input = 42
exp_pref = "Execution("
exp_pos = "'spam', 'bla-sm:arn'"
exp_kw_1 = "execution_input=42"
exp_kw_2 = "arn='spam:arn'"
exp_kw_3 = "session=%r" % session
exp_kws = [
", " + exp_kw_1 + ", " + exp_kw_2 + ", " + exp_kw_3 + ")",
", " + exp_kw_1 + ", " + exp_kw_3 + ", " + exp_kw_2 + ")",
", " + exp_kw_2 + ", " + exp_kw_1 + ", " + exp_kw_3 + ")",
", " + exp_kw_2 + ", " + exp_kw_3 + ", " + exp_kw_1 + ")",
", " + exp_kw_3 + ", " + exp_kw_1 + ", " + exp_kw_2 + ")",
", " + exp_kw_3 + ", " + exp_kw_2 + ", " + exp_kw_1 + ")"]
exps = [exp_pref + exp_pos + exp_kw for exp_kw in exp_kws]
res = repr(execution)
assert res in exps
def test_from_arn(self, session):
"""Construction of Execution by querying AWS."""
# Setup environment
now = datetime.datetime.now()
input_ = {"a": 42, "b": "bla", "c": {"foo": [1, 2], "bar": None}}
output = {"foo": [1, 2], "bar": None}
resp = {
"executionArn": "spam:arn",
"stateMachineArn": "bla-sm:arn",
"name": "spam",
"status": "SUCCEEDED",
"startDate": now - datetime.timedelta(hours=1),
"stopDate": now - datetime.timedelta(minutes=50),
"input": json.dumps(input_),
"output": json.dumps(output)}
session.sfn.describe_execution.return_value = resp
# Build input
arn = "spam:arn"
# Run function
res = tscr.Execution.from_arn(arn, session=session)
# Check result
assert isinstance(res, tscr.Execution)
assert res.name == "spam"
assert res.state_machine_arn == "bla-sm:arn"
assert res.execution_input == input_
assert res.arn == "spam:arn"
assert res.session is session
assert res._status == "SUCCEEDED"
assert res._start_date == now - datetime.timedelta(hours=1)
assert res._stop_date == now - datetime.timedelta(minutes=50)
assert res._output == {"foo": [1, 2], "bar": None}
session.sfn.describe_execution.assert_called_once_with(
executionArn="spam:arn")
def test_from_list_item(self, session):
"""Construction of Execution after querying AWS."""
now = datetime.datetime.now()
item = {
"executionArn": "spam:arn",
"stateMachineArn": "bla-sm:arn",
"name": "spam",
"status": "SUCCEEDED",
"startDate": now - datetime.timedelta(hours=1),
"stopDate": now - datetime.timedelta(minutes=50)}
# Run function
res = tscr.Execution.from_list_item(item, session=session)
# Check result
assert isinstance(res, tscr.Execution)
assert res.name == "spam"
assert res.state_machine_arn == "bla-sm:arn"
assert res.execution_input is res._not_provided
assert res.arn == "spam:arn"
assert res.session is session
assert res._status == "SUCCEEDED"
assert res._start_date == now - datetime.timedelta(hours=1)
assert res._stop_date == now - datetime.timedelta(minutes=50)
class TestStatus:
"""Execution status provided by AWS."""
@pytest.mark.parametrize("status", [None, "RUNNING"])
def test_unknown(self, execution, status):
"""Execution status is not currently known."""
def _update():
execution._status = "TIMED_OUT"
execution._update = mock.Mock(side_effect=_update)
execution._status = status
res = execution.status
assert res == "TIMED_OUT"
execution._update.assert_called_once_with()
@pytest.mark.parametrize(
"status",
["SUCCEEDED", "FAILED", "ABORTED", "TIMED_OUT"])
def test_known(self, execution, status):
"""Execution status is known."""
execution._update = mock.Mock()
execution._status = status
res = execution.status
assert res == status
execution._update.assert_not_called()
class TestStartTime:
"""Execution start-time provided by AWS."""
def test_unknown(self, execution):
"""Execution start-time is not already known."""
def _update():
execution._start_date = now - datetime.timedelta(minutes=10)
now = datetime.datetime.now()
execution._update = mock.Mock(side_effect=_update)
execution._start_date = None
res = execution.start_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_called_once_with()
def test_known(self, execution):
"""Execution start-time is known."""
now = datetime.datetime.now()
execution._update = mock.Mock()
execution._start_date = now - datetime.timedelta(minutes=10)
res = execution.start_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_not_called()
class TestStopTime:
"""Execution stop-time provided by AWS."""
def test_unknown(self, execution):
"""Execution stop-time is not already known."""
def _update():
execution._stop_date = now - datetime.timedelta(minutes=10)
now = datetime.datetime.now()
execution._update = mock.Mock(side_effect=_update)
execution._raise_unfinished = mock.Mock()
execution._stop_date = None
res = execution.stop_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_called_once_with()
execution._raise_unfinished.assert_called_once_with()
def test_known(self, execution):
"""Execution stop-time is known."""
now = datetime.datetime.now()
execution._update = mock.Mock()
execution._raise_unfinished = mock.Mock()
execution._stop_date = now - datetime.timedelta(minutes=10)
res = execution.stop_date
assert res == now - datetime.timedelta(minutes=10)
execution._update.assert_not_called()
execution._raise_unfinished.assert_not_called()
class TestOutput:
"""Execution output provided by AWS."""
def test_unknown(self, execution):
"""Execution output is not already known."""
def _update():
execution._output = {"foo": [1, 2], "bar": None}
execution._update = mock.Mock(side_effect=_update)
execution._raise_unfinished = mock.Mock()
execution._raise_on_failure = mock.Mock()
execution._output = tscr._default
res = execution.output
assert res == {"foo": [1, 2], "bar": None}
execution._update.assert_called_once_with()
execution._raise_unfinished.assert_called_once_with()
execution._raise_on_failure.assert_called_once_with()
def test_known(self, execution):
"""Execution output is known."""
execution._update = mock.Mock()
execution._raise_unfinished = mock.Mock()
execution._raise_on_failure = mock.Mock()
execution._output = {"foo": [1, 2], "bar": None}
res = execution.output
assert res == {"foo": [1, 2], "bar": None}
execution._update.assert_not_called()
execution._raise_unfinished.assert_not_called()
execution._raise_on_failure.assert_not_called()
class TestUpdate:
"""Execution details updating by querying AWS."""
@pytest.mark.parametrize(
("status", "input_"),
[
(None, tscr._default),
("RUNNING", tscr._default),
(None, {"a": 42, "c": {"foo": [1, 2], "bar": None}}),
("SUCCEEDED", tscr._default)])
def test_query(self, execution, session, status, input_):
"""A query of AWS is performed."""
# Setup environment
now = datetime.datetime.now()
rinput_ = {"a": 42, "c": {"foo": [1, 2], "bar": None}}
output = {"foo": [1, 2], "bar": None}
resp = {
"executionArn": "spam:arn",
"stateMachineArn": "bla-sm:arn",
"name": "spam",
"status": "SUCCEEDED",
"startDate": now - datetime.timedelta(hours=1),
"stopDate": now - datetime.timedelta(minutes=50),
"input": json.dumps(rinput_),
"output": json.dumps(output)}
session.sfn.describe_execution.return_value = resp
execution._raise_no_arn = mock.Mock()
execution._status = status
execution.execution_input = input_
# Run function
execution._update()
# Check result
assert execution._status == "SUCCEEDED"
assert execution._start_date == now - datetime.timedelta(hours=1)
assert execution._stop_date == now - datetime.timedelta(minutes=50)
assert execution._output == {"foo": [1, 2], "bar": None}
session.sfn.describe_execution.assert_called_once_with(
executionArn="spam:arn")
execution._raise_no_arn.assert_called_once_with()
def test_finished(self, execution, session):
"""No query of AWS is performed."""
execution._raise_no_arn = mock.Mock()
execution._status = "SUCCEEDED"
execution._update()
session.sfn.describe_execution.assert_not_called()
execution._raise_no_arn.assert_not_called()
class TestRaiseOnFailure:
    """Raising on execution failure."""

    @pytest.mark.parametrize("status", ["FAILED", "ABORTED", "TIMED_OUT"])
    def test_failure(self, execution, status):
        """Execution has failed: error mentions the execution name and status."""
        execution._status = status
        with pytest.raises(RuntimeError) as e:
            execution._raise_on_failure()
        assert "spam" in str(e.value)
        assert status in str(e.value)

    @pytest.mark.parametrize("status", ["RUNNING", "SUCCEEDED"])
    def test_not_failure(self, execution, status):
        """Execution has not failed: no exception is raised."""
        execution._status = status
        execution._raise_on_failure()
class TestRaiseUnfinished:
    """Raising when execution is unfinished."""

    def test_unfinished(self, execution):
        """Execution hasn't finished: error mentions the name and 'finish'."""
        execution._status = "RUNNING"
        with pytest.raises(RuntimeError) as e:
            execution._raise_unfinished()
        assert "spam" in str(e.value)
        assert "finish" in str(e.value)

    @pytest.mark.parametrize(
        "status",
        ["FAILED", "ABORTED", "TIMED_OUT", "SUCCEEDED"])
    def test_finished(self, execution, status):
        """Execution has finished (any terminal status): no exception."""
        execution._status = status
        execution._raise_unfinished()
class TestRaiseNoArn:
    """Raising when no ARN is provided to execution."""

    def test_no_arn(self, execution):
        """Execution has no associated ARN: error mentions ARN and the name."""
        execution.arn = None
        with pytest.raises(RuntimeError) as e:
            execution._raise_no_arn()
        assert "ARN" in str(e.value)
        assert "spam" in str(e.value)

    def test_finished(self, execution):
        """Execution has an ARN (fixture default): no exception is raised."""
        execution._raise_no_arn()
    def test_start(self, execution, session, eg_input):
        """Execution starting registers the ARN, start date and status."""
        # Setup environment
        now = datetime.datetime.now()
        resp = {"executionArn": "spam:arn", "startDate": now}
        session.sfn.start_execution.return_value = resp
        execution.arn = None
        # Run function
        execution.start()
        # Check result
        assert execution.arn == "spam:arn"
        assert execution._start_date == now
        assert execution._status == "RUNNING"
        session.sfn.start_execution.assert_called_once_with(
            stateMachineArn="bla-sm:arn",
            name="spam",
            input=mock.ANY)
        # Input is JSON-serialised; compare the parsed value, not the string.
        res_se_call = session.sfn.start_execution.call_args_list[0]
        res_input_str = res_se_call[1]["input"]
        assert json.loads(res_input_str) == eg_input
    def test_start_default_input(self, execution, session):
        """Execution starting with default (unset) input sends empty JSON."""
        # Setup environment
        now = datetime.datetime.now()
        resp = {"executionArn": "spam:arn", "startDate": now}
        session.sfn.start_execution.return_value = resp
        execution.arn = None
        execution.execution_input = tscr._default
        # Run function
        execution.start()
        # Check result
        assert execution.arn == "spam:arn"
        assert execution._start_date == now
        assert execution._status == "RUNNING"
        session.sfn.start_execution.assert_called_once_with(
            stateMachineArn="bla-sm:arn",
            name="spam",
            input="{}")
        # The default sentinel is replaced by a concrete empty dict.
        assert execution.execution_input == {}
class TestWait:
    """Waiting on execution to finish."""

    @pytest.mark.timeout(1.0)
    def test_running(self, execution):
        """Execution is running, then fails; wait polls until terminal."""
        # Setup environment
        _shared = {"j": 0}

        # Report RUNNING four times, then FAILED.
        def _update():
            if _shared["j"] > 3:
                execution._status = "FAILED"
                return
            execution._status = "RUNNING"
            _shared["j"] += 1

        execution._update = mock.Mock(side_effect=_update)
        execution._raise_on_failure = mock.Mock()
        execution._wait_sleep_time = 0.01
        # Build expectation
        # Four RUNNING polls plus the final FAILED one.
        exp_ud_calls = [mock.call() for _ in range(5)]
        # Run function
        execution.wait()
        # Check result
        assert execution._update.call_args_list == exp_ud_calls
        execution._raise_on_failure.assert_called_once_with()

    @pytest.mark.timeout(1.0)
    def test_no_raise_on_failure(self, execution):
        """Execution is running, then doesn't raise on failure."""
        # Setup environment
        _shared = {"j": 0}

        # Report RUNNING four times, then FAILED.
        def _update():
            if _shared["j"] > 3:
                execution._status = "FAILED"
                return
            execution._status = "RUNNING"
            _shared["j"] += 1

        execution._update = mock.Mock(side_effect=_update)
        execution._raise_on_failure = mock.Mock()
        execution._wait_sleep_time = 0.01
        # Build expectation
        exp_ud_calls = [mock.call() for _ in range(5)]
        # Run function
        execution.wait(raise_on_failure=False)
        # Check result
        assert execution._update.call_args_list == exp_ud_calls
        execution._raise_on_failure.assert_not_called()

    @pytest.mark.timeout(1.0)
    def test_timeout(self, execution):
        """Execution is running, and doesn't finish before time-out."""
        # Setup environment
        _shared = {"j": 0}

        def _update():
            if _shared["j"] > 3:
                execution._status = "FAILED"
                return
            execution._status = "RUNNING"
            _shared["j"] += 1

        execution._update = mock.Mock(side_effect=_update)
        execution._raise_on_failure = mock.Mock()
        execution._wait_sleep_time = 0.01
        # Build expectation
        # The 0.02s timeout allows only three update polls before raising.
        exp_ud_calls = [mock.call() for _ in range(3)]
        # Run function
        with pytest.raises(RuntimeError) as e:
            execution.wait(timeout=0.02)
        assert "imeout" in str(e.value) or "ime-out" in str(e.value)
        assert "spam" in str(e.value)
        # Check result
        assert execution._update.call_args_list == exp_ud_calls
        execution._raise_on_failure.assert_not_called()

    @pytest.mark.timeout(1.0)
    def test_finished(self, execution):
        """Execution is already finished: one update, no failure check."""
        # Setup environment
        execution._update = mock.Mock()
        execution._raise_on_failure = mock.Mock()
        execution._wait_sleep_time = 0.01
        execution._status = "SUCCEEDED"
        # Run function
        execution.wait(raise_on_failure=False)
        # Check result
        execution._update.assert_called_once_with()
        execution._raise_on_failure.assert_not_called()
    @pytest.mark.parametrize(
        ("kwargs", "exp_kwargs"),
        [
            ({}, {}),
            ({"error_code": "SpamError"}, {"error": "SpamError"}),
            ({"details": "A spam occured"}, {"cause": "A spam occured"}),
            (
                {"error_code": "SpamError", "details": "A spam occured"},
                {"error": "SpamError", "cause": "A spam occured"})])
    def test_stop(self, execution, session, kwargs, exp_kwargs):
        """Execution stopping: error_code/details map to AWS error/cause."""
        # Setup environment
        now = datetime.datetime.now()
        resp = {"stopDate": now}
        session.sfn.stop_execution.return_value = resp
        execution._raise_no_arn = mock.Mock()
        # Run function
        execution.stop(**kwargs)
        # Check result
        assert execution._stop_date == now
        session.sfn.stop_execution.assert_called_once_with(
            executionArn="spam:arn",
            **exp_kwargs)
        execution._raise_no_arn.assert_called_once_with()
    def test_get_history(self, execution, session):
        """Execution history querying returns parsed events."""
        # Setup environment
        resp = {"events": [{"id": j} for j in range(4)]}
        session.sfn.get_execution_history.return_value = resp
        events = [mock.Mock(spec=history.Event) for _ in range(4)]
        ph_mock = mock.Mock(return_value=events)
        execution._raise_no_arn = mock.Mock()
        # Run function
        # Patch the parser so only the AWS call and plumbing are under test.
        with mock.patch.object(history, "parse_history", ph_mock):
            res = execution.get_history()
        # Check result
        assert res == events
        ph_mock.assert_called_once_with([{"id": j} for j in range(4)])
        session.sfn.get_execution_history.assert_called_once_with(
            executionArn="spam:arn")
        execution._raise_no_arn.assert_called_once_with()
    @pytest.mark.parametrize(
        ("output", "exp_suff"),
        [
            (
                {"foo": [1, 2], "bar": None},
                "\nOutput: {\"foo\": [1, 2], \"bar\": null}"),
            (tscr._default, "")])
    def test_format_history(self, execution, output, exp_suff):
        """Execution history formatting."""
        # Setup environment
        # Minimal stand-in for history.Event: str() gives the name; an empty
        # details_str means no trailing colon and no indented details line.
        class Event:
            def __init__(self, name, details_str):
                self.name = name
                self.details_str = details_str

            def __str__(self):
                return self.name

        events = [
            Event("ev0", "Event details 0"),
            Event("ev1", ""),
            Event("ev2", "Event details 2"),
            Event("ev3", "Event details 3"),
            Event("ev4", "")]
        execution.get_history = mock.Mock(return_value=events)
        execution._update = mock.Mock()
        execution._output = output
        # Build expectation
        exp = (
            "ev0:\n"
            " Event details 0\n"
            "ev1\n"
            "ev2:\n"
            " Event details 2\n"
            "ev3:\n"
            " Event details 3\n"
            "ev4")
        # Known output is appended as a JSON suffix; default sentinel adds none.
        exp += exp_suff
        # Test function
        res = execution.format_history()
        # Check result
        assert res == exp
        execution.get_history.assert_called_once_with()
        execution._update.assert_called_once_with()
import types, sys, re
try:
import logging
except:
import DummyLogger as logging
import simpleTAL
from plasTeX.Renderers.PageTemplate import simpletal
__version__ = simpletal.__version__
# Sentinel representing the TALES 'default' value.
DEFAULTVALUE = "This represents a Default value."

class PathNotFoundException (Exception):
    """Raised (usually via the shared PATHNOTFOUNDEXCEPTION instance)
    when a TALES path cannot be resolved in the current context."""
    pass

class ContextContentException (Exception):
    """ This is raised when invalid content has been placed into the Context object.
        For example using non-ascii characters instead of Unicode strings.
    """
    pass

# Shared singleton, re-raised to avoid allocating a new exception object
# for every failed path lookup.
PATHNOTFOUNDEXCEPTION = PathNotFoundException()
class ContextVariable:
    """Wrapper holding a value looked up from a TALES Context.

    If the wrapped value is callable it is invoked (with no arguments)
    whenever the variable's value is requested; rawValue() returns the
    wrapped object untouched.
    """
    def __init__ (self, value = None):
        self.ourValue = value

    def value (self, currentPath=None):
        # 'apply(f, ())' is deprecated; calling the object directly is
        # equivalent and works on both Python 2 and 3.
        if (callable (self.ourValue)):
            return self.ourValue ()
        return self.ourValue

    def rawValue (self):
        """Return the wrapped value without invoking callables."""
        return self.ourValue

    def __str__ (self):
        return repr (self.ourValue)
class RepeatVariable (ContextVariable):
    """Provides the TALES 'repeat' variable for a loop over a concrete
    sequence: index/number/even/odd/start/end/length plus letter and
    roman-numeral renderings of the current position."""
    def __init__ (self, sequence):
        ContextVariable.__init__ (self, 1)
        self.sequence = sequence
        self.position = 0
        self.map = None

    def value (self, currentPath=None):
        # The map of repeat functions is built lazily on first access.
        if (self.map is None):
            self.createMap()
        return self.map

    def rawValue (self):
        return self.value()

    def getCurrentValue (self):
        return self.sequence [self.position]

    def increment (self):
        self.position += 1
        if (self.position == len (self.sequence)):
            raise IndexError ("Repeat Finished")

    def createMap (self):
        self.map = {}
        self.map ['index'] = self.getIndex
        self.map ['number'] = self.getNumber
        self.map ['even'] = self.getEven
        self.map ['odd'] = self.getOdd
        self.map ['start'] = self.getStart
        self.map ['end'] = self.getEnd
        # TODO: first and last need to be implemented.
        self.map ['length'] = len (self.sequence)
        self.map ['letter'] = self.getLowerLetter
        self.map ['Letter'] = self.getUpperLetter
        self.map ['roman'] = self.getLowerRoman
        self.map ['Roman'] = self.getUpperRoman
        # Repeat implementation goes here

    def getIndex (self):
        # Zero-based position.
        return self.position

    def getNumber (self):
        # One-based position.
        return self.position + 1

    def getEven (self):
        if ((self.position % 2) != 0):
            return 0
        return 1

    def getOdd (self):
        if ((self.position % 2) == 0):
            return 0
        return 1

    def getStart (self):
        if (self.position == 0):
            return 1
        return 0

    def getEnd (self):
        if (self.position == len (self.sequence) - 1):
            return 1
        return 0

    def getLowerLetter (self):
        # Render the position in base 26 using 'a'..'z'.
        # NOTE(review): position 26 yields 'ba', not spreadsheet-style 'aa';
        # presumably intentional plain base-26 encoding - confirm upstream.
        result = ""
        nextCol = self.position
        if (nextCol == 0):
            return 'a'
        while (nextCol > 0):
            nextCol, thisCol = divmod (nextCol, 26)
            result = chr (ord ('a') + thisCol) + result
        return result

    def getUpperLetter (self):
        return self.getLowerLetter().upper()

    def getLowerRoman (self):
        # Greedy subtraction over descending roman values.
        romanNumeralList = (('m', 1000)
                           ,('cm', 900)
                           ,('d', 500)
                           ,('cd', 400)
                           ,('c', 100)
                           ,('xc', 90)
                           ,('l', 50)
                           ,('xl', 40)
                           ,('x', 10)
                           ,('ix', 9)
                           ,('v', 5)
                           ,('iv', 4)
                           ,('i', 1)
                           )
        if (self.position > 3999):
            # Roman numbers only supported up to 4000
            return ' '
        num = self.position + 1
        result = ""
        for roman, integer in romanNumeralList:
            while (num >= integer):
                result += roman
                num -= integer
        return result

    def getUpperRoman (self):
        return self.getLowerRoman().upper()
class IteratorRepeatVariable (RepeatVariable):
    """Repeat variable backed by an iterator rather than a sequence, so the
    total length is unknown until the iterator is exhausted."""
    def __init__ (self, sequence):
        RepeatVariable.__init__ (self, sequence)
        self.curValue = None
        # iterStatus: 0 = not started, 1 = iterating, 2 = exhausted.
        self.iterStatus = 0

    def getCurrentValue (self):
        if (self.iterStatus == 0):
            self.iterStatus = 1
            try:
                self.curValue = self.sequence.next()
            except StopIteration, e:
                self.iterStatus = 2
                raise IndexError ("Repeat Finished")
        return self.curValue

    def increment (self):
        # Need this for the repeat variable functions.
        self.position += 1
        try:
            self.curValue = self.sequence.next()
        except StopIteration, e:
            self.iterStatus = 2
            raise IndexError ("Repeat Finished")

    def createMap (self):
        self.map = {}
        self.map ['index'] = self.getIndex
        self.map ['number'] = self.getNumber
        self.map ['even'] = self.getEven
        self.map ['odd'] = self.getOdd
        self.map ['start'] = self.getStart
        self.map ['end'] = self.getEnd
        # TODO: first and last need to be implemented.
        # Length is unknowable for an iterator, so advertise the maximum.
        self.map ['length'] = sys.maxint
        self.map ['letter'] = self.getLowerLetter
        self.map ['Letter'] = self.getUpperLetter
        self.map ['roman'] = self.getLowerRoman
        self.map ['Roman'] = self.getUpperRoman

    def getEnd (self):
        # Only known to be at the end once the iterator has raised.
        if (self.iterStatus == 2):
            return 1
        return 0
class PathFunctionVariable (ContextVariable):
    """Wraps a one-argument function; the remainder of the TALES path is
    joined with '/' and passed to the function when the variable is hit
    mid-path."""
    def __init__ (self, func):
        ContextVariable.__init__ (self, value = func)
        self.func = func

    def value (self, currentPath=None):
        if (currentPath is not None):
            index, paths = currentPath
            result = ContextVariable (apply (self.func, ('/'.join (paths[index:]),)))
            # Fast track the result
            # NOTE: raising a ContextVariable is this module's fast-path
            # protocol; Context.traversePath catches it and returns its value.
            raise result
class CachedFuncResult (ContextVariable):
    """Context variable that evaluates its wrapped callable once and caches
    the result until clearCache() is called."""
    def value (self, currentPath=None):
        # Only a missing cache attribute should trigger recomputation;
        # a bare 'except:' here would also hide real errors from the callable.
        try:
            return self.cachedValue
        except AttributeError:
            self.cachedValue = ContextVariable.value (self)
        return self.cachedValue

    def clearCache (self):
        """Discard the memoised value; harmless if nothing is cached yet."""
        try:
            del self.cachedValue
        except AttributeError:
            pass
class PythonPathFunctions:
    """Helper namespace exposing the TALES path functions (path, string,
    exists, nocall, test, stripped) to python: expressions."""
    def __init__ (self, context):
        self.context = context
        self.pathHandler = {}
        self.pathHandler['path'] = self.path
        self.pathHandler['string'] = self.string
        self.pathHandler['exists'] = self.exists
        self.pathHandler['nocall'] = self.nocall
        self.pathHandler['test'] = self.test
        self.pathHandler['stripped'] = self.stripped

    def path (self, expr):
        return self.context.evaluatePath (expr)

    def string (self, expr):
        return self.context.evaluateString (expr)

    def stripped (self, expr):
        # Strip XML/HTML tags from the evaluated string expression.
        # Fixed: previously referenced the undefined global name 'context'
        # instead of self.context, raising NameError whenever called.
        return re.sub(r'</?\w+[^>]*>', r'', self.context.evaluateString (expr))

    def exists (self, expr):
        return self.context.evaluateExists (expr)

    def nocall (self, expr):
        return self.context.evaluateNoCall (expr)

    def test (self, *arguments):
        """TALES test(): pairs of (condition, value); an odd trailing
        argument is the default returned when no condition is true."""
        if (len (arguments) % 2):
            # We have an odd number of arguments - which means the last one is a default
            pairs = arguments[:-1]
            defaultValue = arguments[-1]
        else:
            # No default - so use None
            pairs = arguments
            defaultValue = None
        index = 0
        while (index < len (pairs)):
            test = pairs[index]
            index += 1
            value = pairs[index]
            index += 1
            if (test):
                return value
        return defaultValue
class Context:
    """TALES evaluation context: holds global and local variables and
    evaluates path / exists / nocall / not / string / python / stripped
    expressions against them."""
    def __init__ (self, options=None, allowPythonPath=0):
        # python: expressions are only honoured when explicitly enabled.
        self.allowPythonPath = allowPythonPath
        self.globals = {}
        self.locals = {}
        self.localStack = []
        self.repeatStack = []
        self.populateDefaultVariables (options)
        self.log = logging.getLogger ("simpleTALES.Context")
        self.true = 1
        self.false = 0
        self.pythonPathFuncs = PythonPathFunctions (self)
        # Dispatch table mapping expression prefixes to their evaluators.
        self.prefixHandlers = {}
        self.prefixHandlers['path'] = self.evaluatePath
        self.prefixHandlers['exists'] = self.evaluateExists
        self.prefixHandlers['nocall'] = self.evaluateNoCall
        self.prefixHandlers['not'] = self.evaluateNot
        self.prefixHandlers['string'] = self.evaluateString
        self.prefixHandlers['python'] = self.evaluatePython
        self.prefixHandlers['stripped'] = self.evaluateStripped

    def addRepeat (self, name, var, initialValue):
        """Enter a tal:repeat loop: stack the old repeat map and expose the
        new repeat variable plus a local holding the first value."""
        # Pop the current repeat map onto the stack
        self.repeatStack.append (self.repeatMap)
        self.repeatMap = self.repeatMap.copy()
        self.repeatMap [name] = var
        # Map this repeatMap into the global space
        self.addGlobal ('repeat', self.repeatMap)
        # Add in the locals
        self.pushLocals()
        self.setLocal (name, initialValue)

    def removeRepeat (self, name):
        """Leave a tal:repeat loop, restoring the previous repeat map."""
        # Bring the old repeat map back
        self.repeatMap = self.repeatStack.pop()
        # Map this repeatMap into the global space
        self.addGlobal ('repeat', self.repeatMap)

    def addGlobal (self, name, value):
        self.globals[name] = value

    def pushLocals (self):
        # Push the current locals onto a stack so that we can safely over-ride them.
        self.localStack.append (self.locals)
        self.locals = self.locals.copy()

    def setLocal (self, name, value):
        # Override the current local if present with the new one
        self.locals [name] = value

    def popLocals (self):
        self.locals = self.localStack.pop()

    def evaluate (self, expr, originalAtts = None):
        """Evaluate a TALES expression, dispatching on its prefix; an
        unprefixed expression is treated as a path expression."""
        # Returns a ContextVariable
        #self.log.debug ("Evaluating %s" % expr)
        if (originalAtts is not None):
            # Call from outside
            self.globals['attrs'] = originalAtts
            # Top-level calls swallow PathNotFoundException and yield None.
            suppressException = 1
        else:
            suppressException = 0
        # Supports path, exists, nocall, not, and string
        expr = expr.strip ()
        try:
            for key, function in self.prefixHandlers.items():
                if expr.startswith (key+':'):
                    return function (expr[len(key)+1:].lstrip ())
            else:
                # Not specified - so it's a path
                # (for/else: runs when no prefix matched and returned above)
                return self.evaluatePath (expr)
        except PathNotFoundException, e:
            if (suppressException):
                return None
            raise e

    def evaluateStripped(self, expr):
        """Evaluate *expr* as a string expression and strip any XML/HTML
        tags from the result."""
        if '${' not in expr:
            expr = '${%s}' % expr
        return re.sub(r'</?\w+[^>]*>', r'', self.evaluateString (expr))

    def evaluatePython (self, expr):
        """Evaluate a python: expression with globals/locals copied from the
        context (ContextVariables are unwrapped to their raw values)."""
        if (not self.allowPythonPath):
            self.log.warn ("Parameter allowPythonPath is false.  NOT Evaluating python expression %s" % expr)
            return self.false
        #self.log.debug ("Evaluating python expression %s" % expr)
        globals={}
        for name, value in self.globals.items():
            if (isinstance (value, ContextVariable)): value = value.rawValue()
            globals [name] = value
        # Expose the path()/string()/exists()/nocall()/test()/stripped() helpers.
        for key, value in self.pythonPathFuncs.pathHandler.items():
            globals [key] = value
        locals={}
        for name, value in self.locals.items():
            if (isinstance (value, ContextVariable)): value = value.rawValue()
            locals [name] = value
        try:
            result = eval(expr, globals, locals)
            if (isinstance (result, ContextVariable)):
                return result.value()
            return result
        except Exception, e:
            # An exception occured evaluating the template, return the exception as text
            self.log.warn ("Exception occurred evaluating python path, exception: " + str (e))
            return "Exception: %s" % str (e)

    def evaluatePath (self, expr):
        """Evaluate a path expression; '|' separates alternatives tried in
        order until one resolves."""
        #self.log.debug ("Evaluating path expression %s" % expr)
        allPaths = expr.split ('|')
        if (len (allPaths) > 1):
            for path in allPaths:
                # Evaluate this path
                try:
                    return self.evaluate (path.strip ())
                except PathNotFoundException, e:
                    # Path didn't exist, try the next one
                    pass
            # No paths evaluated - raise exception.
            raise PATHNOTFOUNDEXCEPTION
        else:
            # A single path - so let's evaluate it.
            # This *can* raise PathNotFoundException
            return self.traversePath (allPaths[0])

    def evaluateExists (self, expr):
        """Return true if any of the '|'-separated paths resolves."""
        #self.log.debug ("Evaluating %s to see if it exists" % expr)
        allPaths = expr.split ('|')
        # The first path is for us
        # Return true if this first bit evaluates, otherwise test the rest
        try:
            result = self.traversePath (allPaths[0], canCall = 0)
            return self.true
        except PathNotFoundException, e:
            # Look at the rest of the paths.
            pass
        for path in allPaths[1:]:
            # Evaluate this path
            try:
                pathResult = self.evaluate (path.strip ())
                # If this is part of a "exists: path1 | exists: path2" path then we need to look at the actual result.
                if (pathResult):
                    return self.true
            except PathNotFoundException, e:
                pass
        # If we get this far then there are *no* paths that exist.
        return self.false

    def evaluateNoCall (self, expr):
        """Resolve a path without calling the final object."""
        #self.log.debug ("Evaluating %s using nocall" % expr)
        allPaths = expr.split ('|')
        # The first path is for us
        try:
            return self.traversePath (allPaths[0], canCall = 0)
        except PathNotFoundException, e:
            # Try the rest of the paths.
            pass
        for path in allPaths[1:]:
            # Evaluate this path
            try:
                return self.evaluate (path.strip ())
            except PathNotFoundException, e:
                pass
        # No path evaluated - raise error
        raise PATHNOTFOUNDEXCEPTION

    def evaluateNot (self, expr):
        """Boolean negation with TAL semantics: missing paths and Nothing
        are true, 'default' is false, empty sequences are true."""
        #self.log.debug ("Evaluating NOT value of %s" % expr)
        # Evaluate what I was passed
        try:
            pathResult = self.evaluate (expr)
        except PathNotFoundException, e:
            # In SimpleTAL the result of "not: no/such/path" should be TRUE not FALSE.
            return self.true
        if (pathResult is None):
            # Value was Nothing
            return self.true
        if (pathResult == DEFAULTVALUE):
            return self.false
        try:
            resultLen = len (pathResult)
            if (resultLen > 0):
                return self.false
            else:
                return self.true
        except:
            # Not a sequence object.
            pass
        if (not pathResult):
            return self.true
        # Everything else is true, so we return false!
        return self.false

    def evaluateString (self, expr):
        """Expand a string expression: '$$' is a literal '$', '${path}' and
        '$variable' are replaced by their evaluated (unicode) values."""
        #self.log.debug ("Evaluating String %s" % expr)
        result = ""
        skipCount = 0
        for position in xrange (0,len (expr)):
            if (skipCount > 0):
                # Still inside a substitution consumed on an earlier pass.
                skipCount -= 1
            else:
                if (expr[position] == '$'):
                    try:
                        if (expr[position + 1] == '$'):
                            # Escaped $ sign
                            result += '$'
                            skipCount = 1
                        elif (expr[position + 1] == '{'):
                            # Looking for a path!
                            endPos = expr.find ('}', position + 1)
                            if (endPos > 0):
                                path = expr[position + 2:endPos]
                                # Evaluate the path - missing paths raise exceptions as normal.
                                try:
                                    pathResult = self.evaluate (path)
                                except PathNotFoundException, e:
                                    # This part of the path didn't evaluate to anything - leave blank
                                    pathResult = u''
                                if (pathResult is not None):
                                    if (isinstance (pathResult, types.UnicodeType)):
                                        result += pathResult
                                    else:
                                        # THIS IS NOT A BUG!
                                        # Use Unicode in Context if you aren't using Ascii!
                                        result += unicode (pathResult)
                                skipCount = endPos - position
                        else:
                            # It's a variable
                            endPos = expr.find (' ', position + 1)
                            if (endPos == -1):
                                endPos = len (expr)
                            path = expr [position + 1:endPos]
                            # Evaluate the variable - missing paths raise exceptions as normal.
                            try:
                                pathResult = self.traversePath (path)
                            except PathNotFoundException, e:
                                # This part of the path didn't evaluate to anything - leave blank
                                pathResult = u''
                            if (pathResult is not None):
                                if (isinstance (pathResult, types.UnicodeType)):
                                    result += pathResult
                                else:
                                    # THIS IS NOT A BUG!
                                    # Use Unicode in Context if you aren't using Ascii!
                                    result += unicode (pathResult)
                            skipCount = endPos - position - 1
                    except IndexError, e:
                        # Trailing $ sign - just suppress it
                        self.log.warn ("Trailing $ detected")
                        pass
                else:
                    result += expr[position]
        return result

    def traversePath (self, expr, canCall=1):
        """Walk a '/'-separated path, dereferencing '?name' indirections and
        resolving each step via attribute access then item access."""
        # canCall only applies to the *final* path destination, not points down the path.
        # Check for and correct for trailing/leading quotes
        if (expr.startswith ('"') or expr.startswith ("'")):
            if (expr.endswith ('"') or expr.endswith ("'")):
                expr = expr [1:-1]
            else:
                expr = expr [1:]
        elif (expr.endswith ('"') or expr.endswith ("'")):
            expr = expr [0:-1]
        pathList = expr.split ('/')
        path = pathList[0]
        if path.startswith ('?'):
            # '?name' - look the name up and use its value as the path element.
            path = path[1:]
            if self.locals.has_key(path):
                path = self.locals[path]
                if (isinstance (path, ContextVariable)): path = path.value()
                elif (callable (path)):path = apply (path, ())
            elif self.globals.has_key(path):
                path = self.globals[path]
                if (isinstance (path, ContextVariable)): path = path.value()
                elif (callable (path)):path = apply (path, ())
            #self.log.debug ("Dereferenced to %s" % path)
        if self.locals.has_key(path):
            val = self.locals[path]
        elif self.globals.has_key(path):
            val = self.globals[path]
        else:
            # If we can't find it then raise an exception
            raise PATHNOTFOUNDEXCEPTION
        index = 1
        for path in pathList[1:]:
            #self.log.debug ("Looking for path element %s" % path)
            if path.startswith ('?'):
                path = path[1:]
                if self.locals.has_key(path):
                    path = self.locals[path]
                    if (isinstance (path, ContextVariable)): path = path.value()
                    elif (callable (path)):path = apply (path, ())
                elif self.globals.has_key(path):
                    path = self.globals[path]
                    if (isinstance (path, ContextVariable)): path = path.value()
                    elif (callable (path)):path = apply (path, ())
                #self.log.debug ("Dereferenced to %s" % path)
            try:
                if (isinstance (val, ContextVariable)): temp = val.value((index,pathList))
                elif (callable (val)):temp = apply (val, ())
                else: temp = val
            except ContextVariable, e:
                # Fast path for those functions that return values
                return e.value()
            except TypeError:
                temp = val
            if (hasattr (temp, path)):
                val = getattr (temp, path)
            else:
                try:
                    try:
                        val = temp[path]
                    except TypeError:
                        val = temp[int(path)]
                except:
                    #self.log.debug ("Not found.")
                    raise PATHNOTFOUNDEXCEPTION
            index = index + 1
        #self.log.debug ("Found value %s" % str (val))
        if (canCall):
            try:
                if (isinstance (val, ContextVariable)): result = val.value((index,pathList))
                elif (callable (val)):result = apply (val, ())
                else: result = val
            except ContextVariable, e:
                # Fast path for those functions that return values
                return e.value()
        else:
            # NOTE(review): ContextVariable defines rawValue(), not realValue;
            # this nocall branch looks like it would raise AttributeError when
            # hit with a ContextVariable - confirm against upstream SimpleTAL.
            if (isinstance (val, ContextVariable)): result = val.realValue
            else: result = val
        return result

    def __str__ (self):
        return "Globals: " + str (self.globals) + "Locals: " + str (self.locals)

    def populateDefaultVariables (self, options):
        """Install the standard TALES names: nothing, default, options,
        repeat, attrs, and the CONTEXTS map."""
        vars = {}
        self.repeatMap = {}
        vars['nothing'] = None
        vars['default'] = DEFAULTVALUE
        vars['options'] = options
        # To start with there are no repeats
        vars['repeat'] = self.repeatMap
        vars['attrs'] = None
        # Add all of these to the global context
        for name in vars.keys():
            self.addGlobal (name,vars[name])
        # Add also under CONTEXTS
        self.addGlobal ('CONTEXTS', vars)
try:
import logging
except:
import DummyLogger as logging
import simpleTAL
from plasTeX.Renderers.PageTemplate import simpletal
__version__ = simpletal.__version__
# Sentinel representing the TALES 'default' value.
DEFAULTVALUE = "This represents a Default value."

class PathNotFoundException (Exception):
    """Raised (usually via the shared PATHNOTFOUNDEXCEPTION instance)
    when a TALES path cannot be resolved in the current context."""
    pass

class ContextContentException (Exception):
    """ This is raised when invalid content has been placed into the Context object.
        For example using non-ascii characters instead of Unicode strings.
    """
    pass

# Shared singleton, re-raised to avoid allocating a new exception object
# for every failed path lookup.
PATHNOTFOUNDEXCEPTION = PathNotFoundException()
class ContextVariable:
    """Wrapper holding a value looked up from a TALES Context.

    If the wrapped value is callable it is invoked (with no arguments)
    whenever the variable's value is requested; rawValue() returns the
    wrapped object untouched.
    """
    def __init__ (self, value = None):
        self.ourValue = value

    def value (self, currentPath=None):
        # 'apply(f, ())' is deprecated; calling the object directly is
        # equivalent and works on both Python 2 and 3.
        if (callable (self.ourValue)):
            return self.ourValue ()
        return self.ourValue

    def rawValue (self):
        """Return the wrapped value without invoking callables."""
        return self.ourValue

    def __str__ (self):
        return repr (self.ourValue)
class RepeatVariable (ContextVariable):
    """Provides the TALES 'repeat' variable for a loop over a concrete
    sequence: index/number/even/odd/start/end/length plus letter and
    roman-numeral renderings of the current position."""
    def __init__ (self, sequence):
        ContextVariable.__init__ (self, 1)
        self.sequence = sequence
        self.position = 0
        self.map = None

    def value (self, currentPath=None):
        # The map of repeat functions is built lazily on first access.
        if (self.map is None):
            self.createMap()
        return self.map

    def rawValue (self):
        return self.value()

    def getCurrentValue (self):
        return self.sequence [self.position]

    def increment (self):
        self.position += 1
        if (self.position == len (self.sequence)):
            raise IndexError ("Repeat Finished")

    def createMap (self):
        self.map = {}
        self.map ['index'] = self.getIndex
        self.map ['number'] = self.getNumber
        self.map ['even'] = self.getEven
        self.map ['odd'] = self.getOdd
        self.map ['start'] = self.getStart
        self.map ['end'] = self.getEnd
        # TODO: first and last need to be implemented.
        self.map ['length'] = len (self.sequence)
        self.map ['letter'] = self.getLowerLetter
        self.map ['Letter'] = self.getUpperLetter
        self.map ['roman'] = self.getLowerRoman
        self.map ['Roman'] = self.getUpperRoman
        # Repeat implementation goes here

    def getIndex (self):
        # Zero-based position.
        return self.position

    def getNumber (self):
        # One-based position.
        return self.position + 1

    def getEven (self):
        if ((self.position % 2) != 0):
            return 0
        return 1

    def getOdd (self):
        if ((self.position % 2) == 0):
            return 0
        return 1

    def getStart (self):
        if (self.position == 0):
            return 1
        return 0

    def getEnd (self):
        if (self.position == len (self.sequence) - 1):
            return 1
        return 0

    def getLowerLetter (self):
        # Render the position in base 26 using 'a'..'z'.
        # NOTE(review): position 26 yields 'ba', not spreadsheet-style 'aa';
        # presumably intentional plain base-26 encoding - confirm upstream.
        result = ""
        nextCol = self.position
        if (nextCol == 0):
            return 'a'
        while (nextCol > 0):
            nextCol, thisCol = divmod (nextCol, 26)
            result = chr (ord ('a') + thisCol) + result
        return result

    def getUpperLetter (self):
        return self.getLowerLetter().upper()

    def getLowerRoman (self):
        # Greedy subtraction over descending roman values.
        romanNumeralList = (('m', 1000)
                           ,('cm', 900)
                           ,('d', 500)
                           ,('cd', 400)
                           ,('c', 100)
                           ,('xc', 90)
                           ,('l', 50)
                           ,('xl', 40)
                           ,('x', 10)
                           ,('ix', 9)
                           ,('v', 5)
                           ,('iv', 4)
                           ,('i', 1)
                           )
        if (self.position > 3999):
            # Roman numbers only supported up to 4000
            return ' '
        num = self.position + 1
        result = ""
        for roman, integer in romanNumeralList:
            while (num >= integer):
                result += roman
                num -= integer
        return result

    def getUpperRoman (self):
        return self.getLowerRoman().upper()
class IteratorRepeatVariable (RepeatVariable):
    """Repeat variable backed by an iterator rather than a sequence, so the
    total length is unknown until the iterator is exhausted."""
    def __init__ (self, sequence):
        RepeatVariable.__init__ (self, sequence)
        self.curValue = None
        # iterStatus: 0 = not started, 1 = iterating, 2 = exhausted.
        self.iterStatus = 0

    def getCurrentValue (self):
        if (self.iterStatus == 0):
            self.iterStatus = 1
            try:
                self.curValue = self.sequence.next()
            except StopIteration, e:
                self.iterStatus = 2
                raise IndexError ("Repeat Finished")
        return self.curValue

    def increment (self):
        # Need this for the repeat variable functions.
        self.position += 1
        try:
            self.curValue = self.sequence.next()
        except StopIteration, e:
            self.iterStatus = 2
            raise IndexError ("Repeat Finished")

    def createMap (self):
        self.map = {}
        self.map ['index'] = self.getIndex
        self.map ['number'] = self.getNumber
        self.map ['even'] = self.getEven
        self.map ['odd'] = self.getOdd
        self.map ['start'] = self.getStart
        self.map ['end'] = self.getEnd
        # TODO: first and last need to be implemented.
        # Length is unknowable for an iterator, so advertise the maximum.
        self.map ['length'] = sys.maxint
        self.map ['letter'] = self.getLowerLetter
        self.map ['Letter'] = self.getUpperLetter
        self.map ['roman'] = self.getLowerRoman
        self.map ['Roman'] = self.getUpperRoman

    def getEnd (self):
        # Only known to be at the end once the iterator has raised.
        if (self.iterStatus == 2):
            return 1
        return 0
class PathFunctionVariable (ContextVariable):
    """Wraps a one-argument function; the remainder of the TALES path is
    joined with '/' and passed to the function when the variable is hit
    mid-path."""
    def __init__ (self, func):
        ContextVariable.__init__ (self, value = func)
        self.func = func

    def value (self, currentPath=None):
        if (currentPath is not None):
            index, paths = currentPath
            result = ContextVariable (apply (self.func, ('/'.join (paths[index:]),)))
            # Fast track the result
            # NOTE: raising a ContextVariable is this module's fast-path
            # protocol; Context.traversePath catches it and returns its value.
            raise result
class CachedFuncResult (ContextVariable):
    """Context variable that evaluates its wrapped callable once and caches
    the result until clearCache() is called."""
    def value (self, currentPath=None):
        # Only a missing cache attribute should trigger recomputation;
        # a bare 'except:' here would also hide real errors from the callable.
        try:
            return self.cachedValue
        except AttributeError:
            self.cachedValue = ContextVariable.value (self)
        return self.cachedValue

    def clearCache (self):
        """Discard the memoised value; harmless if nothing is cached yet."""
        try:
            del self.cachedValue
        except AttributeError:
            pass
class PythonPathFunctions:
    """Helper namespace exposing the TALES path functions (path, string,
    exists, nocall, test, stripped) to python: expressions."""
    def __init__ (self, context):
        self.context = context
        self.pathHandler = {}
        self.pathHandler['path'] = self.path
        self.pathHandler['string'] = self.string
        self.pathHandler['exists'] = self.exists
        self.pathHandler['nocall'] = self.nocall
        self.pathHandler['test'] = self.test
        self.pathHandler['stripped'] = self.stripped

    def path (self, expr):
        return self.context.evaluatePath (expr)

    def string (self, expr):
        return self.context.evaluateString (expr)

    def stripped (self, expr):
        # Strip XML/HTML tags from the evaluated string expression.
        # Fixed: previously referenced the undefined global name 'context'
        # instead of self.context, raising NameError whenever called.
        return re.sub(r'</?\w+[^>]*>', r'', self.context.evaluateString (expr))

    def exists (self, expr):
        return self.context.evaluateExists (expr)

    def nocall (self, expr):
        return self.context.evaluateNoCall (expr)

    def test (self, *arguments):
        """TALES test(): pairs of (condition, value); an odd trailing
        argument is the default returned when no condition is true."""
        if (len (arguments) % 2):
            # We have an odd number of arguments - which means the last one is a default
            pairs = arguments[:-1]
            defaultValue = arguments[-1]
        else:
            # No default - so use None
            pairs = arguments
            defaultValue = None
        index = 0
        while (index < len (pairs)):
            test = pairs[index]
            index += 1
            value = pairs[index]
            index += 1
            if (test):
                return value
        return defaultValue
class Context:
def __init__ (self, options=None, allowPythonPath=0):
self.allowPythonPath = allowPythonPath
self.globals = {}
self.locals = {}
self.localStack = []
self.repeatStack = []
self.populateDefaultVariables (options)
self.log = logging.getLogger ("simpleTALES.Context")
self.true = 1
self.false = 0
self.pythonPathFuncs = PythonPathFunctions (self)
self.prefixHandlers = {}
self.prefixHandlers['path'] = self.evaluatePath
self.prefixHandlers['exists'] = self.evaluateExists
self.prefixHandlers['nocall'] = self.evaluateNoCall
self.prefixHandlers['not'] = self.evaluateNot
self.prefixHandlers['string'] = self.evaluateString
self.prefixHandlers['python'] = self.evaluatePython
self.prefixHandlers['stripped'] = self.evaluateStripped
def addRepeat (self, name, var, initialValue):
# Pop the current repeat map onto the stack
self.repeatStack.append (self.repeatMap)
self.repeatMap = self.repeatMap.copy()
self.repeatMap [name] = var
# Map this repeatMap into the global space
self.addGlobal ('repeat', self.repeatMap)
# Add in the locals
self.pushLocals()
self.setLocal (name, initialValue)
def removeRepeat (self, name):
# Bring the old repeat map back
self.repeatMap = self.repeatStack.pop()
# Map this repeatMap into the global space
self.addGlobal ('repeat', self.repeatMap)
def addGlobal (self, name, value):
self.globals[name] = value
def pushLocals (self):
# Push the current locals onto a stack so that we can safely over-ride them.
self.localStack.append (self.locals)
self.locals = self.locals.copy()
def setLocal (self, name, value):
# Override the current local if present with the new one
self.locals [name] = value
def popLocals (self):
self.locals = self.localStack.pop()
def evaluate (self, expr, originalAtts = None):
# Returns a ContextVariable
#self.log.debug ("Evaluating %s" % expr)
if (originalAtts is not None):
# Call from outside
self.globals['attrs'] = originalAtts
suppressException = 1
else:
suppressException = 0
# Supports path, exists, nocall, not, and string
expr = expr.strip ()
try:
for key, function in self.prefixHandlers.items():
if expr.startswith (key+':'):
return function (expr[len(key)+1:].lstrip ())
else:
# Not specified - so it's a path
return self.evaluatePath (expr)
except PathNotFoundException, e:
if (suppressException):
return None
raise e
def evaluateStripped(self, expr):
if '${' not in expr:
expr = '${%s}' % expr
return re.sub(r'</?\w+[^>]*>', r'', self.evaluateString (expr))
def evaluatePython (self, expr):
if (not self.allowPythonPath):
self.log.warn ("Parameter allowPythonPath is false. NOT Evaluating python expression %s" % expr)
return self.false
#self.log.debug ("Evaluating python expression %s" % expr)
globals={}
for name, value in self.globals.items():
if (isinstance (value, ContextVariable)): value = value.rawValue()
globals [name] = value
for key, value in self.pythonPathFuncs.pathHandler.items():
globals [key] = value
locals={}
for name, value in self.locals.items():
if (isinstance (value, ContextVariable)): value = value.rawValue()
locals [name] = value
try:
result = eval(expr, globals, locals)
if (isinstance (result, ContextVariable)):
return result.value()
return result
except Exception, e:
# An exception occured evaluating the template, return the exception as text
self.log.warn ("Exception occurred evaluating python path, exception: " + str (e))
return "Exception: %s" % str (e)
def evaluatePath (self, expr):
#self.log.debug ("Evaluating path expression %s" % expr)
allPaths = expr.split ('|')
if (len (allPaths) > 1):
for path in allPaths:
# Evaluate this path
try:
return self.evaluate (path.strip ())
except PathNotFoundException, e:
# Path didn't exist, try the next one
pass
# No paths evaluated - raise exception.
raise PATHNOTFOUNDEXCEPTION
else:
# A single path - so let's evaluate it.
# This *can* raise PathNotFoundException
return self.traversePath (allPaths[0])
def evaluateExists (self, expr):
#self.log.debug ("Evaluating %s to see if it exists" % expr)
allPaths = expr.split ('|')
# The first path is for us
# Return true if this first bit evaluates, otherwise test the rest
try:
result = self.traversePath (allPaths[0], canCall = 0)
return self.true
except PathNotFoundException, e:
# Look at the rest of the paths.
pass
for path in allPaths[1:]:
# Evaluate this path
try:
pathResult = self.evaluate (path.strip ())
# If this is part of a "exists: path1 | exists: path2" path then we need to look at the actual result.
if (pathResult):
return self.true
except PathNotFoundException, e:
pass
# If we get this far then there are *no* paths that exist.
return self.false
def evaluateNoCall (self, expr):
#self.log.debug ("Evaluating %s using nocall" % expr)
allPaths = expr.split ('|')
# The first path is for us
try:
return self.traversePath (allPaths[0], canCall = 0)
except PathNotFoundException, e:
# Try the rest of the paths.
pass
for path in allPaths[1:]:
# Evaluate this path
try:
return self.evaluate (path.strip ())
except PathNotFoundException, e:
pass
# No path evaluated - raise error
raise PATHNOTFOUNDEXCEPTION
def evaluateNot (self, expr):
#self.log.debug ("Evaluating NOT value of %s" % expr)
# Evaluate what I was passed
try:
pathResult = self.evaluate (expr)
except PathNotFoundException, e:
# In SimpleTAL the result of "not: no/such/path" should be TRUE not FALSE.
return self.true
if (pathResult is None):
# Value was Nothing
return self.true
if (pathResult == DEFAULTVALUE):
return self.false
try:
resultLen = len (pathResult)
if (resultLen > 0):
return self.false
else:
return self.true
except:
# Not a sequence object.
pass
if (not pathResult):
return self.true
# Everything else is true, so we return false!
return self.false
def evaluateString (self, expr):
#self.log.debug ("Evaluating String %s" % expr)
result = ""
skipCount = 0
for position in xrange (0,len (expr)):
if (skipCount > 0):
skipCount -= 1
else:
if (expr[position] == '$'):
try:
if (expr[position + 1] == '$'):
# Escaped $ sign
result += '$'
skipCount = 1
elif (expr[position + 1] == '{'):
# Looking for a path!
endPos = expr.find ('}', position + 1)
if (endPos > 0):
path = expr[position + 2:endPos]
# Evaluate the path - missing paths raise exceptions as normal.
try:
pathResult = self.evaluate (path)
except PathNotFoundException, e:
# This part of the path didn't evaluate to anything - leave blank
pathResult = u''
if (pathResult is not None):
if (isinstance (pathResult, types.UnicodeType)):
result += pathResult
else:
# THIS IS NOT A BUG!
# Use Unicode in Context if you aren't using Ascii!
result += unicode (pathResult)
skipCount = endPos - position
else:
# It's a variable
endPos = expr.find (' ', position + 1)
if (endPos == -1):
endPos = len (expr)
path = expr [position + 1:endPos]
# Evaluate the variable - missing paths raise exceptions as normal.
try:
pathResult = self.traversePath (path)
except PathNotFoundException, e:
# This part of the path didn't evaluate to anything - leave blank
pathResult = u''
if (pathResult is not None):
if (isinstance (pathResult, types.UnicodeType)):
result += pathResult
else:
# THIS IS NOT A BUG!
# Use Unicode in Context if you aren't using Ascii!
result += unicode (pathResult)
skipCount = endPos - position - 1
except IndexError, e:
# Trailing $ sign - just suppress it
self.log.warn ("Trailing $ detected")
pass
else:
result += expr[position]
return result
def traversePath (self, expr, canCall=1):
# canCall only applies to the *final* path destination, not points down the path.
# Check for and correct for trailing/leading quotes
if (expr.startswith ('"') or expr.startswith ("'")):
if (expr.endswith ('"') or expr.endswith ("'")):
expr = expr [1:-1]
else:
expr = expr [1:]
elif (expr.endswith ('"') or expr.endswith ("'")):
expr = expr [0:-1]
pathList = expr.split ('/')
path = pathList[0]
if path.startswith ('?'):
path = path[1:]
if self.locals.has_key(path):
path = self.locals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (callable (path)):path = apply (path, ())
elif self.globals.has_key(path):
path = self.globals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (callable (path)):path = apply (path, ())
#self.log.debug ("Dereferenced to %s" % path)
if self.locals.has_key(path):
val = self.locals[path]
elif self.globals.has_key(path):
val = self.globals[path]
else:
# If we can't find it then raise an exception
raise PATHNOTFOUNDEXCEPTION
index = 1
for path in pathList[1:]:
#self.log.debug ("Looking for path element %s" % path)
if path.startswith ('?'):
path = path[1:]
if self.locals.has_key(path):
path = self.locals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (callable (path)):path = apply (path, ())
elif self.globals.has_key(path):
path = self.globals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (callable (path)):path = apply (path, ())
#self.log.debug ("Dereferenced to %s" % path)
try:
if (isinstance (val, ContextVariable)): temp = val.value((index,pathList))
elif (callable (val)):temp = apply (val, ())
else: temp = val
except ContextVariable, e:
# Fast path for those functions that return values
return e.value()
except TypeError:
temp = val
if (hasattr (temp, path)):
val = getattr (temp, path)
else:
try:
try:
val = temp[path]
except TypeError:
val = temp[int(path)]
except:
#self.log.debug ("Not found.")
raise PATHNOTFOUNDEXCEPTION
index = index + 1
#self.log.debug ("Found value %s" % str (val))
if (canCall):
try:
if (isinstance (val, ContextVariable)): result = val.value((index,pathList))
elif (callable (val)):result = apply (val, ())
else: result = val
except ContextVariable, e:
# Fast path for those functions that return values
return e.value()
else:
if (isinstance (val, ContextVariable)): result = val.realValue
else: result = val
return result
def __str__ (self):
return "Globals: " + str (self.globals) + "Locals: " + str (self.locals)
def populateDefaultVariables (self, options):
vars = {}
self.repeatMap = {}
vars['nothing'] = None
vars['default'] = DEFAULTVALUE
vars['options'] = options
# To start with there are no repeats
vars['repeat'] = self.repeatMap
vars['attrs'] = None
# Add all of these to the global context
for name in vars.keys():
self.addGlobal (name,vars[name])
# Add also under CONTEXTS
self.addGlobal ('CONTEXTS', vars) | 0.155976 | 0.210594 |
from typing import List
from model import Entry, Directory, NormalFile, VirusFile
def __dir_arg_parse(directory: Directory, directory_path: str) -> Entry:
"""Parses a concatenated directory path to return the proper target
which may be a file or directory
"""
dir_split = directory_path.split("/")
for target in dir_split:
if target == "..":
if directory.get_parent():
directory = directory.get_parent()
elif directory.get_name() != target and target != ".":
directory = directory.get_entry(target)
return directory
def ls(console, args):
"""Mimics the ls command to list the contents of a Directory
which will distinguish the directories from files
by placing the Directories first and Files second
"""
# Keep track of the options for the ls command
options = {
"show_hidden": {
"identifier": "a",
"value": False}}
targets = []
# Iterate through all of the args, separating options from targets
for arg in args:
if arg.startswith("-"):
for opt in options:
options[opt]["value"] = options[opt]["identifier"] in arg
else:
targets.append(arg)
# List the results
if len(targets) == 0:
return console.get_current_dir().list_contents(options["show_hidden"]["value"])
results = []
for target in targets:
current_dir = __dir_arg_parse(console.get_current_dir(), target)
if current_dir:
if len(targets) > 1:
results.append(f"{target}{':' if isinstance(current_dir, Directory) else ''}")
if isinstance(current_dir, Directory):
results.append(current_dir.list_contents(options["show_hidden"]["value"]))
else:
results.append(f"ls: {target}: No such file or directory")
return "\n".join(results)
def cd(console, args):
"""Mimics the cd command to change Directories"""
if len(args) > 1:
return "usage: cd <directory>"
if len(args) == 0:
usr_dir = console.get_root().get_entry("usr")
username = console.get_save().get_username()
console.set_current_dir(usr_dir.get_entry(username))
return
target = args[0].split("/")
for tgt in target:
current_dir = console.get_current_dir()
if tgt == ".":
continue
elif tgt == "..":
if (console.is_in_play() or console.is_in_tutorial()) and current_dir == console.get_save().get_trash():
console.set_current_dir(console.get_previous_dir())
elif current_dir.get_parent():
console.set_current_dir(current_dir.get_parent())
continue
elif tgt == "Trash" and (console.is_in_play() or console.is_in_tutorial()):
console.set_previous_dir(console.get_current_dir())
if console.is_in_play():
console.set_current_dir(console.get_save().get_trash())
elif console.is_in_tutorial():
console.set_current_dir(console.get_tutorial_trash())
return
found = False
for entry in current_dir.get_entries():
if entry.get_name() == tgt:
if isinstance(entry, Directory):
found = True
console.set_current_dir(entry)
else:
return f"cd: not a directory: {tgt}"
if not found:
return f"cd: {tgt}: No such file or directory"
def cat(console, args):
if len(args) == 0:
return "usage: cat <file(s)>"
result = []
for file in args:
file = __dir_arg_parse(console.get_current_dir(), file)
if file:
if isinstance(file, Directory):
result.append(f"cat: {file.get_name()}: Is a directory")
break
else:
file_result = ""
total = 0
for byte in file.get_bytes():
file_result += f"{hex(byte)[2:].rjust(2, '0')} "
total += 1
if total % 16 == 0:
file_result += "\n"
result.append(file_result)
return "\n".join(result)
def rm(console, args):
if len(args) == 0:
return "usage: rm [-r] file ..."
recursive = "-r" in args or (len(args) > 0 and args[0].startswith("-") and "r" in args[0])
target = None
for entry in console.get_current_dir().get_entries():
if entry.get_name() == args[-1]:
target = entry
if not target or console.get_root() is None:
return f"rm: {args[-1]}: No such file or directory"
# The wrong virus file was deleted
if isinstance(target, VirusFile):
if target.get_number() != console.get_save().get_virus_files()[0] + 1:
console.get_save().increase_speed(target)
return "rm: Incorrect virus file deleted: File moved to new location; New file spawned"
else:
console.get_save().remove_virus(target)
return f"rm: Successful deletion: {target} removed"
else:
removed = __rm_helper(target, recursive)
console.get_current_dir().remove_entry(target)
for entry in removed:
entry.set_parent(console.get_trash())
console.get_trash().add_entries(removed)
def __rm_helper(directory: Directory, recursive: bool = True) -> List[Entry]:
removed = []
for entry in directory.get_entries():
if isinstance(entry, Directory):
if entry.get_size() == 0 or recursive:
removed.append(entry)
elif isinstance(entry, NormalFile):
removed.append(entry)
for entry in removed:
directory.remove_entry(entry)
return removed
def restore(console, args):
if len(args) == 0:
return "usage: restore <file>"
if console.get_current_dir() != console.get_trash():
return "restore: must be in Trash directory"
if len(args) == 1 and args[0] == "*":
args = [entry.get_name() for entry in console.get_trash().get_entries()]
result = []
for entry in args:
file = __dir_arg_parse(console.get_trash(), entry)
if file:
if isinstance(file, NormalFile):
file = file.restore(console.get_root())
console.get_save().restored_file()
result.append(f"{file.get_name()} restored to {str(file)}")
else:
result.append(f"restore: {file.get_name()}: is not a valid file")
else:
result.append(f"restore: {entry}: No such file")
return "\n".join(result)
def trace(console, args):
if len(args) == 0:
return "usage: trace <file(s)>"
if console.is_in_play():
trash = console.get_trash()
result = []
for file in args:
file = __dir_arg_parse(trash, file)
if file:
if isinstance(file, Directory):
result.append(f"trace: {file.get_name()}: Is a directory")
continue
else:
for log in console.get_save().get_deletion_log():
if file.get_name() == log[1].split("/")[-1]:
result.append(log[2])
return "\n".join(result)
def mntr(console, args):
if len(args) != 0:
return "usage: mntr"
save = console.get_save()
log = save.get_deletion_log()
speed = save.get_speed()
result = "last log entry: {}\nspeed: {}s\nvirus files deleted: {}\nfiles deleted by virus: {}"
return result.format(
log[-1][1]
if len(log) != 0
else "None found", speed,
save.get_virus_files()[0], save.get_normal_files()[0])
def track(console, args):
if len(args) == 0:
tracked_files = console.get_save().get_tracked_files()
return "\n".join([
f"{i + 1}: {tracked_files[i]}"
for i in range(len(tracked_files))
if tracked_files[i] is not None
])
elif len(args) % 2 != 0:
return "usage: track [<number> <file> ...]"
target_numbers = []
targets = []
for i in range(0, len(args), 2):
if not args[i].isdigit():
return f"track: {args[i]}: not a number"
target_numbers.append(int(args[i]))
targets.append(args[i + 1])
messages = []
for i in range(len(targets)):
target = targets[i]
target_number = target_numbers[i]
tgt = __dir_arg_parse(console.get_current_dir(), target)
messages.append("track: {}".format(
f"{tgt} tracked"
if tgt is not None
else f"{tgt}: No such file or directory"))
if tgt:
console.get_save().track_virus(target_number, tgt)
return "\n".join(messages)
def tut(_, args):
if len(args) != 0:
return "usage: tut"
return "Type ./tutorial.sh"
def help_command():
return ("ls [directory] -> Lists the specified directory, or the current one if none is given\n" +
"cd [directory] -> Changes the current directory, or moves to the beginning directory if none is given\n" +
"cat <file> -> Prints out the contents of a file\n" +
"rm [-r] [directory OR file] -> Removes a directory or file and moves it to the Trash\n" +
"track [<number> <file> ...] -> Allows you to track a virus file with a number to identify it easier.\n" +
"\tIf nothing is given, it will show you the files you're tracking currently.\n" +
"trace <file> -> (Can only be used in the Trash directory) Allows you to trace where a file was deleted from\n" +
"mntr -> Shows you the most recently deleted file, the speed at which files are deleted by the virus, how\n" +
"\tmany virus files you've deleted, and how many files have been deleted by the virus.\n" +
"restore <file> -> Restores a file to its original location (Can only be used in the Trash directory)\n" +
"help -> Shows this help message!") | model/util/command.py | from typing import List
from model import Entry, Directory, NormalFile, VirusFile
def __dir_arg_parse(directory: Directory, directory_path: str) -> Entry:
"""Parses a concatenated directory path to return the proper target
which may be a file or directory
"""
dir_split = directory_path.split("/")
for target in dir_split:
if target == "..":
if directory.get_parent():
directory = directory.get_parent()
elif directory.get_name() != target and target != ".":
directory = directory.get_entry(target)
return directory
def ls(console, args):
"""Mimics the ls command to list the contents of a Directory
which will distinguish the directories from files
by placing the Directories first and Files second
"""
# Keep track of the options for the ls command
options = {
"show_hidden": {
"identifier": "a",
"value": False}}
targets = []
# Iterate through all of the args, separating options from targets
for arg in args:
if arg.startswith("-"):
for opt in options:
options[opt]["value"] = options[opt]["identifier"] in arg
else:
targets.append(arg)
# List the results
if len(targets) == 0:
return console.get_current_dir().list_contents(options["show_hidden"]["value"])
results = []
for target in targets:
current_dir = __dir_arg_parse(console.get_current_dir(), target)
if current_dir:
if len(targets) > 1:
results.append(f"{target}{':' if isinstance(current_dir, Directory) else ''}")
if isinstance(current_dir, Directory):
results.append(current_dir.list_contents(options["show_hidden"]["value"]))
else:
results.append(f"ls: {target}: No such file or directory")
return "\n".join(results)
def cd(console, args):
"""Mimics the cd command to change Directories"""
if len(args) > 1:
return "usage: cd <directory>"
if len(args) == 0:
usr_dir = console.get_root().get_entry("usr")
username = console.get_save().get_username()
console.set_current_dir(usr_dir.get_entry(username))
return
target = args[0].split("/")
for tgt in target:
current_dir = console.get_current_dir()
if tgt == ".":
continue
elif tgt == "..":
if (console.is_in_play() or console.is_in_tutorial()) and current_dir == console.get_save().get_trash():
console.set_current_dir(console.get_previous_dir())
elif current_dir.get_parent():
console.set_current_dir(current_dir.get_parent())
continue
elif tgt == "Trash" and (console.is_in_play() or console.is_in_tutorial()):
console.set_previous_dir(console.get_current_dir())
if console.is_in_play():
console.set_current_dir(console.get_save().get_trash())
elif console.is_in_tutorial():
console.set_current_dir(console.get_tutorial_trash())
return
found = False
for entry in current_dir.get_entries():
if entry.get_name() == tgt:
if isinstance(entry, Directory):
found = True
console.set_current_dir(entry)
else:
return f"cd: not a directory: {tgt}"
if not found:
return f"cd: {tgt}: No such file or directory"
def cat(console, args):
if len(args) == 0:
return "usage: cat <file(s)>"
result = []
for file in args:
file = __dir_arg_parse(console.get_current_dir(), file)
if file:
if isinstance(file, Directory):
result.append(f"cat: {file.get_name()}: Is a directory")
break
else:
file_result = ""
total = 0
for byte in file.get_bytes():
file_result += f"{hex(byte)[2:].rjust(2, '0')} "
total += 1
if total % 16 == 0:
file_result += "\n"
result.append(file_result)
return "\n".join(result)
def rm(console, args):
if len(args) == 0:
return "usage: rm [-r] file ..."
recursive = "-r" in args or (len(args) > 0 and args[0].startswith("-") and "r" in args[0])
target = None
for entry in console.get_current_dir().get_entries():
if entry.get_name() == args[-1]:
target = entry
if not target or console.get_root() is None:
return f"rm: {args[-1]}: No such file or directory"
# The wrong virus file was deleted
if isinstance(target, VirusFile):
if target.get_number() != console.get_save().get_virus_files()[0] + 1:
console.get_save().increase_speed(target)
return "rm: Incorrect virus file deleted: File moved to new location; New file spawned"
else:
console.get_save().remove_virus(target)
return f"rm: Successful deletion: {target} removed"
else:
removed = __rm_helper(target, recursive)
console.get_current_dir().remove_entry(target)
for entry in removed:
entry.set_parent(console.get_trash())
console.get_trash().add_entries(removed)
def __rm_helper(directory: Directory, recursive: bool = True) -> List[Entry]:
removed = []
for entry in directory.get_entries():
if isinstance(entry, Directory):
if entry.get_size() == 0 or recursive:
removed.append(entry)
elif isinstance(entry, NormalFile):
removed.append(entry)
for entry in removed:
directory.remove_entry(entry)
return removed
def restore(console, args):
if len(args) == 0:
return "usage: restore <file>"
if console.get_current_dir() != console.get_trash():
return "restore: must be in Trash directory"
if len(args) == 1 and args[0] == "*":
args = [entry.get_name() for entry in console.get_trash().get_entries()]
result = []
for entry in args:
file = __dir_arg_parse(console.get_trash(), entry)
if file:
if isinstance(file, NormalFile):
file = file.restore(console.get_root())
console.get_save().restored_file()
result.append(f"{file.get_name()} restored to {str(file)}")
else:
result.append(f"restore: {file.get_name()}: is not a valid file")
else:
result.append(f"restore: {entry}: No such file")
return "\n".join(result)
def trace(console, args):
if len(args) == 0:
return "usage: trace <file(s)>"
if console.is_in_play():
trash = console.get_trash()
result = []
for file in args:
file = __dir_arg_parse(trash, file)
if file:
if isinstance(file, Directory):
result.append(f"trace: {file.get_name()}: Is a directory")
continue
else:
for log in console.get_save().get_deletion_log():
if file.get_name() == log[1].split("/")[-1]:
result.append(log[2])
return "\n".join(result)
def mntr(console, args):
if len(args) != 0:
return "usage: mntr"
save = console.get_save()
log = save.get_deletion_log()
speed = save.get_speed()
result = "last log entry: {}\nspeed: {}s\nvirus files deleted: {}\nfiles deleted by virus: {}"
return result.format(
log[-1][1]
if len(log) != 0
else "None found", speed,
save.get_virus_files()[0], save.get_normal_files()[0])
def track(console, args):
if len(args) == 0:
tracked_files = console.get_save().get_tracked_files()
return "\n".join([
f"{i + 1}: {tracked_files[i]}"
for i in range(len(tracked_files))
if tracked_files[i] is not None
])
elif len(args) % 2 != 0:
return "usage: track [<number> <file> ...]"
target_numbers = []
targets = []
for i in range(0, len(args), 2):
if not args[i].isdigit():
return f"track: {args[i]}: not a number"
target_numbers.append(int(args[i]))
targets.append(args[i + 1])
messages = []
for i in range(len(targets)):
target = targets[i]
target_number = target_numbers[i]
tgt = __dir_arg_parse(console.get_current_dir(), target)
messages.append("track: {}".format(
f"{tgt} tracked"
if tgt is not None
else f"{tgt}: No such file or directory"))
if tgt:
console.get_save().track_virus(target_number, tgt)
return "\n".join(messages)
def tut(_, args):
if len(args) != 0:
return "usage: tut"
return "Type ./tutorial.sh"
def help_command():
return ("ls [directory] -> Lists the specified directory, or the current one if none is given\n" +
"cd [directory] -> Changes the current directory, or moves to the beginning directory if none is given\n" +
"cat <file> -> Prints out the contents of a file\n" +
"rm [-r] [directory OR file] -> Removes a directory or file and moves it to the Trash\n" +
"track [<number> <file> ...] -> Allows you to track a virus file with a number to identify it easier.\n" +
"\tIf nothing is given, it will show you the files you're tracking currently.\n" +
"trace <file> -> (Can only be used in the Trash directory) Allows you to trace where a file was deleted from\n" +
"mntr -> Shows you the most recently deleted file, the speed at which files are deleted by the virus, how\n" +
"\tmany virus files you've deleted, and how many files have been deleted by the virus.\n" +
"restore <file> -> Restores a file to its original location (Can only be used in the Trash directory)\n" +
"help -> Shows this help message!") | 0.620737 | 0.208179 |
import enum
from typing import Any, Iterable, Optional, Tuple, Type, TypeVar, Union
__all__ = [
"Choices",
"Enum",
"auto", # also export auto for convenience
"Switch",
"is_choices",
"is_enum",
"is_optional",
"unwrap_optional",
]
auto = enum.auto
NoneType = type(None)
T = TypeVar('T')
class _Choices:
def __new__(cls, values=None):
self = super().__new__(cls)
self.__values__ = values
return self
def __getitem__(self, values: Union[str, Iterable[str]]):
if isinstance(values, Iterable) and not isinstance(values, str):
parsed_values = tuple(values)
else:
parsed_values = (values,)
if len(parsed_values) == 0:
raise TypeError("Choices must contain at least one element")
return self.__class__(parsed_values)
Choices: Any = _Choices()
class Enum(enum.Enum):
# pylint: disable=no-self-argument, unused-argument
def _generate_next_value_(name, start, count, last_values):
return name.lower()
# pylint: enable=no-self-argument, unused-argument
def __eq__(self, other):
return self.value == other or super().__eq__(other)
# Switch is a type that's different but equivalent to `bool`.
# It is defined as the `Union` of `bool` and a dummy type, because:
# 1. `bool` cannot be sub-typed.
# >> Switch = type('Switch', (bool,), {})
# 2. `Union` with a single (possibly duplicated) type is flattened into that type.
# >> Switch = Union[bool]
# 3. `NewType` forbids implicit casts from `bool`.
# >> Switch = NewType('Switch', bool)
__dummy_type__ = type("__dummy_type__", (), {}) # the names must match for pickle to work
Switch = Union[bool, __dummy_type__] # type: ignore[valid-type]
HAS_LITERAL = False
_Literal = None
try:
from typing import Literal # type: ignore
HAS_LITERAL = True
except ImportError:
try:
from typing_extensions import Literal # type: ignore
try:
from typing_extensions import _Literal # type: ignore # compat. with Python 3.6
except ImportError:
pass
HAS_LITERAL = True
except ImportError:
pass
if HAS_LITERAL:
def is_choices(typ: type) -> bool:
r"""Check whether a type is a choices type (:class:`Choices` or :class:`Literal`). This cannot be checked using
traditional methods, since :class:`Choices` is a metaclass.
"""
return (isinstance(typ, _Choices) or
getattr(typ, '__origin__', None) is Literal or
type(typ) is _Literal) # pylint: disable=unidiomatic-typecheck
def unwrap_choices(typ: type) -> Tuple[str, ...]:
r"""Return the string literals associated with the choices type. Literal type in Python 3.7+ stores the literals
in ``typ.__args__``, but in Python 3.6- it's in ``typ.__values__``.
"""
return typ.__values__ if hasattr(typ, "__values__") else typ.__args__ # type: ignore[attr-defined]
else:
def is_choices(typ: type) -> bool:
r"""Check whether a type is a choices type (:class:`Choices`). This cannot be checked using traditional methods,
since :class:`Choices` is a metaclass.
"""
return isinstance(typ, _Choices)
def unwrap_choices(typ: type) -> Tuple[str, ...]:
r"""Return the string literals associated with the choices type."""
return typ.__values__ # type: ignore[attr-defined]
def is_enum(typ: Any) -> bool:
r"""Check whether a type is an Enum type. Since we're using ``issubclass``, we need to check whether :arg:`typ`
is a type first."""
return isinstance(typ, type) and issubclass(typ, enum.Enum)
def is_optional(typ: type) -> bool:
r"""Check whether a type is `Optional[T]`. `Optional` is internally implemented as `Union` with `type(None)`."""
return getattr(typ, '__origin__', None) is Union and NoneType in typ.__args__ # type: ignore
def unwrap_optional(typ: Type[Optional[T]]) -> Type[T]:
r"""Return the inner type inside an `Optional[T]` type."""
return next(t for t in typ.__args__ if not isinstance(t, NoneType)) # type: ignore | argtyped/custom_types.py | import enum
from typing import Any, Iterable, Optional, Tuple, Type, TypeVar, Union
__all__ = [
"Choices",
"Enum",
"auto", # also export auto for convenience
"Switch",
"is_choices",
"is_enum",
"is_optional",
"unwrap_optional",
]
auto = enum.auto
NoneType = type(None)
T = TypeVar('T')
class _Choices:
def __new__(cls, values=None):
self = super().__new__(cls)
self.__values__ = values
return self
def __getitem__(self, values: Union[str, Iterable[str]]):
if isinstance(values, Iterable) and not isinstance(values, str):
parsed_values = tuple(values)
else:
parsed_values = (values,)
if len(parsed_values) == 0:
raise TypeError("Choices must contain at least one element")
return self.__class__(parsed_values)
Choices: Any = _Choices()
class Enum(enum.Enum):
# pylint: disable=no-self-argument, unused-argument
def _generate_next_value_(name, start, count, last_values):
return name.lower()
# pylint: enable=no-self-argument, unused-argument
def __eq__(self, other):
return self.value == other or super().__eq__(other)
# Switch is a type that's different but equivalent to `bool`.
# It is defined as the `Union` of `bool` and a dummy type, because:
# 1. `bool` cannot be sub-typed.
# >> Switch = type('Switch', (bool,), {})
# 2. `Union` with a single (possibly duplicated) type is flattened into that type.
# >> Switch = Union[bool]
# 3. `NewType` forbids implicit casts from `bool`.
# >> Switch = NewType('Switch', bool)
__dummy_type__ = type("__dummy_type__", (), {}) # the names must match for pickle to work
Switch = Union[bool, __dummy_type__] # type: ignore[valid-type]
HAS_LITERAL = False
_Literal = None
try:
from typing import Literal # type: ignore
HAS_LITERAL = True
except ImportError:
try:
from typing_extensions import Literal # type: ignore
try:
from typing_extensions import _Literal # type: ignore # compat. with Python 3.6
except ImportError:
pass
HAS_LITERAL = True
except ImportError:
pass
if HAS_LITERAL:
def is_choices(typ: type) -> bool:
r"""Check whether a type is a choices type (:class:`Choices` or :class:`Literal`). This cannot be checked using
traditional methods, since :class:`Choices` is a metaclass.
"""
return (isinstance(typ, _Choices) or
getattr(typ, '__origin__', None) is Literal or
type(typ) is _Literal) # pylint: disable=unidiomatic-typecheck
def unwrap_choices(typ: type) -> Tuple[str, ...]:
r"""Return the string literals associated with the choices type. Literal type in Python 3.7+ stores the literals
in ``typ.__args__``, but in Python 3.6- it's in ``typ.__values__``.
"""
return typ.__values__ if hasattr(typ, "__values__") else typ.__args__ # type: ignore[attr-defined]
else:
def is_choices(typ: type) -> bool:
r"""Check whether a type is a choices type (:class:`Choices`). This cannot be checked using traditional methods,
since :class:`Choices` is a metaclass.
"""
return isinstance(typ, _Choices)
def unwrap_choices(typ: type) -> Tuple[str, ...]:
r"""Return the string literals associated with the choices type."""
return typ.__values__ # type: ignore[attr-defined]
def is_enum(typ: Any) -> bool:
r"""Check whether a type is an Enum type. Since we're using ``issubclass``, we need to check whether :arg:`typ`
is a type first."""
return isinstance(typ, type) and issubclass(typ, enum.Enum)
def is_optional(typ: type) -> bool:
r"""Check whether a type is `Optional[T]`. `Optional` is internally implemented as `Union` with `type(None)`."""
return getattr(typ, '__origin__', None) is Union and NoneType in typ.__args__ # type: ignore
def unwrap_optional(typ: Type[Optional[T]]) -> Type[T]:
r"""Return the inner type inside an `Optional[T]` type."""
return next(t for t in typ.__args__ if not isinstance(t, NoneType)) # type: ignore | 0.839438 | 0.376021 |
class Collect:
    """Collect data from the loader relevant to the specific task.

    Keeps the items listed in ``keys`` unchanged and gathers the items in
    ``meta_keys`` into one meta item named ``meta_name``. This is usually the
    last stage of the data-loading pipeline.

    For example, with ``keys='imgs'``, ``meta_keys=('filename', 'label',
    'original_shape')`` and ``meta_name='img_metas'``, the result is a dict
    with keys ``'imgs'`` and ``'img_metas'``, where ``'img_metas'`` is a
    DataContainer wrapping a dict with keys ``'filename'``, ``'label'`` and
    ``'original_shape'``.

    Args:
        keys (Sequence[str]): Required keys to be collected.
        meta_name (str): Name of the key that holds the meta information.
            This key is always populated. Default: "img_metas".
        meta_keys (Sequence[str]): Keys collected under ``meta_name``; the
            contents of that dict depend on ``meta_keys``. By default this
            includes:
            - "filename": path to the image file
            - "label": label of the image file
            - "original_shape": original image shape as a (h, w, c) tuple
            - "img_shape": shape of the image fed to the network as a
              (h, w, c) tuple; images may be zero padded on the bottom/right
              when the batch tensor is larger than this shape
            - "pad_shape": image shape after padding
            - "flip_direction": a str in ("horizontal", "vertical") that
              indicates whether the image was flipped horizontally or
              vertically
            - "img_norm_cfg": a dict of normalization information:
                - mean: per channel mean subtraction
                - std: per channel std divisor
                - to_rgb: bool indicating if BGR was converted to RGB
        nested (bool): When True, every collected value is wrapped in a
            one-element list (``data[x] = [data[x]]``). Kept for
            compatibility. Default: False.
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'label', 'original_shape', 'img_shape',
                            'pad_shape', 'flip_direction', 'img_norm_cfg'),
                 meta_name='img_metas',
                 nested=False):
        self.nested = nested
        self.meta_name = meta_name
        self.meta_keys = meta_keys
        self.keys = keys

    def __call__(self, results):
        """Perform the Collect formatting.

        Args:
            results (dict): Result dict to be reduced and passed on to the
                next transform in the pipeline.
        """
        collected = {key: results[key] for key in self.keys}
        if self.meta_keys:
            meta = {key: results[key] for key in self.meta_keys}
            collected[self.meta_name] = DC(meta, cpu_only=True)
        if self.nested:
            collected = {key: [value] for key, value in collected.items()}
        return collected

    def __repr__(self):
        cls_name = self.__class__.__name__
        return (f'{cls_name}('
                f'keys={self.keys}, meta_keys={self.meta_keys}, '
                f'nested={self.nested})')
"""Collect data from the loader relevant to the specific task.
This keeps the items in ``keys`` as it is, and collect items in
``meta_keys`` into a meta item called ``meta_name``.This is usually
the last stage of the data loader pipeline.
For example, when keys='imgs', meta_keys=('filename', 'label',
'original_shape'), meta_name='img_metas', the results will be a dict with
keys 'imgs' and 'img_metas', where 'img_metas' is a DataContainer of
another dict with keys 'filename', 'label', 'original_shape'.
Args:
keys (Sequence[str]): Required keys to be collected.
        meta_name (str): The name of the key that contains meta information.
This key is always populated. Default: "img_metas".
meta_keys (Sequence[str]): Keys that are collected under meta_name.
The contents of the ``meta_name`` dictionary depends on
``meta_keys``.
By default this includes:
- "filename": path to the image file
- "label": label of the image file
- "original_shape": original shape of the image as a tuple
(h, w, c)
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the
bottom/right, if the batch tensor is larger than this shape.
- "pad_shape": image shape after padding
            - "flip_direction": a str in ("horizontal", "vertical") to
                indicate if the image is flipped horizontally or vertically.
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
nested (bool): If set as True, will apply data[x] = [data[x]] to all
items in data. The arg is added for compatibility. Default: False.
"""
def __init__(self,
keys,
meta_keys=('filename', 'label', 'original_shape', 'img_shape',
'pad_shape', 'flip_direction', 'img_norm_cfg'),
meta_name='img_metas',
nested=False):
self.keys = keys
self.meta_keys = meta_keys
self.meta_name = meta_name
self.nested = nested
def __call__(self, results):
"""Performs the Collect formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
data = {}
for key in self.keys:
data[key] = results[key]
if len(self.meta_keys) != 0:
meta = {}
for key in self.meta_keys:
meta[key] = results[key]
data[self.meta_name] = DC(meta, cpu_only=True)
if self.nested:
for k in data:
data[k] = [data[k]]
return data
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, meta_keys={self.meta_keys}, '
f'nested={self.nested})') | 0.931127 | 0.68875 |
import gc
from itertools import chain
from random import sample
from nltk import ngrams
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from pathlib import Path
import SQLite_handler
from joblib import dump, load
from my_weapon import *
from myclf import *
from Trump_Clinton_Classifer.TwProcess import CustomTweetTokenizer
from Trump_Clinton_Classifer.TwSentiment import (bag_of_words,
bag_of_words_and_bigrams)
class Fake_Classifer(object):
    """Train and apply a 4-way media-bias / fake-news tweet classifier.

    Pipeline stages (each stage consumes the artefacts of the previous one):
      1. ``get_train_data`` - dump raw tweet texts, one file per label
      2. ``get_tokens``     - tokenise the raw texts
      3. ``train``          - fit bag-of-words(+bigrams) classifiers
      4. ``predict``        - classify the IRA election tweets in batches
    """

    # Source labels, in numeric-id order; shared by every pipeline stage
    # (the original duplicated this list in three methods).
    LABELS = [
        "fake",
        "extreme bias (right)",
        "right",
        "right leaning",
        "center",
        "left leaning",
        "left",
        "extreme bias (left)",
    ]

    # Collapse the 8 source labels into the 4 training categories
    # (0=fake, 1=right, 2=center, 3=left).
    LABEL_TO_CATEGORY = {
        "fake": 0,
        "extreme bias (right)": 1,
        "right": 1,
        "right leaning": 1,
        "center": 2,
        "left leaning": 3,
        "left": 3,
        "extreme bias (left)": 3,
    }

    def __init__(self):
        # Numeric class id (as a string) -> human readable label.
        self.MAP_LABELS = {str(i): label for i, label in enumerate(self.LABELS)}

    def get_train_data(self):
        """Dump the non-retweet texts of every labelled tweet, one file per label."""
        print("loading all tweets_csv ...")
        all_tweets = pd.read_csv("disk/all-tweets.csv", dtype=str, usecols=["tweet_id", "c_alex"])
        print("finished!")
        for label in self.LABELS:
            print(label, "...")
            tweets_id = all_tweets[all_tweets["c_alex"] == label].tweet_id
            rst = SQLite_handler.find_tweets(tweets_id)
            print(len(rst))
            with open("disk/train_data_fake/{}.txt".format(label), "w") as f:
                for d in rst:
                    # Skip records without a text payload and plain retweets.
                    if "text" not in d or d["text"].startswith("RT"):
                        continue
                    f.write(d["text"] + "\n")

    def get_tokens(self):
        """Tokenise the per-label raw texts into per-label token files."""
        tokenizer = CustomTweetTokenizer()
        for label in self.LABELS:
            print(label, "...")
            # ``with`` on both handles (the original leaked the read handle).
            with open("disk/tokens_fake/{}.txt".format(label), "w") as out, \
                    open("disk/train_data_fake/{}.txt".format(label)) as src:
                for line in src:
                    words = tokenizer.tokenize(line.strip())
                    if len(words) > 0:
                        out.write(" ".join(words) + "\n")

    def train(self, samples_per_category=1000000):
        """Fit classifiers on a balanced sample of the tokenised tweets.

        Labels are collapsed to 4 categories (fake/right/center/left), each
        down-sampled to ``samples_per_category`` tweets, vectorised as
        bag-of-words + bigrams, split 90/10, fitted and evaluated; the
        vectorizer and every fitted model are dumped under ``model/``.

        Args:
            samples_per_category (int): tweets drawn per category; default
                matches the original hard-coded 1,000,000.
        """
        # --- read the tokenised data, bucketed by collapsed category ---
        w_of_categories = [[], [], [], []]
        for label in self.LABELS:
            print(label, "...")
            y_i = self.LABEL_TO_CATEGORY[label]
            with open("disk/tokens_fake/{}.txt".format(label)) as src:
                for line in src:
                    w = line.strip().split(" ")
                    if len(w) > 0 and w[0] != "RT":
                        w_of_categories[y_i].append(w)

        # --- balance the categories and build the feature dicts ---
        X = []
        y = []
        for i in range(len(w_of_categories)):
            print("len of category:", len(w_of_categories[i]))
            w_of_categories[i] = sample(w_of_categories[i], samples_per_category)
            for w in w_of_categories[i]:
                X.append(bag_of_words_and_bigrams(w))
                y.append(i)
        print("Reading data finished! count:", len(y))

        # --- split train and test data ---
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        print("Splitting data finished!")

        # --- one-hot feature embedding (fitted on the training split only) ---
        v = DictVectorizer(dtype=np.int8, sparse=True, sort=False)
        X_train = v.fit_transform(X_train)
        X_test = v.transform(X_test)
        dump(v, 'model/20190415-DictVectorizer.joblib')
        print("Building word embedding finished!")
        print(X_train[0].shape, X_train[1].shape)
        print(X_train.shape, X_test.shape)

        # --- fit, evaluate and persist each requested classifier ---
        list_classifiers = ['LR']
        classifiers = {
            'NB': naive_bayes_classifier,
            'KNN': knn_classifier,
            'LR': logistic_regression_classifier,
            'RF': random_forest_classifier,
            'DT': decision_tree_classifier,
            'SVM': svm_classifier,
            'SVMCV': svm_cross_validation,
            'GBDT': gradient_boosting_classifier,
            'SVC': svm_linear_classifier,
        }
        for classifier in list_classifiers:
            print('******************* {} ********************'.format(classifier))
            if classifier == "LR":
                # max_iter as an int (the original passed the float 10e8).
                clf = LogisticRegression(penalty='l2', multi_class="multinomial", solver="sag", max_iter=10 ** 9)
                clf.fit(X_train, y_train)
            elif classifier == "GBDT":
                clf = GradientBoostingClassifier(learning_rate=0.1, max_depth=3)
                clf.fit(X_train, y_train)
            else:
                clf = classifiers[classifier](X_train, y_train)
            self.evaluate(clf, X_train, y_train, X_test, y_test)
            dump(clf, 'model/20190415-{}.joblib'.format(classifier))

    def evaluate(self, clf, X_train, y_train, X_test, y_test):
        """Print 5-fold CV accuracy on the train split and a classification
        report on the held-out test split."""
        # BUG FIX: the original message claimed "CV=10" while cv=5 was used.
        print('accuracy of CV=5:', cross_val_score(clf, X_train, y_train, cv=5).mean())
        y_pred = clf.predict(X_test)
        print(classification_report(y_test, y_pred))

    def predict(self):
        """Classify the IRA election tweets in batches of 1000 and write
        ``userid,label`` lines to ``data/ira_predicted_tweets.txt``."""
        tokenizer = CustomTweetTokenizer()
        v = load('model/20190415-DictVectorizer.joblib')
        clf = load('model/20190415-LR.joblib')
        ele_tweets = pd.read_csv('data/ira-tweets-ele.csv', dtype=str)

        def write_batch(f, feats, ids):
            # Vectorise + classify one batch, then persist "uid,label" rows.
            preds = clf.predict(v.transform(feats))
            for uid, pred in zip(ids, preds):
                f.write("{},{}\n".format(uid, pred))

        X = []
        uids = []
        batch_size = 1000
        with open("data/ira_predicted_tweets.txt", "w") as f:
            for _, row in tqdm(ele_tweets.iterrows()):
                uids.append(row["userid"])
                text = row["tweet_text"].replace("\n", " ").replace("\t", " ")
                X.append(bag_of_words_and_bigrams(tokenizer.tokenize(text)))
                if len(X) >= batch_size:
                    write_batch(f, X, uids)
                    X = []
                    uids = []
            # BUG FIX: the original silently dropped the final partial batch
            # (up to batch_size - 1 tweets were never classified/written).
            if X:
                write_batch(f, X, uids)
if __name__ == "__main__":
    # Earlier pipeline stages (extraction, tokenisation, training) are
    # one-off jobs and stay commented out; uncomment to re-run them.
    runner = Fake_Classifer()
    # runner.get_train_data()
    # runner.get_tokens()
    # runner.train()
    runner.predict()
import gc
from itertools import chain
from random import sample
from nltk import ngrams
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from pathlib import Path
import SQLite_handler
from joblib import dump, load
from my_weapon import *
from myclf import *
from Trump_Clinton_Classifer.TwProcess import CustomTweetTokenizer
from Trump_Clinton_Classifer.TwSentiment import (bag_of_words,
bag_of_words_and_bigrams)
class Fake_Classifer(object):
    """Train and apply a 4-way media-bias / fake-news tweet classifier.

    Pipeline stages (each stage consumes the artefacts of the previous one):
      1. ``get_train_data`` - dump raw tweet texts, one file per label
      2. ``get_tokens``     - tokenise the raw texts
      3. ``train``          - fit bag-of-words(+bigrams) classifiers
      4. ``predict``        - classify the IRA election tweets in batches
    """

    # Source labels, in numeric-id order; shared by every pipeline stage
    # (the original duplicated this list in three methods).
    LABELS = [
        "fake",
        "extreme bias (right)",
        "right",
        "right leaning",
        "center",
        "left leaning",
        "left",
        "extreme bias (left)",
    ]

    # Collapse the 8 source labels into the 4 training categories
    # (0=fake, 1=right, 2=center, 3=left).
    LABEL_TO_CATEGORY = {
        "fake": 0,
        "extreme bias (right)": 1,
        "right": 1,
        "right leaning": 1,
        "center": 2,
        "left leaning": 3,
        "left": 3,
        "extreme bias (left)": 3,
    }

    def __init__(self):
        # Numeric class id (as a string) -> human readable label.
        self.MAP_LABELS = {str(i): label for i, label in enumerate(self.LABELS)}

    def get_train_data(self):
        """Dump the non-retweet texts of every labelled tweet, one file per label."""
        print("loading all tweets_csv ...")
        all_tweets = pd.read_csv("disk/all-tweets.csv", dtype=str, usecols=["tweet_id", "c_alex"])
        print("finished!")
        for label in self.LABELS:
            print(label, "...")
            tweets_id = all_tweets[all_tweets["c_alex"] == label].tweet_id
            rst = SQLite_handler.find_tweets(tweets_id)
            print(len(rst))
            with open("disk/train_data_fake/{}.txt".format(label), "w") as f:
                for d in rst:
                    # Skip records without a text payload and plain retweets.
                    if "text" not in d or d["text"].startswith("RT"):
                        continue
                    f.write(d["text"] + "\n")

    def get_tokens(self):
        """Tokenise the per-label raw texts into per-label token files."""
        tokenizer = CustomTweetTokenizer()
        for label in self.LABELS:
            print(label, "...")
            # ``with`` on both handles (the original leaked the read handle).
            with open("disk/tokens_fake/{}.txt".format(label), "w") as out, \
                    open("disk/train_data_fake/{}.txt".format(label)) as src:
                for line in src:
                    words = tokenizer.tokenize(line.strip())
                    if len(words) > 0:
                        out.write(" ".join(words) + "\n")

    def train(self, samples_per_category=1000000):
        """Fit classifiers on a balanced sample of the tokenised tweets.

        Labels are collapsed to 4 categories (fake/right/center/left), each
        down-sampled to ``samples_per_category`` tweets, vectorised as
        bag-of-words + bigrams, split 90/10, fitted and evaluated; the
        vectorizer and every fitted model are dumped under ``model/``.

        Args:
            samples_per_category (int): tweets drawn per category; default
                matches the original hard-coded 1,000,000.
        """
        # --- read the tokenised data, bucketed by collapsed category ---
        w_of_categories = [[], [], [], []]
        for label in self.LABELS:
            print(label, "...")
            y_i = self.LABEL_TO_CATEGORY[label]
            with open("disk/tokens_fake/{}.txt".format(label)) as src:
                for line in src:
                    w = line.strip().split(" ")
                    if len(w) > 0 and w[0] != "RT":
                        w_of_categories[y_i].append(w)

        # --- balance the categories and build the feature dicts ---
        X = []
        y = []
        for i in range(len(w_of_categories)):
            print("len of category:", len(w_of_categories[i]))
            w_of_categories[i] = sample(w_of_categories[i], samples_per_category)
            for w in w_of_categories[i]:
                X.append(bag_of_words_and_bigrams(w))
                y.append(i)
        print("Reading data finished! count:", len(y))

        # --- split train and test data ---
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        print("Splitting data finished!")

        # --- one-hot feature embedding (fitted on the training split only) ---
        v = DictVectorizer(dtype=np.int8, sparse=True, sort=False)
        X_train = v.fit_transform(X_train)
        X_test = v.transform(X_test)
        dump(v, 'model/20190415-DictVectorizer.joblib')
        print("Building word embedding finished!")
        print(X_train[0].shape, X_train[1].shape)
        print(X_train.shape, X_test.shape)

        # --- fit, evaluate and persist each requested classifier ---
        list_classifiers = ['LR']
        classifiers = {
            'NB': naive_bayes_classifier,
            'KNN': knn_classifier,
            'LR': logistic_regression_classifier,
            'RF': random_forest_classifier,
            'DT': decision_tree_classifier,
            'SVM': svm_classifier,
            'SVMCV': svm_cross_validation,
            'GBDT': gradient_boosting_classifier,
            'SVC': svm_linear_classifier,
        }
        for classifier in list_classifiers:
            print('******************* {} ********************'.format(classifier))
            if classifier == "LR":
                # max_iter as an int (the original passed the float 10e8).
                clf = LogisticRegression(penalty='l2', multi_class="multinomial", solver="sag", max_iter=10 ** 9)
                clf.fit(X_train, y_train)
            elif classifier == "GBDT":
                clf = GradientBoostingClassifier(learning_rate=0.1, max_depth=3)
                clf.fit(X_train, y_train)
            else:
                clf = classifiers[classifier](X_train, y_train)
            self.evaluate(clf, X_train, y_train, X_test, y_test)
            dump(clf, 'model/20190415-{}.joblib'.format(classifier))

    def evaluate(self, clf, X_train, y_train, X_test, y_test):
        """Print 5-fold CV accuracy on the train split and a classification
        report on the held-out test split."""
        # BUG FIX: the original message claimed "CV=10" while cv=5 was used.
        print('accuracy of CV=5:', cross_val_score(clf, X_train, y_train, cv=5).mean())
        y_pred = clf.predict(X_test)
        print(classification_report(y_test, y_pred))

    def predict(self):
        """Classify the IRA election tweets in batches of 1000 and write
        ``userid,label`` lines to ``data/ira_predicted_tweets.txt``."""
        tokenizer = CustomTweetTokenizer()
        v = load('model/20190415-DictVectorizer.joblib')
        clf = load('model/20190415-LR.joblib')
        ele_tweets = pd.read_csv('data/ira-tweets-ele.csv', dtype=str)

        def write_batch(f, feats, ids):
            # Vectorise + classify one batch, then persist "uid,label" rows.
            preds = clf.predict(v.transform(feats))
            for uid, pred in zip(ids, preds):
                f.write("{},{}\n".format(uid, pred))

        X = []
        uids = []
        batch_size = 1000
        with open("data/ira_predicted_tweets.txt", "w") as f:
            for _, row in tqdm(ele_tweets.iterrows()):
                uids.append(row["userid"])
                text = row["tweet_text"].replace("\n", " ").replace("\t", " ")
                X.append(bag_of_words_and_bigrams(tokenizer.tokenize(text)))
                if len(X) >= batch_size:
                    write_batch(f, X, uids)
                    X = []
                    uids = []
            # BUG FIX: the original silently dropped the final partial batch
            # (up to batch_size - 1 tweets were never classified/written).
            if X:
                write_batch(f, X, uids)
if __name__ == "__main__":
    # Entry point. The earlier pipeline stages (extraction, tokenisation,
    # training) are one-off jobs and stay commented out.
    Lebron = Fake_Classifer()
    # Lebron.get_train_data()
    # Lebron.get_tokens()
    # Lebron.train()
    Lebron.predict()
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text
from django.views.debug import get_exception_reporter_filter
import logging
import os
from unidecode import unidecode
class BaseHandler(logging.Handler):
    """Shared helpers for the congo logging handlers below.

    Builds user, request and extra-info strings from a ``logging.LogRecord``.
    NOTE(review): this module is Python 2 only (``except Exception, e``,
    ``unicode``, ``iteritems`` and ``print`` statements).
    """
    def get_request_repr(self, record):
        """Return a sanitised repr of ``record.request``, or None when absent."""
        try:
            request = record.request
            # The exception reporter filter strips sensitive POST data.
            exception_reporter_filter = get_exception_reporter_filter(request)
            request_repr = force_text(exception_reporter_filter.get_request_repr(request))
        except Exception, e:
            # No request attached to the record (or repr failed): treat as absent.
            request_repr = None
        return request_repr
    def get_user(self, record, default = 'AnonymousUser'):
        """Return the acting user from the record or its request, as unicode."""
        if hasattr(record, 'user'):
            user = record.user
        elif hasattr(record, 'request') and hasattr(record.request, 'user'):
            user = record.request.user
        else:
            user = None
        return unicode(user) if user else unicode(default)
    def get_extra_info(self, record):
        """Render optional ``record.extra_info`` (sequence, mapping or scalar) as text."""
        if hasattr(record, 'extra_info'):
            if type(record.extra_info) in (tuple, list):
                extra_info = "\n".join([unicode(obj) for obj in record.extra_info])
            elif type(record.extra_info) in (dict, SortedDict):
                extra_info = ""
                # A 'list' entry is rendered first, then the remaining pairs.
                # NOTE(review): ``pop`` mutates the caller's dict.
                if 'list' in record.extra_info:
                    extra_info = "\n".join(record.extra_info.pop('list')) + "\n\n"
                extra_info += "\n".join(["%s: %s" % (key, val) for key, val in record.extra_info.iteritems()])
            else:
                extra_info = unicode(record.extra_info)
        else:
            extra_info = None
        return extra_info
    def get_name(self, record):
        """Return '[LEVEL] message' for the record."""
        return u"[%s] %s" % (record.levelname, record.getMessage())
    def get_message(self, record):
        """Return exception text, extra info and request repr joined by blank lines."""
        # format() populates record.exc_text when exc_info is present.
        self.format(record)
        exc_text = getattr(record, 'exc_text', None)
        extra_info = self.get_extra_info(record)
        request_repr = self.get_request_repr(record)
        # unidecode maps everything to ASCII for safe storage/transport.
        return "\n\n".join([unidecode(text) for text in [exc_text, extra_info, request_repr] if text]) or ""
class ConsoleHandler(BaseHandler):
    """Pretty-print records to stdout (Python 2 ``print`` statements)."""
    def emit(self, record):
        # Frame each record with a bar of '#' characters for visibility.
        line = "\n%s\n" % ("#" * 72)
        print line
        print self.get_name(record)
        print u"User: %s" % self.get_user(record)
        print u"Time: %s" % str(datetime.now())
        print ""
        print self.get_message(record)
        print line
class FileHandler(BaseHandler):
    """Append each record to a per-day UTF-8 text file under CONGO_LOG_ROOT."""
    def emit(self, record):
        now = datetime.now()
        date = now.strftime("%Y-%m-%d")
        time = now.strftime("%Y-%m-%d %H:%M:%S")
        # One log file per calendar day.
        filename = "%s.txt" % date
        file_path = os.path.join(settings.CONGO_LOG_ROOT, filename)
        header = u"### %s \n%s (%s)\n\n" % (time, self.get_name(record), self.get_user(record))
        content = u"%s\n\n" % self.get_message(record)
        try:
            f = open(file_path, 'a')
            f.write(header.encode('utf8'))
            f.write(content.encode('utf8'))
            f.close()
        except:
            # Deliberate best-effort: a logging handler should never raise.
            # NOTE(review): the bare except also hides real errors and ``f``
            # is not closed when a write fails - consider ``with`` plus
            # ``except Exception``.
            pass
class DataBaseHandler(BaseHandler):
    """Persist records through the model named by settings.CONGO_LOG_MODEL."""
    def emit(self, record):
        model_name = settings.CONGO_LOG_MODEL
        if not model_name:
            raise ImproperlyConfigured("In order to use Log model, configure settings.CONGO_LOG_MODEL first.")
        # "app_label.ModelName" -> model class.
        model = apps.get_model(*model_name.split('.', 1))
        try:
            log = model(name = record.name, level = record.levelno, user = self.get_user(record), message = record.getMessage(), args = self.get_message(record))
            log.save()
        except:
            # Best-effort: never let logging crash the request.
            # NOTE(review): bare except silently drops DB errors.
            pass
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text
from django.views.debug import get_exception_reporter_filter
import logging
import os
from unidecode import unidecode
class BaseHandler(logging.Handler):
    """Shared helpers for the congo logging handlers below.

    Builds user, request and extra-info strings from a ``logging.LogRecord``.
    NOTE(review): this module is Python 2 only (``except Exception, e``,
    ``unicode``, ``iteritems`` and ``print`` statements).
    """
    def get_request_repr(self, record):
        """Return a sanitised repr of ``record.request``, or None when absent."""
        try:
            request = record.request
            # The exception reporter filter strips sensitive POST data.
            exception_reporter_filter = get_exception_reporter_filter(request)
            request_repr = force_text(exception_reporter_filter.get_request_repr(request))
        except Exception, e:
            # No request attached to the record (or repr failed): treat as absent.
            request_repr = None
        return request_repr
    def get_user(self, record, default = 'AnonymousUser'):
        """Return the acting user from the record or its request, as unicode."""
        if hasattr(record, 'user'):
            user = record.user
        elif hasattr(record, 'request') and hasattr(record.request, 'user'):
            user = record.request.user
        else:
            user = None
        return unicode(user) if user else unicode(default)
    def get_extra_info(self, record):
        """Render optional ``record.extra_info`` (sequence, mapping or scalar) as text."""
        if hasattr(record, 'extra_info'):
            if type(record.extra_info) in (tuple, list):
                extra_info = "\n".join([unicode(obj) for obj in record.extra_info])
            elif type(record.extra_info) in (dict, SortedDict):
                extra_info = ""
                # A 'list' entry is rendered first, then the remaining pairs.
                # NOTE(review): ``pop`` mutates the caller's dict.
                if 'list' in record.extra_info:
                    extra_info = "\n".join(record.extra_info.pop('list')) + "\n\n"
                extra_info += "\n".join(["%s: %s" % (key, val) for key, val in record.extra_info.iteritems()])
            else:
                extra_info = unicode(record.extra_info)
        else:
            extra_info = None
        return extra_info
    def get_name(self, record):
        """Return '[LEVEL] message' for the record."""
        return u"[%s] %s" % (record.levelname, record.getMessage())
    def get_message(self, record):
        """Return exception text, extra info and request repr joined by blank lines."""
        # format() populates record.exc_text when exc_info is present.
        self.format(record)
        exc_text = getattr(record, 'exc_text', None)
        extra_info = self.get_extra_info(record)
        request_repr = self.get_request_repr(record)
        # unidecode maps everything to ASCII for safe storage/transport.
        return "\n\n".join([unidecode(text) for text in [exc_text, extra_info, request_repr] if text]) or ""
class ConsoleHandler(BaseHandler):
    """Pretty-print records to stdout (Python 2 ``print`` statements)."""
    def emit(self, record):
        # Frame each record with a bar of '#' characters for visibility.
        line = "\n%s\n" % ("#" * 72)
        print line
        print self.get_name(record)
        print u"User: %s" % self.get_user(record)
        print u"Time: %s" % str(datetime.now())
        print ""
        print self.get_message(record)
        print line
class FileHandler(BaseHandler):
    """Append each record to a per-day UTF-8 text file under CONGO_LOG_ROOT."""
    def emit(self, record):
        now = datetime.now()
        date = now.strftime("%Y-%m-%d")
        time = now.strftime("%Y-%m-%d %H:%M:%S")
        # One log file per calendar day.
        filename = "%s.txt" % date
        file_path = os.path.join(settings.CONGO_LOG_ROOT, filename)
        header = u"### %s \n%s (%s)\n\n" % (time, self.get_name(record), self.get_user(record))
        content = u"%s\n\n" % self.get_message(record)
        try:
            f = open(file_path, 'a')
            f.write(header.encode('utf8'))
            f.write(content.encode('utf8'))
            f.close()
        except:
            # Deliberate best-effort: a logging handler should never raise.
            # NOTE(review): the bare except also hides real errors and ``f``
            # is not closed when a write fails - consider ``with`` plus
            # ``except Exception``.
            pass
class DataBaseHandler(BaseHandler):
    """Persist records through the model named by settings.CONGO_LOG_MODEL."""
    def emit(self, record):
        model_name = settings.CONGO_LOG_MODEL
        if not model_name:
            raise ImproperlyConfigured("In order to use Log model, configure settings.CONGO_LOG_MODEL first.")
        # "app_label.ModelName" -> model class.
        model = apps.get_model(*model_name.split('.', 1))
        try:
            log = model(name = record.name, level = record.levelno, user = self.get_user(record), message = record.getMessage(), args = self.get_message(record))
            log.save()
        except:
            # Best-effort: never let logging crash the request.
            # NOTE(review): bare except silently drops DB errors.
            pass
import bpy
class OrderByX():
    """Rename matching Blender objects with ascending suffixes ordered by X.

    Scans every object in the scene whose name starts with ``name``,
    re-suffixes them ``...000``, ``...001``, ... in order of increasing X
    coordinate of their origins, then snaps every origin onto a fixed Y
    position. Could easily be reworked into a renaming script by changing
    the prefix written back (e.g. find "button*" and rename to "key*").
    """

    def __init__(self):
        name = "CDPKey."  # Prefix of the objects to find; any suffix is allowed.
        name_len = len(name)

        # Collect (object, x-position-of-origin) pairs for every match.
        matches = []
        for obj in bpy.data.objects:
            if obj.name[:name_len] == name:
                matches.append((obj, obj.location[0]))

        # Sort from -X to +X (based on object origin).
        matches.sort(key=lambda pair: pair[1])

        # Rename with a zero-padded, three-digit suffix (000, 001, ...).
        for index, (obj, _x) in enumerate(matches):
            obj.name = name + ("00" + str(index))[-3:]

        # Align all object origins on the target Y coordinate.
        # (The original also computed an unused ``distanceFromY`` delta;
        # the assignment below snaps Y directly, so it was dead code.)
        target_y = 2.8 / 100
        for obj, _x in matches:
            loc = obj.location
            obj.location = [loc[0], target_y, loc[2]]


OrderByX()
class OrderByX():
    def __init__(self):
        """
        This will search all of the objects in the room, and apply a suffix to objects with a specified name in ascending order, E.G name.000, name.001.
        This order is determined by their x coordinate (lower number = lower suffix), so things will need to be ordered along the x axis.
        This could also be reworked quite easily to change the starting name of the object, essentially turning this code into a renaming script:
        E.G find all objects with "button" at the start of their name, and change them to "key"
        """
        name = "CDPKey." # The name of the objects you want to find.
        # # There should be nothing before the name, but the suffix doesn't matter
        nameLen = len(name) # Length of the name, will be used to check if an object should have its name modified.
        allObjects = bpy.data.objects
        position = lambda sort: sort[1] # In the 2D array, return the second entry, which is the position in the X axis.
        # Also, my IDE telling me "do not assign a lambda expression just use a def lol" is going to make me explode with anger heck you PEP 8 I want readability
        objectNum = len(allObjects)
        sortList = [] # Build the list to be the max amount of objects. This will always leave unused values, but will be easier on the CPU.
        j = 0
        for i in range(objectNum):
            doesObjectNameMatch = allObjects[i].name[:nameLen] == name # This will only impact objects that start with the name variable, above.
            if(doesObjectNameMatch): # If this is one of the key objects designed to be ordered...
                # Each entry is a two-element list: [object, origin x coordinate].
                sortList.append([])
                sortList[j].append(allObjects[i])
                sortList[j].append(allObjects[i].location[0])
                j += 1
        # Sort into order, from -x to +x (Based on object origin)
        sortList.sort(key = position) # Sort based on the X. Higher (positive) X will return a higher position.
        toSortNumber = len(sortList)
        # Rename all of the objects based on the sorted list.
        for i in range(toSortNumber):
            # Zero-pad the index to three digits via the slice below.
            number = "00" + str(i)
            sortList[i][0].name = name + number[-3:]
        # Align all object origins along the Y axis.
        targetYPosition = 2.8 / 100
        for i in range(toSortNumber):
            obj = sortList[i][0]
            objectPosition = obj.location
            # NOTE(review): distanceFromY is computed but never used; the
            # assignment below snaps Y directly.
            distanceFromY = targetYPosition - objectPosition[1]
            obj.location = ([objectPosition[0], targetYPosition, objectPosition[2]]) # Move the whole object (origin) to the desired spot.
OrderByX()
from bs4 import BeautifulSoup
import pytest
def test_home_page(test_client):
    """Anonymous GET / must return 200 and show every public nav label."""
    response = test_client.get('/')
    assert response.status_code == 200
    for label in ('RCS Gugulethu AC', 'Home', 'Search Runner',
                  'Search Race', 'Predict Race Time', 'Login'):
        assert label.encode() in response.data
def test_valid_login_logout(test_client, init_database):
    """A valid login shows the signed-in nav; logout restores the Login link."""
    credentials = {'email': '<EMAIL>',
                   'password': '<PASSWORD>',
                   'follow_redirects': True}
    response = test_client.post('/login', data=credentials, follow_redirects=True)
    for label in ('Welcome, Some User', 'Home', 'Search Runner',
                  'Search Race', 'Predict Race Time', 'Logout'):
        assert label.encode() in response.data
    response = test_client.get('/logout', follow_redirects=True)
    assert response.status_code == 200
    assert b'Login' in response.data
@pytest.mark.parametrize('name', ['<NAME>', '<NAME>'])
def test_runner_race_search(test_client, init_database, name):
    """Every row returned for a runner-name search names the same runner."""
    payload = {'select': 'Runner Name', 'search': f'{name}'}
    response = test_client.post('/runner_race_search',
                                data=payload,
                                follow_redirects=True)
    assert response.status_code == 200
    assert b'Search Results - GUGS DB' in response.data
    soup = BeautifulSoup(response.data, 'html.parser')
    # Column 1 of each result row holds the runner name.
    rows = soup.find_all('table')[0].tbody.find_all('tr')
    result_names = [row.find_all('td')[1].contents[0] for row in rows]
    # All equal <=> at most one distinct value (vacuously true when empty).
    assert len(set(result_names)) <= 1
# NOTE: 'gender' is parametrized below; 'n' could be parametrized as well.
@pytest.mark.parametrize('gender', ['male', 'female'])
def test_top_runners(test_client, init_database, gender):
    """A top-N search must return exactly N result rows for each gender."""
    payload = {'select': f'{gender}', 'search': 'peninsula', 'n': 10}
    response = test_client.post('/top_runners_search',
                                data=payload,
                                follow_redirects=True)
    assert response.status_code == 200
    assert b'Search Results - GUGS DB' in response.data
    soup = BeautifulSoup(response.data, 'html.parser')
    rows = soup.find_all('table')[0].tbody.find_all('tr')
    assert len(rows) == payload['n']
@pytest.mark.parametrize('name', ['<NAME>', '<NAME>'])
def test_prediction(test_client, init_database, name):
    """The prediction page must echo 'results for <name>' (case-insensitive)."""
    response = test_client.post('/predict', data={'search': name},
                                follow_redirects=True)
    assert response.status_code == 200
    expected = 'results for {}'.format(name.lower()).encode()
    assert expected in response.data.lower()
import pytest
def test_home_page(test_client):
    """Anonymous GET / must return 200 and show every public nav label."""
    response = test_client.get('/')
    assert response.status_code == 200
    text_list = ['RCS Gugulethu AC', 'Home', 'Search Runner',
                 'Search Race', 'Predict Race Time', 'Login']
    text_list_bytes = [str.encode(x) for x in text_list]
    assert all(x in response.data for x in text_list_bytes)
def test_valid_login_logout(test_client, init_database):
    """POST valid credentials, check the signed-in nav, then log out."""
    data = {'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'follow_redirects': True}
    response = test_client.post('/login', data=data, follow_redirects=True)
    text_list = ['Welcome, Some User', 'Home', 'Search Runner',
                 'Search Race', 'Predict Race Time', 'Logout']
    text_list_bytes = [str.encode(x) for x in text_list]
    assert all(x in response.data for x in text_list_bytes)
    response = test_client.get('/logout', follow_redirects=True)
    assert response.status_code == 200
    assert b'Login' in response.data
@pytest.mark.parametrize('name', ['<NAME>', '<NAME>'])
def test_runner_race_search(test_client, init_database, name):
    """Every row returned for a runner-name search must name the same runner."""
    data = {'select': 'Runner Name', 'search': f'{name}'}
    response = test_client.post('/runner_race_search',
                                data=data,
                                follow_redirects=True)
    assert response.status_code == 200
    assert b'Search Results - GUGS DB' in response.data
    result_names = []
    soup = BeautifulSoup(response.data, 'html.parser')
    # Column 1 of each result row holds the runner name.
    for row in soup.findAll('table')[0].tbody.findAll('tr'):
        result_names.append(row.findAll('td')[1].contents[0])
    # Vacuously true when the table is empty (the generator never runs).
    assert all(x == result_names[0] for x in result_names)
# NOTE: 'gender' is already parametrized below; the remaining TODO would be
# parametrizing 'n' as well.
@pytest.mark.parametrize('gender', ['male', 'female'])
def test_top_runners(test_client, init_database, gender):
    """A top-N search must return exactly N result rows for each gender."""
    data = {'select': f'{gender}', 'search': 'peninsula', 'n': 10}
    response = test_client.post('/top_runners_search',
                                data=data,
                                follow_redirects=True)
    assert response.status_code == 200
    assert b'Search Results - GUGS DB' in response.data
    soup = BeautifulSoup(response.data, 'html.parser')
    assert len(soup.findAll('table')[0].tbody.findAll('tr')) == data['n']
@pytest.mark.parametrize('name', ['<NAME>', '<NAME>'])
def test_prediction(test_client, init_database, name):
    """The prediction page must echo 'results for <name>' (case-insensitive)."""
    data = {'search': name}
    response = test_client.post('/predict', data=data, follow_redirects=True)
    assert response.status_code == 200
    assert str.encode('results for {}'.format(name.lower())) in response.data.lower()
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import argparse
from typing import List
from json import JSONEncoder, dumps
from wikipediaapi import Wikipedia
class Paragraph:
    """A single context passage plus its (initially empty) QA entries."""

    def __init__(self, context: str):
        """Store the raw paragraph text; question/answer pairs start empty."""
        self.qas = []
        self.context = context
class Article:
    """A Wikipedia article: title, revision id (oldid) and wrapped paragraphs."""

    def __init__(self,
                 title: str,
                 paragraphs: List[str],
                 oldid: str):
        """Wrap every raw paragraph string in a :class:`Paragraph`."""
        self.title = title
        self.oldid = oldid
        self.paragraphs = list(map(Paragraph, paragraphs))
class Dataset:
    """Top-level SQuAD-style container, serialisable to JSON."""

    class CustomEncoder(JSONEncoder):
        """Serialise arbitrary objects through their ``__dict__``."""

        def default(self, o):
            return o.__dict__

    def __init__(self,
                 articles: List[Article],
                 version: str = 'frenchqa_1.0'):
        self.version = version
        self.data = articles

    def to_json(self):
        """Render the whole dataset as a pretty-printed JSON string."""
        return dumps(self, cls=self.CustomEncoder, indent=4)
def check_number_paragraphs(article_stats, min_len_paragraphs=500, max_len_paragraphs=1000):
    """Return the lengths of this article's paragraphs that fall in
    [min_len_paragraphs, max_len_paragraphs), or [] when the whole article
    is shorter than min_len_paragraphs."""
    if article_stats['total_text_length'] < min_len_paragraphs:
        return []
    # Summary paragraphs first, then every section paragraph.  The
    # per-section nesting only existed to study lengths section by section,
    # so it is flattened here.
    lengths = list(article_stats['paragraph_length_by_summary'])
    for section_lengths in article_stats['paragraph_length_by_sections']:
        lengths.extend(section_lengths)
    return [n for n in lengths if min_len_paragraphs <= n < max_len_paragraphs]
def get_number_paragraphs(stats_all_articles, min_len_paragraphs=500):
    """For every article, count how many paragraphs pass the length filter."""
    return [len(check_number_paragraphs(article, min_len_paragraphs))
            for article in stats_all_articles.values()]
def compute_min_len_paras_on_dic(article_stats, min_len_paragraphs=500, max_len_paragraphs=1000):
    """Attach the filtered paragraph lengths under the 'paras' key (mutates
    and returns the same dict)."""
    paras = check_number_paragraphs(article_stats, min_len_paragraphs, max_len_paragraphs)
    article_stats['paras'] = paras
    return article_stats
def filter_article_by_categories(article_stats, draft=False, homonym=False):
    """Return True when the article's category flags equal the requested ones.

    Articles whose stats lack the category keys (KeyError), or whose stats
    entry is not a dict at all, e.g. the 'SectionError' marker string
    (TypeError), are rejected.  The original bare ``except:`` swallowed every
    exception, including KeyboardInterrupt/SystemExit; only the two expected
    failure modes are caught now.
    """
    try:
        return (article_stats['homonym_in_category'] == homonym
                and article_stats['draft_in_category'] == draft)
    except (KeyError, TypeError):
        return False
def filter_dic(stats, min_len_paragraphs=500, draft=False, homonym=False, max_len_paragraphs=1000):
    """Map filename -> filtered paragraph lengths, keeping only articles whose
    category flags match the requested draft/homonym values."""
    result = {}
    for fn, article_stats in stats.items():
        if filter_article_by_categories(article_stats, draft, homonym):
            result[fn] = check_number_paragraphs(article_stats, min_len_paragraphs,
                                                 max_len_paragraphs)
    return result
def print_para_if_max(stats, max_para_len=8000):
    """Debug helper: print `filename length` for every paragraph whose length
    exceeds max_para_len."""
    for fn, para_lengths in stats.items():
        for length in para_lengths:
            if length > max_para_len:
                print(fn, length)
def filter_min_paras(stats_with_para_len, min_nb_paras):
    """Keep only the articles that still have at least min_nb_paras
    paragraphs after length filtering."""
    return {fn: paras for fn, paras in stats_with_para_len.items()
            if len(paras) >= min_nb_paras}
def get_section_text(section, level=1):
    """Recursively concatenate the text of all sub-sections of `section`.

    Note: `section.text` itself is NOT included, only its descendants';
    `level` is merely threaded through the recursion.
    """
    parts = []
    for sub in section.sections:
        parts.append(sub.text)
        parts.append('\n' + get_section_text(sub, level + 1))
    return ''.join(parts)
def filter_years_articles(page_pkl_fn):
    """Return False for 'year' articles, True otherwise.

    A year article (e.g. the page for "1982") is recognized by having a
    top-level section titled 'Événements'.
    """
    with open(page_pkl_fn, 'rb') as f:
        page = pkl.load(f)
    return all(section.title != 'Événements' for section in page.sections)
def get_section_paragraphs_text(page_pkl_fn, min_len_para=500, max_len_para=1000, wiki_path=None, html_path=None):
    """Collect all paragraphs of a pickled Wikipedia page whose length lies
    in [min_len_para, max_len_para).

    Paragraphs come from the page summary plus every top-level section up to
    (but not including) the first appendix-style section.  When `html_path`
    is given, the matching pickled HTML page is used to skip section text
    that contains HTML lists ('<li>'), which would yield noisy "paragraphs".

    :param page_pkl_fn: file name of the pickled wikipediaapi page.
    :param wiki_path: directory holding the plain-text page pickles
        (treated as '' when None).
    :param html_path: directory holding the HTML page pickles, or None to
        disable list detection.
    :return: list of paragraph strings.
    """
    if wiki_path is None:
        wiki_path = ''
    with open(wiki_path + '/' + page_pkl_fn, 'rb') as f:
        page = pkl.load(f)
    if html_path is not None:
        # Assumes the HTML pickle has a section list parallel to the text
        # pickle's (indexed with the same i below) — TODO confirm.
        with open(html_path + '/' + page_pkl_fn, 'rb') as f:
            page_html = pkl.load(f)
    # Summary paragraphs that satisfy the length window.
    paragraphs = [paragraph for paragraph in page.summary.split('\n') if
                  len(paragraph) >= min_len_para and len(paragraph) < max_len_para]
    for i, section in enumerate(page.sections):
        # Appendix sections (see also / related articles / external links /
        # references) mark the end of useful content: stop there.
        if section.title in ['Voir aussi', 'Articles connexes', 'Liens externes', 'Notes et références']:
            break
        # We check if the section contains lists
        if html_path is not None and '<li>' in page_html.sections[i].text:
            # Top-level section text is a list: drop it, but still gather the
            # HTML of its sub-sections to test them below.
            current_section_text = ''
            new_html_section_text = get_section_text(page_html.sections[i])
        else:
            current_section_text = section.text + '\n'
            new_html_section_text = current_section_text
        # NOTE(review): when html_path is None this always takes the else
        # branch, so sub-section lists are only filtered in HTML mode.
        if html_path is not None and '<li>' in new_html_section_text:
            new_section_text = ''
        else:
            new_section_text = get_section_text(section)
        section_text = current_section_text + new_section_text
        for paragraph in section_text.split('\n'):
            if len(paragraph) >= min_len_para and len(paragraph) < max_len_para:
                paragraphs.append(paragraph)
    return paragraphs
def get_filtered_complete_dic(pkl_with_stats_fn, min_paragraphs=5, min_len_paragraphs=500, max_len_paragraphs=1000,
                              draft=False, homonym=False, years=False, wiki_path=None, clean_duplicates=False):
    """Load the per-article stats pickle and return {filename: paragraph
    lengths} for articles surviving every filter.

    Filters applied in order: 'SectionError' entries dropped; category flags
    (draft/homonym) must match; paragraph lengths restricted to
    [min_len_paragraphs, max_len_paragraphs); articles must keep at least
    min_paragraphs paragraphs; optionally duplicates are renamed via the live
    Wikipedia API; optionally 'year' articles are removed (when years=False).

    :param pkl_with_stats_fn: pickle of {filename: stats-dict or 'SectionError'}.
    :param wiki_path: directory of page pickles (needed for duplicate
        cleaning and for year filtering by file content).
    :return: filtered {filename: list of paragraph lengths} dict, or None
        when duplicate cleaning is requested without a wiki_path.
    """
    with open(pkl_with_stats_fn, 'rb') as f:
        stats_uncleaned = pkl.load(f)
    # We filter out the sections errors
    stats = {key: stats_uncleaned[key] for key in stats_uncleaned if stats_uncleaned[key] != 'SectionError'}
    filtered_stats = filter_dic(stats, min_len_paragraphs=min_len_paragraphs, draft=draft, homonym=homonym,
                                max_len_paragraphs=max_len_paragraphs)
    filtered_stats = filter_min_paras(filtered_stats, min_paragraphs)
    # We filter the years
    if clean_duplicates:
        if wiki_path is None:
            print("Error : give a wikipath for duplicates cleaning")
            return
        # Re-key each surviving file under its canonical Wikipedia title so
        # duplicate redirects collapse onto one entry.  Requires network
        # access through wikipediaapi.
        new_ft_stats = {}
        wiki_obj = Wikipedia('fr')
        # NOTE(review): the loop variable `stats` shadows the outer `stats`
        # dict above; harmless here since the outer one is no longer used,
        # but fragile.
        for filename, stats in filtered_stats.items():
            try:
                with open(wiki_path + '/' + filename, 'rb') as f:
                    page = pkl.load(f)
            except FileNotFoundError:
                print("Not found :" + filename)
                continue
            page_info = wiki_obj.info(page)
            # `title` is assigned but never read — kept for byte-identity.
            new_title = title = page_info.title
            new_title = new_title.replace(' ', '_')
            new_title += '.pkl'
            new_ft_stats[new_title] = stats
        filtered_stats = new_ft_stats
    if not years:
        print("Length before year fitering :", len(filtered_stats))
        if wiki_path is None:
            filtered_stats = {filename: filtered_stats[filename] for filename in filtered_stats if
                              filter_years_articles(filename)}
        else:
            # NOTE(review): joins without '/' unlike the other path joins in
            # this file — presumably wiki_path ends with a slash here; verify
            # against callers.
            filtered_stats = {filename: filtered_stats[filename] for filename in filtered_stats if
                              filter_years_articles(wiki_path + filename)}
    print("Final length : ", len(filtered_stats))
    return filtered_stats
def main():
    """CLI entry point: filter the article stats, rebuild each surviving
    article's paragraphs from its page pickles, and dump the result as a
    frenchqa-format JSON dataset."""
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--pkl_stats_dic_fn", default=None, type=str, required=True,
                        help="Pkl file where the stats are already dumped")
    parser.add_argument("--output_json_article_fn", default=None, type=str, required=True,
                        help="output_json_article_fn")
    parser.add_argument("--min_paragraphs", default=5, type=int, required=False,
                        help="Minimum number of paragraphs per article")
    parser.add_argument("--min_len_paragraphs", default=500, type=int, required=False,
                        help="Minimum len of paragraphs")
    parser.add_argument("--max_len_paragraphs", default=1000, type=int, required=False,
                        help="Max len of paragraphs")
    parser.add_argument("--nb_articles_to_print", default=None, type=int, required=False,
                        help="Number of articles to print if output_json_article_fn is not None")
    parser.add_argument("--wiki_path", default=None, type=str, required=True,
                        help="Path to where the wiki pages are saved")
    parser.add_argument("--html_path", default=None, type=str, required=True,
                        help="Path to where the html pages are saved")
    args = parser.parse_args()
    # years=True keeps year articles; duplicates are assumed already cleaned.
    stats = get_filtered_complete_dic(args.pkl_stats_dic_fn, min_paragraphs=args.min_paragraphs,
                                      min_len_paragraphs=args.min_len_paragraphs,
                                      max_len_paragraphs=args.max_len_paragraphs, draft=False, homonym=False,
                                      years=True, wiki_path=args.wiki_path, clean_duplicates=False)
    if args.output_json_article_fn is not None:
        # Shuffle so an optional size cap samples articles at random.
        articles_filename = list(stats.keys())
        random.shuffle(articles_filename)
        if args.nb_articles_to_print is not None:
            articles_filename = articles_filename[:args.nb_articles_to_print]
        articles_list = []
        for article_fn in tqdm(articles_filename):
            try:
                paragraphs = get_section_paragraphs_text(article_fn, min_len_para=args.min_len_paragraphs,
                                                         max_len_para=args.max_len_paragraphs, wiki_path=args.wiki_path,
                                                         html_path=args.html_path)
                filename = article_fn.split('/')[-1]
            except FileNotFoundError:
                continue
            # File may have been deleted already because it was a duplicate
            # Reload the page only to read its revision id (lastrevid).
            with open(args.wiki_path + '/' + filename, 'rb') as f:
                page = pkl.load(f)
            # Turn 'Some_Title.pkl' back into the display title 'Some Title'.
            filename = filename.replace('_', ' ')
            filename = filename.replace('.pkl', '')
            articles_list.append(Article(filename, paragraphs, oldid=str(page.lastrevid)))
        dataset = Dataset(articles_list)
        with open(args.output_json_article_fn, 'w') as f:
            f.write(dataset.to_json())
if __name__ == "__main__":
    # The call line was fused with dataset-extraction residue
    # ("| wiki-preparation/... | import pickle as pkl"); restored here.
    main()
import numpy as np
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import argparse
from typing import List
from json import JSONEncoder, dumps
from wikipediaapi import Wikipedia
class Paragraph:
def __init__(self,
context: str):
self.context = context
self.qas = []
class Article:
def __init__(self,
title: str,
paragraphs: List[str],
oldid: str):
self.title = title
self.paragraphs = [Paragraph(p) for p in paragraphs]
self.oldid = oldid
class Dataset:
class CustomEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
def __init__(self,
articles: List[Article],
version: str = 'frenchqa_1.0'):
self.data = articles
self.version = version
def to_json(self):
return dumps(self, indent=4, cls=self.CustomEncoder)
def check_number_paragraphs(article_stats, min_len_paragraphs=500, max_len_paragraphs=1000):
if article_stats['total_text_length'] < min_len_paragraphs:
return []
# We do flatten the para by section list that is composed of a list of section, and a section being a list
# of paragraphs. This list by section was done this way to also study context length by section
flatten_section = [para for section in article_stats['paragraph_length_by_sections'] for para in section]
all_paras = article_stats['paragraph_length_by_summary'] + flatten_section
all_paras_filtered = [para for para in all_paras if para >= min_len_paragraphs and para < max_len_paragraphs]
return all_paras_filtered
def get_number_paragraphs(stats_all_articles, min_len_paragraphs=500):
nb_paragraphs = [len(check_number_paragraphs(article_stats, min_len_paragraphs)) for article_stats in
stats_all_articles.values()]
return nb_paragraphs
def compute_min_len_paras_on_dic(article_stats, min_len_paragraphs=500, max_len_paragraphs=1000):
article_stats['paras'] = check_number_paragraphs(article_stats, min_len_paragraphs, max_len_paragraphs)
return article_stats
def filter_article_by_categories(article_stats, draft=False, homonym=False):
    """Return True when the article's category flags equal the requested ones.

    Articles whose stats lack the category keys (KeyError), or whose stats
    entry is not a dict at all, e.g. the 'SectionError' marker string
    (TypeError), are rejected.  The original bare ``except:`` swallowed every
    exception, including KeyboardInterrupt/SystemExit; only the two expected
    failure modes are caught now.
    """
    try:
        return (article_stats['homonym_in_category'] == homonym
                and article_stats['draft_in_category'] == draft)
    except (KeyError, TypeError):
        return False
def filter_dic(stats, min_len_paragraphs=500, draft=False, homonym=False, max_len_paragraphs=1000):
filtered_dic = {filename: check_number_paragraphs(stats[filename], min_len_paragraphs, max_len_paragraphs) for
filename in stats if filter_article_by_categories(stats[filename], draft, homonym)}
return filtered_dic
def print_para_if_max(stats, max_para_len=8000):
for filename in stats:
for para in stats[filename]:
if para > max_para_len:
print(filename, para)
def filter_min_paras(stats_with_para_len, min_nb_paras):
filtered_dic = {filename: stats_with_para_len[filename] for filename in stats_with_para_len if
len(stats_with_para_len[filename]) >= min_nb_paras}
return filtered_dic
def get_section_text(section, level=1):
s_text = ''
for s in section.sections:
s_text += s.text
s_text += '\n' + get_section_text(s, level + 1)
return s_text
def filter_years_articles(page_pkl_fn):
# If 'Evenements' is in sections title, then it means it is a year article.
with open(page_pkl_fn, 'rb') as f:
page = pkl.load(f)
for section in page.sections:
if section.title in ['Événements']:
return False
return True
def get_section_paragraphs_text(page_pkl_fn, min_len_para=500, max_len_para=1000, wiki_path=None, html_path=None):
if wiki_path is None:
wiki_path = ''
with open(wiki_path + '/' + page_pkl_fn, 'rb') as f:
page = pkl.load(f)
if html_path is not None:
with open(html_path + '/' + page_pkl_fn, 'rb') as f:
page_html = pkl.load(f)
paragraphs = [paragraph for paragraph in page.summary.split('\n') if
len(paragraph) >= min_len_para and len(paragraph) < max_len_para]
for i, section in enumerate(page.sections):
if section.title in ['Voir aussi', 'Articles connexes', 'Liens externes', 'Notes et références']:
break
# We check if the section contains lists
if html_path is not None and '<li>' in page_html.sections[i].text:
current_section_text = ''
new_html_section_text = get_section_text(page_html.sections[i])
else:
current_section_text = section.text + '\n'
new_html_section_text = current_section_text
if html_path is not None and '<li>' in new_html_section_text:
new_section_text = ''
else:
new_section_text = get_section_text(section)
section_text = current_section_text + new_section_text
for paragraph in section_text.split('\n'):
if len(paragraph) >= min_len_para and len(paragraph) < max_len_para:
paragraphs.append(paragraph)
return paragraphs
def get_filtered_complete_dic(pkl_with_stats_fn, min_paragraphs=5, min_len_paragraphs=500, max_len_paragraphs=1000,
draft=False, homonym=False, years=False, wiki_path=None, clean_duplicates=False):
with open(pkl_with_stats_fn, 'rb') as f:
stats_uncleaned = pkl.load(f)
# We filter out the sections errors
stats = {key: stats_uncleaned[key] for key in stats_uncleaned if stats_uncleaned[key] != 'SectionError'}
filtered_stats = filter_dic(stats, min_len_paragraphs=min_len_paragraphs, draft=draft, homonym=homonym,
max_len_paragraphs=max_len_paragraphs)
filtered_stats = filter_min_paras(filtered_stats, min_paragraphs)
# We filter the years
if clean_duplicates:
if wiki_path is None:
print("Error : give a wikipath for duplicates cleaning")
return
new_ft_stats = {}
wiki_obj = Wikipedia('fr')
for filename, stats in filtered_stats.items():
try:
with open(wiki_path + '/' + filename, 'rb') as f:
page = pkl.load(f)
except FileNotFoundError:
print("Not found :" + filename)
continue
page_info = wiki_obj.info(page)
new_title = title = page_info.title
new_title = new_title.replace(' ', '_')
new_title += '.pkl'
new_ft_stats[new_title] = stats
filtered_stats = new_ft_stats
if not years:
print("Length before year fitering :", len(filtered_stats))
if wiki_path is None:
filtered_stats = {filename: filtered_stats[filename] for filename in filtered_stats if
filter_years_articles(filename)}
else:
filtered_stats = {filename: filtered_stats[filename] for filename in filtered_stats if
filter_years_articles(wiki_path + filename)}
print("Final length : ", len(filtered_stats))
return filtered_stats
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--pkl_stats_dic_fn", default=None, type=str, required=True,
help="Pkl file where the stats are already dumped")
parser.add_argument("--output_json_article_fn", default=None, type=str, required=True,
help="output_json_article_fn")
parser.add_argument("--min_paragraphs", default=5, type=int, required=False,
help="Minimum number of paragraphs per article")
parser.add_argument("--min_len_paragraphs", default=500, type=int, required=False,
help="Minimum len of paragraphs")
parser.add_argument("--max_len_paragraphs", default=1000, type=int, required=False,
help="Max len of paragraphs")
parser.add_argument("--nb_articles_to_print", default=None, type=int, required=False,
help="Number of articles to print if output_json_article_fn is not None")
parser.add_argument("--wiki_path", default=None, type=str, required=True,
help="Path to where the wiki pages are saved")
parser.add_argument("--html_path", default=None, type=str, required=True,
help="Path to where the html pages are saved")
args = parser.parse_args()
stats = get_filtered_complete_dic(args.pkl_stats_dic_fn, min_paragraphs=args.min_paragraphs,
min_len_paragraphs=args.min_len_paragraphs,
max_len_paragraphs=args.max_len_paragraphs, draft=False, homonym=False,
years=True, wiki_path=args.wiki_path, clean_duplicates=False)
if args.output_json_article_fn is not None:
articles_filename = list(stats.keys())
random.shuffle(articles_filename)
if args.nb_articles_to_print is not None:
articles_filename = articles_filename[:args.nb_articles_to_print]
articles_list = []
for article_fn in tqdm(articles_filename):
try:
paragraphs = get_section_paragraphs_text(article_fn, min_len_para=args.min_len_paragraphs,
max_len_para=args.max_len_paragraphs, wiki_path=args.wiki_path,
html_path=args.html_path)
filename = article_fn.split('/')[-1]
except FileNotFoundError:
continue
# File may have been deleted already because it was a duplicate
with open(args.wiki_path + '/' + filename, 'rb') as f:
page = pkl.load(f)
filename = filename.replace('_', ' ')
filename = filename.replace('.pkl', '')
articles_list.append(Article(filename, paragraphs, oldid=str(page.lastrevid)))
dataset = Dataset(articles_list)
with open(args.output_json_article_fn, 'w') as f:
f.write(dataset.to_json())
if __name__ == "__main__":
    # The call line was fused with dataset-extraction residue
    # ("| 0.473292 | 0.162347 |"); restored here.
    main()
# -*- coding: utf-8 -*-
'''print("请输入不小于10的整数")
n=input()
if n.isdigit()==True:
a=int(n)
print("%d"%(a/10))
else:
print("数据输入错误")'''
'''a=input("请你猜我的名字:")
if a=="xxx":
print("猜对了")
else:
print("猜错了")'''
'''a=input("请你猜我的名字:")
b="猜对了"
c="猜错了"
d=b if a=="xxx" else c
print(d) '''
'''print("猜猜我的薪水")
a=int(input("请输入"))
y="猜少了"
z="猜多了"
b="猜对了" if a=='1993' else if a<'1993' y else z
print(b)'''
'''h=int(input("please enter my height:"))
while h!=170:
if h<170:
print("less")
h=int(input("please re-enter my height:"))
else:
print("more")
h=int(input("please re-enter my height:"))
if h==170:
print("correct")'''
'''h=int(input("please enter my height:"))
while True:
if h<170:
print("less")
h=int(input("please re-enter my height:"))
else:
if h==170:
print("correct")
break
else:
print("more")
h=int(input("please re-enter my height:"))'''
'''array=[1,2,5,3,6,8,4]
for i in range(len(array)-1,0,-1):
print(i)
for j in range(i):
print(j)
if array[j]>array[j+1]:
array[j],array[j+1]=array[j+1],array[j]
print(array)'''
'''s=0
a=1
while a<10000000000:
s=s+a
a=a+1
print(s)'''
'''import random
l=['杨康','俊辉','文师','洁琼','英福','陈锦','远定','周杰','文杰']
print(l[random.randint(0,len(l)-1)])
while True:
pass'''
'''m=[]
arr=[10,2,3,1,5]
while arr:
i=0
o=int(arr[0])
while i<len(arr):
if o>=arr[i]:
i+=1
else:
o=int(arr[i])
m.append(arr.pop(arr.index(o)))
print(m) #可以输出,但非冒泡输出其正确做法'''
'''a=int(input("请输入第一个数字:"))
b=int(input("请输入第二个数字:"))
c=int(input("请输入第三个数字:"))
l=[a,b,c]
while l:
i=0
o=int(l[0])
while i<len(l):
if o<=l[i]:
i+=1
else:
o=int(l[i])
print(l.pop(l.index(o)))'''
'''for i in range(1, 10):
for j in range(1, i+1):
print("%dx%d=%d"%(i,j,i*j),' ',end='')
print()'''
'''for i in range(4):
for j in range(3):
print(j,end='')
print(i)'''
'''arr=[10,2,3,1,5,66,55,22,88,99,101,50,603]
t=0
for j in range(len(arr)-1):
for i in range(len(arr)-1-j):
if arr[i]>arr[i+1]:
t=arr[i]
arr[i]=arr[i+1]
arr[i+1]=t
print(arr)#正确的冒泡输出做法'''
'''def juedu(x):
if x<0:
b=-x
else:
b=x
return b
while True:
a=juedu(int(input()))
print(a)'''
'''def zuida(a,b,c):
if a>=b and a>=c:
return a
elif b>=a and b>=c:
return b
else:
return c
while True:
x=int(input())
y=int(input())
z=int(input())
print("max",zuida(x,y,z))'''
'''import re
line = "Cats are smarter than dogs"
matchObj = re.match( r'(.*) are (.*?) .*', line, re.M|re.I)
if matchObj:
print ("matchObj.group() : ", matchObj.group())
print ("matchObj.group(1) : ", matchObj.group(1))
print ("matchObj.group(2) : ", matchObj.group(2))
else:
print ("No match!!")'''
'''def ctd(d): #0523作业1
dx=xx=n=ot=0
L1='QWERTYUIOPLKJHGFDSAZXCVBNM'
L2='qwertyuioplkjhgfdsazxcvbnm'
L3='0123456789'
for i in d:
if i in L1:
dx+=1
elif i in L2:
xx+=1
elif i in L3:
n+=1
else:
ot+=1 #实现字符识别和统计
return ("大写字母数目",dx),("小写字母数目",xx),("数字数目",n),("其他字符数目",ot)
#元组显示
x=input()
print(ctd(x))'''
'''import random #0523作业2
i=0
m=[]
while i<20:
b=random.randint(0,100)
m.append(b)
i+=1
print(m)
print(len(m))#生成列表
for j in range(9):
for i in range(9):
if m[i]>m[i+1]:
t=m[i]
m[i]=m[i+1]
m[i+1]=t#对前10位数进行升序
for j in range(10,19):
for i in range(10,19):
if m[i]<m[i+1]:
o=m[i]
m[i]=m[i+1]
m[i+1]=o#对后10位数进行降序
print(m)'''
'''def smsort(x,y): #0523作业3
if y==1: #降序识别
for j in range(len(x)-1):
for i in range(len(x)-1-j):
if x[i]<x[i+1]:
t=x[i]
x[i]=x[i+1]
x[i+1]=t
return x
elif y==0: #升序识别
for j in range(len(x)-1):
for i in range(len(x)-1-j):
if x[i]>x[i+1]:
p=x[i]
x[i]=x[i+1]
x[i+1]=p
return x
else:
print("参数定义错误")
print(smsort([1,3,6,5,4,3,9,4,5],1))'''
'''def mult(x,y):
s=x+y
d=x-y
a=x*y
p=x/y
return s,d,a,p
print(mult(9,3))'''
'''def power(x):
return x**3
print(power(5.6))'''
'''def max1(*args):
t=0
for j in range(len(args)-1):
if args[j]>=args[j+1] and args[j]>=t:
t=args[j]
elif args[-1]>=t:
t=args[-1]
print(t)
max1(3,5,15,-3,2,)'''
'''def max2(*arr):
i=0
o=int(arr[0])
while i<len(arr):
if o>=arr[i]:
i+=1
else:
o=int(arr[i])
print(o)
max2(3,5,15,-3,2)'''
'''def max3(*args):
t=args[0]
for j in args:
if j>=t:
t=j
print(t)
max3(3,5,15,-3,2)'''
'''def test():
print(666)
return test()
test()'''
'''def jiec(x):
t=1
while x!=0:
t*=x
x=x-1
return t
print(jiec(5))'''
'''def jiec(x):
a=1
for i in range(1,x+1):
a*=i
print(a)
print(jiec(5))'''
'''import random
w=int(input("guess my weight:"))
w1=random.randint(100,200)
while True:
if w==w1:
print("correct")
print(w1)
break
if w>w1:
print("more")
print(w1)
w=int(input("guess my weight:"))
else:
print("less")
print(w1)
w=int(input("guess my weight:"))'''
'''x = 'iplaypython'
for i in x+5:
print(i)'''
'''n='x'+
print(n)'''
'''a=int()
b=int()
c=int()
d=int()
e=int()
f=int()
y=[a,b,c,d,e,f]
print(y)'''
'''n=input("请输入你的账号:")
p=input("请输入你的密码:")
if n=="test" and p=="mercury":
print("恭喜你登录成功")
else:
print("你的账号或密码错误")'''
'''print("加密程序")
p=input("你输入的明文:")
m=p+5
print("你发出的密文为:%s"%m)'''
'''df=input()
if df=="加密程序":
p=input("你输入的明文:")
l=list(p)
m=[]
for i in range(len(l)):
m.append(chr(ord(l[i])+5))
o=''.join(m)
print("你发出的密文为:",o)
elif df=="解密程序":
p=input("你输入的密文:")
l=list(p)
m=[]
for i in range(len(l)):
m.append(chr(ord(l[i])-5))
o=''.join(m)
print("你发出的明文为:",o)
else:
print("程序选择错误")'''
'''a=input()
print(end='')
for i in a:
print(i)'''
'''a=input("请你猜我的名字:")
while a!="xxx":
print("wrong")
a=input("请你猜我的名字:")
print("correct")'''
'''import urllib.request
url="http://www.baidu.com"
data=urllib.request.urlopen(url).read()
data=data.decode('UTF-8')
print(data)'''
'''import urllib
import urllib.request
data={}
data['word']='Jecvay Notes'
url_values=urllib.parse.urlencode(data)
url="http://www.baidu.com/s?"
full_url=url+url_values
data=urllib.request.urlopen(full_url).read()
data=data.decode('UTF-8')
dt=str(data)
#mydic=open("C:\\Users\\ETC\\Desktop\\1.txt","a+")
#mydic.write("sssss")
def sf(s,p):
f_obj=open(p,'w')
f_obj.write(s)
f_obj.close() #文档备份函数,注意s参数的str输入
sf(dt,'E:\\tmp.txt')'''
'''class human():
def setname(self,name):
self.name=name
def getname(self):
print(self.name)
oo=human()
oo.setname('Joker')
oo.getname()'''
'''for i in range(1,5):
for j in range(1,i+1):
print(i,j)
print()'''
'''import random
ca=random.randint(1,30)
while True:
age=int(input("猜猜我的年龄:"))
if age==ca:
print("猜对了")
break
elif age>ca:
print("猜多了")
else:
print("猜少了")'''
'''a=int(input("输入第一个数:"))
b=int(input("输入第二个数:"))
c=int(input("输入第三个数:"))
d=int(input("输入第四个数:"))
e=int(input("输入第五个数:"))
m=[a,b,c,d,e]
for j in range(len(m)-1):
for i in range(len(m)-1-j):
if m[i]>m[i+1]:
t=m[i]
m[i]=m[i+1]
m[i+1]=t
print(m)'''
'''print("请输入不小于10的整数")
n=input()
if n.isdigit()==True:
a=int(n)
print("%d"%(a/10))
else:
print("数据输入错误")'''
'''a=input("请你猜我的名字:")
if a=="xxx":
print("猜对了")
else:
print("猜错了")'''
'''a=input("请你猜我的名字:")
b="猜对了"
c="猜错了"
d=b if a=="xxx" else c
print(d) '''
'''print("猜猜我的薪水")
a=int(input("请输入"))
y="猜少了"
z="猜多了"
b="猜对了" if a=='1993' else if a<'1993' y else z
print(b)'''
'''h=int(input("please enter my height:"))
while h!=170:
if h<170:
print("less")
h=int(input("please re-enter my height:"))
else:
print("more")
h=int(input("please re-enter my height:"))
if h==170:
print("correct")'''
'''h=int(input("please enter my height:"))
while True:
if h<170:
print("less")
h=int(input("please re-enter my height:"))
else:
if h==170:
print("correct")
break
else:
print("more")
h=int(input("please re-enter my height:"))'''
'''array=[1,2,5,3,6,8,4]
for i in range(len(array)-1,0,-1):
print(i)
for j in range(i):
print(j)
if array[j]>array[j+1]:
array[j],array[j+1]=array[j+1],array[j]
print(array)'''
'''s=0
a=1
while a<10000000000:
s=s+a
a=a+1
print(s)'''
'''import random
l=['杨康','俊辉','文师','洁琼','英福','陈锦','远定','周杰','文杰']
print(l[random.randint(0,len(l)-1)])
while True:
pass'''
'''m=[]
arr=[10,2,3,1,5]
while arr:
i=0
o=int(arr[0])
while i<len(arr):
if o>=arr[i]:
i+=1
else:
o=int(arr[i])
m.append(arr.pop(arr.index(o)))
print(m) #可以输出,但非冒泡输出其正确做法'''
'''a=int(input("请输入第一个数字:"))
b=int(input("请输入第二个数字:"))
c=int(input("请输入第三个数字:"))
l=[a,b,c]
while l:
i=0
o=int(l[0])
while i<len(l):
if o<=l[i]:
i+=1
else:
o=int(l[i])
print(l.pop(l.index(o)))'''
'''for i in range(1, 10):
for j in range(1, i+1):
print("%dx%d=%d"%(i,j,i*j),' ',end='')
print()'''
'''for i in range(4):
for j in range(3):
print(j,end='')
print(i)'''
'''arr=[10,2,3,1,5,66,55,22,88,99,101,50,603]
t=0
for j in range(len(arr)-1):
for i in range(len(arr)-1-j):
if arr[i]>arr[i+1]:
t=arr[i]
arr[i]=arr[i+1]
arr[i+1]=t
print(arr)#正确的冒泡输出做法'''
'''def juedu(x):
if x<0:
b=-x
else:
b=x
return b
while True:
a=juedu(int(input()))
print(a)'''
'''def zuida(a,b,c):
if a>=b and a>=c:
return a
elif b>=a and b>=c:
return b
else:
return c
while True:
x=int(input())
y=int(input())
z=int(input())
print("max",zuida(x,y,z))'''
'''import re
line = "Cats are smarter than dogs"
matchObj = re.match( r'(.*) are (.*?) .*', line, re.M|re.I)
if matchObj:
print ("matchObj.group() : ", matchObj.group())
print ("matchObj.group(1) : ", matchObj.group(1))
print ("matchObj.group(2) : ", matchObj.group(2))
else:
print ("No match!!")'''
'''def ctd(d): #0523作业1
dx=xx=n=ot=0
L1='QWERTYUIOPLKJHGFDSAZXCVBNM'
L2='qwertyuioplkjhgfdsazxcvbnm'
L3='0123456789'
for i in d:
if i in L1:
dx+=1
elif i in L2:
xx+=1
elif i in L3:
n+=1
else:
ot+=1 #实现字符识别和统计
return ("大写字母数目",dx),("小写字母数目",xx),("数字数目",n),("其他字符数目",ot)
#元组显示
x=input()
print(ctd(x))'''
'''import random #0523作业2
i=0
m=[]
while i<20:
b=random.randint(0,100)
m.append(b)
i+=1
print(m)
print(len(m))#生成列表
for j in range(9):
for i in range(9):
if m[i]>m[i+1]:
t=m[i]
m[i]=m[i+1]
m[i+1]=t#对前10位数进行升序
for j in range(10,19):
for i in range(10,19):
if m[i]<m[i+1]:
o=m[i]
m[i]=m[i+1]
m[i+1]=o#对后10位数进行降序
print(m)'''
'''def smsort(x,y): #0523作业3
if y==1: #降序识别
for j in range(len(x)-1):
for i in range(len(x)-1-j):
if x[i]<x[i+1]:
t=x[i]
x[i]=x[i+1]
x[i+1]=t
return x
elif y==0: #升序识别
for j in range(len(x)-1):
for i in range(len(x)-1-j):
if x[i]>x[i+1]:
p=x[i]
x[i]=x[i+1]
x[i+1]=p
return x
else:
print("参数定义错误")
print(smsort([1,3,6,5,4,3,9,4,5],1))'''
'''def mult(x,y):
s=x+y
d=x-y
a=x*y
p=x/y
return s,d,a,p
print(mult(9,3))'''
'''def power(x):
return x**3
print(power(5.6))'''
'''def max1(*args):
t=0
for j in range(len(args)-1):
if args[j]>=args[j+1] and args[j]>=t:
t=args[j]
elif args[-1]>=t:
t=args[-1]
print(t)
max1(3,5,15,-3,2,)'''
'''def max2(*arr):
i=0
o=int(arr[0])
while i<len(arr):
if o>=arr[i]:
i+=1
else:
o=int(arr[i])
print(o)
max2(3,5,15,-3,2)'''
'''def max3(*args):
t=args[0]
for j in args:
if j>=t:
t=j
print(t)
max3(3,5,15,-3,2)'''
'''def test():
print(666)
return test()
test()'''
'''def jiec(x):
t=1
while x!=0:
t*=x
x=x-1
return t
print(jiec(5))'''
'''def jiec(x):
a=1
for i in range(1,x+1):
a*=i
print(a)
print(jiec(5))'''
'''import random
w=int(input("guess my weight:"))
w1=random.randint(100,200)
while True:
if w==w1:
print("correct")
print(w1)
break
if w>w1:
print("more")
print(w1)
w=int(input("guess my weight:"))
else:
print("less")
print(w1)
w=int(input("guess my weight:"))'''
'''x = 'iplaypython'
for i in x+5:
print(i)'''
'''n='x'+
print(n)'''
'''a=int()
b=int()
c=int()
d=int()
e=int()
f=int()
y=[a,b,c,d,e,f]
print(y)'''
'''n=input("请输入你的账号:")
p=input("请输入你的密码:")
if n=="test" and p=="mercury":
print("恭喜你登录成功")
else:
print("你的账号或密码错误")'''
'''print("加密程序")
p=input("你输入的明文:")
m=p+5
print("你发出的密文为:%s"%m)'''
'''df=input()
if df=="加密程序":
p=input("你输入的明文:")
l=list(p)
m=[]
for i in range(len(l)):
m.append(chr(ord(l[i])+5))
o=''.join(m)
print("你发出的密文为:",o)
elif df=="解密程序":
p=input("你输入的密文:")
l=list(p)
m=[]
for i in range(len(l)):
m.append(chr(ord(l[i])-5))
o=''.join(m)
print("你发出的明文为:",o)
else:
print("程序选择错误")'''
'''a=input()
print(end='')
for i in a:
print(i)'''
'''a=input("请你猜我的名字:")
while a!="xxx":
print("wrong")
a=input("请你猜我的名字:")
print("correct")'''
'''import urllib.request
url="http://www.baidu.com"
data=urllib.request.urlopen(url).read()
data=data.decode('UTF-8')
print(data)'''
'''import urllib
import urllib.request
data={}
data['word']='Jecvay Notes'
url_values=urllib.parse.urlencode(data)
url="http://www.baidu.com/s?"
full_url=url+url_values
data=urllib.request.urlopen(full_url).read()
data=data.decode('UTF-8')
dt=str(data)
#mydic=open("C:\\Users\\ETC\\Desktop\\1.txt","a+")
#mydic.write("sssss")
def sf(s,p):
f_obj=open(p,'w')
f_obj.write(s)
f_obj.close() #文档备份函数,注意s参数的str输入
sf(dt,'E:\\tmp.txt')'''
'''class human():
def setname(self,name):
self.name=name
def getname(self):
print(self.name)
oo=human()
oo.setname('Joker')
oo.getname()'''
'''for i in range(1,5):
for j in range(1,i+1):
print(i,j)
print()'''
'''import random
ca=random.randint(1,30)
while True:
age=int(input("猜猜我的年龄:"))
if age==ca:
print("猜对了")
break
elif age>ca:
print("猜多了")
else:
print("猜少了")'''
'''a=int(input("输入第一个数:"))
b=int(input("输入第二个数:"))
c=int(input("输入第三个数:"))
d=int(input("输入第四个数:"))
e=int(input("输入第五个数:"))
m=[a,b,c,d,e]
for j in range(len(m)-1):
for i in range(len(m)-1-j):
if m[i]>m[i+1]:
t=m[i]
m[i]=m[i+1]
m[i+1]=t
print(m)'''
import paddle
import paddle.fluid as fluid
import time
import sys
from paddle_fl.mpc.data_utils.data_utils import get_datautils
sys.path.append('..')
import network
mpc_du = get_datautils('aby3')
def original_train(model_dir, model_filename):
    """Train the plain-text UCI-housing model and save it for later update.

    :param model_dir: directory to write the trainable model into.
    :param model_filename: file name for the saved model.
    """
    # Step 1. build the paddle network (inputs, label, and loss).
    [x, y, _, loss] = network.uci_network()

    # Step 2. run a standard (non-MPC) training loop on CPU.
    place = fluid.CPUPlace()
    executor = fluid.Executor(place)
    executor.run(fluid.default_startup_program())
    data_feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
    batch_reader = paddle.batch(
        paddle.dataset.uci_housing.train(), batch_size=network.BATCH_SIZE, drop_last=True)
    t_start = time.time()
    for epoch_id in range(network.PADDLE_UPDATE_EPOCH):
        for step, data in enumerate(batch_reader()):
            avg_loss = executor.run(feed=data_feeder.feed(data), fetch_list=[loss.name])
            if step % 50 == 0:
                print('Epoch={}, Step={}, Loss={}'.format(epoch_id, step, avg_loss[0]))
    t_end = time.time()
    print('Paddle Training of Epoch={} Batch_size={}, cost time in seconds:{}'
          .format(network.PADDLE_UPDATE_EPOCH, network.BATCH_SIZE, (t_end - t_start)))

    # Step 3. persist the trained program so it can be encrypted and updated.
    mpc_du.save_trainable_model(exe=executor,
                                program=fluid.default_main_program(),
                                model_dir=model_dir,
                                model_filename=model_filename)
def encrypt_paddle_model(paddle_model_dir, mpc_model_dir, model_filename):
    """Load a saved paddle model, encrypt it, and write the MPC share models.

    :param paddle_model_dir: directory holding the pre-trained plain model.
    :param mpc_model_dir: directory to write the encrypted (ABY3) models into.
    :param model_filename: model file name used for both load and save.
    """
    executor = fluid.Executor(fluid.CPUPlace())
    # Step 1. restore the plain-text inference program trained earlier.
    main_prog, _, _ = fluid.io.load_inference_model(executor=executor,
                                                    dirname=paddle_model_dir,
                                                    model_filename=model_filename)
    # Step 2. encrypt the program into per-party share models on disk.
    mpc_du.encrypt_model(program=main_prog,
                         mpc_model_dir=mpc_model_dir,
                         model_filename=model_filename)
if __name__ == '__main__':
    # Phase 1: plain-text training, producing the paddle model to update.
    model_to_update_dir = './tmp/paddle_model_to_update'
    model_to_update_name = 'model_to_update'
    original_train(model_dir=model_to_update_dir,
                   model_filename=model_to_update_name)
    print('Successfully train and save paddle model for update. The model is saved in: {}.'
          .format(model_to_update_dir))
    # Phase 2: encrypt the saved model into MPC share models.
    # The final line of this block was fused with dataset-extraction residue
    # ("| python/paddle_fl/... | import paddle"); restored here.
    mpc_model_to_update_dir = './tmp/mpc_models_to_update'
    encrypt_paddle_model(paddle_model_dir=model_to_update_dir,
                         mpc_model_dir=mpc_model_to_update_dir,
                         model_filename=model_to_update_name)
    print('Successfully encrypt paddle model for update. The encrypted models are saved in: {}.'
          .format(mpc_model_to_update_dir))
import paddle.fluid as fluid
import time
import sys
from paddle_fl.mpc.data_utils.data_utils import get_datautils
sys.path.append('..')
import network
mpc_du = get_datautils('aby3')
def original_train(model_dir, model_filename):
    """Train the plaintext UCI-housing network and save it for a later
    MPC update step.

    The trained (still unencrypted) model is written to *model_dir* under
    *model_filename* via the MPC data utils.
    """
    # Network definition comes from the shared `network` module.
    [x, y, _, loss] = network.uci_network()
    cpu = fluid.CPUPlace()
    executor = fluid.Executor(cpu)
    executor.run(fluid.default_startup_program())
    feeder = fluid.DataFeeder(place=cpu, feed_list=[x, y])
    batched_reader = paddle.batch(
        paddle.dataset.uci_housing.train(),
        batch_size=network.BATCH_SIZE,
        drop_last=True)
    started_at = time.time()
    for epoch_id in range(network.PADDLE_UPDATE_EPOCH):
        # enumerate reproduces the original manual step counter exactly.
        for step, batch in enumerate(batched_reader()):
            avg_loss = executor.run(feed=feeder.feed(batch),
                                    fetch_list=[loss.name])
            if step % 50 == 0:
                print('Epoch={}, Step={}, Loss={}'.format(epoch_id, step, avg_loss[0]))
    finished_at = time.time()
    print('Paddle Training of Epoch={} Batch_size={}, cost time in seconds:{}'
          .format(network.PADDLE_UPDATE_EPOCH, network.BATCH_SIZE, (finished_at - started_at)))
    # Persist the trainable model so it can be encrypted afterwards.
    mpc_du.save_trainable_model(exe=executor,
                                program=fluid.default_main_program(),
                                model_dir=model_dir,
                                model_filename=model_filename)
def encrypt_paddle_model(paddle_model_dir, mpc_model_dir, model_filename):
    """Load a plaintext Paddle model, encrypt it with the MPC data utils,
    and write the encrypted shares under *mpc_model_dir*.
    """
    executor = fluid.Executor(fluid.CPUPlace())
    # Restore the pre-trained inference program; feed/fetch targets are unused here.
    inference_program, _, _ = fluid.io.load_inference_model(
        executor=executor,
        dirname=paddle_model_dir,
        model_filename=model_filename)
    # Produce the MPC-encrypted counterpart of the loaded program.
    mpc_du.encrypt_model(
        program=inference_program,
        mpc_model_dir=mpc_model_dir,
        model_filename=model_filename)
if __name__ == '__main__':
    # Step 1: train the plaintext Paddle model used as the update baseline.
    model_to_update_dir = './tmp/paddle_model_to_update'
    model_to_update_name = 'model_to_update'
    original_train(model_dir=model_to_update_dir,
                   model_filename=model_to_update_name)
    print('Successfully train and save paddle model for update. The model is saved in: {}.'
          .format(model_to_update_dir))
    # Step 2: encrypt the trained model into MPC shares.
    mpc_model_to_update_dir = './tmp/mpc_models_to_update'
    encrypt_paddle_model(paddle_model_dir=model_to_update_dir,
                         mpc_model_dir=mpc_model_to_update_dir,
                         model_filename=model_to_update_name)
    print('Successfully encrypt paddle model for update. The encrypted models are saved in: {}.'
          .format(mpc_model_to_update_dir))
class informacoes:
    """One student record: identity, birth date, phone, address and grade.

    Fields are class-level defaults; each instance overwrites them during
    registration (see cadastro).
    """
    # Identity.
    nome = ''
    # Birth date: day / month / year.
    dia = 0
    mes = 0
    ano = 0
    # Phone: area code and number.
    ddd = 0
    tel = 0
    # Address.
    rua = ''
    num = 0
    cidade = ''
    estado = ''
    # School grade.
    serie = 0
def enfeite(texto,estilos=''):
print('-'*100)
print(f'{estilos}{texto:^100}')
print(f'{"-"*30}{"_"*40}{"-"*30}')
def cadastro(integ):
    """Interactively register new students, appending informacoes records to *integ*.

    Checks the 500-seat capacity before prompting for each student's
    identity, birth date, phone, address and grade.
    """
    print(' ' * 100)
    print(f'Vagas disponíveis : {500 - len(integ):}')
    quantidade = int(input(f'{"Quantos alunos deseja cadastrar? ":<30}'))
    b = quantidade + len(integ)  # total enrolment after this batch
    if b > 500:
        # Requested batch exceeds capacity: refuse and report seats left.
        print(f'\033[1;31;47mSÓ TEMOS : {500 - len(integ)} {"VAGAS!":<80}')
    else:
        for c in range(len(integ), b):
            enfeite('Informações do aluno:', '\033[1;37;45m')
            info = informacoes()
            print('\033[0;35;47m ' * 100)
            info.nome = input(f'Nome completo: ').title()
            enfeite('Data de nascimento:')
            print('\033[0;35;47m ' * 100)
            info.dia = int(input('Dia: '))
            info.mes = int(input('Mês: '))
            info.ano = int(input('Ano: '))
            enfeite('Telefone:')
            print('\033[0;35;47m ' * 100)
            info.ddd = int(input('DDD: '))
            info.tel = int(input('Número: '))
            enfeite('Endereço:')
            print('\033[0;35;47m ' * 100)
            info.rua = input('Rua: ').title()
            info.num = int(input('nº: '))
            info.cidade = input('Cidade: ').title()
            info.estado = input('Estado: ').upper()
            info.serie = int(input('Série: '))
            integ.append(info)
def pesquisa(busca):
    """Prompt for a name and print every registered student whose name
    starts with it; report when nothing matches.
    """
    buscar = input('Nome: ').title()
    encontrados = 0
    enfeite('Resultado da busca!', '\033[1;37;44m')
    for i in range(len(busca)):
        # BUG FIX: the original `nome.count(buscar, 0, len(buscar) > 0:` was
        # missing a closing parenthesis (a SyntaxError). Counting occurrences
        # of the term inside the name's first len(term) characters is exactly
        # a prefix test, so use startswith().
        if busca[i].nome.startswith(buscar):
            print('\033[0;34;47m ' * 100)
            interface(busca, i)
            encontrados += 1
    if encontrados == 0:
        print('\033[0;34;47m ' * 100)
        print(f'Aluno: {buscar} não está matriculado')
def docentes(respostas):
    """Print the roster banner, then every registered record via interface()."""
    enfeite('Corpo Docente:', '\033[1;37;46m')
    for idx, _ in enumerate(respostas):
        print('\033[0;36;47m' * 100)
        interface(respostas, idx)
def interface(saidas, c):
    """Pretty-print record *saidas[c]* (name, grade, birth date, phone and
    address) with fixed-width f-string fields so output aligns in 100 columns."""
    print(f'Aluno: {saidas[c].nome:<43}Série: {saidas[c].serie:>2}{"Ano":<41}')
    # end='' keeps the phone on the same visual row as the birth date.
    print(f'Data de nascimento: {saidas[c].dia:>2}/{saidas[c].mes:>2}/{saidas[c].ano:<10}', end='')
    print(f'Telefone: ({saidas[c].ddd:2}){saidas[c].tel:<50}')
    print(f'{"Endereço:":<100}')
    print(f'Rua: {saidas[c].rua:<45}nº: {saidas[c].num:<46}')
    print(f'Cidade: {saidas[c].cidade:<42}Estado: {saidas[c].estado:<42}')
def main():
    """Interactive menu loop: register, search, list students, or quit."""
    integrantes = []  # registered students (informacoes instances), max 500
    start = 1
    # Loop while the last command was valid-but-not-quit; 0 flags "retry".
    while 0 <= start <= 3:
        enfeite('Fatec Presidente Prudente SP.', '\033[1;37;40m')
        print(f'\033[4;3;30;47m{"Menu de opções:":^100}')
        print(f'\033[0;32;47m{"1- Cadastrar alunos":^26}{"2- Consulta por nome":^25}', end='')
        print(f'{"3- Visualizar todos os dados":^29}{"4- Sair":^20}')
        print('\033[0;32;47m ' * 100)
        print('\033[0;34;47m ', end='')
        start = int(input(f'{"Digite a opção desejada: ":^50}'))
        if start == 1:
            if len(integrantes) == 500:
                print(f'\033[1;31;47m{"NÃO HÁ VAGAS!":^100}')
            else:
                cadastro(integrantes)
        elif start == 2:
            pesquisa(integrantes)
        elif start == 3:
            docentes(integrantes)
        elif start == 4:
            print(f'\033[0;32;47m{"Sessão encerrada!":^100}')
        elif 1 > start or start > 4:
            # Out-of-range input: warn and force another loop iteration.
            print(f'\033[1;31;47m{"COMANDO INVÁLIDO !":^100}')
            start = 0
        print('\033[1;37;40m-' * 100)


# Program entry point.
main()
nome =''
dia = 0
mes = 0
ano = 0
ddd = 0
tel = 0
rua =''
num = 0
cidade =''
estado =''
serie = 0
def enfeite(texto,estilos=''):
print('-'*100)
print(f'{estilos}{texto:^100}')
print(f'{"-"*30}{"_"*40}{"-"*30}')
def cadastro(integ):
    """Interactively register new students, appending informacoes records to *integ*.

    Checks the 500-seat capacity before prompting for each student's
    identity, birth date, phone, address and grade.
    """
    print(' ' * 100)
    print(f'Vagas disponíveis : {500 - len(integ):}')
    quantidade = int(input(f'{"Quantos alunos deseja cadastrar? ":<30}'))
    b = quantidade + len(integ)  # total enrolment after this batch
    if b > 500:
        # Requested batch exceeds capacity: refuse and report seats left.
        print(f'\033[1;31;47mSÓ TEMOS : {500 - len(integ)} {"VAGAS!":<80}')
    else:
        for c in range(len(integ), b):
            enfeite('Informações do aluno:', '\033[1;37;45m')
            info = informacoes()
            print('\033[0;35;47m ' * 100)
            info.nome = input(f'Nome completo: ').title()
            enfeite('Data de nascimento:')
            print('\033[0;35;47m ' * 100)
            info.dia = int(input('Dia: '))
            info.mes = int(input('Mês: '))
            info.ano = int(input('Ano: '))
            enfeite('Telefone:')
            print('\033[0;35;47m ' * 100)
            info.ddd = int(input('DDD: '))
            info.tel = int(input('Número: '))
            enfeite('Endereço:')
            print('\033[0;35;47m ' * 100)
            info.rua = input('Rua: ').title()
            info.num = int(input('nº: '))
            info.cidade = input('Cidade: ').title()
            info.estado = input('Estado: ').upper()
            info.serie = int(input('Série: '))
            integ.append(info)
def pesquisa(busca):
    """Prompt for a name and print every registered student whose name
    starts with it; report when nothing matches.
    """
    buscar = input('Nome: ').title()
    encontrados = 0
    enfeite('Resultado da busca!', '\033[1;37;44m')
    for i in range(len(busca)):
        # BUG FIX: the original `nome.count(buscar, 0, len(buscar) > 0:` was
        # missing a closing parenthesis (a SyntaxError). Counting occurrences
        # of the term inside the name's first len(term) characters is exactly
        # a prefix test, so use startswith().
        if busca[i].nome.startswith(buscar):
            print('\033[0;34;47m ' * 100)
            interface(busca, i)
            encontrados += 1
    if encontrados == 0:
        print('\033[0;34;47m ' * 100)
        print(f'Aluno: {buscar} não está matriculado')
def docentes(respostas):
    """Print the roster banner, then every registered record via interface()."""
    enfeite('Corpo Docente:', '\033[1;37;46m')
    for idx, _ in enumerate(respostas):
        print('\033[0;36;47m' * 100)
        interface(respostas, idx)
def interface(saidas, c):
    """Pretty-print record *saidas[c]* (name, grade, birth date, phone and
    address) with fixed-width f-string fields so output aligns in 100 columns."""
    print(f'Aluno: {saidas[c].nome:<43}Série: {saidas[c].serie:>2}{"Ano":<41}')
    # end='' keeps the phone on the same visual row as the birth date.
    print(f'Data de nascimento: {saidas[c].dia:>2}/{saidas[c].mes:>2}/{saidas[c].ano:<10}', end='')
    print(f'Telefone: ({saidas[c].ddd:2}){saidas[c].tel:<50}')
    print(f'{"Endereço:":<100}')
    print(f'Rua: {saidas[c].rua:<45}nº: {saidas[c].num:<46}')
    print(f'Cidade: {saidas[c].cidade:<42}Estado: {saidas[c].estado:<42}')
def main():
    """Interactive menu loop: register, search, list students, or quit."""
    integrantes = []  # registered students (informacoes instances), max 500
    start = 1
    # Loop while the last command was valid-but-not-quit; 0 flags "retry".
    while 0 <= start <= 3:
        enfeite('Fatec Presidente Prudente SP.', '\033[1;37;40m')
        print(f'\033[4;3;30;47m{"Menu de opções:":^100}')
        print(f'\033[0;32;47m{"1- Cadastrar alunos":^26}{"2- Consulta por nome":^25}', end='')
        print(f'{"3- Visualizar todos os dados":^29}{"4- Sair":^20}')
        print('\033[0;32;47m ' * 100)
        print('\033[0;34;47m ', end='')
        start = int(input(f'{"Digite a opção desejada: ":^50}'))
        if start == 1:
            if len(integrantes) == 500:
                print(f'\033[1;31;47m{"NÃO HÁ VAGAS!":^100}')
            else:
                cadastro(integrantes)
        elif start == 2:
            pesquisa(integrantes)
        elif start == 3:
            docentes(integrantes)
        elif start == 4:
            print(f'\033[0;32;47m{"Sessão encerrada!":^100}')
        elif 1 > start or start > 4:
            # Out-of-range input: warn and force another loop iteration.
            print(f'\033[1;31;47m{"COMANDO INVÁLIDO !":^100}')
            start = 0
        print('\033[1;37;40m-' * 100)


# Program entry point.
main()
import json
from django.http import HttpResponse
from django.views.generic import View
from django.shortcuts import render
from braces.views import CsrfExemptMixin
from core.models import AccountBling, Product, Movement
from django.utils import timezone
from Bling import Api, ApiError, HookDataProduct, SyncStock
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
class HookInventoryChangeView1(CsrfExemptMixin, View):
    """Bling webhook endpoint for inventory changes on account #1.

    Applies the reported stock to the local Product row, records a
    Movement, then replays pending movements onto account #2 via
    sync_other_account(). CSRF is exempt because the caller is Bling.
    """
    def post(self, request, *args, **kwargs):
        # Parse the webhook payload (SKU + current inventory).
        data = HookDataProduct(request.body)
        bling1 = AccountBling.objects.get(id=1)
        products_base = Product.objects.filter(bling=bling1)
        product_update = products_base.get(sku=data.sku)
        # SyncStock computes the delta between stored and reported stock.
        sync_stock = SyncStock(product_update, data)
        if sync_stock.diff != 0:
            print()
            print(timezone.now())
            print(f"SKU: {product_update.sku} | Conta: {bling1.name} | Diferença: {sync_stock.diff} | Anterior: {sync_stock.before_stock}"
                  f" | Novo: {sync_stock.after_stock}")
            # Persist the stock level reported by the webhook.
            product_update.last_update = timezone.now()
            product_update.quantity = data.current_inventory
            product_update.save(update_fields=['last_update', 'quantity'])
            # Record the movement so the sibling account can be synchronised.
            Movement.objects.create(quantity=sync_stock.diff, product=product_update,
                                    after_stock=sync_stock.after_stock, before_stock=sync_stock.before_stock,
                                    time=timezone.now(), bling=bling1, updated=False)
            # Mirror all pending movements onto account #2.
            bling2 = AccountBling.objects.get(id=2)
            blings_updated = sync_other_account(bling2)
            print()
            print(timezone.now())
            print(f"Produto atualizado: {blings_updated}")
            print()
        return HttpResponse('\n OK! Status Code 200 \n')
class HookInventoryChangeView2(CsrfExemptMixin, View):
    """Bling webhook endpoint for inventory changes on account #2.

    Mirror image of HookInventoryChangeView1: applies the reported stock
    locally, records a Movement, then replays pending movements onto
    account #1 via sync_other_account().
    """
    def post(self, request, *args, **kwargs):
        # Parse the webhook payload (SKU + current inventory).
        data = HookDataProduct(request.body)
        bling2 = AccountBling.objects.get(id=2)
        products_base = Product.objects.filter(bling=bling2)
        product_update = products_base.get(sku=data.sku)
        # SyncStock computes the delta between stored and reported stock.
        sync_stock = SyncStock(product_update, data)
        if sync_stock.diff != 0:
            print()
            print(timezone.now())
            print(f"SKU: {product_update.sku} | Conta: {bling2.name} | Diferença: {sync_stock.diff} | Anterior: {sync_stock.before_stock}"
                  f" | Novo: {sync_stock.after_stock}")
            # Persist the stock level reported by the webhook.
            product_update.last_update = timezone.now()
            product_update.quantity = data.current_inventory
            product_update.save(update_fields=['last_update', 'quantity'])
            # Record the movement so the sibling account can be synchronised.
            Movement.objects.create(quantity=sync_stock.diff, product=product_update,
                                    after_stock=sync_stock.after_stock, before_stock=sync_stock.before_stock,
                                    time=timezone.now(), bling=bling2, updated=False)
            # Mirror all pending movements onto account #1.
            bling1 = AccountBling.objects.get(id=1)
            blings_updated = sync_other_account(bling1)
            print()
            print(timezone.now())
            print(f"Produto atualizado: {blings_updated}")
            print()
        return HttpResponse('\n OK! Status Code 200 \n')
def sync_other_account(bling):
    """Replay every Movement not yet propagated onto *bling*'s account.

    For each pending Movement that originated on the other account, adjust
    the matching Product row locally and push the new quantity through the
    Bling API. Returns human-readable strings describing the updates made.
    """
    list_sku = []
    try:
        movement_products = Movement.objects.filter(updated=False)
        list_movement = list(movement_products)
        for movement in list_movement:
            products_data = Product.objects.filter(sku=movement.product.sku)
            product = products_data.get(bling=bling)
            # Skip movements that originated on this same account.
            if product.bling != movement.bling:
                new_quantity = product.quantity + movement.quantity
                product.quantity = new_quantity
                product.last_update = timezone.now()
                product.save(update_fields=['quantity', 'last_update'])
                movement.updated = True
                movement.save(update_fields=['updated'])
                try:
                    api = Api(bling.api_key)
                    update = api.update_stock(code=product.sku, qty=new_quantity)
                    list_sku.append(f"UPDATED BLING: {bling.name} | SKU: {product.sku} | QTD: {new_quantity}")
                except ApiError as e:
                    # API failure on one SKU: log and continue with the rest.
                    print(e.response)
    except Exception as ex:
        # Deliberate best-effort: log and return whatever was synced so far.
        print(f"functionError - Erro na atualização dos Blings: {ex}")
    return list_sku
@login_required
def home(request):
    """Render the dashboard; requires an authenticated user."""
    return render(request, 'index.html')
def insert_products(request):
    """Seed the local Product table from every configured Bling account.

    Walks each account's product list via the API and creates any Product
    row that does not exist yet. Kit products ('estrutura' present) and
    father/variant-parent products (no 'estoqueAtual') are skipped.
    """
    accounts_blings = AccountBling.objects.all()
    logging_future = 0  # counts skipped kit/father products (not logged yet)
    try:
        for bling in accounts_blings:
            # Products already known locally for this account.
            products_base = Product.objects.filter(bling=bling)
            # API wrapper bound to this account's key.
            api = Api(bling.api_key)
            products_list = api.get_products()
            for product in products_list:
                sku = api.get_product(product['codigo'])
                try:
                    if sku['estrutura']:
                        # Kit product: carries no own stock, skip it.
                        logging_future += 1
                except KeyError:
                    try:
                        sku_codigo = str(sku['codigo'])
                        sku_qtd = str(sku['estoqueAtual'])
                        try:
                            product_base = products_base.get(sku=sku_codigo)
                            print(f"Produto {product_base.sku} já está inserido na base.")
                        except ObjectDoesNotExist:
                            Product.objects.create(bling=bling, sku=sku_codigo, quantity=sku_qtd,
                                                   last_update=timezone.now())
                            print(f"Conta: {bling.name} | Produto: {sku_codigo} | Estoque: {sku_qtd}")
                    except KeyError:
                        # Father (variant parent) product: no 'estoqueAtual'.
                        logging_future += 1
    except ApiError as e:
        print(e.response)
    return render(request, 'index.html')
from django.http import HttpResponse
from django.views.generic import View
from django.shortcuts import render
from braces.views import CsrfExemptMixin
from core.models import AccountBling, Product, Movement
from django.utils import timezone
from Bling import Api, ApiError, HookDataProduct, SyncStock
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
class HookInventoryChangeView1(CsrfExemptMixin, View):
    """Bling webhook endpoint for inventory changes on account #1.

    Applies the reported stock to the local Product row, records a
    Movement, then replays pending movements onto account #2 via
    sync_other_account(). CSRF is exempt because the caller is Bling.
    """
    def post(self, request, *args, **kwargs):
        # Parse the webhook payload (SKU + current inventory).
        data = HookDataProduct(request.body)
        bling1 = AccountBling.objects.get(id=1)
        products_base = Product.objects.filter(bling=bling1)
        product_update = products_base.get(sku=data.sku)
        # SyncStock computes the delta between stored and reported stock.
        sync_stock = SyncStock(product_update, data)
        if sync_stock.diff != 0:
            print()
            print(timezone.now())
            print(f"SKU: {product_update.sku} | Conta: {bling1.name} | Diferença: {sync_stock.diff} | Anterior: {sync_stock.before_stock}"
                  f" | Novo: {sync_stock.after_stock}")
            # Persist the stock level reported by the webhook.
            product_update.last_update = timezone.now()
            product_update.quantity = data.current_inventory
            product_update.save(update_fields=['last_update', 'quantity'])
            # Record the movement so the sibling account can be synchronised.
            Movement.objects.create(quantity=sync_stock.diff, product=product_update,
                                    after_stock=sync_stock.after_stock, before_stock=sync_stock.before_stock,
                                    time=timezone.now(), bling=bling1, updated=False)
            # Mirror all pending movements onto account #2.
            bling2 = AccountBling.objects.get(id=2)
            blings_updated = sync_other_account(bling2)
            print()
            print(timezone.now())
            print(f"Produto atualizado: {blings_updated}")
            print()
        return HttpResponse('\n OK! Status Code 200 \n')
class HookInventoryChangeView2(CsrfExemptMixin, View):
    """Bling webhook endpoint for inventory changes on account #2.

    Mirror image of HookInventoryChangeView1: applies the reported stock
    locally, records a Movement, then replays pending movements onto
    account #1 via sync_other_account().
    """
    def post(self, request, *args, **kwargs):
        # Parse the webhook payload (SKU + current inventory).
        data = HookDataProduct(request.body)
        bling2 = AccountBling.objects.get(id=2)
        products_base = Product.objects.filter(bling=bling2)
        product_update = products_base.get(sku=data.sku)
        # SyncStock computes the delta between stored and reported stock.
        sync_stock = SyncStock(product_update, data)
        if sync_stock.diff != 0:
            print()
            print(timezone.now())
            print(f"SKU: {product_update.sku} | Conta: {bling2.name} | Diferença: {sync_stock.diff} | Anterior: {sync_stock.before_stock}"
                  f" | Novo: {sync_stock.after_stock}")
            # Persist the stock level reported by the webhook.
            product_update.last_update = timezone.now()
            product_update.quantity = data.current_inventory
            product_update.save(update_fields=['last_update', 'quantity'])
            # Record the movement so the sibling account can be synchronised.
            Movement.objects.create(quantity=sync_stock.diff, product=product_update,
                                    after_stock=sync_stock.after_stock, before_stock=sync_stock.before_stock,
                                    time=timezone.now(), bling=bling2, updated=False)
            # Mirror all pending movements onto account #1.
            bling1 = AccountBling.objects.get(id=1)
            blings_updated = sync_other_account(bling1)
            print()
            print(timezone.now())
            print(f"Produto atualizado: {blings_updated}")
            print()
        return HttpResponse('\n OK! Status Code 200 \n')
def sync_other_account(bling):
    """Replay every Movement not yet propagated onto *bling*'s account.

    For each pending Movement that originated on the other account, adjust
    the matching Product row locally and push the new quantity through the
    Bling API. Returns human-readable strings describing the updates made.
    """
    list_sku = []
    try:
        movement_products = Movement.objects.filter(updated=False)
        list_movement = list(movement_products)
        for movement in list_movement:
            products_data = Product.objects.filter(sku=movement.product.sku)
            product = products_data.get(bling=bling)
            # Skip movements that originated on this same account.
            if product.bling != movement.bling:
                new_quantity = product.quantity + movement.quantity
                product.quantity = new_quantity
                product.last_update = timezone.now()
                product.save(update_fields=['quantity', 'last_update'])
                movement.updated = True
                movement.save(update_fields=['updated'])
                try:
                    api = Api(bling.api_key)
                    update = api.update_stock(code=product.sku, qty=new_quantity)
                    list_sku.append(f"UPDATED BLING: {bling.name} | SKU: {product.sku} | QTD: {new_quantity}")
                except ApiError as e:
                    # API failure on one SKU: log and continue with the rest.
                    print(e.response)
    except Exception as ex:
        # Deliberate best-effort: log and return whatever was synced so far.
        print(f"functionError - Erro na atualização dos Blings: {ex}")
    return list_sku
@login_required
def home(request):
    """Render the dashboard; requires an authenticated user."""
    return render(request, 'index.html')
def insert_products(request):
    """Seed the local Product table from every configured Bling account.

    Walks each account's product list via the API and creates any Product
    row that does not exist yet. Kit products ('estrutura' present) and
    father/variant-parent products (no 'estoqueAtual') are skipped.
    """
    accounts_blings = AccountBling.objects.all()
    logging_future = 0  # counts skipped kit/father products (not logged yet)
    try:
        for bling in accounts_blings:
            # Products already known locally for this account.
            products_base = Product.objects.filter(bling=bling)
            # API wrapper bound to this account's key.
            api = Api(bling.api_key)
            products_list = api.get_products()
            for product in products_list:
                sku = api.get_product(product['codigo'])
                try:
                    if sku['estrutura']:
                        # Kit product: carries no own stock, skip it.
                        logging_future += 1
                except KeyError:
                    try:
                        sku_codigo = str(sku['codigo'])
                        sku_qtd = str(sku['estoqueAtual'])
                        try:
                            product_base = products_base.get(sku=sku_codigo)
                            print(f"Produto {product_base.sku} já está inserido na base.")
                        except ObjectDoesNotExist:
                            Product.objects.create(bling=bling, sku=sku_codigo, quantity=sku_qtd,
                                                   last_update=timezone.now())
                            print(f"Conta: {bling.name} | Produto: {sku_codigo} | Estoque: {sku_qtd}")
                    except KeyError:
                        # Father (variant parent) product: no 'estoqueAtual'.
                        logging_future += 1
    except ApiError as e:
        print(e.response)
    return render(request, 'index.html')
from blockchainetl_common.executors.batch_work_executor import BatchWorkExecutor
from blockchainetl_common.jobs.base_job import BaseJob
from blockchainetl_common.utils import validate_range
from zilliqaetl.jobs.retriable_exceptions import RETRY_EXCEPTIONS
from zilliqaetl.mappers.event_log_mapper import map_event_logs
from zilliqaetl.mappers.exception_mapper import map_exceptions
from zilliqaetl.mappers.transaction_mapper import map_transaction
from zilliqaetl.mappers.transition_mapper import map_transitions
from zilliqaetl.mappers.tx_block_mapper import map_tx_block
from zilliqaetl.service.zilliqa_service import ZilliqaService
class ExportTxBlocksJob(BaseJob):
    """Export Zilliqa TX blocks (and optionally their transactions, event
    logs, exceptions and transitions) for an inclusive block range.

    Work is dispatched block-by-block through a BatchWorkExecutor; every
    produced item is handed to *item_exporter*.
    """
    def __init__(
            self,
            start_block,
            end_block,
            zilliqa_api,
            max_workers,
            item_exporter,
            export_transactions=True,
            export_event_logs=True,
            export_exceptions=True,
            export_transitions=True):
        validate_range(start_block, end_block)
        self.start_block = start_block
        self.end_block = end_block
        # Batch size 1: one block number per work item; retries on RETRY_EXCEPTIONS.
        self.batch_work_executor = BatchWorkExecutor(1, max_workers, retry_exceptions=RETRY_EXCEPTIONS)
        self.item_exporter = item_exporter
        self.zilliqa_service = ZilliqaService(zilliqa_api)
        self.export_transactions = export_transactions
        self.export_event_logs = export_event_logs
        self.export_exceptions = export_exceptions
        self.export_transitions = export_transitions

    def _start(self):
        # BaseJob hook: open the exporter before any batch runs.
        self.item_exporter.open()

    def _export(self):
        # Inclusive block-number range, fanned out to the worker pool.
        self.batch_work_executor.execute(
            range(self.start_block, self.end_block + 1),
            self._export_batch,
            total_items=self.end_block - self.start_block + 1
        )

    def _export_batch(self, block_number_batch):
        items = []
        for number in block_number_batch:
            tx_block = map_tx_block(self.zilliqa_service.get_tx_block(number))
            # Only fetch transactions when the block header reports any.
            txns = list(self.zilliqa_service.get_transactions(number)) if tx_block.get('num_transactions') > 0 else []
            if self._should_export_transactions():
                for txn in txns:
                    items.append(map_transaction(tx_block, txn))
                    if self._should_export_event_logs(txn):
                        items.extend(map_event_logs(tx_block, txn))
                    if self._should_export_exceptions(txn):
                        items.extend(map_exceptions(tx_block, txn))
                    if self._should_export_transitions(txn):
                        items.extend(map_transitions(tx_block, txn))
            # Record how many transactions were actually retrieved.
            tx_block['num_present_transactions'] = len(txns)
            items.append(tx_block)
        for item in items:
            self.item_exporter.export_item(item)

    def _should_export_transactions(self):
        return self.export_transactions

    def _should_export_event_logs(self, txn):
        # A receipt is required to extract logs/exceptions/transitions.
        return self.export_event_logs and txn.get('receipt')

    def _should_export_exceptions(self, txn):
        return self.export_exceptions and txn.get('receipt')

    def _should_export_transitions(self, txn):
        return self.export_transitions and txn.get('receipt')

    def _end(self):
        # BaseJob hook: stop workers first, then close the exporter.
        self.batch_work_executor.shutdown()
        self.item_exporter.close()
from blockchainetl_common.executors.batch_work_executor import BatchWorkExecutor
from blockchainetl_common.jobs.base_job import BaseJob
from blockchainetl_common.utils import validate_range
from zilliqaetl.jobs.retriable_exceptions import RETRY_EXCEPTIONS
from zilliqaetl.mappers.event_log_mapper import map_event_logs
from zilliqaetl.mappers.exception_mapper import map_exceptions
from zilliqaetl.mappers.transaction_mapper import map_transaction
from zilliqaetl.mappers.transition_mapper import map_transitions
from zilliqaetl.mappers.tx_block_mapper import map_tx_block
from zilliqaetl.service.zilliqa_service import ZilliqaService
class ExportTxBlocksJob(BaseJob):
    """Export Zilliqa TX blocks (and optionally their transactions, event
    logs, exceptions and transitions) for an inclusive block range.

    Work is dispatched block-by-block through a BatchWorkExecutor; every
    produced item is handed to *item_exporter*.
    """
    def __init__(
            self,
            start_block,
            end_block,
            zilliqa_api,
            max_workers,
            item_exporter,
            export_transactions=True,
            export_event_logs=True,
            export_exceptions=True,
            export_transitions=True):
        validate_range(start_block, end_block)
        self.start_block = start_block
        self.end_block = end_block
        # Batch size 1: one block number per work item; retries on RETRY_EXCEPTIONS.
        self.batch_work_executor = BatchWorkExecutor(1, max_workers, retry_exceptions=RETRY_EXCEPTIONS)
        self.item_exporter = item_exporter
        self.zilliqa_service = ZilliqaService(zilliqa_api)
        self.export_transactions = export_transactions
        self.export_event_logs = export_event_logs
        self.export_exceptions = export_exceptions
        self.export_transitions = export_transitions

    def _start(self):
        # BaseJob hook: open the exporter before any batch runs.
        self.item_exporter.open()

    def _export(self):
        # Inclusive block-number range, fanned out to the worker pool.
        self.batch_work_executor.execute(
            range(self.start_block, self.end_block + 1),
            self._export_batch,
            total_items=self.end_block - self.start_block + 1
        )

    def _export_batch(self, block_number_batch):
        items = []
        for number in block_number_batch:
            tx_block = map_tx_block(self.zilliqa_service.get_tx_block(number))
            # Only fetch transactions when the block header reports any.
            txns = list(self.zilliqa_service.get_transactions(number)) if tx_block.get('num_transactions') > 0 else []
            if self._should_export_transactions():
                for txn in txns:
                    items.append(map_transaction(tx_block, txn))
                    if self._should_export_event_logs(txn):
                        items.extend(map_event_logs(tx_block, txn))
                    if self._should_export_exceptions(txn):
                        items.extend(map_exceptions(tx_block, txn))
                    if self._should_export_transitions(txn):
                        items.extend(map_transitions(tx_block, txn))
            # Record how many transactions were actually retrieved.
            tx_block['num_present_transactions'] = len(txns)
            items.append(tx_block)
        for item in items:
            self.item_exporter.export_item(item)

    def _should_export_transactions(self):
        return self.export_transactions

    def _should_export_event_logs(self, txn):
        # A receipt is required to extract logs/exceptions/transitions.
        return self.export_event_logs and txn.get('receipt')

    def _should_export_exceptions(self, txn):
        return self.export_exceptions and txn.get('receipt')

    def _should_export_transitions(self, txn):
        return self.export_transitions and txn.get('receipt')

    def _end(self):
        # BaseJob hook: stop workers first, then close the exporter.
        self.batch_work_executor.shutdown()
        self.item_exporter.close()
from rpython.jit.metainterp.history import ConstInt, FLOAT
from rpython.jit.backend.ppc.locations import imm
def check_imm_box(arg, lower_bound=-2**15, upper_bound=2**15-1):
    """Return True when *arg* is a ConstInt whose value fits in
    [lower_bound, upper_bound] (defaults: signed 16-bit immediate)."""
    if not isinstance(arg, ConstInt):
        return False
    return lower_bound <= arg.getint() <= upper_bound
def _check_imm_arg(i):
return (-2**15) <= i <= (2**15-1)
def _prepare_cmp_op(signed):
    """Build a prepare_* helper for integer comparisons.

    The generated function puts arg0 in a register and arg1 either in an
    immediate (when it fits the signed/unsigned 16-bit range) or a register;
    the result may be allocated to the condition code.
    """
    if signed:
        lo, hi = -2**15, 2**15 - 1
    else:
        lo, hi = 0, 2**16 - 1

    def prepare(self, op):
        loc0 = self.ensure_reg(op.getarg(0))
        arg1 = op.getarg(1)
        if check_imm_box(arg1, lo, hi):
            loc1 = imm(arg1.getint())
        else:
            loc1 = self.ensure_reg(arg1)
        self.free_op_vars()
        return [loc0, loc1, self.force_allocate_reg_or_cc(op)]
    return prepare
# Concrete comparison preparers: signed uses the signed 16-bit immediate
# range, unsigned the unsigned 16-bit one.
prepare_cmp_op = _prepare_cmp_op(signed=True)
prepare_cmp_op_unsigned = _prepare_cmp_op(signed=False)
def prepare_unary_cmp(self, op):
    """Lower a unary comparison as (reg, immediate #0, cc-or-reg result)."""
    arg_loc = self.ensure_reg(op.getarg(0))
    zero = imm(0)
    self.free_op_vars()
    result = self.force_allocate_reg_or_cc(op)
    return [arg_loc, zero, result]
def prepare_float_cmp(self, op):
    """Float comparison: both operands must be in registers; the result may
    be allocated to the condition code."""
    l0 = self.ensure_reg(op.getarg(0))
    l1 = self.ensure_reg(op.getarg(1))
    self.free_op_vars()
    res = self.force_allocate_reg_or_cc(op)
    return [l0, l1, res]
def prepare_unary_op(self, op):
    """Single register input, register result."""
    l0 = self.ensure_reg(op.getarg(0))
    self.free_op_vars()
    res = self.force_allocate_reg(op)
    return [l0, res]
def prepare_binary_op(self, op):
    """Two register inputs, register result."""
    lhs = self.ensure_reg(op.getarg(0))
    rhs = self.ensure_reg(op.getarg(1))
    self.free_op_vars()
    return [lhs, rhs, self.force_allocate_reg(op)]
def prepare_int_add_or_mul(self, op):
    """Prepare a commutative int op (add/mul): a constant operand that fits
    an immediate is moved to the second position."""
    a0 = op.getarg(0)
    a1 = op.getarg(1)
    # Commutative, so swap to get the immediate-sized constant as arg1.
    if check_imm_box(a0):
        a0, a1 = a1, a0
    l0 = self.ensure_reg(a0)
    if check_imm_box(a1):
        l1 = imm(a1.getint())
    else:
        l1 = self.ensure_reg(a1)
    self.free_op_vars()
    res = self.force_allocate_reg(op)
    return [l0, l1, res]
def prepare_int_sub(self, op):
    """Prepare int subtraction: arg1 goes to an immediate when it fits the
    shifted range [-2**15+1, 2**15].

    NOTE(review): the range is the negation of the usual signed 16-bit one,
    presumably because the backend emits x - imm as x + (-imm) — confirm
    against the assembler.
    """
    l0 = self.ensure_reg(op.getarg(0))
    a1 = op.getarg(1)
    if check_imm_box(a1, -2**15+1, 2**15):
        l1 = imm(a1.getint())
    else:
        l1 = self.ensure_reg(a1)
    self.free_op_vars()
    res = self.force_allocate_reg(op)
    return [l0, l1, res]
from rpython.jit.backend.ppc.locations import imm
def check_imm_box(arg, lower_bound=-2**15, upper_bound=2**15-1):
    """Return True when *arg* is a ConstInt whose value fits in
    [lower_bound, upper_bound] (defaults: signed 16-bit immediate)."""
    if not isinstance(arg, ConstInt):
        return False
    return lower_bound <= arg.getint() <= upper_bound
def _check_imm_arg(i):
return (-2**15) <= i <= (2**15-1)
def _prepare_cmp_op(signed):
    """Build a prepare_* helper for integer comparisons.

    The generated function puts arg0 in a register and arg1 either in an
    immediate (when it fits the signed/unsigned 16-bit range) or a register;
    the result may be allocated to the condition code.
    """
    if signed:
        lo, hi = -2**15, 2**15 - 1
    else:
        lo, hi = 0, 2**16 - 1

    def prepare(self, op):
        loc0 = self.ensure_reg(op.getarg(0))
        arg1 = op.getarg(1)
        if check_imm_box(arg1, lo, hi):
            loc1 = imm(arg1.getint())
        else:
            loc1 = self.ensure_reg(arg1)
        self.free_op_vars()
        return [loc0, loc1, self.force_allocate_reg_or_cc(op)]
    return prepare
# Concrete comparison preparers: signed uses the signed 16-bit immediate
# range, unsigned the unsigned 16-bit one.
prepare_cmp_op = _prepare_cmp_op(signed=True)
prepare_cmp_op_unsigned = _prepare_cmp_op(signed=False)
def prepare_unary_cmp(self, op):
    """Lower a unary comparison as (reg, immediate #0, cc-or-reg result)."""
    arg_loc = self.ensure_reg(op.getarg(0))
    zero = imm(0)
    self.free_op_vars()
    result = self.force_allocate_reg_or_cc(op)
    return [arg_loc, zero, result]
def prepare_float_cmp(self, op):
    """Float comparison: both operands must be in registers; the result may
    be allocated to the condition code."""
    l0 = self.ensure_reg(op.getarg(0))
    l1 = self.ensure_reg(op.getarg(1))
    self.free_op_vars()
    res = self.force_allocate_reg_or_cc(op)
    return [l0, l1, res]
def prepare_unary_op(self, op):
    """Single register input, register result."""
    l0 = self.ensure_reg(op.getarg(0))
    self.free_op_vars()
    res = self.force_allocate_reg(op)
    return [l0, res]
def prepare_binary_op(self, op):
    """Two register inputs, register result."""
    lhs = self.ensure_reg(op.getarg(0))
    rhs = self.ensure_reg(op.getarg(1))
    self.free_op_vars()
    return [lhs, rhs, self.force_allocate_reg(op)]
def prepare_int_add_or_mul(self, op):
    """Prepare a commutative int op (add/mul): a constant operand that fits
    an immediate is moved to the second position."""
    a0 = op.getarg(0)
    a1 = op.getarg(1)
    # Commutative, so swap to get the immediate-sized constant as arg1.
    if check_imm_box(a0):
        a0, a1 = a1, a0
    l0 = self.ensure_reg(a0)
    if check_imm_box(a1):
        l1 = imm(a1.getint())
    else:
        l1 = self.ensure_reg(a1)
    self.free_op_vars()
    res = self.force_allocate_reg(op)
    return [l0, l1, res]
def prepare_int_sub(self, op):
    """Prepare int subtraction: arg1 goes to an immediate when it fits the
    shifted range [-2**15+1, 2**15].

    NOTE(review): the range is the negation of the usual signed 16-bit one,
    presumably because the backend emits x - imm as x + (-imm) — confirm
    against the assembler.
    """
    l0 = self.ensure_reg(op.getarg(0))
    a1 = op.getarg(1)
    if check_imm_box(a1, -2**15+1, 2**15):
        l1 = imm(a1.getint())
    else:
        l1 = self.ensure_reg(a1)
    self.free_op_vars()
    res = self.force_allocate_reg(op)
    return [l0, l1, res]
from pyramid.view import view_config
from ..Models import Base
from sqlalchemy import select, asc, func
from pyramid.security import NO_PERMISSION_REQUIRED
# Maps the URL object segment to the corresponding model/table base name.
dictObj = {
    'stations': 'Station',
    'sensors': 'Sensor',
    'individuals': 'Individual',
    'monitoredSites': 'MonitoredSite',
    'users': 'User',
    'regions': 'Region',
    'projects': 'Project',
    'clients': 'Client'
}
def asInt(str):
    """Return *str* converted to int when possible, otherwise unchanged.

    Used to decide whether a matchdict property refers to a dynamic
    property id (int) or a column name (str).

    NOTE: the parameter shadows the builtin ``str``; kept for interface
    compatibility with existing callers.
    """
    try:
        return int(str)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures fall through.
        return str
# TODO remove that already exists in Object view,
# need replace url requesting "{root}/autocomplete/{object}..." by "{root}/{object}/autocomplete..."
@view_config(route_name='autocomplete',
             renderer='json',
             request_method='GET')
@view_config(route_name='autocomplete/ID',
             renderer='json',
             request_method='GET')
def autocomplete(request):
    """Generic autocomplete endpoint.

    Matches the ``term`` query parameter against either a dynamic property
    (when the ``prop`` matchdict entry parses as an int) or a plain column
    of the object's table, returning [{'label': ..., 'value': ...}, ...].
    """
    objName = dictObj[request.matchdict['obj']]
    session = request.dbsession
    criteria = request.params['term']
    prop = asInt(request.matchdict['prop'])
    try:
        NameValReturn = request.matchdict['valReturn']
    except:
        NameValReturn = None
    if isinstance(prop, int):
        # Dynamic property id: search current values in *DynPropValuesNow.
        table = Base.metadata.tables[objName + 'DynPropValuesNow']
        query = select([table.c['ValueString'].label('label'),
                        table.c['ValueString'].label('value')]
                       ).distinct(table.c['ValueString']
                                  ).where(table.c['FK_' + objName + 'DynProp'] == prop)
        query = query.where(table.c['ValueString'].like('%' + criteria + '%')
                            ).order_by(asc(table.c['ValueString']))
    else:
        # Plain column: 'label' comes from *prop*, 'value' from NameValReturn
        # (which defaults to the same column).
        if NameValReturn is None:
            NameValReturn = prop
        table = Base.metadata.tables[objName]
        query = select([table.c[NameValReturn].label('value'),
                        table.c[prop].label('label')]
                       ).distinct(table.c[prop])
        query = query.where(table.c[prop].like(
            '%' + criteria + '%')).order_by(asc(table.c[prop]))
    return [dict(row) for row in session.execute(query).fetchall()]
@view_config(route_name='autocomplete/taxon',
             renderer='json',
             request_method='GET',
             permission=NO_PERMISSION_REQUIRED)
def autocompleteTaxon(request):
    """Autocomplete over the per-protocol taxon database views.

    Expects ``protocol``, ``type`` (the searched column) and ``term`` query
    parameters; returns matching taxa, or None for an unknown protocol.
    """
    session = request.dbsession
    # One database view per survey protocol.
    taxaViews = {
        'reptile': Base.metadata.tables['reptil_view'],
        'oiseau': Base.metadata.tables['bird_view'],
        'amphibien': Base.metadata.tables['amphibia_view'],
        'mammal': Base.metadata.tables['mammal_view'],
        'insecte': Base.metadata.tables['insect_view'],
        'chiroptera': Base.metadata.tables['chiroptera_view'],
        'flore': Base.metadata.tables['phyto_view'],
    }
    criterias = dict(request.params)
    table = taxaViews.get(criterias['protocol'], None)
    if table is None:
        return None
    prop_criteria = criterias['type']
    # Case-insensitive prefix match on the requested column.
    query = select([table]).where(
        func.lower(table.c[prop_criteria]).like(
            func.lower(criterias['term'] + '%'))
    ).order_by(asc(table.c[prop_criteria]))
    return [{'label': row[prop_criteria],
             'taxref_id': row['taxref_id'],
             'vernaculaire': row['vernaculaire'],
             'latin': row['latin']
             } for row in session.execute(query).fetchall()]
@view_config(route_name='taxon',
             renderer='json',
             request_method='GET',
             permission=NO_PERMISSION_REQUIRED)
def getTaxon(request):
    """Return one TAXREF entry (id, vernacular and latin names) looked up
    by its CD_NOM identifier from the URL matchdict."""
    session = request.dbsession
    taxref_id = request.matchdict.get('taxref_id', None)
    table = Base.metadata.tables['TAXREF']
    query = select([table]).where(table.c['CD_NOM'] == taxref_id)
    result = session.execute(query).fetchone()
    return {
        'taxref_id': result['CD_NOM'],
        'vernaculaire': result['NOM_VERN'],
        'latin': result['LB_NOM']
    }
from ..Models import Base
from sqlalchemy import select, asc, func
from pyramid.security import NO_PERMISSION_REQUIRED
# Maps the URL object segment to the corresponding model/table base name.
dictObj = {
    'stations': 'Station',
    'sensors': 'Sensor',
    'individuals': 'Individual',
    'monitoredSites': 'MonitoredSite',
    'users': 'User',
    'regions': 'Region',
    'projects': 'Project',
    'clients': 'Client'
}
def asInt(str):
    """Return *str* converted to int when possible, otherwise unchanged.

    Used to decide whether a matchdict property refers to a dynamic
    property id (int) or a column name (str).

    NOTE: the parameter shadows the builtin ``str``; kept for interface
    compatibility with existing callers.
    """
    try:
        return int(str)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures fall through.
        return str
# TODO remove that already exists in Object view,
# need replace url requesting "{root}/autocomplete/{object}..." by "{root}/{object}/autocomplete..."
@view_config(route_name='autocomplete',
             renderer='json',
             request_method='GET')
@view_config(route_name='autocomplete/ID',
             renderer='json',
             request_method='GET')
def autocomplete(request):
    """Generic autocomplete endpoint.

    Matches the ``term`` query parameter against either a dynamic property
    (when the ``prop`` matchdict entry parses as an int) or a plain column
    of the object's table, returning [{'label': ..., 'value': ...}, ...].
    """
    objName = dictObj[request.matchdict['obj']]
    session = request.dbsession
    criteria = request.params['term']
    prop = asInt(request.matchdict['prop'])
    try:
        NameValReturn = request.matchdict['valReturn']
    except:
        NameValReturn = None
    if isinstance(prop, int):
        # Dynamic property id: search current values in *DynPropValuesNow.
        table = Base.metadata.tables[objName + 'DynPropValuesNow']
        query = select([table.c['ValueString'].label('label'),
                        table.c['ValueString'].label('value')]
                       ).distinct(table.c['ValueString']
                                  ).where(table.c['FK_' + objName + 'DynProp'] == prop)
        query = query.where(table.c['ValueString'].like('%' + criteria + '%')
                            ).order_by(asc(table.c['ValueString']))
    else:
        # Plain column: 'label' comes from *prop*, 'value' from NameValReturn
        # (which defaults to the same column).
        if NameValReturn is None:
            NameValReturn = prop
        table = Base.metadata.tables[objName]
        query = select([table.c[NameValReturn].label('value'),
                        table.c[prop].label('label')]
                       ).distinct(table.c[prop])
        query = query.where(table.c[prop].like(
            '%' + criteria + '%')).order_by(asc(table.c[prop]))
    return [dict(row) for row in session.execute(query).fetchall()]
@view_config(route_name='autocomplete/taxon',
renderer='json',
request_method='GET',
permission=NO_PERMISSION_REQUIRED)
def autocompleteTaxon(request):
session = request.dbsession
taxaViews = {
'reptile': Base.metadata.tables['reptil_view'],
'oiseau': Base.metadata.tables['bird_view'],
'amphibien': Base.metadata.tables['amphibia_view'],
'mammal': Base.metadata.tables['mammal_view'],
'insecte': Base.metadata.tables['insect_view'],
'chiroptera': Base.metadata.tables['chiroptera_view'],
'flore': Base.metadata.tables['phyto_view'],
}
# prop_name = {'vernaculaire': 'NOM_VERN',
# 'latin': 'NOM_COMPLET'}
criterias = dict(request.params)
table = taxaViews.get(criterias['protocol'], None)
if table is None:
return None
prop_criteria = criterias['type']
query = select([table]).where(
func.lower(table.c[prop_criteria]).like(
func.lower(criterias['term'] + '%'))
).order_by(asc(table.c[prop_criteria]))
# result = session.execute(query).fetchall()
return [{'label': row[prop_criteria],
'taxref_id': row['taxref_id'],
'vernaculaire': row['vernaculaire'],
'latin': row['latin']
} for row in session.execute(query).fetchall()]
@view_config(route_name='taxon',
renderer='json',
request_method='GET',
permission=NO_PERMISSION_REQUIRED)
def getTaxon(request):
session = request.dbsession
taxref_id = request.matchdict.get('taxref_id', None)
table = Base.metadata.tables['TAXREF']
query = select([table]).where(table.c['CD_NOM']==taxref_id)
result = session.execute(query).fetchone()
return {
'taxref_id': result['CD_NOM'],
'vernaculaire': result['NOM_VERN'],
'latin': result['LB_NOM']
} | 0.379493 | 0.171755 |
import argparse
import csv
import logging
import sys
def convert(input_csv, source_sep, dest_sep, output_file, label_col, label_order, quote_char='"'):
"""
Formats the input to the target by changing the separator
"""
label_map = {l: f"{i:03}_{l}" for i, l in enumerate(label_order)}
print(label_map)
with open(input_csv, "r") as p_file:
reader = csv.reader(p_file, delimiter=source_sep, quotechar=quote_char)
header_cols = next(reader)
print(header_cols)
source_lines = list(reader)
label_index = list(filter(lambda x: x[1] == label_col, enumerate(header_cols)))[0][0]
print(label_index)
with open(output_file, "w") as w_file:
writer = csv.writer(w_file, delimiter=dest_sep, quotechar=quote_char)
header_cols[label_index] = "label"
writer.writerow(header_cols)
for line in source_lines:
line[label_index] = label_map[line[label_index]]
writer.writerow(line)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("datafile_csv",
help="The csv file containing predictions")
parser.add_argument("label_col",
help="The name of the label column")
parser.add_argument("labels_in_order_csv",
help="The label names in order (csv) of index in the model. This is work around the default_glue_script")
parser.add_argument("--src_csv_sep",
help="The csv separator for the source", default="\t")
parser.add_argument("--dest_csv_sep",
help="The csv separator for the target", default=",")
parser.add_argument("--output",
help="The output file", required=True)
parser.add_argument("--log-level", help="Log level", default="INFO", choices={"INFO", "WARN", "DEBUG", "ERROR"})
args = parser.parse_args()
return args
def main_run():
args = parse_args()
print(args.__dict__)
# Set up logging
logging.basicConfig(level=logging.getLevelName(args.log_level), handlers=[logging.StreamHandler(sys.stdout)],
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Runs
convert(args.datafile_csv,
args.src_csv_sep,
args.dest_csv_sep,
args.output,
args.label_col,
args.labels_in_order_csv.split(",")
)
if __name__ == '__main__':
main_run() | src/utils/convert_to_csv.py | import argparse
import csv
import logging
import sys
def convert(input_csv, source_sep, dest_sep, output_file, label_col, label_order, quote_char='"'):
"""
Formats the input to the target by changing the separator
"""
label_map = {l: f"{i:03}_{l}" for i, l in enumerate(label_order)}
print(label_map)
with open(input_csv, "r") as p_file:
reader = csv.reader(p_file, delimiter=source_sep, quotechar=quote_char)
header_cols = next(reader)
print(header_cols)
source_lines = list(reader)
label_index = list(filter(lambda x: x[1] == label_col, enumerate(header_cols)))[0][0]
print(label_index)
with open(output_file, "w") as w_file:
writer = csv.writer(w_file, delimiter=dest_sep, quotechar=quote_char)
header_cols[label_index] = "label"
writer.writerow(header_cols)
for line in source_lines:
line[label_index] = label_map[line[label_index]]
writer.writerow(line)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("datafile_csv",
help="The csv file containing predictions")
parser.add_argument("label_col",
help="The name of the label column")
parser.add_argument("labels_in_order_csv",
help="The label names in order (csv) of index in the model. This is work around the default_glue_script")
parser.add_argument("--src_csv_sep",
help="The csv separator for the source", default="\t")
parser.add_argument("--dest_csv_sep",
help="The csv separator for the target", default=",")
parser.add_argument("--output",
help="The output file", required=True)
parser.add_argument("--log-level", help="Log level", default="INFO", choices={"INFO", "WARN", "DEBUG", "ERROR"})
args = parser.parse_args()
return args
def main_run():
args = parse_args()
print(args.__dict__)
# Set up logging
logging.basicConfig(level=logging.getLevelName(args.log_level), handlers=[logging.StreamHandler(sys.stdout)],
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Runs
convert(args.datafile_csv,
args.src_csv_sep,
args.dest_csv_sep,
args.output,
args.label_col,
args.labels_in_order_csv.split(",")
)
if __name__ == '__main__':
main_run() | 0.336222 | 0.199133 |
from tkinter import *
from tkinter import ttk
from tkinter import scrolledtext
from pandas.core.frame import DataFrame
from prettytable import PrettyTable
from parserT28.utils.decorators import singleton
@singleton
class DataWindow(object):
def __init__(self):
self._console = None
self._data = ''
self._headers = []
self._rows = []
@property
def console(self):
return self._console
@property
def data(self):
return self._data
@property
def headers(self):
return self._headers
@property
def rows(self):
return self._rows
def clearProperties(self):
self._data = ''
self._headers = []
self._rows = []
@console.setter
def console(self, frame):
Label(frame, text='Consola', borderwidth=0,
font='Arial 15 bold', width=52, bg='#3c3f41', foreground='#fff').grid(row=3, column=1)
x_scroll = Scrollbar(frame, orient='horizontal')
y_scroll = Scrollbar(frame, orient='vertical')
self._console = Text(frame, borderwidth=0, height=35,
width=70, bg='#1c1c1e', foreground='#9efb01', undo=True, wrap='none', xscrollcommand=x_scroll.set, yscrollcommand=y_scroll.set)
y_scroll.config(command=self._console.yview)
y_scroll.grid(column=1, row=4, sticky='NE', ipady=255, padx=12)
x_scroll.config(command=self._console.xview)
x_scroll.grid(column=1, row=5, sticky='NS', ipadx=255)
def consoleText(self, data):
if self._console is None:
self._data = f"{data}\n\n"
print(f"{data}\n\n")
else:
self._data = f"{data}\n\n"
self._console.insert(INSERT, f"{data}\n\n")
def consoleTable(self, headers: list, rows: list):
table = PrettyTable()
table.field_names = headers
for row in rows:
table.add_row(row)
if self._console is None:
self._headers = headers
for row in rows:
self._rows.append(row)
self._data = f"{table}\n\n"
print(INSERT, f"{table}\n\n")
else:
self._data = f"{table}\n\n"
self._console.insert(INSERT, f"{table}\n\n")
def format_df(self, df: DataFrame):
table = PrettyTable([''] + list(df.columns))
self._headers = list(df.columns)
for row in df.itertuples():
table.add_row(row)
self._rows.append(row)
self._data = f"{str(table)}\n\n"
return str(table)
def format_table_list(self, array: list):
try:
table_value = PrettyTable(array[0])
table_value.add_row(array[1])
self._headers = array[0]
for row in array[1]:
self._rows.append(row)
self._data = f"{str(table_value)}\n\n"
return str(table_value)
except:
desc = "FATAL ERROR, Funciones Select"
# ErrorController().add(34, 'Execution', desc, 0, 0)
def clearConsole(self):
self._console.delete('1.0', END) | bases_2021_1S/Grupo 03/parserT28/views/data_window.py | from tkinter import *
from tkinter import ttk
from tkinter import scrolledtext
from pandas.core.frame import DataFrame
from prettytable import PrettyTable
from parserT28.utils.decorators import singleton
@singleton
class DataWindow(object):
def __init__(self):
self._console = None
self._data = ''
self._headers = []
self._rows = []
@property
def console(self):
return self._console
@property
def data(self):
return self._data
@property
def headers(self):
return self._headers
@property
def rows(self):
return self._rows
def clearProperties(self):
self._data = ''
self._headers = []
self._rows = []
@console.setter
def console(self, frame):
Label(frame, text='Consola', borderwidth=0,
font='Arial 15 bold', width=52, bg='#3c3f41', foreground='#fff').grid(row=3, column=1)
x_scroll = Scrollbar(frame, orient='horizontal')
y_scroll = Scrollbar(frame, orient='vertical')
self._console = Text(frame, borderwidth=0, height=35,
width=70, bg='#1c1c1e', foreground='#9efb01', undo=True, wrap='none', xscrollcommand=x_scroll.set, yscrollcommand=y_scroll.set)
y_scroll.config(command=self._console.yview)
y_scroll.grid(column=1, row=4, sticky='NE', ipady=255, padx=12)
x_scroll.config(command=self._console.xview)
x_scroll.grid(column=1, row=5, sticky='NS', ipadx=255)
def consoleText(self, data):
if self._console is None:
self._data = f"{data}\n\n"
print(f"{data}\n\n")
else:
self._data = f"{data}\n\n"
self._console.insert(INSERT, f"{data}\n\n")
def consoleTable(self, headers: list, rows: list):
table = PrettyTable()
table.field_names = headers
for row in rows:
table.add_row(row)
if self._console is None:
self._headers = headers
for row in rows:
self._rows.append(row)
self._data = f"{table}\n\n"
print(INSERT, f"{table}\n\n")
else:
self._data = f"{table}\n\n"
self._console.insert(INSERT, f"{table}\n\n")
def format_df(self, df: DataFrame):
table = PrettyTable([''] + list(df.columns))
self._headers = list(df.columns)
for row in df.itertuples():
table.add_row(row)
self._rows.append(row)
self._data = f"{str(table)}\n\n"
return str(table)
def format_table_list(self, array: list):
try:
table_value = PrettyTable(array[0])
table_value.add_row(array[1])
self._headers = array[0]
for row in array[1]:
self._rows.append(row)
self._data = f"{str(table_value)}\n\n"
return str(table_value)
except:
desc = "FATAL ERROR, Funciones Select"
# ErrorController().add(34, 'Execution', desc, 0, 0)
def clearConsole(self):
self._console.delete('1.0', END) | 0.478529 | 0.122418 |
from .CardInfo import CardInfo
from woodcutter.src.Card import *
from woodcutter.src.Action import Action
class ADVISOR(CardInfo):
names = ["Advisor", "Advisors", "an Advisor"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class BAKER(CardInfo):
names = ["Baker", "Bakers", "a Baker"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class BUTCHER(CardInfo):
names = ["Butcher", "Butchers", "a Butcher"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CANDLESTICK_MAKER(CardInfo):
names = ["<NAME>", "<NAME>", "a Candlestick Maker"]
types = [Types.ACTION]
cost = [2, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class DOCTOR(CardInfo):
names = ["Doctor", "Doctors", "a Doctor"]
types = [Types.ACTION]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class HERALD(CardInfo):
names = ["Herald", "Heralds", "a Herald"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class JOURNEYMAN(CardInfo):
names = ["Journeyman", "Journeymen", "a Journeyman"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class MASTERPIECE(CardInfo):
names = ["Masterpiece", "Masterpieces", "a Masterpiece"]
types = [Types.TREASURE]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class MERCHANT_GUILD(CardInfo):
names = ["Merchant Guild", "Merchant Guilds", "a Merchant Guild"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class PLAZA(CardInfo):
names = ["Plaza", "Plazas", "a Plaza"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class TAXMAN(CardInfo):
names = ["Taxman", "Taxmen", "a Taxman"]
types = [Types.ACTION, Types.ATTACK]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SOOTHSAYER(CardInfo):
names = ["Soothsayer", "Soothsayers", "<NAME>"]
types = [Types.ACTION, Types.ATTACK]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class STONEMASON(CardInfo):
names = ["Stonemason", "Stonemasons", "<NAME>"]
types = [Types.ACTION]
cost = [2, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state | woodcutter/src/CardActions/Guilds.py | from .CardInfo import CardInfo
from woodcutter.src.Card import *
from woodcutter.src.Action import Action
class ADVISOR(CardInfo):
names = ["Advisor", "Advisors", "an Advisor"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class BAKER(CardInfo):
names = ["Baker", "Bakers", "a Baker"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class BUTCHER(CardInfo):
names = ["Butcher", "Butchers", "a Butcher"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CANDLESTICK_MAKER(CardInfo):
names = ["<NAME>", "<NAME>", "a Candlestick Maker"]
types = [Types.ACTION]
cost = [2, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class DOCTOR(CardInfo):
names = ["Doctor", "Doctors", "a Doctor"]
types = [Types.ACTION]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class HERALD(CardInfo):
names = ["Herald", "Heralds", "a Herald"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class JOURNEYMAN(CardInfo):
names = ["Journeyman", "Journeymen", "a Journeyman"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class MASTERPIECE(CardInfo):
names = ["Masterpiece", "Masterpieces", "a Masterpiece"]
types = [Types.TREASURE]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class MERCHANT_GUILD(CardInfo):
names = ["Merchant Guild", "Merchant Guilds", "a Merchant Guild"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class PLAZA(CardInfo):
names = ["Plaza", "Plazas", "a Plaza"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class TAXMAN(CardInfo):
names = ["Taxman", "Taxmen", "a Taxman"]
types = [Types.ACTION, Types.ATTACK]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SOOTHSAYER(CardInfo):
names = ["Soothsayer", "Soothsayers", "<NAME>"]
types = [Types.ACTION, Types.ATTACK]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class STONEMASON(CardInfo):
names = ["Stonemason", "Stonemasons", "<NAME>"]
types = [Types.ACTION]
cost = [2, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state | 0.357119 | 0.485722 |
from course_lib.Base.BaseRecommender import BaseRecommender
from typing import List
import numpy as np
class HybridPredictionRecommender(BaseRecommender):
models_object: List[BaseRecommender] = []
models_name: List[str] = []
models_aps: List[np.array] = []
def __init__(self, URM_train):
self.model_ap_size = None
self.models_to_be_used = None
super().__init__(URM_train)
def add_fitted_model(self, recommender_name: str, recommender_object: BaseRecommender, recommender_aps: np.array):
'''
Add an already fitted model to the list of model that will be used to compute predictions.
Models are assumed to be fitted on the same URM_train and validated on the same URM_validation.
Also, recommendation average precision, are assumed to refer to the same map user-index.
:param recommender_name: name of the recommender
:param recommender_object: fitted recommended
:param recommender_aps: average precision of the recommender on the validation set
:return: None
'''
if not (self.__verify_aps_consistency__(recommender_aps) and self.__verify_name_consistency__(
recommender_name)):
raise AssertionError("The len of the aps of each recommender should be the same. Moreover, the name"
"should not be in the ones already used")
if len(self.models_name) == 0:
self.model_ap_size = recommender_aps.size
self.models_name.append(recommender_name)
self.models_object.append(recommender_object)
self.models_aps.append(recommender_aps)
def get_number_of_models(self):
return len(self.models_name)
def get_recommender_names(self):
return self.models_name
def __verify_name_consistency__(self, name):
return False if name in self.models_name else True
def __verify_aps_consistency__(self, aps):
'''
Verify that each recommender has the same number of tested recommendations
:param aps: average precision to be checked
:return: True if condition are satisfied, False otherwise
'''
if len(self.models_aps) == 0:
return True
return True if (self.model_ap_size == aps.size) else False
def get_model_to_be_used(self):
if self.models_to_be_used is None:
return self.models_to_be_used
else:
raise RuntimeError("You need to fit the recommender first")
def fit(self):
'''
Compute for each user predicted by the recommenders, the recommender with the highest MAP.
We have a huge list (around 50k) for each recommender stored.
We need to select index of the recommender associated to the highest value in these list, for
each position.
We could get the max of the prediction for each component (i.e. the nparray containing all the maximum values).
-> To do that, we should transform, all the values to a matrix, and take the max on the second axis.
After that, we should build a mask, doing checks on them, and finally, break ties (if any)
:return: None
'''
# Getting the maximum value
print("Retrieving max values...", end="")
matrix = np.array(self.models_aps)
max_values = matrix.max(axis=0) # np array containing the maximum values
print("Done")
print("Building masks...", end="")
# Building the masks
masks = []
for aps in self.models_aps:
res = np.where(aps == max_values, 1, 0)
masks.append(res)
mask_matrix = np.array(masks)
print("Done")
print("Computing model to be used...")
# Now, that we know that, we should build self.model_to_be_used
self.models_to_be_used = mask_matrix.argmax(axis=0)
print("Done")
def recommend(self, user_id_array, cutoff=None, remove_seen_flag=True, items_to_compute=None,
remove_top_pop_flag=False, remove_custom_items_flag=False, return_scores=False):
recommendations = []
junk, scores = self.models_object[0].recommend(user_id_array, cutoff=cutoff,
remove_custom_items_flag=remove_custom_items_flag,
items_to_compute=items_to_compute,
remove_top_pop_flag=remove_top_pop_flag,
return_scores=True)
# Building recommendations and scores
for i in range(len(user_id_array)):
rec_idx = self.models_to_be_used[user_id_array[i]]
if return_scores:
recommendation_for_user = self.models_object[rec_idx].recommend(user_id_array[i],
cutoff=cutoff,
remove_custom_items_flag=remove_custom_items_flag,
items_to_compute=items_to_compute,
remove_top_pop_flag=remove_top_pop_flag,
return_scores=False)
else:
recommendation_for_user = self.models_object[rec_idx].recommend(user_id_array[i], cutoff=cutoff,
remove_custom_items_flag=remove_custom_items_flag,
items_to_compute=items_to_compute,
remove_top_pop_flag=remove_top_pop_flag,
return_scores=False)
recommendations.append(recommendation_for_user)
# Return predictions
if return_scores:
return recommendations, scores
else:
return recommendations | src/model/HybridRecommender/HybridPredictionRecommender.py | from course_lib.Base.BaseRecommender import BaseRecommender
from typing import List
import numpy as np
class HybridPredictionRecommender(BaseRecommender):
models_object: List[BaseRecommender] = []
models_name: List[str] = []
models_aps: List[np.array] = []
def __init__(self, URM_train):
self.model_ap_size = None
self.models_to_be_used = None
super().__init__(URM_train)
def add_fitted_model(self, recommender_name: str, recommender_object: BaseRecommender, recommender_aps: np.array):
'''
Add an already fitted model to the list of model that will be used to compute predictions.
Models are assumed to be fitted on the same URM_train and validated on the same URM_validation.
Also, recommendation average precision, are assumed to refer to the same map user-index.
:param recommender_name: name of the recommender
:param recommender_object: fitted recommended
:param recommender_aps: average precision of the recommender on the validation set
:return: None
'''
if not (self.__verify_aps_consistency__(recommender_aps) and self.__verify_name_consistency__(
recommender_name)):
raise AssertionError("The len of the aps of each recommender should be the same. Moreover, the name"
"should not be in the ones already used")
if len(self.models_name) == 0:
self.model_ap_size = recommender_aps.size
self.models_name.append(recommender_name)
self.models_object.append(recommender_object)
self.models_aps.append(recommender_aps)
def get_number_of_models(self):
return len(self.models_name)
def get_recommender_names(self):
return self.models_name
def __verify_name_consistency__(self, name):
return False if name in self.models_name else True
def __verify_aps_consistency__(self, aps):
'''
Verify that each recommender has the same number of tested recommendations
:param aps: average precision to be checked
:return: True if condition are satisfied, False otherwise
'''
if len(self.models_aps) == 0:
return True
return True if (self.model_ap_size == aps.size) else False
def get_model_to_be_used(self):
if self.models_to_be_used is None:
return self.models_to_be_used
else:
raise RuntimeError("You need to fit the recommender first")
def fit(self):
'''
Compute for each user predicted by the recommenders, the recommender with the highest MAP.
We have a huge list (around 50k) for each recommender stored.
We need to select index of the recommender associated to the highest value in these list, for
each position.
We could get the max of the prediction for each component (i.e. the nparray containing all the maximum values).
-> To do that, we should transform, all the values to a matrix, and take the max on the second axis.
After that, we should build a mask, doing checks on them, and finally, break ties (if any)
:return: None
'''
# Getting the maximum value
print("Retrieving max values...", end="")
matrix = np.array(self.models_aps)
max_values = matrix.max(axis=0) # np array containing the maximum values
print("Done")
print("Building masks...", end="")
# Building the masks
masks = []
for aps in self.models_aps:
res = np.where(aps == max_values, 1, 0)
masks.append(res)
mask_matrix = np.array(masks)
print("Done")
print("Computing model to be used...")
# Now, that we know that, we should build self.model_to_be_used
self.models_to_be_used = mask_matrix.argmax(axis=0)
print("Done")
def recommend(self, user_id_array, cutoff=None, remove_seen_flag=True, items_to_compute=None,
remove_top_pop_flag=False, remove_custom_items_flag=False, return_scores=False):
recommendations = []
junk, scores = self.models_object[0].recommend(user_id_array, cutoff=cutoff,
remove_custom_items_flag=remove_custom_items_flag,
items_to_compute=items_to_compute,
remove_top_pop_flag=remove_top_pop_flag,
return_scores=True)
# Building recommendations and scores
for i in range(len(user_id_array)):
rec_idx = self.models_to_be_used[user_id_array[i]]
if return_scores:
recommendation_for_user = self.models_object[rec_idx].recommend(user_id_array[i],
cutoff=cutoff,
remove_custom_items_flag=remove_custom_items_flag,
items_to_compute=items_to_compute,
remove_top_pop_flag=remove_top_pop_flag,
return_scores=False)
else:
recommendation_for_user = self.models_object[rec_idx].recommend(user_id_array[i], cutoff=cutoff,
remove_custom_items_flag=remove_custom_items_flag,
items_to_compute=items_to_compute,
remove_top_pop_flag=remove_top_pop_flag,
return_scores=False)
recommendations.append(recommendation_for_user)
# Return predictions
if return_scores:
return recommendations, scores
else:
return recommendations | 0.800302 | 0.533397 |
import unittest
import pandas as pd
from tqdm import tqdm
import oscml.data.dataset
import oscml.data.dataset_cep
import oscml.data.dataset_hopv15
import oscml.utils.util
from oscml.utils.util import smiles2mol
class TestData(unittest.TestCase):
def assert_PCE_values(self, df_100, df):
for i in range(len(df_100)):
df_100_pce = df_100['id'].iloc[i]
pce = df['id'].iloc[i]
self.assertEqual(df_100_pce, pce)
def assertEqualArray(self, a, b):
self.assertEqual(len(a), len(b))
for i in range(len(a)):
self.assertEqual(a[i],b[i])
@classmethod
def setUpClass(cls):
print()
print()
print('###################################')
print('# Data Tests #')
print('###################################')
print()
print()
oscml.utils.util.init_logging('./tests', './tests/tests_logs')
def setUp(self):
self.path_CEPDB = oscml.data.dataset.path_cepdb_valid_smiles()
self.path_CEPDB_25000 = oscml.data.dataset.path_cepdb_25000()
def test_dataset_read_cep_25000(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_read_cep_25000 -')
print('------------------------------------------------')
print()
print()
df_train, df_val, df_test = oscml.data.dataset.read_and_split(self.path_CEPDB_25000)
assert len(df_train) == 15000
assert len(df_val) == 5000
assert len(df_test) == 5000
def test_dataset_transform_cep_25000(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_transform_cep_25000 -')
print('------------------------------------------------')
print()
print()
df_train, _, _ = oscml.data.dataset.read_and_split(self.path_CEPDB_25000)
transformer = oscml.data.dataset.create_transformer(df_train, column_target='pce', column_x='SMILES_str')
self.assertAlmostEqual(4.120434375131375, transformer.target_mean, 1)
self.assertAlmostEqual(2.405561853258728, transformer.target_std, 1)
def test_dataset_update_state(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_update_state -')
print('------------------------------------------------')
print()
print()
mol2seq = oscml.features.weisfeilerlehman.Mol2seq_WL(radius=1)
info = oscml.data.dataset.DatasetInfo(mol2seq=mol2seq)
smiles = '[SiH2]1C=c2c3cc([se]c3c3cc4ccccc4cc3c2=C1)-c1cncs1'
mol = smiles2mol(smiles)
info.update(mol, smiles)
self.assertEqual(38, info.max_molecule_size)
self.assertEqual(50, info.max_smiles_length)
self.assertEqual(16, len(info.mol2seq.fragment_dict))
self.assertEqual(7, len(info.node_types))
smiles = '[SiH2]1cc2cccc(-c3ccc(-c4scc5[nH]ccc45)c4nsnc34)c2c1'
mol = smiles2mol(smiles)
info.update(mol, smiles)
self.assertEqual(39, info.max_molecule_size)
self.assertEqual(52, info.max_smiles_length)
self.assertEqual(7, len(info.node_types))
def test_dataset_info_for_cepdb_25000(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_info_for_cepdb_25000 -')
print('------------------------------------------------')
print()
print()
# check the correct size of dictionaries
info = oscml.data.dataset_cep.create_dataset_info_for_CEP25000()
number_node_types = len(info.node_types)
self.assertEqual(8, number_node_types)
number_fragment_types = len(info.mol2seq.fragment_dict)
self.assertEqual(56, number_fragment_types)
# read subset from CEPDB
df = pd.read_csv(self.path_CEPDB_25000)
for i in tqdm(range(len(df))):
smiles = df.iloc[i]['SMILES_str']
m = smiles2mol(smiles)
info.update(m, smiles)
# check that there are no additional node or fragment types
number_node_types = len(info.node_types)
self.assertEqual(8, number_node_types)
number_fragment_types = len(info.mol2seq.fragment_dict)
self.assertEqual(56, number_fragment_types)
def test_dataset_info_for_hopv15(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_info_for_hopv15 -')
print('------------------------------------------------')
print()
print()
# check the correct size of dictionaries
info = oscml.data.dataset_hopv15.create_dataset_info_for_HOPV15()
number_node_types = len(info.node_types)
self.assertEqual(12, number_node_types)
number_fragment_types = len(info.mol2seq.fragment_dict)
self.assertEqual(150, number_fragment_types)
# the fragments and node type were added to existing ones from CEP DB
# compare the results when starting from scratich
path = oscml.data.dataset.path_hopv_15()
info_from_scratch = oscml.data.dataset_hopv15.generate_dictionaries(path, 'smiles', None)
number_fragment_types = len(info_from_scratch.mol2seq.fragment_dict)
self.assertEqual(134, number_fragment_types)
# that means there are 16 fragments in CEP DB that are not used in HOPV15
def test_sample_without_replacement(self):
df = pd.read_csv(self.path_CEPDB)
df_cleaned = oscml.data.dataset_cep.skip_all_small_pce_values(df.copy(), 0.0001)
df_train, _ = oscml.data.dataset_cep.sample_without_replacement(df_cleaned, number_samples=1000, step=1.)
self.assertEqual(1000, len(df_train))
df_cleaned = oscml.data.dataset_cep.skip_all_small_pce_values(df.copy(), 0.0001)
df_train, df_val, df_test = oscml.data.dataset_cep.sample_without_replacement(df_cleaned, number_samples=[1000, 200, 300], step=.2)
self.assertEqual(1000, len(df_train))
self.assertEqual(200, len(df_val))
self.assertEqual(300, len(df_test))
def test_store_CEP_cleaned_and_stratified(self):
df = oscml.data.dataset_cep.store_CEP_cleaned_and_stratified(
self.path_CEPDB, dst=None, number_samples=[15000, 5000, 5000], threshold_skip=0.0001)
self.assertEqual(25000, len(df))
mask = (df['ml_phase'] == 'train')
self.assertEqual(15000, len(df[mask]))
def test_add_k_fold_columns(self):
file = './data/processed/HOPV_15_revised_2_processed_homo.csv'
df = pd.read_csv(file)
k = 5
oscml.data.dataset.add_k_fold_columns(df, k, seed=200, column_name_prefix='ml_phase')
size = len(df)
mask = [False]*size
for i in range(k):
column = 'ml_phase_fold_' + str(i)
mask = (mask | (df[column] == 'test'))
self.assertTrue(all(mask))
def test_add_fingerprint_columns(self):
file = './data/processed/HOPV_15_revised_2_processed_homo.csv'
df = pd.read_csv(file)[:4]
print(df['smiles'])
nBits = 128
expected_number_columns = len(df.columns) + 128
df = oscml.data.dataset.add_fingerprint_columns(df, 'smiles', nBits, 2)
self.assertEqualArray([0,0,0,0], df['fp0'].to_numpy())
self.assertEqualArray([1,0,1,1], df['fp3'].to_numpy())
self.assertEqual(expected_number_columns, len(df.columns))
if __name__ == '__main__':
unittest.main()
#suite = unittest.TestSuite()
#suite.addTest(TestData('test_dataset_info_for_cepdb_25000'))
#suite.addTest(TestData('test_dataset_info_for_hopv15'))
#suite.addTest(TestData('test_dataset_transform_cep_25000'))
#suite.addTest(TestData('test_dataset_skip_invalid_smiles'))
#suite.addTest(TestData('test_add_k_fold_columns'))
#suite.addTest(TestData('test_add_fingerprint_columns'))
#runner = unittest.TextTestRunner()
#runner.run(suite) | tests/test_data.py | import unittest
import pandas as pd
from tqdm import tqdm
import oscml.data.dataset
import oscml.data.dataset_cep
import oscml.data.dataset_hopv15
import oscml.utils.util
from oscml.utils.util import smiles2mol
class TestData(unittest.TestCase):
def assert_PCE_values(self, df_100, df):
for i in range(len(df_100)):
df_100_pce = df_100['id'].iloc[i]
pce = df['id'].iloc[i]
self.assertEqual(df_100_pce, pce)
def assertEqualArray(self, a, b):
self.assertEqual(len(a), len(b))
for i in range(len(a)):
self.assertEqual(a[i],b[i])
@classmethod
def setUpClass(cls):
print()
print()
print('###################################')
print('# Data Tests #')
print('###################################')
print()
print()
oscml.utils.util.init_logging('./tests', './tests/tests_logs')
def setUp(self):
self.path_CEPDB = oscml.data.dataset.path_cepdb_valid_smiles()
self.path_CEPDB_25000 = oscml.data.dataset.path_cepdb_25000()
def test_dataset_read_cep_25000(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_read_cep_25000 -')
print('------------------------------------------------')
print()
print()
df_train, df_val, df_test = oscml.data.dataset.read_and_split(self.path_CEPDB_25000)
assert len(df_train) == 15000
assert len(df_val) == 5000
assert len(df_test) == 5000
def test_dataset_transform_cep_25000(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_transform_cep_25000 -')
print('------------------------------------------------')
print()
print()
df_train, _, _ = oscml.data.dataset.read_and_split(self.path_CEPDB_25000)
transformer = oscml.data.dataset.create_transformer(df_train, column_target='pce', column_x='SMILES_str')
self.assertAlmostEqual(4.120434375131375, transformer.target_mean, 1)
self.assertAlmostEqual(2.405561853258728, transformer.target_std, 1)
def test_dataset_update_state(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_update_state -')
print('------------------------------------------------')
print()
print()
mol2seq = oscml.features.weisfeilerlehman.Mol2seq_WL(radius=1)
info = oscml.data.dataset.DatasetInfo(mol2seq=mol2seq)
smiles = '[SiH2]1C=c2c3cc([se]c3c3cc4ccccc4cc3c2=C1)-c1cncs1'
mol = smiles2mol(smiles)
info.update(mol, smiles)
self.assertEqual(38, info.max_molecule_size)
self.assertEqual(50, info.max_smiles_length)
self.assertEqual(16, len(info.mol2seq.fragment_dict))
self.assertEqual(7, len(info.node_types))
smiles = '[SiH2]1cc2cccc(-c3ccc(-c4scc5[nH]ccc45)c4nsnc34)c2c1'
mol = smiles2mol(smiles)
info.update(mol, smiles)
self.assertEqual(39, info.max_molecule_size)
self.assertEqual(52, info.max_smiles_length)
self.assertEqual(7, len(info.node_types))
def test_dataset_info_for_cepdb_25000(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_info_for_cepdb_25000 -')
print('------------------------------------------------')
print()
print()
# check the correct size of dictionaries
info = oscml.data.dataset_cep.create_dataset_info_for_CEP25000()
number_node_types = len(info.node_types)
self.assertEqual(8, number_node_types)
number_fragment_types = len(info.mol2seq.fragment_dict)
self.assertEqual(56, number_fragment_types)
# read subset from CEPDB
df = pd.read_csv(self.path_CEPDB_25000)
for i in tqdm(range(len(df))):
smiles = df.iloc[i]['SMILES_str']
m = smiles2mol(smiles)
info.update(m, smiles)
# check that there are no additional node or fragment types
number_node_types = len(info.node_types)
self.assertEqual(8, number_node_types)
number_fragment_types = len(info.mol2seq.fragment_dict)
self.assertEqual(56, number_fragment_types)
def test_dataset_info_for_hopv15(self):
print()
print()
print('------------------------------------------------')
print('- Test: test_dataset_info_for_hopv15 -')
print('------------------------------------------------')
print()
print()
# check the correct size of dictionaries
info = oscml.data.dataset_hopv15.create_dataset_info_for_HOPV15()
number_node_types = len(info.node_types)
self.assertEqual(12, number_node_types)
number_fragment_types = len(info.mol2seq.fragment_dict)
self.assertEqual(150, number_fragment_types)
# the fragments and node type were added to existing ones from CEP DB
# compare the results when starting from scratich
path = oscml.data.dataset.path_hopv_15()
info_from_scratch = oscml.data.dataset_hopv15.generate_dictionaries(path, 'smiles', None)
number_fragment_types = len(info_from_scratch.mol2seq.fragment_dict)
self.assertEqual(134, number_fragment_types)
# that means there are 16 fragments in CEP DB that are not used in HOPV15
def test_sample_without_replacement(self):
df = pd.read_csv(self.path_CEPDB)
df_cleaned = oscml.data.dataset_cep.skip_all_small_pce_values(df.copy(), 0.0001)
df_train, _ = oscml.data.dataset_cep.sample_without_replacement(df_cleaned, number_samples=1000, step=1.)
self.assertEqual(1000, len(df_train))
df_cleaned = oscml.data.dataset_cep.skip_all_small_pce_values(df.copy(), 0.0001)
df_train, df_val, df_test = oscml.data.dataset_cep.sample_without_replacement(df_cleaned, number_samples=[1000, 200, 300], step=.2)
self.assertEqual(1000, len(df_train))
self.assertEqual(200, len(df_val))
self.assertEqual(300, len(df_test))
def test_store_CEP_cleaned_and_stratified(self):
df = oscml.data.dataset_cep.store_CEP_cleaned_and_stratified(
self.path_CEPDB, dst=None, number_samples=[15000, 5000, 5000], threshold_skip=0.0001)
self.assertEqual(25000, len(df))
mask = (df['ml_phase'] == 'train')
self.assertEqual(15000, len(df[mask]))
def test_add_k_fold_columns(self):
file = './data/processed/HOPV_15_revised_2_processed_homo.csv'
df = pd.read_csv(file)
k = 5
oscml.data.dataset.add_k_fold_columns(df, k, seed=200, column_name_prefix='ml_phase')
size = len(df)
mask = [False]*size
for i in range(k):
column = 'ml_phase_fold_' + str(i)
mask = (mask | (df[column] == 'test'))
self.assertTrue(all(mask))
def test_add_fingerprint_columns(self):
file = './data/processed/HOPV_15_revised_2_processed_homo.csv'
df = pd.read_csv(file)[:4]
print(df['smiles'])
nBits = 128
expected_number_columns = len(df.columns) + 128
df = oscml.data.dataset.add_fingerprint_columns(df, 'smiles', nBits, 2)
self.assertEqualArray([0,0,0,0], df['fp0'].to_numpy())
self.assertEqualArray([1,0,1,1], df['fp3'].to_numpy())
self.assertEqual(expected_number_columns, len(df.columns))
if __name__ == '__main__':
unittest.main()
#suite = unittest.TestSuite()
#suite.addTest(TestData('test_dataset_info_for_cepdb_25000'))
#suite.addTest(TestData('test_dataset_info_for_hopv15'))
#suite.addTest(TestData('test_dataset_transform_cep_25000'))
#suite.addTest(TestData('test_dataset_skip_invalid_smiles'))
#suite.addTest(TestData('test_add_k_fold_columns'))
#suite.addTest(TestData('test_add_fingerprint_columns'))
#runner = unittest.TextTestRunner()
#runner.run(suite) | 0.340485 | 0.457621 |
def SPV_Comment_dict(Set):
if Set == 1:
Set_dic = dict([
('Phi_Shift_476', 'Phase control NEH'),
('Phi_Shift_ps' , 'Phase shift FEH'),
('Cav_1' , 'Cav 1 '),
('Cav_2' , 'Cav 2 ')
])
else:
Set_dic = dict([
('Phi_Shift_476', 'Phase control NEH'),
('Phi_Shift_ps' , 'Phase shift FEH'),
('Cav_1' , 'Cav 3 '),
('Cav_2' , 'Cav 4 ')
])
Comment_val = dict([
('Cav_PV_Q_Max' , ['Full signal charge' ,'pC', 2]),
('Calc_PV_Time_Ctrl' , ['Time control' ,'ps', 3]),
('Calc_PV_Amp_max' , ['Amplifier charge threshold', 'pC', 1]),
('Calc_PV_DAC_Scale' , ['DAC scale' ,'V', 2]),
('Calc_PV_Phi_jump_max' , ['Phase jump tolerance' ,'ps', 2]),
('Ele_PV_Phi_Shift_476_Deg' , [Set_dic['Phi_Shift_476'] ,'rad476', 3]),
('Ele_PV_Phi_Shift_ps' , [Set_dic['Phi_Shift_ps'] ,'ps', 3]),
('Calc_PV_Phi_diff' , ['(Cav1 - Cav2) noise' ,'ps', 4]),
('Calc_PV_out_diffs' , ['Difference signals' ,'ps', 3]),
('Cav_PV_Scale1' , [Set_dic['Cav_1']+'scale (in)' ,'arb', 2]),
('Cav_PV_Scale2' , [Set_dic['Cav_2']+'scale (in)' ,'arb', 2]),
('Cav_PV_Offset1' , [Set_dic['Cav_1']+'phase offset (in)' ,'arb', 2]),
('Cav_PV_Offset2' , [Set_dic['Cav_2']+'phase offset (in)' ,'arb', 2]),
('Ele_PV_Cav_Gain1' , [Set_dic['Cav_1']+'attenuator (in)' , '1:15', 0]),
('Ele_PV_Cav_Gain2' , [Set_dic['Cav_2']+'attenuator (in)' , '1:15', 0]),
('Calc_PV_Fbck_Gain1' , [Set_dic['Cav_1']+'feedback gain (in)', 'arb' , 4]),
('Calc_PV_Fbck_Gain2' , [Set_dic['Cav_2']+'feedback gain (in)', 'arb' , 4]),
('Calc_PV_StartTime1' , [Set_dic['Cav_1']+'start time (in)', 'ps' , 3]),
('Calc_PV_StartTime2' , [Set_dic['Cav_2']+'start time (in)', 'ps' , 3]),
('Cav_PV_Charge1' , [Set_dic['Cav_1']+'charge (out)' , 'pC' , 2]),
('Cav_PV_Charge2' , [Set_dic['Cav_2']+'charge (out)' , 'pC' , 2]),
('Cav_PV_Time1' , [Set_dic['Cav_1']+'time (out)' , 'ps' , 3]),
('Cav_PV_Time2' , [Set_dic['Cav_2']+'time (out)' , 'ps' , 3]),
('Cav_PV_Freq1' , [Set_dic['Cav_1']+'frequency-2805 (out)' , 'MHz' , 4]),
('Cav_PV_Freq2' , [Set_dic['Cav_2']+'frequency-2805 (out)' , 'MHz' , 4]),
('Cav_PV_MaxCounts1' , [Set_dic['Cav_1']+'max dig counts (out)' , 'cts' , 0]),
('Cav_PV_MaxCounts2' , [Set_dic['Cav_2']+'max dig counts (out)' , 'cts' , 0]),
('Cav_PV_MaxCounts2' , [Set_dic['Cav_2']+'max dig counts (out)' , 'cts' , 0]),
('Calc_PV_Time_Std1' , [Set_dic['Cav_1']+'std deviation (out)' , 'ps' , 3]),
('Calc_PV_Time_Std2' , [Set_dic['Cav_2']+'std deviation (out)' , 'ps' , 3]),
('Calc_PV_Time_Diff1' , [Set_dic['Cav_1']+'diff to Cav 1 (out)' , 'ps' , 3]),
('Calc_PV_Time_Diff2' , [Set_dic['Cav_2']+'diff to Cav 1 (out)' , 'ps' , 3]),
('Q1' , [Set_dic['Cav_1']+'Q (out)' , 'arb' , 1]),
('Q2' , [Set_dic['Cav_2']+'Q (out)' , 'arb' , 1])
])
return Comment_val | python/SPV_Comment_dict.py | def SPV_Comment_dict(Set):
if Set == 1:
Set_dic = dict([
('Phi_Shift_476', 'Phase control NEH'),
('Phi_Shift_ps' , 'Phase shift FEH'),
('Cav_1' , 'Cav 1 '),
('Cav_2' , 'Cav 2 ')
])
else:
Set_dic = dict([
('Phi_Shift_476', 'Phase control NEH'),
('Phi_Shift_ps' , 'Phase shift FEH'),
('Cav_1' , 'Cav 3 '),
('Cav_2' , 'Cav 4 ')
])
Comment_val = dict([
('Cav_PV_Q_Max' , ['Full signal charge' ,'pC', 2]),
('Calc_PV_Time_Ctrl' , ['Time control' ,'ps', 3]),
('Calc_PV_Amp_max' , ['Amplifier charge threshold', 'pC', 1]),
('Calc_PV_DAC_Scale' , ['DAC scale' ,'V', 2]),
('Calc_PV_Phi_jump_max' , ['Phase jump tolerance' ,'ps', 2]),
('Ele_PV_Phi_Shift_476_Deg' , [Set_dic['Phi_Shift_476'] ,'rad476', 3]),
('Ele_PV_Phi_Shift_ps' , [Set_dic['Phi_Shift_ps'] ,'ps', 3]),
('Calc_PV_Phi_diff' , ['(Cav1 - Cav2) noise' ,'ps', 4]),
('Calc_PV_out_diffs' , ['Difference signals' ,'ps', 3]),
('Cav_PV_Scale1' , [Set_dic['Cav_1']+'scale (in)' ,'arb', 2]),
('Cav_PV_Scale2' , [Set_dic['Cav_2']+'scale (in)' ,'arb', 2]),
('Cav_PV_Offset1' , [Set_dic['Cav_1']+'phase offset (in)' ,'arb', 2]),
('Cav_PV_Offset2' , [Set_dic['Cav_2']+'phase offset (in)' ,'arb', 2]),
('Ele_PV_Cav_Gain1' , [Set_dic['Cav_1']+'attenuator (in)' , '1:15', 0]),
('Ele_PV_Cav_Gain2' , [Set_dic['Cav_2']+'attenuator (in)' , '1:15', 0]),
('Calc_PV_Fbck_Gain1' , [Set_dic['Cav_1']+'feedback gain (in)', 'arb' , 4]),
('Calc_PV_Fbck_Gain2' , [Set_dic['Cav_2']+'feedback gain (in)', 'arb' , 4]),
('Calc_PV_StartTime1' , [Set_dic['Cav_1']+'start time (in)', 'ps' , 3]),
('Calc_PV_StartTime2' , [Set_dic['Cav_2']+'start time (in)', 'ps' , 3]),
('Cav_PV_Charge1' , [Set_dic['Cav_1']+'charge (out)' , 'pC' , 2]),
('Cav_PV_Charge2' , [Set_dic['Cav_2']+'charge (out)' , 'pC' , 2]),
('Cav_PV_Time1' , [Set_dic['Cav_1']+'time (out)' , 'ps' , 3]),
('Cav_PV_Time2' , [Set_dic['Cav_2']+'time (out)' , 'ps' , 3]),
('Cav_PV_Freq1' , [Set_dic['Cav_1']+'frequency-2805 (out)' , 'MHz' , 4]),
('Cav_PV_Freq2' , [Set_dic['Cav_2']+'frequency-2805 (out)' , 'MHz' , 4]),
('Cav_PV_MaxCounts1' , [Set_dic['Cav_1']+'max dig counts (out)' , 'cts' , 0]),
('Cav_PV_MaxCounts2' , [Set_dic['Cav_2']+'max dig counts (out)' , 'cts' , 0]),
('Cav_PV_MaxCounts2' , [Set_dic['Cav_2']+'max dig counts (out)' , 'cts' , 0]),
('Calc_PV_Time_Std1' , [Set_dic['Cav_1']+'std deviation (out)' , 'ps' , 3]),
('Calc_PV_Time_Std2' , [Set_dic['Cav_2']+'std deviation (out)' , 'ps' , 3]),
('Calc_PV_Time_Diff1' , [Set_dic['Cav_1']+'diff to Cav 1 (out)' , 'ps' , 3]),
('Calc_PV_Time_Diff2' , [Set_dic['Cav_2']+'diff to Cav 1 (out)' , 'ps' , 3]),
('Q1' , [Set_dic['Cav_1']+'Q (out)' , 'arb' , 1]),
('Q2' , [Set_dic['Cav_2']+'Q (out)' , 'arb' , 1])
])
return Comment_val | 0.172974 | 0.267277 |
import logging, os
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from celery import Celery
from config import Config
# Monolith design
# Video Player (On Raspberry Pi Video Device (Endpoint))
# - Plays videos as instructed
# Video Manager (Celery Task)
# - When Video PLAYS, rPi POSTs to queue NEXT video
# Video Finder This App: videos
# - Searches for and records all videos
# Video Scanner (Celery Task)
# - Scans for videos and adds them to database
# Controller This App: controller
# Frontend This App: frontend
# User Management This App: users
# Player Management This App: players
# Channel Management This App: channels
app = Flask(__name__)
app.config.from_object(Config)
celery = Celery(__name__, broker=Config.CELERY_BROKER_URL)
celery.conf.update(app.config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'auth.login'
bootstrap = Bootstrap(app)
from project.web import bp as web_bp
from project.auth import bp as auth_bp
from project.videos import bp as videos_bp
from project.channels import bp as channels_bp
from project.endpoint import bp as endpoints_bp
from project.errors import bp as errors_bp
app.register_blueprint(web_bp, url_prefix='/web')
app.register_blueprint(auth_bp, url_prefix='/auth')
app.register_blueprint(videos_bp, url_prefix='/api/videos')
app.register_blueprint(channels_bp, url_prefix='/api/channels')
app.register_blueprint(endpoints_bp, url_prefix='/api/endpoint')
app.register_blueprint(errors_bp)
if not app.debug:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/video_finder.log', maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
from project import models | project/__init__.py | import logging, os
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from celery import Celery
from config import Config
# Monolith design
# Video Player (On Raspberry Pi Video Device (Endpoint))
# - Plays videos as instructed
# Video Manager (Celery Task)
# - When Video PLAYS, rPi POSTs to queue NEXT video
# Video Finder This App: videos
# - Searches for and records all videos
# Video Scanner (Celery Task)
# - Scans for videos and adds them to database
# Controller This App: controller
# Frontend This App: frontend
# User Management This App: users
# Player Management This App: players
# Channel Management This App: channels
app = Flask(__name__)
app.config.from_object(Config)
celery = Celery(__name__, broker=Config.CELERY_BROKER_URL)
celery.conf.update(app.config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'auth.login'
bootstrap = Bootstrap(app)
from project.web import bp as web_bp
from project.auth import bp as auth_bp
from project.videos import bp as videos_bp
from project.channels import bp as channels_bp
from project.endpoint import bp as endpoints_bp
from project.errors import bp as errors_bp
app.register_blueprint(web_bp, url_prefix='/web')
app.register_blueprint(auth_bp, url_prefix='/auth')
app.register_blueprint(videos_bp, url_prefix='/api/videos')
app.register_blueprint(channels_bp, url_prefix='/api/channels')
app.register_blueprint(endpoints_bp, url_prefix='/api/endpoint')
app.register_blueprint(errors_bp)
if not app.debug:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/video_finder.log', maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
from project import models | 0.22627 | 0.045121 |
import warnings
import numpy as np
from magicgui.widgets import Table
from napari.types import ImageData, LabelsData, LayerDataTuple
from napari import Viewer
from pandas import DataFrame
from qtpy.QtWidgets import QTableWidget, QTableWidgetItem, QWidget, QGridLayout, QPushButton, QFileDialog
from skimage.measure import regionprops_table
def regionprops(image: ImageData, labels: LabelsData, napari_viewer : Viewer, size : bool = True, intensity : bool = True, perimeter : bool = False, shape : bool = False, position : bool = False, moments : bool = False):
"""
Adds a table widget to a given napari viewer with quantitative analysis results derived from an image-labelimage pair.
"""
if image is not None and labels is not None:
properties = ['label']
extra_properties = []
dimensions = len(image.shape)
if size:
properties = properties + ['area', 'bbox_area', 'equivalent_diameter']
if dimensions == 2:
properties = properties + ['convex_area']
if intensity:
properties = properties + ['max_intensity', 'mean_intensity', 'min_intensity']
# arguments must be in the specified order, matching regionprops
def standard_deviation_intensity(region, intensities):
return np.std(intensities[region])
extra_properties.append(standard_deviation_intensity)
if perimeter and dimensions == 2:
properties = properties + ['perimeter', 'perimeter_crofton']
if shape:
properties = properties + ['major_axis_length', 'minor_axis_length', 'extent', 'local_centroid']
if dimensions == 2:
properties = properties + ['solidity', 'orientation', 'eccentricity', 'feret_diameter_max']
if position:
properties = properties + ['centroid', 'bbox', 'weighted_centroid']
if moments:
properties = properties + ['moments', 'moments_central', 'moments_normalized']
if dimensions == 2:
properties = properties + ['moments_hu']
# todo:
# euler_number
# weighted_local_centroid
# weighted_moments
# weighted_moments_central
# weighted_moments_hu
# weighted_moments_normalized
# quantitative analysis using scikit-image's regionprops
table = regionprops_table(np.asarray(labels).astype(int), intensity_image=np.asarray(image),
properties=properties, extra_properties=extra_properties)
# turn table into a widget
dock_widget = table_to_widget(table)
# add widget to napari
napari_viewer.window.add_dock_widget(dock_widget, area='right')
else:
warnings.warn("Image and labels must be set.")
def table_to_widget(table: dict) -> QWidget:
"""
Takes a table given as dictionary with strings as keys and numeric arrays as values and returns a QWidget which
contains a QTableWidget with that data.
"""
view = Table(value=table)
copy_button = QPushButton("Copy to clipboard")
@copy_button.clicked.connect
def copy_trigger():
view.to_dataframe().to_clipboard()
save_button = QPushButton("Save as csv...")
@save_button.clicked.connect
def save_trigger():
filename, _ = QFileDialog.getSaveFileName(save_button, "Save as csv...", ".", "*.csv")
view.to_dataframe().to_csv(filename)
widget = QWidget()
widget.setWindowTitle("region properties")
widget.setLayout(QGridLayout())
widget.layout().addWidget(copy_button)
widget.layout().addWidget(save_button)
widget.layout().addWidget(view.native)
return widget | napari_feature_visualization/_regionprops.py | import warnings
import numpy as np
from magicgui.widgets import Table
from napari.types import ImageData, LabelsData, LayerDataTuple
from napari import Viewer
from pandas import DataFrame
from qtpy.QtWidgets import QTableWidget, QTableWidgetItem, QWidget, QGridLayout, QPushButton, QFileDialog
from skimage.measure import regionprops_table
def regionprops(image: ImageData, labels: LabelsData, napari_viewer : Viewer, size : bool = True, intensity : bool = True, perimeter : bool = False, shape : bool = False, position : bool = False, moments : bool = False):
"""
Adds a table widget to a given napari viewer with quantitative analysis results derived from an image-labelimage pair.
"""
if image is not None and labels is not None:
properties = ['label']
extra_properties = []
dimensions = len(image.shape)
if size:
properties = properties + ['area', 'bbox_area', 'equivalent_diameter']
if dimensions == 2:
properties = properties + ['convex_area']
if intensity:
properties = properties + ['max_intensity', 'mean_intensity', 'min_intensity']
# arguments must be in the specified order, matching regionprops
def standard_deviation_intensity(region, intensities):
return np.std(intensities[region])
extra_properties.append(standard_deviation_intensity)
if perimeter and dimensions == 2:
properties = properties + ['perimeter', 'perimeter_crofton']
if shape:
properties = properties + ['major_axis_length', 'minor_axis_length', 'extent', 'local_centroid']
if dimensions == 2:
properties = properties + ['solidity', 'orientation', 'eccentricity', 'feret_diameter_max']
if position:
properties = properties + ['centroid', 'bbox', 'weighted_centroid']
if moments:
properties = properties + ['moments', 'moments_central', 'moments_normalized']
if dimensions == 2:
properties = properties + ['moments_hu']
# todo:
# euler_number
# weighted_local_centroid
# weighted_moments
# weighted_moments_central
# weighted_moments_hu
# weighted_moments_normalized
# quantitative analysis using scikit-image's regionprops
table = regionprops_table(np.asarray(labels).astype(int), intensity_image=np.asarray(image),
properties=properties, extra_properties=extra_properties)
# turn table into a widget
dock_widget = table_to_widget(table)
# add widget to napari
napari_viewer.window.add_dock_widget(dock_widget, area='right')
else:
warnings.warn("Image and labels must be set.")
def table_to_widget(table: dict) -> QWidget:
"""
Takes a table given as dictionary with strings as keys and numeric arrays as values and returns a QWidget which
contains a QTableWidget with that data.
"""
view = Table(value=table)
copy_button = QPushButton("Copy to clipboard")
@copy_button.clicked.connect
def copy_trigger():
view.to_dataframe().to_clipboard()
save_button = QPushButton("Save as csv...")
@save_button.clicked.connect
def save_trigger():
filename, _ = QFileDialog.getSaveFileName(save_button, "Save as csv...", ".", "*.csv")
view.to_dataframe().to_csv(filename)
widget = QWidget()
widget.setWindowTitle("region properties")
widget.setLayout(QGridLayout())
widget.layout().addWidget(copy_button)
widget.layout().addWidget(save_button)
widget.layout().addWidget(view.native)
return widget | 0.428951 | 0.511168 |
from Tkinter import *
import tkFont
import win32api
import win32print
import pyodbc
from fpdf import FPDF
class App:
def __init__(self, master):
frame = Frame(master)
frame.pack()
arial18 = tkFont.Font(family='Arial', size=18, weight='bold') # big font so we can read it in the shop
self.order_label = Label(frame, text="ORDER #:", font=arial18)
self.order_label.grid(row=0, column=0)
self.order_value = Entry(frame, text="", font=arial18)
self.order_value.grid(row=0, column=1, columnspan=2)
self.generate_button = Button(frame, text="Generate Labels", font=arial18, command=self.generate_labels)
self.generate_button.grid(row=0, column=3)
self.single_button = Button(frame, text="Print One", font=arial18, command=self.print_one_label)
self.single_button.grid(row=2, column=3)
self.single_label = Label(frame, text="Single Label:", font=arial18)
self.single_label.grid(row=2, column=0)
self.single_value = Entry(frame, text="", font=arial18)
self.single_value.grid(row=2, column=1, columnspan=2)
self.quit_button = Button(frame, text="QUIT", font=arial18, fg="red", command=frame.quit)
self.quit_button.grid(row=11, column=3)
def generate_labels(self):
conn = pyodbc.connect('DSN=QDSN_10.0.0.1;UID=username;PWD=password')
cursor = conn.cursor()
cursor.execute("select ODITEM, ODQORD from HDSDATA.OEORDT where ODORD# = " + self.order_value.get() )
parts_list = [] # [part number, quantity]
for rows in cursor:
parts_list.append([rows.ODITEM.rstrip(), int(rows.ODQORD)])
# 72 pts in an inch
l_width = 162
l_height = 54
label = FPDF(orientation='P', unit='pt', format=(l_width, l_height) )
label.set_margins(0, 0) # we don't want margins
label.set_auto_page_break(0) # turn off page breaks
label.set_font('Courier', 'B', 22)
for p in parts_list:
for i in range(0, p[1]):
label.add_page()
label.cell(l_width, l_height, p[0], 0, 0, 'C')
label.output(temp_pdf_file)
# automatically print the label
#win32api.ShellExecute (0, "printto", temp_pdf_file, label_printer, ".", 0)
def print_one_label(self):
# no database query, just print a label for a manually entered part #
# 72 pts in an inch
l_width = 162
l_height = 54
label = FPDF(orientation='P', unit='pt', format=(l_width, l_height) )
label.set_margins(0, 0) # we don't want margins
label.set_auto_page_break(0) # turn off page breaks
label.set_font('Courier', 'B', 22)
label.add_page()
label.cell(l_width, l_height, self.single_value.get(), 0, 0, 'C')
label.output(temp_pdf_file)
root = Tk()
root.wm_title("Parts Labeler")
app = App(root)
label_printer = 'ZDesigner GC420d (EPL)' # \\\\printserver\\zebra'
temp_pdf_file = 'TEMP_LABEL.PDF'
root.mainloop()
root.destroy() | Interface.py | from Tkinter import *
import tkFont
import win32api
import win32print
import pyodbc
from fpdf import FPDF
class App:
def __init__(self, master):
frame = Frame(master)
frame.pack()
arial18 = tkFont.Font(family='Arial', size=18, weight='bold') # big font so we can read it in the shop
self.order_label = Label(frame, text="ORDER #:", font=arial18)
self.order_label.grid(row=0, column=0)
self.order_value = Entry(frame, text="", font=arial18)
self.order_value.grid(row=0, column=1, columnspan=2)
self.generate_button = Button(frame, text="Generate Labels", font=arial18, command=self.generate_labels)
self.generate_button.grid(row=0, column=3)
self.single_button = Button(frame, text="Print One", font=arial18, command=self.print_one_label)
self.single_button.grid(row=2, column=3)
self.single_label = Label(frame, text="Single Label:", font=arial18)
self.single_label.grid(row=2, column=0)
self.single_value = Entry(frame, text="", font=arial18)
self.single_value.grid(row=2, column=1, columnspan=2)
self.quit_button = Button(frame, text="QUIT", font=arial18, fg="red", command=frame.quit)
self.quit_button.grid(row=11, column=3)
def generate_labels(self):
conn = pyodbc.connect('DSN=QDSN_10.0.0.1;UID=username;PWD=password')
cursor = conn.cursor()
cursor.execute("select ODITEM, ODQORD from HDSDATA.OEORDT where ODORD# = " + self.order_value.get() )
parts_list = [] # [part number, quantity]
for rows in cursor:
parts_list.append([rows.ODITEM.rstrip(), int(rows.ODQORD)])
# 72 pts in an inch
l_width = 162
l_height = 54
label = FPDF(orientation='P', unit='pt', format=(l_width, l_height) )
label.set_margins(0, 0) # we don't want margins
label.set_auto_page_break(0) # turn off page breaks
label.set_font('Courier', 'B', 22)
for p in parts_list:
for i in range(0, p[1]):
label.add_page()
label.cell(l_width, l_height, p[0], 0, 0, 'C')
label.output(temp_pdf_file)
# automatically print the label
#win32api.ShellExecute (0, "printto", temp_pdf_file, label_printer, ".", 0)
def print_one_label(self):
# no database query, just print a label for a manually entered part #
# 72 pts in an inch
l_width = 162
l_height = 54
label = FPDF(orientation='P', unit='pt', format=(l_width, l_height) )
label.set_margins(0, 0) # we don't want margins
label.set_auto_page_break(0) # turn off page breaks
label.set_font('Courier', 'B', 22)
label.add_page()
label.cell(l_width, l_height, self.single_value.get(), 0, 0, 'C')
label.output(temp_pdf_file)
root = Tk()
root.wm_title("Parts Labeler")
app = App(root)
label_printer = 'ZDesigner GC420d (EPL)' # \\\\printserver\\zebra'
temp_pdf_file = 'TEMP_LABEL.PDF'
root.mainloop()
root.destroy() | 0.161717 | 0.096748 |
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from PIL import Image
import torch, argparse
import torchvision.utils as vutils
from moviepy.editor import VideoClip
import numpy as np
import lib, models
from lib.manipulate import *
parser = argparse.ArgumentParser(description='SGD/SWA training')
parser.add_argument('--ckpt1', type=str, default="expr/cifar10_feat_gbn_dbn.pt", help='check point 1')
parser.add_argument('--ckpt2', type=str, default='expr/cifar10_none_gbn_dbn.pt', help='check point 2')
parser.add_argument('--upsample', type=int, default=3)
args = parser.parse_args()
ckpts = [torch.load(args.ckpt1), torch.load(args.ckpt2)]
gen_models = [
models.simple.ConvolutionGenerator(bn='bn', upsample=args.upsample),
models.simple.ConvolutionGenerator(bn='bn', upsample=args.upsample)]
disc_models = [
models.simple.ConvolutionDiscriminator(bn='bn', upsample=args.upsample),
models.simple.ConvolutionDiscriminator(bn='bn', upsample=args.upsample)]
interpolate_model = models.simple.ConvolutionGenerator(bn='bn', upsample=args.upsample)
interpolate_model.eval()
interpolate_model.cuda()
for ckpt, g, d in zip(ckpts, gen_models, disc_models):
g.load_state_dict(ckpt['gen_state_dict'])
d.load_state_dict(ckpt['disc_state_dict'])
g.eval(); d.eval()
g.cuda(); d.cuda()
print_parameters(gen_models[0])
print_parameters(gen_models[1])
fixed_z = torch.Tensor(16, 128).normal_().cuda()
TOTAL_TIME = 5.0
def test_disc_line(alpha):
interpolate_parameters(gen_models[0], gen_models[1], interpolate_model, alpha)
for i in range(100):
z = torch.Tensor(128, 128).normal_().cuda()
img = interpolate_model(z).detach()
d1 = disc_models[0](img).detach().mean().cpu().numpy()
d2 = disc_models[1](img).detach().mean().cpu().numpy()
loss_records[i]['d1'].append(d1)
loss_records[i]['d2'].append(d2)
def generate(model, z):
result = model(z).detach()
grid = vutils.make_grid(result, nrow=4, padding=2, normalize=True)
img = grid.cpu().numpy().transpose(1, 2, 0)
img = (img * 255).astype("uint8")
return img, result
loss_record = {"d1":[], "d2":[]}
loss_records = [{"d1":[], "d2":[]}] * 100
def interpolate_make_frame(t):
    # moviepy frame callback: map time t in [0, TOTAL_TIME] onto an
    # interpolation factor and render the blended generator's samples.
    process = t / TOTAL_TIME
    interpolate_parameters(gen_models[0], gen_models[1], interpolate_model, process)
    img, raw_img = generate(interpolate_model, fixed_z)
    # Log both discriminators' mean score for this frame as a side effect.
    disc1, disc2 = disc_models[0](raw_img), disc_models[1](raw_img)
    loss_record["d1"].append(disc1.detach().cpu().numpy().mean())
    loss_record["d2"].append(disc2.detach().cpu().numpy().mean())
    return img
Image.fromarray(generate(gen_models[0], fixed_z)[0]).save(open("my_1.jpg", "wb"), format="JPEG")
Image.fromarray(generate(gen_models[1], fixed_z)[0]).save(open("my_2.jpg", "wb"), format="JPEG")
#interpolate_parameters(gen_models[0], gen_models[1], interpolate_model, 0.5)
animation = VideoClip(interpolate_make_frame, duration=TOTAL_TIME) # 3-second clip
animation.write_videofile("my_animation.mp4", fps=24)
for i in range(100):
print("=> %d" % i)
alpha = i / 100.
test_disc_line(alpha)
for i in range(10):
plot_dict(loss_records[i])
plt.savefig("my_fig.png")
plt.close() | interpolate.py | import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from PIL import Image
import torch, argparse
import torchvision.utils as vutils
from moviepy.editor import VideoClip
import numpy as np
import lib, models
from lib.manipulate import *
parser = argparse.ArgumentParser(description='SGD/SWA training')
parser.add_argument('--ckpt1', type=str, default="expr/cifar10_feat_gbn_dbn.pt", help='check point 1')
parser.add_argument('--ckpt2', type=str, default='expr/cifar10_none_gbn_dbn.pt', help='check point 2')
parser.add_argument('--upsample', type=int, default=3)
args = parser.parse_args()
ckpts = [torch.load(args.ckpt1), torch.load(args.ckpt2)]
gen_models = [
models.simple.ConvolutionGenerator(bn='bn', upsample=args.upsample),
models.simple.ConvolutionGenerator(bn='bn', upsample=args.upsample)]
disc_models = [
models.simple.ConvolutionDiscriminator(bn='bn', upsample=args.upsample),
models.simple.ConvolutionDiscriminator(bn='bn', upsample=args.upsample)]
interpolate_model = models.simple.ConvolutionGenerator(bn='bn', upsample=args.upsample)
interpolate_model.eval()
interpolate_model.cuda()
for ckpt, g, d in zip(ckpts, gen_models, disc_models):
g.load_state_dict(ckpt['gen_state_dict'])
d.load_state_dict(ckpt['disc_state_dict'])
g.eval(); d.eval()
g.cuda(); d.cuda()
print_parameters(gen_models[0])
print_parameters(gen_models[1])
fixed_z = torch.Tensor(16, 128).normal_().cuda()
TOTAL_TIME = 5.0
def test_disc_line(alpha):
interpolate_parameters(gen_models[0], gen_models[1], interpolate_model, alpha)
for i in range(100):
z = torch.Tensor(128, 128).normal_().cuda()
img = interpolate_model(z).detach()
d1 = disc_models[0](img).detach().mean().cpu().numpy()
d2 = disc_models[1](img).detach().mean().cpu().numpy()
loss_records[i]['d1'].append(d1)
loss_records[i]['d2'].append(d2)
def generate(model, z):
result = model(z).detach()
grid = vutils.make_grid(result, nrow=4, padding=2, normalize=True)
img = grid.cpu().numpy().transpose(1, 2, 0)
img = (img * 255).astype("uint8")
return img, result
# Per-frame discriminator score log for the interpolation video.
loss_record = {"d1": [], "d2": []}
# One independent record per latent batch.
# BUG FIX: the original `[{...}] * 100` replicated ONE shared dict object
# 100 times, so every append showed up in all 100 entries; a comprehension
# creates 100 distinct dicts.
loss_records = [{"d1": [], "d2": []} for _ in range(100)]
def interpolate_make_frame(t):
process = t / TOTAL_TIME
interpolate_parameters(gen_models[0], gen_models[1], interpolate_model, process)
img, raw_img = generate(interpolate_model, fixed_z)
disc1, disc2 = disc_models[0](raw_img), disc_models[1](raw_img)
loss_record["d1"].append(disc1.detach().cpu().numpy().mean())
loss_record["d2"].append(disc2.detach().cpu().numpy().mean())
return img
Image.fromarray(generate(gen_models[0], fixed_z)[0]).save(open("my_1.jpg", "wb"), format="JPEG")
Image.fromarray(generate(gen_models[1], fixed_z)[0]).save(open("my_2.jpg", "wb"), format="JPEG")
#interpolate_parameters(gen_models[0], gen_models[1], interpolate_model, 0.5)
animation = VideoClip(interpolate_make_frame, duration=TOTAL_TIME) # 3-second clip
animation.write_videofile("my_animation.mp4", fps=24)
for i in range(100):
print("=> %d" % i)
alpha = i / 100.
test_disc_line(alpha)
for i in range(10):
plot_dict(loss_records[i])
plt.savefig("my_fig.png")
plt.close() | 0.514888 | 0.411288 |
import logging
from collections import namedtuple
from operator import attrgetter
from pony.orm import db_session
from data import Article
from .index import Index
InvertedIndexEntry = namedtuple("InvertedIndexEntry", ["article", "count", "positions"])
class InvertedIndex(Index):
    """ Inverted index: maps each token to a postings list of
    InvertedIndexEntry records (one per article containing the token) """
    # Key under which this index type is registered by the Index base class.
    NAME = "inverted_index"
    def __init__(self):
        super().__init__(InvertedIndex.NAME)
    @staticmethod
    def build(articles: [int]) -> dict:
        """ Build an index for the given documents

        :param articles: database ids of the articles to index
        :return: dict mapping token -> list of InvertedIndexEntry
        """
        index = dict()
        with db_session:
            for article_id in articles:
                article = Article[article_id]
                # TODO: use gzip.open
                # NOTE(review): assumes processed_abstract_path is plain
                # whitespace-tokenised text -- confirm with the preprocessor.
                with open(article.processed_abstract_path, "r") as abstract:
                    text = " ".join(abstract.readlines()).split()
                # set(): exactly one entry per (token, article) pair,
                # regardless of how often the token occurs.
                for token in set(text):
                    if token not in index:
                        index[token] = []
                    index[token].append(InvertedIndex._build_entry(article, text, token))
        return index
@staticmethod
def merge(index1: dict, index2: dict):
""" Merge two indices """
return {token: InvertedIndex._merge(index1.get(token, []), index2.get(token, []))
for token in index1.keys() | index2.keys()}
@staticmethod
def _merge(list1: [InvertedIndexEntry], list2: [InvertedIndexEntry]) -> [InvertedIndexEntry]:
return sorted(list1 + list2, key=attrgetter("count"), reverse=True)
@staticmethod
def _gap_values(values: [int]):
""" Compress list with ints """
return [values[0]] + [values[i] - values[i - 1] for i in range(1, len(values))]
@staticmethod
def _build_entry(article: Article, text: [str], token: str) -> InvertedIndexEntry:
""" Build an inverted index entry for the given text and token """
positions = InvertedIndex._gap_values([index for index, word in enumerate(text) if word == token])
return InvertedIndexEntry(article.id, len(positions), positions) | index/invertedindex.py | import logging
from collections import namedtuple
from operator import attrgetter
from pony.orm import db_session
from data import Article
from .index import Index
InvertedIndexEntry = namedtuple("InvertedIndexEntry", ["article", "count", "positions"])
class InvertedIndex(Index):
NAME = "inverted_index"
def __init__(self):
super().__init__(InvertedIndex.NAME)
@staticmethod
def build(articles: [int]) -> dict:
""" Build an index for the given documents """
index = dict()
with db_session:
for article_id in articles:
article = Article[article_id]
# TODO: use gzip.open
with open(article.processed_abstract_path, "r") as abstract:
text = " ".join(abstract.readlines()).split()
for token in set(text):
if token not in index:
index[token] = []
index[token].append(InvertedIndex._build_entry(article, text, token))
return index
@staticmethod
def merge(index1: dict, index2: dict):
""" Merge two indices """
return {token: InvertedIndex._merge(index1.get(token, []), index2.get(token, []))
for token in index1.keys() | index2.keys()}
@staticmethod
def _merge(list1: [InvertedIndexEntry], list2: [InvertedIndexEntry]) -> [InvertedIndexEntry]:
return sorted(list1 + list2, key=attrgetter("count"), reverse=True)
@staticmethod
def _gap_values(values: [int]):
""" Compress list with ints """
return [values[0]] + [values[i] - values[i - 1] for i in range(1, len(values))]
@staticmethod
def _build_entry(article: Article, text: [str], token: str) -> InvertedIndexEntry:
""" Build an inverted index entry for the given text and token """
positions = InvertedIndex._gap_values([index for index, word in enumerate(text) if word == token])
return InvertedIndexEntry(article.id, len(positions), positions) | 0.416915 | 0.295065 |
import pyautogui
import configparser
import base64
import mimetypes
import time
def main():
    """Run every ducky-script listed under [Main] Files in config.ini."""
    import ast  # stdlib; local import keeps the module's import block unchanged

    config = configparser.ConfigParser()
    config.read('config.ini')
    # SECURITY FIX: literal_eval() parses the same list syntax as eval()
    # but cannot execute arbitrary code embedded in the config file.
    files = ast.literal_eval(config.get("Main", "Files"))
    for f in files:
        global last_command
        global default_delay
        last_command = ""
        duck_text = ""
        default_delay = 0
        mime = mimetypes.guess_type(f)
        if mime[0] == 'application/octet-stream':
            # Binary payload: base64-encoded ducky-script text.
            with open(f, 'rb') as duck_file:  # FIX: close the handle
                duck_text = base64.b64decode(duck_file.read()).decode('ascii').splitlines()
        elif mime[0] == 'text/plain':
            with open(f, 'r') as duck_file:  # FIX: close the handle
                duck_text = duck_file.readlines()
        for line in duck_text:
            execute_command(line)
            last_command = line
def execute_command(cmd):
    """Interpret and execute a single ducky-script line via pyautogui."""
    global default_delay
    global last_command
    time.sleep(default_delay)
    cmd = cmd.split(' ', 1)
    if '-' in cmd[0]:
        # Combination such as CTRL-ALT: split into individual keys.
        cmd[0] = cmd[0].split('-', 1)
        if len(cmd) > 1:
            cmd = cmd[0] + [cmd[1]]
        else:
            cmd = cmd[0]
        execute_hotkey(cmd)
    elif cmd[0] == 'DELAY':
        # SECURITY FIX: parse numeric fields with int() instead of eval(),
        # so a script line cannot execute arbitrary Python.
        time.sleep(int(cmd[1]) / 1000)
    elif cmd[0] == 'DEFAULT_DELAY':
        default_delay = int(cmd[1]) / 1000
    elif cmd[0] == 'STRING':
        pyautogui.typewrite(cmd[1].rstrip())
    elif cmd[0] == 'GUI' or cmd[0] == 'WINDOWS':
        cmd[0] = 'win'
        execute_hotkey(cmd)
    elif cmd[0] == 'MENU' or cmd[0] == 'APP':
        pyautogui.hotkey('shift', 'f10')
    elif cmd[0] == 'CTRL' or cmd[0] == 'SHIFT' or cmd[0] == 'ALT':
        execute_hotkey(cmd)
    elif cmd[0] == 'CONTROL':
        cmd[0] = 'ctrl'
        execute_hotkey(cmd)
    elif cmd[0] == 'DOWNARROW':
        pyautogui.press('down')
    elif cmd[0] == 'LEFTARROW':
        pyautogui.press('left')
    elif cmd[0] == 'RIGHTARROW':
        pyautogui.press('right')
    elif cmd[0] == 'UPARROW':
        pyautogui.press('up')
    elif cmd[0] == 'REPEAT':
        # Re-run the previous line N times.
        for _ in range(int(cmd[1])):
            execute_command(last_command)
    else:
        execute_hotkey(cmd)
def execute_hotkey(cmd):
    """Send ``cmd`` to pyautogui as one hotkey combination.

    ``cmd[0]`` is the first key; optional ``cmd[1]`` is a space-separated
    string of the remaining keys.
    """
    cmd[0] = cmd[0].rstrip().lower()
    if len(cmd) > 1:
        # BUG FIX: the original built this lower-cased list and discarded
        # it, so extra keys kept their original case/whitespace.
        cmd[1] = [x.strip().lower() for x in cmd[1].split(' ')]
        pyautogui.hotkey(cmd[0], *cmd[1], interval=0.1)
    else:
        print(cmd[0])
        pyautogui.hotkey(cmd[0])
if __name__ == "__main__":
default_delay = 0
last_command = ""
main() | DuckyTails.py | import pyautogui
import configparser
import base64
import mimetypes
import time
def main():
config = configparser.ConfigParser()
config.read('config.ini')
files = eval(config.get("Main", "Files"))
for f in files:
global last_command
global default_delay
last_command = ""
duck_text = ""
default_delay = 0
mime = mimetypes.guess_type(f)
if mime[0] == 'application/octet-stream':
duck_bin = open(f, 'rb').read()
duck_text = base64.b64decode(duck_bin)
duck_text = duck_text.decode('ascii')
duck_text = duck_text.splitlines()
elif mime[0] == 'text/plain':
duck_text = open(f, 'r').readlines()
for line in duck_text:
execute_command(line)
last_command = line
def execute_command(cmd):
global default_delay
global last_command
time.sleep(default_delay)
cmd = cmd.split(' ', 1)
if '-' in cmd[0]:
cmd[0] = cmd[0].split('-', 1)
if len(cmd) > 1:
cmd = cmd[0] + [cmd[1]]
else:
cmd = cmd[0]
execute_hotkey(cmd)
elif cmd[0] == 'DELAY':
cmd[1] = eval(cmd[1])/1000
time.sleep(cmd[1])
elif cmd[0] == 'DEFAULT_DELAY':
default_delay = eval(cmd[1]) / 1000
elif cmd[0] == 'STRING':
pyautogui.typewrite(cmd[1].rstrip())
elif cmd[0] == 'GUI' or cmd[0] == 'WINDOWS':
cmd[0] = 'win'
execute_hotkey(cmd)
elif cmd[0] == 'MENU' or cmd[0] == 'APP':
pyautogui.hotkey('shift', 'f10')
elif cmd[0] == 'CTRL' or cmd[0] == 'SHIFT' or cmd[0] == 'ALT':
execute_hotkey(cmd)
elif cmd[0] == 'CONTROL':
cmd[0] = 'ctrl'
execute_hotkey(cmd)
elif cmd[0] == 'DOWNARROW':
pyautogui.press('down')
elif cmd[0] == 'LEFTARROW':
pyautogui.press('left')
elif cmd[0] == 'RIGHTARROW':
pyautogui.press('right')
elif cmd[0] == 'UPARROW':
pyautogui.press('up')
elif cmd[0] == 'REPEAT':
for x in range(0, eval(cmd[1])):
execute_command(last_command)
else:
execute_hotkey(cmd)
def execute_hotkey(cmd):
    """Send ``cmd`` to pyautogui as one hotkey combination.

    ``cmd[0]`` is the first key; optional ``cmd[1]`` is a space-separated
    string of the remaining keys.
    """
    cmd[0] = cmd[0].rstrip().lower()
    if len(cmd) > 1:
        # BUG FIX: the original built this lower-cased list and discarded
        # it, so extra keys kept their original case/whitespace.
        cmd[1] = [x.strip().lower() for x in cmd[1].split(' ')]
        pyautogui.hotkey(cmd[0], *cmd[1], interval=0.1)
    else:
        print(cmd[0])
        pyautogui.hotkey(cmd[0])
if __name__ == "__main__":
default_delay = 0
last_command = ""
main() | 0.055785 | 0.0686 |
import logging
import asyncio
import socket
import aiohttp
import async_timeout
from datetime import timedelta
from homeassistant.util.dt import now
# Seconds allowed for a single API request.
TIMEOUT = 15
# strftime pattern for the API's date parameters.
# BUG FIX: this used "%G" (the ISO-8601 week-based year), which disagrees
# with "%m"/"%d" around New Year -- e.g. 2021-01-01 formatted as
# "2020-01-01".  "%Y" is the calendar year that matches month/day.
DATE_FORMAT = "%Y-%m-%d"
_LOGGER: logging.Logger = logging.getLogger(__package__)
class DsnyApiClient:
    """Async client for the DSNY (NYC Sanitation) collection-schedule API."""

    def __init__(self, session: aiohttp.ClientSession) -> None:
        """Store the shared aiohttp session used for all requests."""
        self._session = session
async def async_get_schedule(
self, house_number: str, street: str, borough: str
) -> list[dict]:
"""Get data from the API."""
url = "https://a827-donatenyc.nyc.gov/DSNYApi/API/SCHEDULE/GetallSchedule"
tomorrow = (now() + timedelta(days=1)).strftime(DATE_FORMAT)
next_week = (now() + timedelta(days=7)).strftime(DATE_FORMAT)
print(tomorrow)
return await self.get_url(
url,
{
"houseNo": house_number,
"streetName": street,
"borough": borough,
"startdate": tomorrow,
"enddate": next_week,
},
)
async def get_url(self, url: str, params: dict) -> list[dict]:
"""Get information from the API."""
try:
async with async_timeout.timeout(TIMEOUT, loop=asyncio.get_event_loop()):
response = await self._session.get(url, params=params)
return await response.json()
except asyncio.TimeoutError as exception:
_LOGGER.error(
"Timeout error fetching information from %s - %s",
url,
exception,
)
except (KeyError, TypeError) as exception:
_LOGGER.error(
"Error parsing information from %s - %s",
url,
exception,
)
except (aiohttp.ClientError, socket.gaierror) as exception:
_LOGGER.error(
"Error fetching information from %s - %s",
url,
exception,
)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Something really wrong happened! - %s", exception) | custom_components/dsny/api.py | import logging
import asyncio
import socket
import aiohttp
import async_timeout
from datetime import timedelta
from homeassistant.util.dt import now
# Seconds allowed for a single API request.
TIMEOUT = 15
# strftime pattern for the API's date parameters.
# BUG FIX: this used "%G" (the ISO-8601 week-based year), which disagrees
# with "%m"/"%d" around New Year -- e.g. 2021-01-01 formatted as
# "2020-01-01".  "%Y" is the calendar year that matches month/day.
DATE_FORMAT = "%Y-%m-%d"
_LOGGER: logging.Logger = logging.getLogger(__package__)
class DsnyApiClient:
def __init__(self, session: aiohttp.ClientSession) -> None:
"""Sample API Client."""
self._session = session
async def async_get_schedule(
self, house_number: str, street: str, borough: str
) -> list[dict]:
"""Get data from the API."""
url = "https://a827-donatenyc.nyc.gov/DSNYApi/API/SCHEDULE/GetallSchedule"
tomorrow = (now() + timedelta(days=1)).strftime(DATE_FORMAT)
next_week = (now() + timedelta(days=7)).strftime(DATE_FORMAT)
print(tomorrow)
return await self.get_url(
url,
{
"houseNo": house_number,
"streetName": street,
"borough": borough,
"startdate": tomorrow,
"enddate": next_week,
},
)
async def get_url(self, url: str, params: dict) -> list[dict]:
"""Get information from the API."""
try:
async with async_timeout.timeout(TIMEOUT, loop=asyncio.get_event_loop()):
response = await self._session.get(url, params=params)
return await response.json()
except asyncio.TimeoutError as exception:
_LOGGER.error(
"Timeout error fetching information from %s - %s",
url,
exception,
)
except (KeyError, TypeError) as exception:
_LOGGER.error(
"Error parsing information from %s - %s",
url,
exception,
)
except (aiohttp.ClientError, socket.gaierror) as exception:
_LOGGER.error(
"Error fetching information from %s - %s",
url,
exception,
)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Something really wrong happened! - %s", exception) | 0.545528 | 0.105027 |
import openpyxl
import os
class XlsxWriter():
    """Write a test report (cover, environment info, results) to report.xlsx."""

    def __init__(self, report, reportdir):
        # `report` supplies .cover, .envinfo and .testcases; `reportdir`
        # is the directory the workbook will be saved into.
        self.report = report
        self.reportdir = reportdir
        self.book = openpyxl.Workbook()
def write(self):
self.write_cover()
self.write_envinfo()
self.write_results()
report_path = os.path.join(self.reportdir, 'report.xlsx')
self.book.save(report_path)
    def write_cover(self):
        """Fill the first (default) sheet with the title and revision history."""
        sheet = self.book.worksheets[0]
        sheet.title = 'cover'
        sheet.cell(2, 2).value = self.report.cover['title']
        sheet.cell(4, 2).value = 'History:'
        # Header row of the history table ...
        sheet.cell(5, 2).value = 'date'
        sheet.cell(5, 3).value = 'author'
        sheet.cell(5, 4).value = 'comment'
        # ... followed by the single history record.
        sheet.cell(6, 2).value = self.report.cover['history']['date']
        sheet.cell(6, 3).value = self.report.cover['history']['author']
        sheet.cell(6, 4).value = self.report.cover['history']['comment']
    def write_envinfo(self):
        """Dump the nested envinfo mapping to an 'envinfo' sheet.

        Layout: "[section]" on its own row, then one row per entry; dict
        values spread key/value across columns 2-3 (one row per item),
        list values occupy one row each in column 2, scalars a single row.
        """
        sheet = self.book.create_sheet(title='envinfo')
        envinfo = self.report.envinfo
        i = 1  # current row; openpyxl cell coordinates are 1-based
        for info_key, info in envinfo.items():
            sheet.cell(i, 1).value = '[%s]' % (info_key)
            i += 1
            for key, val in info.items():
                sheet.cell(i, 1).value = key
                if isinstance(val, dict):
                    for vk, vv in val.items():
                        sheet.cell(i, 2).value = str(vk)
                        sheet.cell(i, 3).value = str(vv)
                        i += 1
                elif isinstance(val, list):
                    for vv in val:
                        sheet.cell(i, 2).value = str(vv)
                        i += 1
                else:
                    sheet.cell(i, 2).value = str(val)
                    i += 1
def write_results(self):
sheet = self.book.create_sheet(title='results')
tc = self.report.testcases[0]
row = ['testcase'] + list(tc.interim_results.keys())
for j in range(len(row)):
sheet.cell(1, j + 1).value = row[j]
for i, tc in enumerate(self.report.testcases):
row = [tc.name] + list(tc.interim_results.values())
for j in range(len(row)):
sheet.cell(i + 2, j + 1).value = row[j] | script/report/writer/xlsx.py | import openpyxl
import os
class XlsxWriter():
def __init__(self, report, reportdir):
self.report = report
self.reportdir = reportdir
self.book = openpyxl.Workbook()
def write(self):
self.write_cover()
self.write_envinfo()
self.write_results()
report_path = os.path.join(self.reportdir, 'report.xlsx')
self.book.save(report_path)
def write_cover(self):
sheet = self.book.worksheets[0]
sheet.title = 'cover'
sheet.cell(2, 2).value = self.report.cover['title']
sheet.cell(4, 2).value = 'History:'
sheet.cell(5, 2).value = 'date'
sheet.cell(5, 3).value = 'author'
sheet.cell(5, 4).value = 'comment'
sheet.cell(6, 2).value = self.report.cover['history']['date']
sheet.cell(6, 3).value = self.report.cover['history']['author']
sheet.cell(6, 4).value = self.report.cover['history']['comment']
def write_envinfo(self):
sheet = self.book.create_sheet(title='envinfo')
envinfo = self.report.envinfo
i = 1
for info_key, info in envinfo.items():
sheet.cell(i, 1).value = '[%s]' % (info_key)
i += 1
for key, val in info.items():
sheet.cell(i, 1).value = key
if isinstance(val, dict):
for vk, vv in val.items():
sheet.cell(i, 2).value = str(vk)
sheet.cell(i, 3).value = str(vv)
i += 1
elif isinstance(val, list):
for vv in val:
sheet.cell(i, 2).value = str(vv)
i += 1
else:
sheet.cell(i, 2).value = str(val)
i += 1
def write_results(self):
sheet = self.book.create_sheet(title='results')
tc = self.report.testcases[0]
row = ['testcase'] + list(tc.interim_results.keys())
for j in range(len(row)):
sheet.cell(1, j + 1).value = row[j]
for i, tc in enumerate(self.report.testcases):
row = [tc.name] + list(tc.interim_results.values())
for j in range(len(row)):
sheet.cell(i + 2, j + 1).value = row[j] | 0.144843 | 0.243148 |
import inspect
from abc import ABCMeta, abstractmethod
from guniflask.annotation import AnnotationMetadata, AnnotationUtils
from guniflask.beans.definition import BeanDefinition
from guniflask.beans.definition_registry import BeanDefinitionRegistry
from guniflask.context.annotation import Component
from guniflask.context.annotation_config_utils import AnnotationConfigUtils
from guniflask.context.bean_name_generator import AnnotationBeanNameGenerator
from guniflask.context.condition_evaluator import ConditionEvaluator
from guniflask.utils.path import walk_modules
class AnnotationConfigRegistry(metaclass=ABCMeta):
    """Interface for registries that accept annotated components, either
    registered explicitly or discovered by scanning modules."""

    @abstractmethod
    def register(self, *annotated_elements):
        """Register the given annotated classes/functions."""
        pass

    @abstractmethod
    def scan(self, *base_modules):
        """Scan the given base modules for annotated components."""
        pass
class AnnotatedBeanDefinitionReader:
    """Registers explicitly supplied annotated elements as bean definitions."""

    def __init__(self, registry: BeanDefinitionRegistry):
        self._registry = registry
        self._bean_name_generator = AnnotationBeanNameGenerator()
        self._condition_evaluator = ConditionEvaluator(registry)
        AnnotationConfigUtils.register_annotation_config_processors(self._registry)

    def register(self, *annotated_elements):
        """Register every given element as a bean."""
        for element in annotated_elements:
            self.register_bean(element)

    def register_bean(self, annotated_element, name=None):
        """Register one element, honouring any skip conditions on it."""
        metadata = AnnotationUtils.get_annotation_metadata(annotated_element)
        if self._condition_evaluator.should_skip(metadata):
            return
        definition = BeanDefinition(annotated_element)
        if name:
            bean_name = name
        else:
            bean_name = self._bean_name_generator.generate_bean_name(definition, self._registry)
        self._registry.register_bean_definition(bean_name, definition)

    def set_bean_name_generator(self, bean_name_generator):
        """Swap in a custom bean-name generator."""
        self._bean_name_generator = bean_name_generator
class ModuleBeanDefinitionScanner:
    """Discovers annotated components by walking module trees and registers
    them as bean definitions."""

    def __init__(self, registry: BeanDefinitionRegistry):
        self._registry = registry
        # When True, scan() also registers the framework's own
        # annotation-config processors after scanning.
        self.include_annotation_config = True
        self._bean_name_generator = AnnotationBeanNameGenerator()
        self._condition_evaluator = ConditionEvaluator(registry)
def scan(self, *base_modules):
self._scan(*base_modules)
if self.include_annotation_config:
AnnotationConfigUtils.register_annotation_config_processors(self._registry)
    def set_bean_name_generator(self, bean_name_generator):
        """Replace the default AnnotationBeanNameGenerator with a custom one."""
        self._bean_name_generator = bean_name_generator
def _scan(self, *base_modules):
for base_module in base_modules:
for module in walk_modules(base_module):
candidates = self._find_candidate_components(module)
for bean_definition in candidates:
bean_name = self._bean_name_generator.generate_bean_name(bean_definition, self._registry)
self._registry.register_bean_definition(bean_name, bean_definition)
def _find_candidate_components(self, module):
candidates = []
selected_id = set()
for obj in vars(module).values():
if inspect.isclass(obj) or inspect.isfunction(obj):
if obj.__module__ == module.__name__:
annotation_metadata = AnnotationUtils.get_annotation_metadata(obj)
if annotation_metadata is not None and self._is_candidate_component(annotation_metadata):
obj_id = id(obj)
if obj_id not in selected_id:
selected_id.add(obj_id)
bean_definition = BeanDefinition(obj)
candidates.append(bean_definition)
return candidates
def _is_candidate_component(self, metadata: AnnotationMetadata):
return metadata.is_annotated(Component) and not self._condition_evaluator.should_skip(metadata) | guniflask/context/annotation_config_registry.py | import inspect
from abc import ABCMeta, abstractmethod
from guniflask.annotation import AnnotationMetadata, AnnotationUtils
from guniflask.beans.definition import BeanDefinition
from guniflask.beans.definition_registry import BeanDefinitionRegistry
from guniflask.context.annotation import Component
from guniflask.context.annotation_config_utils import AnnotationConfigUtils
from guniflask.context.bean_name_generator import AnnotationBeanNameGenerator
from guniflask.context.condition_evaluator import ConditionEvaluator
from guniflask.utils.path import walk_modules
class AnnotationConfigRegistry(metaclass=ABCMeta):
@abstractmethod
def register(self, *annotated_elements):
pass
@abstractmethod
def scan(self, *base_modules):
pass
class AnnotatedBeanDefinitionReader:
def __init__(self, registry: BeanDefinitionRegistry):
self._registry = registry
self._bean_name_generator = AnnotationBeanNameGenerator()
self._condition_evaluator = ConditionEvaluator(registry)
AnnotationConfigUtils.register_annotation_config_processors(self._registry)
def register(self, *annotated_elements):
for e in annotated_elements:
self.register_bean(e)
def register_bean(self, annotated_element, name=None):
if self._condition_evaluator.should_skip(AnnotationUtils.get_annotation_metadata(annotated_element)):
return
bean_definition = BeanDefinition(annotated_element)
bean_name = name or self._bean_name_generator.generate_bean_name(bean_definition, self._registry)
self._registry.register_bean_definition(bean_name, bean_definition)
def set_bean_name_generator(self, bean_name_generator):
self._bean_name_generator = bean_name_generator
class ModuleBeanDefinitionScanner:
def __init__(self, registry: BeanDefinitionRegistry):
self._registry = registry
self.include_annotation_config = True
self._bean_name_generator = AnnotationBeanNameGenerator()
self._condition_evaluator = ConditionEvaluator(registry)
def scan(self, *base_modules):
self._scan(*base_modules)
if self.include_annotation_config:
AnnotationConfigUtils.register_annotation_config_processors(self._registry)
def set_bean_name_generator(self, bean_name_generator):
self._bean_name_generator = bean_name_generator
def _scan(self, *base_modules):
for base_module in base_modules:
for module in walk_modules(base_module):
candidates = self._find_candidate_components(module)
for bean_definition in candidates:
bean_name = self._bean_name_generator.generate_bean_name(bean_definition, self._registry)
self._registry.register_bean_definition(bean_name, bean_definition)
def _find_candidate_components(self, module):
candidates = []
selected_id = set()
for obj in vars(module).values():
if inspect.isclass(obj) or inspect.isfunction(obj):
if obj.__module__ == module.__name__:
annotation_metadata = AnnotationUtils.get_annotation_metadata(obj)
if annotation_metadata is not None and self._is_candidate_component(annotation_metadata):
obj_id = id(obj)
if obj_id not in selected_id:
selected_id.add(obj_id)
bean_definition = BeanDefinition(obj)
candidates.append(bean_definition)
return candidates
def _is_candidate_component(self, metadata: AnnotationMetadata):
return metadata.is_annotated(Component) and not self._condition_evaluator.should_skip(metadata) | 0.563018 | 0.064831 |
from builtins import str
import os
import argparse
import logging
# Deal with matplotlib backend before importing seaborn
# See https://stackoverflow.com/a/50089385/579925
import matplotlib
if os.environ.get('DISPLAY','') == '':
print('No display found: using non-interactive Agg backend')
matplotlib.use('Agg')
import seaborn as sns
import pathlib2
from .pegs import pegs_main
from .intervals import make_gene_interval_file
from .bedtools import fetch_bedtools
from .bedtools import bedtools_version
from .utils import find_exe
from .utils import collect_files
from .utils import sort_files
from . import get_version
# Description
PEGS_DESCRIPTION = "PEGS: Peak-set Enrichment of Gene-Sets"
# Citation
PEGS_CITATION = """
If you use PEGS in your research then please cite:
* <NAME>, <NAME>, <NAME> et al.
PEGS: An efficient tool for gene set enrichment within defined
sets of genomic intervals [version 2; peer review: 2 approved].
F1000Research 2021, 10:570
(https://doi.org/10.12688/f1000research.53926.2)
"""
# Default set of distances for enrichment calculation
DEFAULT_DISTANCES = [5000,25000,50000,100000,150000,200000]
# Built in gene interval files
BUILTIN_GENE_INTERVALS = {
"hg38": "refGene_hg38_120719_intervals.bed",
"mm10": "refGene_mm10_120719_intervals.bed",
}
# Types for cubehelix_palette options
CUBEHELIX_PALETTE_TYPES = {
'n_colors': int,
'start': float,
'rot': float,
'gamma': float,
'hue': float,
'dark': float,
'light': float,
'reverse': bool,
}
def pegs():
    """Command-line entry point for PEGS.

    Parses the command line, validates the input peak/cluster/TAD files,
    resolves the gene-interval file (built-in genome name or BED path),
    builds an optional custom heatmap palette, locates (or fetches)
    bedtools, and finally hands everything to pegs_main().

    Returns 1 on validation/setup failure, None otherwise.
    """
    # Create command line parser
    p = argparse.ArgumentParser(description=PEGS_DESCRIPTION)
    p.add_argument("gene_intervals",
                   metavar="GENE_INTERVALS",
                   help="either name of a built-in set of gene "
                   "intervals (%s), or a BED file with gene interval "
                   "data" %
                   ','.join(["'%s'" % x for x in BUILTIN_GENE_INTERVALS]))
    p.add_argument('--version',action='version',version=get_version())
    p.add_argument("-p","--peaks",
                   metavar="PEAK_SET_FILE",
                   dest="peaks",
                   action="store",
                   required=True,
                   nargs="+",
                   help="one or more input peak set files (BED format)")
    p.add_argument("-g","--genes",
                   metavar="GENE_CLUSTER_FILE",
                   dest="clusters",
                   action="store",
                   required=True,
                   nargs="+",
                   help="one or more input gene cluster files (one gene "
                   "per line)")
    p.add_argument("-t","--tads",metavar="TADS_FILE",
                   dest="tads_file",
                   action="store",
                   help="BED file with topologically associating "
                   "domains (TADs)")
    p.add_argument("-d","--distances",
                   metavar="DISTANCE",
                   dest="distances",
                   action="store",
                   nargs="+",
                   help="specify distance(s) to calculate enrichments "
                   "for (if no distances are specified then the default "
                   "set will be used i.e. %s)" %
                   ' '.join([str(x) for x in DEFAULT_DISTANCES]))
    output_options = p.add_argument_group("Output options")
    output_options.add_argument("--name",metavar="BASENAME",
                                dest="name",
                                action='store',
                                default="pegs",
                                help="basename for output files (default: "
                                "'pegs')")
    output_options.add_argument("-o",metavar="OUTPUT_DIRECTORY",
                                dest="output_directory",
                                action="store",
                                default=None,
                                help="specify directory to write output "
                                "files to (default: write to current "
                                "directory)")
    output_options.add_argument("-m",metavar="HEATMAP",
                                dest="output_heatmap",
                                action="store",
                                default=None,
                                help="destination for output heatmap; "
                                "image format is implicitly determined by "
                                "the file extension (e.g. '.png','.svg' "
                                "etc) unless overridden by the --format "
                                "option (default: 'BASENAME_heatmap.FORMAT')")
    output_options.add_argument("-x",metavar="XLSX",
                                dest="output_xlsx",
                                action="store",
                                default=None,
                                help="destination for output XLSX file "
                                "with the raw enrichment data (default: "
                                "'BASENAME_results.xlsx')")
    heatmap_options = p.add_argument_group("Heatmap options")
    heatmap_options.add_argument("--format",
                                 dest="heatmap_format",
                                 metavar="FORMAT",
                                 action="store",
                                 default = None,
                                 help="explicitly specify the image format "
                                 "for the output heatmap; note that if this "
                                 "option is specified then it will override "
                                 "the format implied by the specified with "
                                 "the -m option (default: 'png')")
    heatmap_options.add_argument("--x-label",
                                 metavar="CLUSTERS_AXIS_LABEL",
                                 dest="clusters_axis_label",
                                 action="store",
                                 default=None,
                                 help="set a custom label for the X "
                                 "(clusters) axis")
    heatmap_options.add_argument("--y-label",
                                 metavar="PEAKSETS_AXIS_LABEL",
                                 dest="peaksets_axis_label",
                                 action="store",
                                 default=None,
                                 help="set a custom label for the Y "
                                 "(peak sets) axis")
    g = heatmap_options.add_mutually_exclusive_group()
    g.add_argument("--color",
                   dest="heatmap_color",
                   metavar="COLOR",
                   action="store",
                   default=None,
                   help="specify a base color to use for the heatmap "
                   "(NB not compatible with --heatmap-palette)")
    g.add_argument("--heatmap-palette",
                   dest="heatmap_palette_options",
                   metavar="OPTION=VALUE",
                   action="store",
                   nargs="+",
                   default = None,
                   help="advanced option to specify custom palette "
                   "settings for the output heatmap (e.g. 'start=0.5', "
                   "'rot=0' etc). Available options are those listed in "
                   "the 'cubehelix_palette' documentation at "
                   "https://seaborn.pydata.org/generated/"
                   "seaborn.cubehelix_palette.html (NB not compatible "
                   "with --color)")
    advanced_options = p.add_argument_group("Advanced options")
    advanced_options.add_argument("-k","--keep-intersection-files",
                                  dest="keep_intersection_files",
                                  action="store_true",
                                  help="keep the intermediate intersection "
                                  "files (useful for debugging)")
    advanced_options.add_argument("--dump-raw-data",
                                  dest="dump_raw_data",
                                  action="store_true",
                                  help="dump the raw data (gene counts and "
                                  "p-values) to TSV files (for debugging)")
    args = p.parse_args()
    # Deal with peak and cluster files
    peaks = sort_files(args.peaks)
    for f in peaks:
        if not os.path.exists(f):
            logging.fatal("Peaks file '%s' doesn't exist" % f)
            return 1
        elif os.path.isdir(f):
            logging.fatal("Peaks file '%s' is a directory (must be a file)"
                          % f)
            return 1
    clusters = sort_files(args.clusters)
    for f in clusters:
        if not os.path.exists(f):
            logging.fatal("Cluster file '%s' doesn't exist" % f)
            return 1
        elif os.path.isdir(f):
            logging.fatal("Cluster file '%s' is a directory (must be a file)"
                          % f)
            return 1
    # Generate list of distances
    # (each -d value may itself be a comma-separated list)
    if not args.distances:
        # Defaults
        distances = [d for d in DEFAULT_DISTANCES]
    else:
        # Assemble from command line
        distances = list()
        for d in args.distances:
            for x in d.split(','):
                distances.append(int(x))
    distances = sorted(distances)
    # Check if using built-in interval data
    gene_interval_file = args.gene_intervals
    try:
        gene_interval_file = BUILTIN_GENE_INTERVALS[gene_interval_file]
        # NOTE(review): `p` is reused here and below as a path, shadowing
        # the ArgumentParser above -- works, but rename would be clearer.
        p = os.path.dirname(__file__)
        # Walk up the directory tree looking for the bundled data file.
        while p != os.sep:
            f = os.path.join(p,"pegs-%s" % get_version(),gene_interval_file)
            if os.path.exists(f):
                gene_interval_file = f
                break
            else:
                p = os.path.dirname(p)
    except KeyError:
        # Not found, ignore
        pass
    # Check TADs file is actually a file
    if args.tads_file:
        if not os.path.exists(args.tads_file):
            logging.fatal("TADs file '%s' doesn't exist" % args.tads_file)
            return 1
        elif os.path.isdir(args.tads_file):
            logging.fatal("TADs file '%s' is a directory (must be a file)"
                          % args.tads_file)
            return 1
    # Build colormap for heatmap
    heatmap_cmap = None
    if args.heatmap_color:
        # Construct non-default colormap using the
        # seaborn lightpalette function
        heatmap_cmap = sns.light_palette(color=args.heatmap_color,
                                         as_cmap=True)
    elif args.heatmap_palette_options is not None:
        # Construct non-default colormap using the
        # options supplied by the user
        # (values below are the cubehelix_palette defaults)
        heatmap_palette_options = {
            'n_colors': 6,
            'start': 0,
            'rot': 0.4,
            'gamma': 1.0,
            'hue': 0.8,
            'light': 0.85,
            'dark': 0.15,
            'reverse': False,
        }
        for o in args.heatmap_palette_options:
            key,value = o.split("=")
            if key not in heatmap_palette_options:
                logging.warning("Unrecognised palette option: '%s'"
                                % key)
            else:
                heatmap_palette_options[key] = \
                    CUBEHELIX_PALETTE_TYPES[key](value)
        heatmap_cmap = sns.cubehelix_palette(as_cmap=True,
                                             **heatmap_palette_options)
    # Report version and authors etc
    print("%s %s" % (PEGS_DESCRIPTION,get_version()))
    print("""
Efficiently calculate enrichments of gene clusters in
multiple genomic intervals data (e.g. ChIP-seq peak-sets)
at different distances
Copyright University of Manchester
Faculty of Biology Medicine and Health
Authors: <NAME>, <NAME>
""")
    print(PEGS_CITATION)
    print("====PEGS is starting====")
    # Add PEGS 'bin' directory in user's home area to PATH
    # NB this might not exist
    pegs_dir = os.path.join(str(pathlib2.Path.home()),".pegs")
    pegs_bin_dir = os.path.join(pegs_dir,"bin")
    os.environ['PATH'] = "%s%s%s" % (os.environ['PATH'],
                                     os.pathsep,
                                     pegs_bin_dir)
    # Locate bedtools executable
    bedtools_exe = find_exe("bedtools")
    if not bedtools_exe:
        # Not found
        logging.warning("'bedtools' not found")
        # Attempt to get bedtools
        bedtools_exe = fetch_bedtools(install_dir=pegs_bin_dir,
                                      create_install_dir=True)
        if not bedtools_exe:
            logging.fatal("Failed to fetch 'bedtools'")
            return 1
    print("Found %s (%s)\n" % (bedtools_version(bedtools_exe),
                               bedtools_exe))
    # Calculate the enrichments
    pegs_main(genes_file=gene_interval_file,
              distances=distances,
              peaks=peaks,
              clusters=clusters,
              tads_file=args.tads_file,
              name=args.name,
              heatmap=args.output_heatmap,
              xlsx=args.output_xlsx,
              output_directory=args.output_directory,
              keep_intersection_files=
              args.keep_intersection_files,
              clusters_axis_label=args.clusters_axis_label,
              peaksets_axis_label=args.peaksets_axis_label,
              heatmap_cmap=heatmap_cmap,
              heatmap_format=args.heatmap_format,
              dump_raw_data=args.dump_raw_data)
def mk_pegs_intervals():
# Create command line parser
p = argparse.ArgumentParser()
p.add_argument("refgene_file",
metavar="REFGENE_FILE",
help="refGene annotation data for the genome "
"of interest")
p.add_argument("gene_interval_file",
metavar="GENE_INTERVAL_FILE",
nargs='?',
help="destination for output BED file with "
"gene interval data (default: "
"'<REFGENE_FILE>_intervals.bed')")
p.add_argument('--version',action='version',version=get_version())
args = p.parse_args()
# Report version
print("MK_PEGS_INTERVALS %s\n" % get_version())
# Generate the gene interval file
make_gene_interval_file(args.refgene_file,
args.gene_interval_file) | pegs/cli.py | from builtins import str
import os
import argparse
import logging
# Deal with matplotlib backend before importing seaborn
# See https://stackoverflow.com/a/50089385/579925
import matplotlib
if os.environ.get('DISPLAY','') == '':
print('No display found: using non-interactive Agg backend')
matplotlib.use('Agg')
import seaborn as sns
import pathlib2
from .pegs import pegs_main
from .intervals import make_gene_interval_file
from .bedtools import fetch_bedtools
from .bedtools import bedtools_version
from .utils import find_exe
from .utils import collect_files
from .utils import sort_files
from . import get_version
# Short description used in the argparse help and the startup banner
PEGS_DESCRIPTION = "PEGS: Peak-set Enrichment of Gene-Sets"
# Citation text printed at startup
PEGS_CITATION = """
If you use PEGS in your research then please cite:
* <NAME>, <NAME>, <NAME> et al.
PEGS: An efficient tool for gene set enrichment within defined
sets of genomic intervals [version 2; peer review: 2 approved].
F1000Research 2021, 10:570
(https://doi.org/10.12688/f1000research.53926.2)
"""
# Default set of distances for enrichment calculation, used when no
# -d/--distances arguments are supplied
DEFAULT_DISTANCES = [5000,25000,50000,100000,150000,200000]
# Built in gene interval files, keyed by genome build name
BUILTIN_GENE_INTERVALS = {
    "hg38": "refGene_hg38_120719_intervals.bed",
    "mm10": "refGene_mm10_120719_intervals.bed",
}
# Conversion callables applied to each seaborn 'cubehelix_palette'
# option value supplied via --heatmap-palette
# NOTE(review): bool('False') evaluates True, so 'reverse' cannot be
# switched off from the command line -- confirm intended behaviour
CUBEHELIX_PALETTE_TYPES = {
    'n_colors': int,
    'start': float,
    'rot': float,
    'gamma': float,
    'hue': float,
    'dark': float,
    'light': float,
    'reverse': bool,
}
def pegs():
    """Entry point for the 'pegs' command line utility.

    Parses the command line, validates the input files, resolves the
    gene interval data and the 'bedtools' dependency, then hands off
    to pegs_main to calculate the enrichments.

    Returns 1 if a fatal problem is found (missing input files, or
    'bedtools' cannot be located or fetched); otherwise returns None
    after pegs_main completes.
    """
    # Create command line parser
    p = argparse.ArgumentParser(description=PEGS_DESCRIPTION)
    p.add_argument("gene_intervals",
                   metavar="GENE_INTERVALS",
                   help="either name of a built-in set of gene "
                   "intervals (%s), or a BED file with gene interval "
                   "data" %
                   ','.join(["'%s'" % x for x in BUILTIN_GENE_INTERVALS]))
    p.add_argument('--version',action='version',version=get_version())
    p.add_argument("-p","--peaks",
                   metavar="PEAK_SET_FILE",
                   dest="peaks",
                   action="store",
                   required=True,
                   nargs="+",
                   help="one or more input peak set files (BED format)")
    p.add_argument("-g","--genes",
                   metavar="GENE_CLUSTER_FILE",
                   dest="clusters",
                   action="store",
                   required=True,
                   nargs="+",
                   help="one or more input gene cluster files (one gene "
                   "per line)")
    p.add_argument("-t","--tads",metavar="TADS_FILE",
                   dest="tads_file",
                   action="store",
                   help="BED file with topologically associating "
                   "domains (TADs)")
    p.add_argument("-d","--distances",
                   metavar="DISTANCE",
                   dest="distances",
                   action="store",
                   nargs="+",
                   help="specify distance(s) to calculate enrichments "
                   "for (if no distances are specified then the default "
                   "set will be used i.e. %s)" %
                   ' '.join([str(x) for x in DEFAULT_DISTANCES]))
    output_options = p.add_argument_group("Output options")
    output_options.add_argument("--name",metavar="BASENAME",
                                dest="name",
                                action='store',
                                default="pegs",
                                help="basename for output files (default: "
                                "'pegs')")
    output_options.add_argument("-o",metavar="OUTPUT_DIRECTORY",
                                dest="output_directory",
                                action="store",
                                default=None,
                                help="specify directory to write output "
                                "files to (default: write to current "
                                "directory)")
    output_options.add_argument("-m",metavar="HEATMAP",
                                dest="output_heatmap",
                                action="store",
                                default=None,
                                help="destination for output heatmap; "
                                "image format is implicitly determined by "
                                "the file extension (e.g. '.png','.svg' "
                                "etc) unless overridden by the --format "
                                "option (default: 'BASENAME_heatmap.FORMAT')")
    output_options.add_argument("-x",metavar="XLSX",
                                dest="output_xlsx",
                                action="store",
                                default=None,
                                help="destination for output XLSX file "
                                "with the raw enrichment data (default: "
                                "'BASENAME_results.xlsx')")
    heatmap_options = p.add_argument_group("Heatmap options")
    heatmap_options.add_argument("--format",
                                 dest="heatmap_format",
                                 metavar="FORMAT",
                                 action="store",
                                 default = None,
                                 help="explicitly specify the image format "
                                 "for the output heatmap; note that if this "
                                 "option is specified then it will override "
                                 "the format implied by the specified with "
                                 "the -m option (default: 'png')")
    heatmap_options.add_argument("--x-label",
                                 metavar="CLUSTERS_AXIS_LABEL",
                                 dest="clusters_axis_label",
                                 action="store",
                                 default=None,
                                 help="set a custom label for the X "
                                 "(clusters) axis")
    heatmap_options.add_argument("--y-label",
                                 metavar="PEAKSETS_AXIS_LABEL",
                                 dest="peaksets_axis_label",
                                 action="store",
                                 default=None,
                                 help="set a custom label for the Y "
                                 "(peak sets) axis")
    # --color and --heatmap-palette are mutually exclusive ways of
    # customising the heatmap colormap
    g = heatmap_options.add_mutually_exclusive_group()
    g.add_argument("--color",
                   dest="heatmap_color",
                   metavar="COLOR",
                   action="store",
                   default=None,
                   help="specify a base color to use for the heatmap "
                   "(NB not compatible with --heatmap-palette)")
    g.add_argument("--heatmap-palette",
                   dest="heatmap_palette_options",
                   metavar="OPTION=VALUE",
                   action="store",
                   nargs="+",
                   default = None,
                   help="advanced option to specify custom palette "
                   "settings for the output heatmap (e.g. 'start=0.5', "
                   "'rot=0' etc). Available options are those listed in "
                   "the 'cubehelix_palette' documentation at "
                   "https://seaborn.pydata.org/generated/"
                   "seaborn.cubehelix_palette.html (NB not compatible "
                   "with --color)")
    advanced_options = p.add_argument_group("Advanced options")
    advanced_options.add_argument("-k","--keep-intersection-files",
                                  dest="keep_intersection_files",
                                  action="store_true",
                                  help="keep the intermediate intersection "
                                  "files (useful for debugging)")
    advanced_options.add_argument("--dump-raw-data",
                                  dest="dump_raw_data",
                                  action="store_true",
                                  help="dump the raw data (gene counts and "
                                  "p-values) to TSV files (for debugging)")
    args = p.parse_args()
    # Deal with peak and cluster files
    peaks = sort_files(args.peaks)
    for f in peaks:
        if not os.path.exists(f):
            logging.fatal("Peaks file '%s' doesn't exist" % f)
            return 1
        elif os.path.isdir(f):
            logging.fatal("Peaks file '%s' is a directory (must be a file)"
                          % f)
            return 1
    clusters = sort_files(args.clusters)
    for f in clusters:
        if not os.path.exists(f):
            logging.fatal("Cluster file '%s' doesn't exist" % f)
            return 1
        elif os.path.isdir(f):
            logging.fatal("Cluster file '%s' is a directory (must be a file)"
                          % f)
            return 1
    # Generate list of distances
    if not args.distances:
        # Defaults
        distances = [d for d in DEFAULT_DISTANCES]
    else:
        # Assemble from command line
        # (each argument may itself be a comma-separated list)
        distances = list()
        for d in args.distances:
            for x in d.split(','):
                distances.append(int(x))
    distances = sorted(distances)
    # Check if using built-in interval data
    gene_interval_file = args.gene_intervals
    try:
        gene_interval_file = BUILTIN_GENE_INTERVALS[gene_interval_file]
        # Walk up from the package directory looking for the bundled
        # interval file for this version
        # NOTE(review): 'p' (the argparse parser) is rebound here as a
        # path -- works, but an independent name would be clearer
        p = os.path.dirname(__file__)
        while p != os.sep:
            f = os.path.join(p,"pegs-%s" % get_version(),gene_interval_file)
            if os.path.exists(f):
                gene_interval_file = f
                break
            else:
                p = os.path.dirname(p)
    except KeyError:
        # Not found, ignore
        pass
    # Check TADs file is actually a file
    if args.tads_file:
        if not os.path.exists(args.tads_file):
            logging.fatal("TADs file '%s' doesn't exist" % args.tads_file)
            return 1
        elif os.path.isdir(args.tads_file):
            logging.fatal("TADs file '%s' is a directory (must be a file)"
                          % args.tads_file)
            return 1
    # Build colormap for heatmap
    heatmap_cmap = None
    if args.heatmap_color:
        # Construct non-default colormap using the
        # seaborn lightpalette function
        heatmap_cmap = sns.light_palette(color=args.heatmap_color,
                                         as_cmap=True)
    elif args.heatmap_palette_options is not None:
        # Construct non-default colormap using the
        # options supplied by the user
        heatmap_palette_options = {
            'n_colors': 6,
            'start': 0,
            'rot': 0.4,
            'gamma': 1.0,
            'hue': 0.8,
            'light': 0.85,
            'dark': 0.15,
            'reverse': False,
        }
        for o in args.heatmap_palette_options:
            key,value = o.split("=")
            if key not in heatmap_palette_options:
                logging.warning("Unrecognised palette option: '%s'"
                                % key)
            else:
                # NOTE(review): for 'reverse' this applies bool() to a
                # string, so e.g. 'reverse=False' evaluates True --
                # probably needs a real string-to-bool conversion
                heatmap_palette_options[key] = \
                    CUBEHELIX_PALETTE_TYPES[key](value)
        heatmap_cmap = sns.cubehelix_palette(as_cmap=True,
                                             **heatmap_palette_options)
    # Report version and authors etc
    print("%s %s" % (PEGS_DESCRIPTION,get_version()))
    print("""
Efficiently calculate enrichments of gene clusters in
multiple genomic intervals data (e.g. ChIP-seq peak-sets)
at different distances
Copyright University of Manchester
Faculty of Biology Medicine and Health
Authors: <NAME>, <NAME>
""")
    print(PEGS_CITATION)
    print("====PEGS is starting====")
    # Add PEGS 'bin' directory in user's home area to PATH
    # NB this might not exist
    pegs_dir = os.path.join(str(pathlib2.Path.home()),".pegs")
    pegs_bin_dir = os.path.join(pegs_dir,"bin")
    os.environ['PATH'] = "%s%s%s" % (os.environ['PATH'],
                                     os.pathsep,
                                     pegs_bin_dir)
    # Locate bedtools executable
    bedtools_exe = find_exe("bedtools")
    if not bedtools_exe:
        # Not found
        logging.warning("'bedtools' not found")
        # Attempt to get bedtools
        bedtools_exe = fetch_bedtools(install_dir=pegs_bin_dir,
                                      create_install_dir=True)
        if not bedtools_exe:
            logging.fatal("Failed to fetch 'bedtools'")
            return 1
    print("Found %s (%s)\n" % (bedtools_version(bedtools_exe),
                               bedtools_exe))
    # Calculate the enrichments
    pegs_main(genes_file=gene_interval_file,
              distances=distances,
              peaks=peaks,
              clusters=clusters,
              tads_file=args.tads_file,
              name=args.name,
              heatmap=args.output_heatmap,
              xlsx=args.output_xlsx,
              output_directory=args.output_directory,
              keep_intersection_files=
              args.keep_intersection_files,
              clusters_axis_label=args.clusters_axis_label,
              peaksets_axis_label=args.peaksets_axis_label,
              heatmap_cmap=heatmap_cmap,
              heatmap_format=args.heatmap_format,
              dump_raw_data=args.dump_raw_data)
def mk_pegs_intervals():
    """Entry point for the 'mk_pegs_intervals' utility.

    Converts a refGene annotation file into the BED file of gene
    intervals consumed by 'pegs' (via make_gene_interval_file).
    """
    # Create command line parser
    p = argparse.ArgumentParser()
    p.add_argument("refgene_file",
                   metavar="REFGENE_FILE",
                   help="refGene annotation data for the genome "
                   "of interest")
    p.add_argument("gene_interval_file",
                   metavar="GENE_INTERVAL_FILE",
                   nargs='?',
                   help="destination for output BED file with "
                   "gene interval data (default: "
                   "'<REFGENE_FILE>_intervals.bed')")
    p.add_argument('--version',action='version',version=get_version())
    args = p.parse_args()
    # Report version
    print("MK_PEGS_INTERVALS %s\n" % get_version())
    # Generate the gene interval file
    # (gene_interval_file may be None; the callee supplies the default)
    make_gene_interval_file(args.refgene_file,
                            args.gene_interval_file) | 0.559049 | 0.23118 |
from ..dirs import PEOPLE_DIR, SLEPOK_DIR
from .audio import preprocess_wav
from resemblyzer.voice_encoder import VoiceEncoder
from .cut_pauses import wav_by_segments
from .kaldi_tools import parse_kaldi_file, creat_output_file, write_file
import os
import numpy as np
import sys
def get_similarity(encoder, cont_embeds, speaker_wav):
    """Score each continuous embedding against one speaker's voice.

    Embeds ``speaker_wav`` with ``encoder`` and returns the dot product
    of every row of ``cont_embeds`` with that reference embedding (one
    similarity value per frame).
    """
    reference_embedding = encoder.embed_utterance(speaker_wav)
    return np.matmul(cont_embeds, reference_embedding)
def get_similarity_several(encoder, cont_embeds, speaker_wavs, speaker_names):
    """Compute per-frame similarities against several speakers.

    Parameters:
        encoder: object with an ``embed_utterance`` method
        cont_embeds: array of continuous (per-frame) embeddings
        speaker_wavs: reference waveforms, positionally aligned with
            ``speaker_names``
        speaker_names: speaker identifiers used as result keys

    Returns a dict mapping each speaker name to the similarity array
    produced by get_similarity (insertion order follows speaker_names).
    """
    # Pair names with their waveforms directly instead of indexing by
    # position (the two sequences are expected to be the same length)
    return {
        name: get_similarity(encoder, cont_embeds, wav)
        for name, wav in zip(speaker_names, speaker_wavs)
    }
def get_operator_wavs(operators_dir):
    """Load all operator reference ('slepok') recordings.

    Parameters:
        operators_dir: directory containing one audio file per operator

    Returns (wavs, operator_names): preprocessed waveforms and the
    matching operator names (the file name without its extension, with
    any interior dots replaced by underscores).
    """
    operator_names = []
    wavs = []
    # Sort the directory listing: os.listdir order is filesystem
    # dependent, and a deterministic order gives stable tie-breaking
    # in downstream argmax-based identification
    for slepok_file in sorted(os.listdir(operators_dir)):
        file_path = os.path.join(operators_dir, slepok_file)
        # Drop the extension; remaining dots become underscores
        operator_names.append('_'.join(slepok_file.split('.')[:-1]))
        wav = preprocess_wav(file_path, noise=False)
        wavs.append(wav)
    return wavs, operator_names
def identify_operator(wav, encoder, cont_embeds):
    """Pick the operator whose reference voice best matches the call.

    Compares ``cont_embeds`` against every reference recording under
    SLEPOK_DIR and selects the operator with the highest mean
    similarity.  Note: ``wav`` is currently unused -- identification
    relies entirely on the precomputed embeddings.

    Returns (best_wav, best_similarity_array, best_name).
    """
    candidate_wavs, candidate_names = get_operator_wavs(str(SLEPOK_DIR))
    similarities = get_similarity_several(encoder, cont_embeds,
                                          candidate_wavs, candidate_names)
    # Mean similarity per operator, in the same order as candidate_names
    mean_scores = [sim.mean() for sim in similarities.values()]
    winner = np.argmax(mean_scores)
    winner_name = candidate_names[winner]
    return candidate_wavs[winner], similarities[winner_name], winner_name
def make_points(data, timeline, window):
    """Downsample ``data`` by averaging consecutive chunks of ``window`` samples.

    Parameters:
        data: sequence of values to aggregate
        timeline: per-sample timestamps (indexed at each chunk start)
        window: number of samples per averaged point

    Returns (points, time_points): the chunk means and the timestamp of
    the first sample of each chunk.  A trailing partial chunk is
    dropped (unless the data is shorter than two chunks, in which case
    a single -- possibly partial -- chunk is emitted), preserving the
    behaviour of the original loop.
    """
    # Guard against empty input: the loop below would otherwise index
    # timeline[0] and average an empty slice
    if len(data) == 0:
        return [], []
    points = []
    time_points = []
    start = 0
    end = window
    while True:
        points.append(np.mean(data[start:end]))
        time_points.append(timeline[start])
        start = end
        end += window
        if end > len(data):
            return points, time_points
def sliding_window(data, sr=16000, window=1600, size=300):
    """Compute a smoothed loudness envelope for one audio channel.

    Averages the absolute amplitude over successive chunks of samples,
    records the start time (in seconds) of each chunk, then downsamples
    both series via make_points (groups of 20).

    Parameters:
        data: 1-D array of audio samples (one channel)
        sr: sample rate in Hz (only used to convert indices to seconds)
        window: size in samples of the first chunk
        size: amount the chunk end advances on each iteration
    """
    arr_slid = []
    timeline = []
    start = 0
    end = window
    while True:
        timeline.append(start / sr)
        arr_slid.append(np.mean(np.abs(data[start:end])))
        # NOTE(review): after the first iteration the chunk width
        # becomes `size` (start jumps to the previous end while end
        # only advances by `size`), so only the first chunk is
        # `window` samples wide.  A classic sliding window would
        # advance `start` by `size` instead -- confirm this is the
        # intended behaviour before changing it.
        start = end
        end += size
        if end > len(data):
            # Downsample the envelope (and its timeline) in groups of 20
            return make_points(arr_slid, timeline, 20)
def create_make(points_0, points_1, timeline):
    """Build a channel-switch markup from two loudness envelopes.

    For every time point the louder channel (0 or 1) is selected; each
    run of consecutive points on one channel that is followed by a
    switch becomes an entry [start_time, end_time, channel].  The final
    run -- never followed by a switch -- is not emitted, matching the
    original behaviour.
    """
    dominant = [np.argmax(pair) for pair in zip(points_0, points_1)]
    segments = []
    seg_start = 0
    for idx in range(1, len(dominant)):
        if dominant[idx] == dominant[idx - 1]:
            continue
        segments.append([timeline[seg_start], timeline[idx], dominant[idx - 1]])
        seg_start = idx
    return segments
def identification(cutted_data, device):
    """Identify the operator speaking in the (pause-stripped) audio.

    Embeds the audio with resemblyzer's VoiceEncoder and matches the
    per-frame embeddings against the stored operator reference
    recordings via identify_operator.

    Parameters:
        cutted_data: waveform with silences removed
        device: 'cuda' or 'cpu' for the encoder

    Returns the best-matching operator's name.
    """
    encoder = VoiceEncoder(device, verbose=False)
    # rate=16: partial-embedding rate passed to resemblyzer -- see the
    # VoiceEncoder.embed_utterance documentation for exact semantics
    _, cont_embeds, _ = encoder.embed_utterance(cutted_data, return_partials=True, rate=16)
    operator_wav, operator_similarity, operator_name = identify_operator(cutted_data, encoder, cont_embeds)
    return operator_name
def diarize(wav_fpath, file_kaldi, device):
    """Diarize one call recording.

    Parameters:
        wav_fpath: path to the WAV file (must have at least two
            channels -- channels 0 and 1 are compared below)
        file_kaldi: path to the kaldi output file for this recording
        device: 'cuda' or 'cpu' for the voice encoder

    Returns (markup, operator_name): the channel-switch markup from
    create_make plus the identified operator's name.
    """
    start_end_text = parse_kaldi_file(file_kaldi)
    # presumably cuts the audio down to the voiced segments listed in
    # the kaldi file -- see wav_by_segments for the exact contract
    cutted_data, sr, voice_fragments, data = wav_by_segments(wav_fpath, start_end_text, 0)
    name_operator = identification(cutted_data, device)
    # Loudness envelope for each of the two channels
    points_0, timeline_0 = sliding_window(data[:, 0], sr)
    points_1, timeline_1 = sliding_window(data[:, 1], sr)
    # Both timelines are built identically, so timeline_0 serves both
    return create_make(points_0, points_1, timeline_0), name_operator
def diarize_all(name, gpu=False):
    """Run diarization over every recording for one person/session.

    Expects PEOPLE_DIR/<name>/txt/ to hold the kaldi files and
    PEOPLE_DIR/<name>/wav/ the matching WAV files (same basenames,
    '.txt' vs '.wav').

    Parameters:
        name: sub-directory under PEOPLE_DIR to process
        gpu: use CUDA for the voice encoder when True
    """
    folder_kaldi = f'{PEOPLE_DIR}/{name}/txt/'
    folder_wav = f'{PEOPLE_DIR}/{name}/wav/'
    device = 'cuda' if gpu else 'cpu'
    # sorted() gives a stable processing order; idx is forwarded to
    # write_file (presumably to number the output files -- verify there)
    for idx, file_name in enumerate(sorted(os.listdir(folder_kaldi))):
        kaldi_fpath = folder_kaldi + file_name
        wav_fpath = folder_wav + file_name.replace('.txt', '.wav')
        markup, name_operator = diarize(wav_fpath, kaldi_fpath, device)
        result = creat_output_file(kaldi_fpath, markup)
        write_file(result, name_operator, name, idx)
# Command line entry point: the first argument is the person/session
# sub-directory name under PEOPLE_DIR (GPU use defaults to off)
if __name__ == '__main__':
    diarize_all(sys.argv[1])
'''
MAX_SIZE = 3500
start = 0
end = MAX_SIZE
partial_embeds = 0
if MAX_SIZE > len(mels):
with torch.no_grad():
melss = torch.from_numpy(mels[start:]).to(self.device)
partial_embeds = self(melss).cpu().numpy()
else:
while True:
if end > len(mels):
with torch.no_grad():
melss = torch.from_numpy(mels[start:]).to(self.device)
partial_embeds = np.concatenate((partial_embeds, self(melss).cpu().numpy()), axis=0)
break
elif start == 0:
with torch.no_grad():
melss = torch.from_numpy(mels[start:end]).to(self.device)
partial_embeds = self(melss).cpu().numpy()
else:
with torch.no_grad():
melss = torch.from_numpy(mels[start:end]).to(self.device)
partial_embeds = np.concatenate((partial_embeds, self(melss).cpu().numpy()), axis=0)
start = end
end += MAX_SIZE
torch.cuda.empty_cache()
''' | src/diarization/diarization.py | from ..dirs import PEOPLE_DIR, SLEPOK_DIR
from .audio import preprocess_wav
from resemblyzer.voice_encoder import VoiceEncoder
from .cut_pauses import wav_by_segments
from .kaldi_tools import parse_kaldi_file, creat_output_file, write_file
import os
import numpy as np
import sys
def get_similarity(encoder, cont_embeds, speaker_wav):
speaker_embeds = encoder.embed_utterance(speaker_wav)
return cont_embeds @ speaker_embeds
def get_similarity_several(encoder, cont_embeds, speaker_wavs, speaker_names):
res = dict()
for i in range(len(speaker_names)):
res[speaker_names[i]] = get_similarity(encoder, cont_embeds, speaker_wavs[i])
return res
def get_operator_wavs(operators_dir):
operator_names = []
wavs = []
for slepok_file in os.listdir(operators_dir):
file_path = os.path.join(operators_dir, slepok_file)
operator_names.append('_'.join(slepok_file.split('.')[:-1]))
wav = preprocess_wav(file_path, noise=False)
wavs.append(wav)
return wavs, operator_names
def identify_operator(wav, encoder, cont_embeds):
operators_wavs, operators_names = get_operator_wavs(str(SLEPOK_DIR))
operators_similarity = get_similarity_several(encoder, cont_embeds, operators_wavs, operators_names)
operators_similarity_mean = [op_sim.mean() for op_sim in operators_similarity.values()]
best_id = np.argmax(operators_similarity_mean)
best_operator_name = operators_names[best_id]
return operators_wavs[best_id], operators_similarity[best_operator_name], best_operator_name
def make_points(data, timeline, window):
points = []
time_points = []
start = 0
end = window
while True:
points.append(np.mean(data[start:end]))
time_points.append(timeline[start])
start = end
end += window
if end > len(data):
return points, time_points
def sliding_window(data, sr=16000, window=1600, size=300):
arr_slid = []
timeline = []
start = 0
end = window
while True:
timeline.append(start / sr)
arr_slid.append(np.mean(np.abs(data[start:end])))
start = end
end += size
if end > len(data):
return make_points(arr_slid, timeline, 20)
def create_make(points_0, points_1, timeline):
skp_lst = []
for p_0, p_1 in zip(points_0, points_1):
skp_lst.append(np.argmax([p_0, p_1]))
spk = []
start = 0
end = 0
for i in range(1, len(skp_lst)):
if skp_lst[i] != skp_lst[i - 1]:
end = i
spk.append([timeline[start], timeline[end], skp_lst[i - 1]])
start = end
return spk
def identification(cutted_data, device):
encoder = VoiceEncoder(device, verbose=False)
_, cont_embeds, _ = encoder.embed_utterance(cutted_data, return_partials=True, rate=16)
operator_wav, operator_similarity, operator_name = identify_operator(cutted_data, encoder, cont_embeds)
return operator_name
def diarize(wav_fpath, file_kaldi, device):
start_end_text = parse_kaldi_file(file_kaldi)
cutted_data, sr, voice_fragments, data = wav_by_segments(wav_fpath, start_end_text, 0)
name_operator = identification(cutted_data, device)
points_0, timeline_0 = sliding_window(data[:, 0], sr)
points_1, timeline_1 = sliding_window(data[:, 1], sr)
return create_make(points_0, points_1, timeline_0), name_operator
def diarize_all(name, gpu=False):
folder_kaldi = f'{PEOPLE_DIR}/{name}/txt/'
folder_wav = f'{PEOPLE_DIR}/{name}/wav/'
device = 'cuda' if gpu else 'cpu'
for idx, file_name in enumerate(sorted(os.listdir(folder_kaldi))):
kaldi_fpath = folder_kaldi + file_name
wav_fpath = folder_wav + file_name.replace('.txt', '.wav')
markup, name_operator = diarize(wav_fpath, kaldi_fpath, device)
result = creat_output_file(kaldi_fpath, markup)
write_file(result, name_operator, name, idx)
if __name__ == '__main__':
diarize_all(sys.argv[1])
'''
MAX_SIZE = 3500
start = 0
end = MAX_SIZE
partial_embeds = 0
if MAX_SIZE > len(mels):
with torch.no_grad():
melss = torch.from_numpy(mels[start:]).to(self.device)
partial_embeds = self(melss).cpu().numpy()
else:
while True:
if end > len(mels):
with torch.no_grad():
melss = torch.from_numpy(mels[start:]).to(self.device)
partial_embeds = np.concatenate((partial_embeds, self(melss).cpu().numpy()), axis=0)
break
elif start == 0:
with torch.no_grad():
melss = torch.from_numpy(mels[start:end]).to(self.device)
partial_embeds = self(melss).cpu().numpy()
else:
with torch.no_grad():
melss = torch.from_numpy(mels[start:end]).to(self.device)
partial_embeds = np.concatenate((partial_embeds, self(melss).cpu().numpy()), axis=0)
start = end
end += MAX_SIZE
torch.cuda.empty_cache()
''' | 0.273671 | 0.134264 |
import sys, os
from pathlib import Path
import cv2
import numpy as np
import torch
from torchvision import datasets
from torch.autograd import Variable
from keras.applications.inception_resnet_v2 import preprocess_input
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
# detector
sys.path.append(os.environ['DETECTOR_PATH'])
from models import *
from utils.utils import *
from utils.datasets import *
# nima
sys.path.append(os.environ['REGRESSOR_PATH'])
from nima_models import NimaModel
class InstaScoreEstimator:
    """Estimate how 'instagrammable' a food photo is.

    Combines a Darknet-based dish detector with a NIMA aesthetic
    regressor.  Model locations are resolved from the DETECTOR_PATH and
    REGRESSOR_PATH environment variables at class definition time, so
    both must be set before this module is imported.
    """
    # food detector
    detector_path = Path(os.environ['DETECTOR_PATH'])
    config_path = detector_path / "config/mymodel.cfg"
    weights_path = detector_path / "result/normal_finetuning_aug_full_strong/35.pkl"
    # detector input resolution
    img_size = 416
    img_shape= (img_size, img_size)
    class_path = detector_path / "data/coco.names"
    # detection confidence threshold and non-maximum-suppression threshold
    conf_thresh = 0.7
    nms_thres = 0.4
    classes = load_classes(class_path)
    Tensor = torch.FloatTensor
    # nima
    regressor_path = Path(os.environ['REGRESSOR_PATH'])
    nima_weight_path = regressor_path / 'weights/inception_weights.h5'
    nima_img_size = 224

    def __init__(self):
        """Load both models; the detector is moved to GPU when available."""
        # food detector
        self.detector = Darknet(self.config_path, img_size=self.img_size)
        model_wts = torch.load(self.weights_path)
        self.detector.load_state_dict(model_wts)
        self.detector.eval()
        if torch.cuda.is_available():
            self.detector = self.detector.cuda()
        # nima
        self.regressor = NimaModel(img_size=self.nima_img_size)
        self.regressor.load_weights(self.nima_weight_path)

    def predict(self, img_path):
        """Score the image stored at ``img_path``.

        Returns (bbox, bbox_area, img_area, score): the detector's
        bounding box and its area, the full image area in pixels, and
        the NIMA-derived score normalised into (0, 1].
        """
        img = cv2.imread(img_path)
        # BGR -> RGB (OpenCV loads images as BGR)
        img = img[:, :, ::-1]
        h, w, c = img.shape
        img_area = h*w
        # run dish detector
        bbox, bbox_area = self.detector.predict(img, self.conf_thresh, self.nms_thres)
        # run nima
        img = load_img(img_path, target_size=(224, 224))
        img_arr = img_to_array(img)
        img_arr = np.expand_dims(img_arr, axis=0)
        img_arr = preprocess_input(img_arr)
        instagenic_scores = self.regressor.predict(img_arr)
        # calculate instagrammable score
        # argmax over the score bins gives a 1-based rating, which is
        # divided by 5 (presumably 5 rating bins -- verify against the
        # NimaModel output shape)
        score = np.argmax(instagenic_scores) + 1.
        score /= 5.
        return bbox, bbox_area, img_area, score | ml/server/estimator.py | import sys, os
from pathlib import Path
import cv2
import numpy as np
import torch
from torchvision import datasets
from torch.autograd import Variable
from keras.applications.inception_resnet_v2 import preprocess_input
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
# detector
sys.path.append(os.environ['DETECTOR_PATH'])
from models import *
from utils.utils import *
from utils.datasets import *
# nima
sys.path.append(os.environ['REGRESSOR_PATH'])
from nima_models import NimaModel
class InstaScoreEstimator:
# food detector
detector_path = Path(os.environ['DETECTOR_PATH'])
config_path = detector_path / "config/mymodel.cfg"
weights_path = detector_path / "result/normal_finetuning_aug_full_strong/35.pkl"
img_size = 416
img_shape= (img_size, img_size)
class_path = detector_path / "data/coco.names"
conf_thresh = 0.7
nms_thres = 0.4
classes = load_classes(class_path)
Tensor = torch.FloatTensor
# nima
regressor_path = Path(os.environ['REGRESSOR_PATH'])
nima_weight_path = regressor_path / 'weights/inception_weights.h5'
nima_img_size = 224
def __init__(self):
# food detector
self.detector = Darknet(self.config_path, img_size=self.img_size)
model_wts = torch.load(self.weights_path)
self.detector.load_state_dict(model_wts)
self.detector.eval()
if torch.cuda.is_available():
self.detector = self.detector.cuda()
# nima
self.regressor = NimaModel(img_size=self.nima_img_size)
self.regressor.load_weights(self.nima_weight_path)
def predict(self, img_path):
img = cv2.imread(img_path)
img = img[:, :, ::-1]
h, w, c = img.shape
img_area = h*w
# run dish detector
bbox, bbox_area = self.detector.predict(img, self.conf_thresh, self.nms_thres)
# run nima
img = load_img(img_path, target_size=(224, 224))
img_arr = img_to_array(img)
img_arr = np.expand_dims(img_arr, axis=0)
img_arr = preprocess_input(img_arr)
instagenic_scores = self.regressor.predict(img_arr)
# calculate instagrammable score
score = np.argmax(instagenic_scores) + 1.
score /= 5.
return bbox, bbox_area, img_area, score | 0.504394 | 0.20834 |