| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
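A minimal sketch of how a shard with this schema could be inspected and filtered on the quality signals, assuming the shard is available as a local Parquet file (the file name and thresholds below are hypothetical):

import pandas as pd

# Hypothetical shard path; the real file name depends on how the dump was exported.
df = pd.read_parquet("shard-00000.parquet")

# Keep Python files that are not dominated by duplicated 5-grams and that
# contain a reasonable share of alphabetic characters.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())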
Record 1
hexsha: cd8d8365ca2301a760424dae1ee2e706688adc1f | size: 9,678 | ext: py | lang: Python
repo: QingShuiXiFan/Style-Transfer | path: main/views.py | head: f79951323cdfd0c72f2157623209d9067376306b | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, FileResponse
from django.urls import reverse
import os
from django.contrib.auth import authenticate, login, logout  # default helpers from Django's auth app
from django.contrib import auth
from django.template import RequestContext
from .forms import LoginForm, RegistrationForm
from django.contrib.auth.models import User
import hashlib  # Python's hashing library
from django.contrib.auth.hashers import make_password, check_password  # Django's built-in password hashers
from django.core.mail import send_mail
import imghdr  # used to check whether a file is an image
import time, datetime
from django.conf import settings
from .models import Pictures

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_DIR = "common_static"
GPU_ISACTIVATED = True

# Create your views here.
def index(request):
    return render(request, "main/index.html")

def blog(request):
    return render(request, 'main/blog.html')

def blogArticle(request):
    return render(request, 'main/blogArticle.html')

def faq(request):
    return render(request, 'main/faq.html')

def about(request):
    return render(request, 'main/about.html')

def support(request):
    return render(request, 'main/support.html')

# Get the visitor's IP
def get_request_ip(request):
    try:
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]  # this is the real client IP
        else:
            ip = request.META.get('REMOTE_ADDR')  # this may be the proxy's IP
    except Exception:
        ip = None
    return ip

# Get a file's size in MB
def get_FileSize(filePath):
    fsize = os.path.getsize(filePath)
    fsize = fsize / float(1024 * 1024)
    return round(fsize, 2)

def ajaxUpload(request):
    if request.method == 'GET':
        return render(request, 'main/ajaxUpload.html')
    if request.method == 'POST':
        # get the visitor's IP
        ip = get_request_ip(request)
        # ======= upload the content image ==========
        file_obj = request.FILES.get('file_obj', None)  # the file object; defaults to None if no file was sent
        # no image was uploaded
        if not file_obj:
            result = {"status": "no_file"}
            return JsonResponse(result)
        # store the image's destination path in the database via the model class
        t = time.time()  # add a timestamp to the file name so every file is tagged uniquely
        timeStamp = str(int(t))
        p = Pictures()
        p.pic = "tmpImages/" + timeStamp + '_' + file_obj.name  # file path field
        p.uploaded_timeStamp = timeStamp  # upload timestamp field
        p.ip = ip  # user IP field
        p.save()
        # write the file to disk
        picPath = settings.MEDIA_ROOT + "/tmpImages/" + timeStamp + '_' + file_obj.name
        with open(picPath, 'wb+') as destination:  # open the target file for binary writing
            for chunk in file_obj.chunks():  # write the file in chunks
                destination.write(chunk)
        # store the path and id in the session
        request.session['uploaded_pic_path'] = str(p.pic)
        request.session['uploaded_pic_id'] = str(p.id)
        request.session.set_expiry(0)  # clear the session when the browser is closed
        picName = timeStamp + '_' + file_obj.name
        data = {"status": "success", "picName": picName}  # return data to the frontend to display the uploaded image
        return JsonResponse(data)

# Stylization
def transfer(request):
    if request.method == "GET":
        request.session.flush()  # clear any existing session
        return render(request, 'main/transfer.html')
    if request.method == "POST":  # handle POST requests
        # get the visitor's IP
        ip = get_request_ip(request)
        style_name = str(request.POST.get('style_name'))  # value of the select control, e.g. scream, matching a checkpoint file such as scream.ckpt
        if style_name in ['la_muse', 'rain_princess', 'the_scream', 'the_shipwreck_of_the_minotaur', 'udnie', 'wave']:
            ckpt_path = style_name + ".ckpt"  # checkpoint file name
        else:
            ckpt_path = style_name
        content_name = str(request.POST.get('picName'))  # name of the content image
        generated_image_path = BASE_DIR + "/" + STATIC_DIR + "/media/download/tmpImages/" + content_name  # path of the generated image
        # if a stylized image already exists, delete it
        if os.path.exists(generated_image_path):
            os.remove(generated_image_path)
        # run evaluate.py
        cmd = settings.PYTHON_VERSION + " evaluate.py --checkpoint examples/checkpoint/" + ckpt_path + \
            " --in-path " + BASE_DIR + "/" + STATIC_DIR + "/media/upload/tmpImages/" + content_name + \
            " --out-path " + BASE_DIR + "/" + STATIC_DIR + "/media/download/tmpImages/"
        if GPU_ISACTIVATED:
            activate_gpu = 'activate tensorflow-gpu'
            os.popen(activate_gpu + " && cd " + BASE_DIR + "/fast-style-transfer-master && " + cmd)
        else:
            os.popen("cd " + BASE_DIR + "/fast-style-transfer-master && " + cmd)
        start_time = time.time()
        while not os.path.exists(generated_image_path):
            time_used = time.time() - start_time
            if time_used >= 60:
                data = {"status": "time_out"}
                return JsonResponse(data)
            else:
                time.sleep(1)
        data = {"status": "success"}  # return data to the frontend to display the generated image
        return JsonResponse(data)

# Download image
def file_down(request):
    file = open('', 'rb')  # NOTE: the file path is left empty in the source
    response = FileResponse(file)
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment;filename="example.tar.gz"'
    return response

def showImg(request):
    return render(request, 'main/showImage.html')

def style2paint(request):
    return render(request, 'main/style2paint.html')

def user_login(request):
    if request.method == "GET":
        login_form = LoginForm()
        return render(request, 'main/login.html', {"form": login_form})
    if request.method == "POST":  # GET is mostly used for queries, POST for writes and updates
        login_form = LoginForm(request.POST)  # request.POST is a dict-like object holding the submitted form data
        if login_form.is_valid():
            cd = login_form.cleaned_data
            # user = authenticate(email=cd['email'],
            #                     password=cd['password'])  # authenticate() returns a User instance if the credentials match, otherwise None
            input_email = cd['email']
            input_password = cd['password']
            try:
                user = User.objects.get(email=input_email)
                if check_password(input_password, user.password):  # hashed-password check
                    login(request, user)  # log the user in with the User instance
                    return redirect('main:index')
                else:
                    message = "抱歉,您的密码填写错误"  # "Sorry, the password you entered is wrong"
                    return render(request, 'main/login.html', {"message": message, "form": login_form})
            except Exception:
                message = "用户不存在!"  # "This user does not exist!"
                return render(request, 'main/login.html', {"message": message, "form": login_form})
        else:
            message = "验证码输入错误"  # "The captcha was entered incorrectly"
            return render(request, 'main/login.html', {"message": message, "form": login_form})

def user_logout(request):
    logout(request)  # log the user out
    return redirect("/main/")

def register(request):
    if request.user.is_authenticated:
        # registration is not allowed while logged in; feel free to change this rule!
        return redirect("/main")
    if request.method == "POST":
        user_form = RegistrationForm(request.POST)
        if user_form.is_valid():  # get the data
            # <== extra validation logic can go here ==>
            cd = user_form.cleaned_data
            input_username = cd['username']
            input_email = cd['email']
            input_password = cd['password']
            input_password2 = cd['password2']
            if input_password != input_password2:  # check that the two passwords match
                message = "两次输入的密码不同!"  # "The two passwords you entered differ!"
                return render(request, 'main/register.html', {"message": message, "form": user_form})
            else:
                same_name_user = User.objects.filter(username=input_username)
                if same_name_user:  # usernames must be unique
                    message = '该用户名已被注册,请使用别的用户名!'  # "This username is already registered, please pick another one!"
                    return render(request, 'main/register.html', {"message": message, "form": user_form})
                same_email_user = User.objects.filter(email=input_email)
                if same_email_user:  # email addresses must be unique
                    message = '该邮箱地址已被注册,请使用别的邮箱!'  # "This email address is already registered, please use another one!"
                    return render(request, 'main/register.html', {"message": message, "form": user_form})
                # the email is available and the form was filled in correctly
                new_user = user_form.save(commit=False)
                new_user.password = make_password(user_form.cleaned_data['password'])  # hash with Django's built-in algorithm
                new_user.save()
                # send_mail('Subject here', 'Here is the message.', 'from@example.com', ['to@example.com'], fail_silently=False)
                send_email_content = input_username + ',\n' + '\t你已经成功注册Style Transfer账号,以下是你的登录信息,请谨慎保存:\n' + '电子邮箱:' + input_email + '\n' + '密码:' + input_password + '\n\n' + 'www.styletransfer.cn'  # registration-confirmation mail (in Chinese) containing the login details
                send_mail('[Style Transfer] Registered Successfully!', send_email_content, 'styletransfer@163.com',
                          [input_email],
                          fail_silently=False)
                message = input_username + ",注册成功!"  # "..., registered successfully!"
                return redirect('main:tip')
        else:
            message = "用户名已被使用"  # "This username is already in use"
            return render(request, "main/register.html", {"message": message, "form": user_form})
    user_form = RegistrationForm()
    return render(request, "main/register.html", {"form": user_form})

def playground(request):
    return render(request, 'main/playground.html')

def tip(request):
    return render(request, 'main/tip.html')

def hash_code(s, salt='styletransfer'):  # hash helper
    h = hashlib.sha256()
    s += salt
    h.update(s.encode())  # update() only accepts bytes
    return h.hexdigest()
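The transfer() view builds a shell command by string concatenation and runs it with os.popen, so a crafted picName could inject shell commands. A minimal sketch of a safer invocation, passing arguments as a list so the file name is never parsed by a shell (this assumes the same evaluate.py interface used above and is not part of the original file):

import subprocess

def run_style_transfer(ckpt_path, content_name):
    # Equivalent to the cmd string in transfer(), but without shell parsing.
    in_path = os.path.join(BASE_DIR, STATIC_DIR, "media/upload/tmpImages", content_name)
    out_path = os.path.join(BASE_DIR, STATIC_DIR, "media/download/tmpImages/")
    subprocess.Popen(
        [settings.PYTHON_VERSION, "evaluate.py",
         "--checkpoint", "examples/checkpoint/" + ckpt_path,
         "--in-path", in_path,
         "--out-path", out_path],
        cwd=os.path.join(BASE_DIR, "fast-style-transfer-master"),
    )

Like os.popen, subprocess.Popen returns immediately, so the existing polling loop on generated_image_path still applies.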
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 36.247191 | max_line_length 198 | alphanum_fraction 0.615726
num_words 1,060 | num_chars 9,678 | mean_word_length 5.471698 | frac_words_unique 0.295283
frac_chars_top_2grams 0.043448 | top_3grams 0.068793 | top_4grams 0.083276 | frac_chars_dupe_5grams 0.257931 | dupe_6grams 0.149483 | dupe_7grams 0.112931 | dupe_8grams 0.112931 | dupe_9grams 0.072586 | dupe_10grams 0.072586
frac_chars_replacement_symbols 0 | frac_chars_digital 0.0035 | frac_chars_whitespace 0.262038 | frac_chars_alphabet 0.808597 | frac_chars_comments 0.105807 | frac_chars_string_length 0.16087 | frac_chars_long_word_length 0.036175 | frac_chars_hex_words 0
size_file_byte 9,678 | num_lines 266 | num_chars_line_max 199 | num_chars_line_mean 36.383459
cate_xml_start 0 | frac_lines_dupe_lines 0.170213 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.101064 | cate_var_zero false | frac_lines_pass 0.042553 | frac_lines_import 0.085106 | frac_lines_simplefunc 0.053191 | score_lines_no_logic 0.361702 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 2
hexsha: cd8e00f631a120690eef589a528899913c4b3443 | size: 781 | ext: py | lang: Python
repo: CircuitLaunch/Spot_Bootcamp | path: edj/Spot_square.py | head: 47735ce474a59c5478099f6095b68c46b77d3da6 | licenses: ["BSD-3-Clause"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
#!/usr/bin/env python3

from Spot import *
import time

from bosdyn.client import math_helpers

if __name__ == '__main__':
    spot = Spot()
    try:
        # It's ALIVE!
        spot.power_on()
        spot.move_to(1.0, 0.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)
        spot.move_to(0.0, 1.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)
        spot.move_to(-1.0, 0.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)
        spot.move_to(0.0, -1.0, 0.0, math_helpers.Quat(), duration=5.0)
        time.sleep(5.0)
        # Power down
        spot.estop(graceful=True)
    except Exception as e:  # the source used a bare except and printed only 'Exception'
        print(f'Exception: {e}')
        print('Trying to make Python GC the Spot object')
        spot = None
        time.sleep(5.0)
    exit(0)
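The four move_to calls trace a 1 m square as relative offsets; the same pattern can be written as a loop over waypoint deltas. A sketch, assuming the Spot wrapper's move_to signature used above:

# Equivalent loop form of the square walk (sketch, not part of the original file).
for dx, dy in [(1.0, 0.0), (0.0, 1.0), (-1.0, 0.0), (0.0, -1.0)]:
    spot.move_to(dx, dy, 0.0, math_helpers.Quat(), duration=5.0)
    time.sleep(5.0)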
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 21.694444 | max_line_length 71 | alphanum_fraction 0.577465
num_words 129 | num_chars 781 | mean_word_length 3.356589 | frac_words_unique 0.341085
frac_chars_top_2grams 0.064665 | top_3grams 0.055427 | top_4grams 0.127021 | frac_chars_dupe_5grams 0.48037 | dupe_6grams 0.48037 | dupe_7grams 0.48037 | dupe_8grams 0.48037 | dupe_9grams 0.48037 | dupe_10grams 0.48037
frac_chars_replacement_symbols 0 | frac_chars_digital 0.076923 | frac_chars_whitespace 0.267606 | frac_chars_alphabet 0.68007 | frac_chars_comments 0.056338 | frac_chars_string_length 0.077657 | frac_chars_long_word_length 0 | frac_chars_hex_words 0
size_file_byte 781 | num_lines 35 | num_chars_line_max 72 | num_chars_line_mean 22.314286
cate_xml_start 0 | frac_lines_dupe_lines 0.227273 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.136364 | frac_lines_simplefunc 0 | score_lines_no_logic 0.136364 | frac_lines_print 0.090909
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 3
hexsha: cd90fb8f4961d4f54d2eb80fcec8b04e412e1af3 | size: 626 | ext: py | lang: Python
repo: variasov/classic_messaging_kombu | path: sources/classic/messaging_kombu/handlers.py | head: c4191f3d1f788a39f50dc137eca1b67f3ee2af20 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count: 1 (stars events 2021-11-12T08:19:53.000Z, min and max) | max_issues_count: null | max_forks_count: null
from abc import ABC, abstractmethod
from typing import Dict, Any, Callable

from kombu import Message

from classic.components import component

MessageBody = Dict[str, Any]

@component
class MessageHandler(ABC):

    @abstractmethod
    def handle(self, message: Message, body: MessageBody):
        pass

@component
class SimpleMessageHandler(MessageHandler):
    function: Callable[[Any], Any]
    late_ack: bool = True

    def handle(self, message: Message, body: MessageBody):
        if not self.late_ack:
            message.ack()
        self.function(**body)
        if self.late_ack:
            message.ack()
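A plain-kombu wiring sketch for the handler above. This is not how the classic_messaging_kombu package registers handlers (that wiring lives elsewhere in the library); the connection URL and queue name are hypothetical, and the keyword constructor assumes the component decorator generates one from the annotated fields. Note that kombu invokes callbacks as (body, message), while handle() expects (message, body), so the lambda flips the order:

from kombu import Connection, Queue

def process(**payload):
    print("got", payload)

handler = SimpleMessageHandler(function=process, late_ack=True)

with Connection("amqp://guest:guest@localhost//") as conn:
    with conn.Consumer(
        Queue("tasks"),
        callbacks=[lambda body, message: handler.handle(message, body)],
    ):
        conn.drain_events(timeout=5)

With late_ack=True the message is acknowledged only after function(**body) returns, giving at-least-once delivery; with late_ack=False it is acknowledged up front, trading that for at-most-once processing.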
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 18.969697 | max_line_length 58 | alphanum_fraction 0.680511
num_words 72 | num_chars 626 | mean_word_length 5.875 | frac_words_unique 0.416667
frac_chars_top_2grams 0.049645 | top_3grams 0.061466 | top_4grams 0.094563 | frac_chars_dupe_5grams 0.297872 | dupe_6grams 0.198582 | dupe_7grams 0.198582 | dupe_8grams 0 | dupe_9grams 0 | dupe_10grams 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0 | frac_chars_whitespace 0.233227 | frac_chars_alphabet 0.88125 | frac_chars_comments 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_chars_hex_words 0
size_file_byte 626 | num_lines 32 | num_chars_line_max 59 | num_chars_line_mean 19.5625
cate_xml_start 0 | frac_lines_dupe_lines 0.3 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.1 | cate_var_zero false | frac_lines_pass 0.05 | frac_lines_import 0.2 | frac_lines_simplefunc 0 | score_lines_no_logic 0.5 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 4
hexsha: cd937e31435e325df9a3ac8d8fa5487807539935 | size: 1,440 | ext: py | lang: Python
repo: GSH-LAN/byceps | path: byceps/services/shop/order/event_service.py | head: ab8918634e90aaa8574bd1bb85627759cef122fe | licenses: ["BSD-3-Clause"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count: 33 (events 2018-01-16T02:04:51.000Z to 2022-03-22T22:57:29.000Z) | max_issues_count: 7 (events 2019-06-16T22:02:03.000Z to 2021-10-02T13:45:31.000Z) | max_forks_count: 14 (events 2019-06-01T21:39:24.000Z to 2022-03-14T17:56:43.000Z)
"""
byceps.services.shop.order.event_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from datetime import datetime
from typing import Sequence
from ....database import db
from .dbmodels.order_event import OrderEvent as DbOrderEvent, OrderEventData
from .transfer.models import OrderID
def create_event(
event_type: str, order_id: OrderID, data: OrderEventData
) -> None:
"""Create an order event."""
event = build_event(event_type, order_id, data)
db.session.add(event)
db.session.commit()
def create_events(
event_type: str, order_id: OrderID, datas: Sequence[OrderEventData]
) -> None:
"""Create a sequence of order events."""
events = [build_event(event_type, order_id, data) for data in datas]
db.session.add_all(events)
db.session.commit()
def build_event(
event_type: str, order_id: OrderID, data: OrderEventData
) -> DbOrderEvent:
"""Assemble, but not persist, an order event."""
now = datetime.utcnow()
return DbOrderEvent(now, event_type, order_id, data)
def get_events_for_order(order_id: OrderID) -> list[DbOrderEvent]:
"""Return the events for that order."""
return db.session \
.query(DbOrderEvent) \
.filter_by(order_id=order_id) \
.order_by(DbOrderEvent.occurred_at) \
.all()
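A hypothetical usage sketch for the service above; the event type and payload are invented, order_id stands for an existing OrderID value, and the attribute names are assumed from the model's constructor arguments:

# Persist one event (occurred_at is set to datetime.utcnow() by build_event):
create_event("order-placed", order_id, {"initiator_id": "some-user-id"})

# Read events back, ordered by occurred_at:
for event in get_events_for_order(order_id):
    print(event.occurred_at, event.event_type)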
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 26.181818 | max_line_length 76 | alphanum_fraction 0.690972
num_words 182 | num_chars 1,440 | mean_word_length 5.285714 | frac_words_unique 0.379121
frac_chars_top_2grams 0.065489 | top_3grams 0.058212 | top_4grams 0.053015 | frac_chars_dupe_5grams 0.212058 | dupe_6grams 0.191268 | dupe_7grams 0.164241 | dupe_8grams 0.101871 | dupe_9grams 0.101871 | dupe_10grams 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.006728 | frac_chars_whitespace 0.174306 | frac_chars_alphabet 0.802355 | frac_chars_comments 0.21875 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_chars_hex_words 0
size_file_byte 1,440 | num_lines 54 | num_chars_line_max 77 | num_chars_line_mean 26.666667
cate_xml_start 0 | frac_lines_dupe_lines 0.206897 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.137931 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.206897 | frac_lines_simplefunc 0 | score_lines_no_logic 0.413793 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 5
hexsha: cd95b58b744f084920dc507989ebf490290a8ec2 | size: 637 | ext: py | lang: Python
repo: abcnever/euchre-game | path: app/models/columns/suit.py | head: 5446e345e0dfdcf83d5fe87c3d2cedc31b3ae669 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count: 1 (stars events 2018-12-31T05:38:56.000Z, min and max) | max_issues_count: 4 (events 2018-11-03T15:51:13.000Z to 2019-01-12T21:09:23.000Z) | max_forks_count: null
from attr import attrs, attrib
import enum

from .enum import EnumColumn

class Suit(EnumColumn):

    class Enum(enum.Enum):

        @attrs(frozen=True)
        class _Suit():
            suit_name = attrib()
            ascii_icon = attrib()

        spades = _Suit(
            suit_name="Spades",
            ascii_icon="♠"
        )
        clubs = _Suit(
            suit_name="Clubs",
            ascii_icon="♣"
        )
        diamonds = _Suit(
            suit_name="Diamonds",
            ascii_icon="\033[91m♦\033[0m"  # "\033[0m" resets the ANSI color; the source had "\0330m", missing the "["
        )
        hearts = _Suit(
            suit_name="Hearts",
            ascii_icon="\033[91m♥\033[0m"
        )
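A quick sanity check of the four members and their icons (a sketch, not part of the original file; it assumes an ANSI-capable terminal so the red diamond and heart render in color):

for suit in (Suit.Enum.spades, Suit.Enum.clubs, Suit.Enum.diamonds, Suit.Enum.hearts):
    print(suit.value.suit_name, suit.value.ascii_icon)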
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 21.233333 | max_line_length 40 | alphanum_fraction 0.486656
num_words 65 | num_chars 637 | mean_word_length 4.615385 | frac_words_unique 0.384615
frac_chars_top_2grams 0.15 | top_3grams 0.16 | top_4grams 0.1 | frac_chars_dupe_5grams 0 | dupe_6grams 0 | dupe_7grams 0 | dupe_8grams 0 | dupe_9grams 0 | dupe_10grams 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.047244 | frac_chars_whitespace 0.401884 | frac_chars_alphabet 0.729659 | frac_chars_comments 0 | frac_chars_string_length 0.089482 | frac_chars_long_word_length 0 | frac_chars_hex_words 0
size_file_byte 637 | num_lines 29 | num_chars_line_max 41 | num_chars_line_mean 21.965517
cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.12 | frac_lines_simplefunc 0 | score_lines_no_logic 0.24 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 6
hexsha: cd977d3ad4e8e4d9141853e4e08a51d0ffa0f771 | size: 1,881 | ext: py | lang: Python
repo: sreza1/Diabetic-Retinopathy-Detection | path: dataset.py | head: 75f10423ef161d3040756253a8ba0b9012e391b7 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
import config
import os
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from tqdm import tqdm

class DRDataset(Dataset):
    def __init__(self, images_folder, path_to_csv, train=True, transform=None):
        super().__init__()
        self.data = pd.read_csv(path_to_csv)
        self.images_folder = images_folder
        self.image_files = os.listdir(images_folder)
        self.transform = transform
        self.train = train

    def __len__(self):
        return self.data.shape[0] if self.train else len(self.image_files)

    def __getitem__(self, index):
        if self.train:
            image_file, label = self.data.iloc[index]
        else:
            # if test, simply return -1 for the label; I do this in order to
            # re-use the same dataset class for the test-set submission later on
            image_file, label = self.image_files[index], -1

        image_file = image_file.replace(".jpeg", "")
        # if image_file[0] == "_":
        #     image_file = image_file[1:]
        # elif image_file[:2] == "._":
        #     image_file = image_file[2:]
        path = os.path.join(self.images_folder + "/", image_file + ".jpeg")
        image = np.array(Image.open(path))

        if self.transform:
            image = self.transform(image=image)["image"]

        return image, label, image_file

if __name__ == "__main__":
    """
    Test if everything works ok
    """
    dataset = DRDataset(
        images_folder="/data/images_resized_650",
        path_to_csv="/data/trainLabels.csv",
        transform=config.val_transforms,
    )
    loader = DataLoader(
        dataset=dataset, batch_size=32, num_workers=6, shuffle=True, pin_memory=True
    )

    for x, label, file in tqdm(loader):
        print(x.shape)
        print(label.shape)
        import sys

        sys.exit()  # the source had bare `sys.exit`, which is a no-op; calling it actually stops after one batch
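The class calls its transform as transform(image=...)["image"], which matches the albumentations calling convention. One plausible shape for the config module assumed above (a sketch; the sizes and normalization values here are invented, not taken from the repo):

import albumentations as A
from albumentations.pytorch import ToTensorV2

val_transforms = A.Compose(
    [
        A.Resize(height=650, width=650),
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),  # returns a dict; the tensor lives under the "image" key
    ]
)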
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 29.857143 | max_line_length 84 | alphanum_fraction 0.617757
num_words 245 | num_chars 1,881 | mean_word_length 4.497959 | frac_words_unique 0.379592
frac_chars_top_2grams 0.098004 | top_3grams 0.043557 | top_4grams 0.049002 | frac_chars_dupe_5grams 0 | dupe_6grams 0 | dupe_7grams 0 | dupe_8grams 0 | dupe_9grams 0 | dupe_10grams 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.009559 | frac_chars_whitespace 0.27698 | frac_chars_alphabet 0.800735 | frac_chars_comments 0.12068 | frac_chars_string_length 0.042991 | frac_chars_long_word_length 0.028037 | frac_chars_hex_words 0
size_file_byte 1,881 | num_lines 63 | num_chars_line_max 85 | num_chars_line_mean 29.857143
cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.071429 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.190476 | frac_lines_simplefunc 0.02381 | score_lines_no_logic 0.333333 | frac_lines_print 0.047619
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 7
hexsha: cd988eff24525966178311b4c694188e2f3b5038 | size: 507 | ext: py | lang: Python
repo: Filipos27/Celebrity_classification | path: server/server.py | head: 802474516b9ecaee70c4019600572bbbbd8b582a | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
from flask import Flask, request, jsonify
import util

app = Flask(__name__)

@app.route("/classify_image", methods=["GET", "POST"])
def classify_image():
    image_data = request.form["image_data"]

    response = jsonify(util.classify_image(image_data))
    response.headers.add("Access-Control-Allow-Origin", "*")

    return response

if __name__ == "__main__":
    print("Starting Python Flask Server For Celebrity Image Classification")
    util.load_saved_artifacts()
    app.run(port=5000)
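A hypothetical smoke test for the endpoint above, run against a local server (the expected encoding of image_data is defined by util.classify_image, which lives elsewhere in the repo):

import requests

resp = requests.post(
    "http://127.0.0.1:5000/classify_image",
    data={"image_data": "<base64-encoded data URL of the test image>"},
)
print(resp.json())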
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 28.166667 | max_line_length 77 | alphanum_fraction 0.710059
num_words 63 | num_chars 507 | mean_word_length 5.396825 | frac_words_unique 0.634921
frac_chars_top_2grams 0.114706 | top_3grams 0.105882 | top_4grams 0.129412 | frac_chars_dupe_5grams 0 | dupe_6grams 0 | dupe_7grams 0 | dupe_8grams 0 | dupe_9grams 0 | dupe_10grams 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.009412 | frac_chars_whitespace 0.161736 | frac_chars_alphabet 0.790588 | frac_chars_comments 0 | frac_chars_string_length 0.267894 | frac_chars_long_word_length 0.055215 | frac_chars_hex_words 0
size_file_byte 507 | num_lines 17 | num_chars_line_max 78 | num_chars_line_mean 29.823529
cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.076923 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.153846 | frac_lines_simplefunc 0 | score_lines_no_logic 0.307692 | frac_lines_print 0.076923
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 8
hexsha: cd9a1323c7a15a9388bdc8532ce60de3beb414fa | size: 7,827 | ext: py | lang: Python
repo: annagitel/ocs-ci | path: tests/e2e/performance/csi_tests/test_pvc_bulk_clone_performance.py | head: 284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count: 1 (stars events 2021-03-12T09:01:36.000Z, min and max) | max_issues_count: 1 (events 2021-08-30T20:06:00.000Z to 2021-09-30T20:05:46.000Z) | max_forks_count: null
"""
Test to measure pvc scale creation time. Total pvc count would be 50, 1 clone per PVC
Total number of clones in bulk will be 50
"""
import logging
import pytest
from ocs_ci.utility import utils
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.framework.testlib import performance
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.ocs import constants, scale_lib
from ocs_ci.ocs.resources import pvc, pod
from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile
log = logging.getLogger(__name__)
@performance
class TestBulkCloneCreation(PASTest):
"""
Base class for bulk creation of PVC clones
"""
@pytest.fixture()
def namespace(self, project_factory, interface_iterate):
"""
Create a new project
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
self.interface = interface_iterate
@pytest.mark.usefixtures(namespace.__name__)
@pytest.mark.polarion_id("OCS-2621")
def test_bulk_clone_performance(self, namespace, tmp_path):
"""
Creates number of PVCs in a bulk using kube job
Write 60% of PVC capacity to each one of the created PVCs
Creates 1 clone per each PVC altogether in a bulk
Measuring time for bulk of clones creation
"""
pvc_count = 50
vol_size = "5Gi"
job_pod_file, job_pvc_file, job_clone_file = [None, None, None]
log.info(f"Start creating {self.interface} {pvc_count} PVC")
if self.interface == constants.CEPHBLOCKPOOL:
sc_name = constants.DEFAULT_STORAGECLASS_RBD
clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
elif self.interface == constants.CEPHFILESYSTEM:
sc_name = constants.DEFAULT_STORAGECLASS_CEPHFS
clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
try:
pvc_dict_list = scale_lib.construct_pvc_creation_yaml_bulk_for_kube_job(
no_of_pvc=pvc_count,
access_mode=constants.ACCESS_MODE_RWO,
sc_name=sc_name,
pvc_size=vol_size,
)
job_pvc_file = ObjectConfFile(
name="job_profile_pvc",
obj_dict_list=pvc_dict_list,
project=self.namespace,
tmp_path=tmp_path,
)
# Create kube_job
job_pvc_file.create(namespace=self.namespace)
# Check all the PVC reached Bound state
pvc_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
kube_job_obj=job_pvc_file,
namespace=self.namespace,
no_of_pvc=pvc_count,
)
logging.info(f"Number of PVCs in Bound state {len(pvc_bound_list)}")
# Kube_job to Create pod
pod_dict_list = scale_lib.attach_multiple_pvc_to_pod_dict(
pvc_list=pvc_bound_list,
namespace=self.namespace,
pvcs_per_pod=1,
start_io=False,
pod_yaml=constants.NGINX_POD_YAML,
)
job_pod_file = ObjectConfFile(
name="job_profile_pod",
obj_dict_list=pod_dict_list,
project=self.namespace,
tmp_path=tmp_path,
)
job_pod_file.create(namespace=self.namespace)
# Check all PODs in Running state
scale_lib.check_all_pod_reached_running_state_in_kube_job(
kube_job_obj=job_pod_file,
namespace=self.namespace,
no_of_pod=len(pod_dict_list),
timeout=90,
)
logging.info(f"Number of PODs in Running state {len(pod_dict_list)}")
total_files_size = self.run_fio_on_pvcs(vol_size)
clone_dict_list = scale_lib.construct_pvc_clone_yaml_bulk_for_kube_job(
pvc_dict_list, clone_yaml, sc_name
)
logging.info("Created clone dict list")
job_clone_file = ObjectConfFile(
name="job_profile_clone",
obj_dict_list=clone_dict_list,
project=self.namespace,
tmp_path=tmp_path,
)
# Create kube_job that creates clones
job_clone_file.create(namespace=self.namespace)
logging.info("Going to check bound status for clones")
# Check all the clones reached Bound state
clone_bound_list = scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
kube_job_obj=job_clone_file,
namespace=self.namespace,
no_of_pvc=pvc_count,
timeout=180,
)
logging.info(f"Number of clones in Bound state {len(clone_bound_list)}")
clone_objs = []
all_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
for clone_yaml in clone_dict_list:
name = clone_yaml["metadata"]["name"]
size = clone_yaml["spec"]["resources"]["requests"]["storage"]
logging.info(f"Clone {name} of size {size} created")
for pvc_obj in all_pvc_objs:
if pvc_obj.name == name:
clone_objs.append(pvc_obj)
assert len(clone_bound_list) == len(
clone_objs
), "Not all clones reached BOUND state, cannot measure time"
start_time = helpers.get_provision_time(
self.interface, clone_objs, status="start"
)
end_time = helpers.get_provision_time(
self.interface, clone_objs, status="end"
)
total_time = (end_time - start_time).total_seconds()
speed = round(total_files_size / total_time, 2)
logging.info(
f"Total creation time = {total_time} secs, data size = {total_files_size} MB, speed = {speed} MB/sec "
f"for {self.interface} clone in bulk of {pvc_count} clones."
)
# Finally is used to clean-up the resources created
# Irrespective of try block pass/fail finally will be executed.
finally:
# Cleanup activities
logging.info("Cleanup of all the resources created during test execution")
if job_pod_file:
job_pod_file.delete(namespace=self.namespace)
job_pod_file.wait_for_delete(
resource_name=job_pod_file.name, namespace=self.namespace
)
if job_clone_file:
job_clone_file.delete(namespace=self.namespace)
job_clone_file.wait_for_delete(
resource_name=job_clone_file.name, namespace=self.namespace
)
if job_pvc_file:
job_pvc_file.delete(namespace=self.namespace)
job_pvc_file.wait_for_delete(
resource_name=job_pvc_file.name, namespace=self.namespace
)
# Check ceph health status
utils.ceph_health_check(tries=20)
def run_fio_on_pvcs(self, pvc_size):
searched_pvc_objs = pvc.get_all_pvc_objs(namespace=self.namespace)
pod_objs = pod.get_all_pods(namespace=self.namespace)
logging.info(f"Found {len(searched_pvc_objs)} PVCs")
pvc_size_int = int(pvc_size[:-2]) # without "Gi"
file_size_mb = int(pvc_size_int * 0.6) * constants.GB2MB
total_files_size = file_size_mb * len(searched_pvc_objs)
file_size_mb_str = str(file_size_mb) + "M"
logging.info(f"Writing file of size {file_size_mb_str} in each PVC")
for objs in pod_objs:
performance_lib.write_fio_on_pod(objs, file_size_mb_str)
return total_files_size
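For the defaults above, the data-size arithmetic in run_fio_on_pvcs works out as follows, assuming constants.GB2MB is 1024: with vol_size "5Gi", pvc_size_int = 5 and file_size_mb = int(5 * 0.6) * 1024 = 3 * 1024 = 3072 MB written per PVC, so total_files_size = 3072 * 50 = 153,600 MB, which is the figure the MB/sec clone-creation speed is computed from.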
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 38.747525 | max_line_length 118 | alphanum_fraction 0.614028
num_words 979 | num_chars 7,827 | mean_word_length 4.575077 | frac_words_unique 0.191011
frac_chars_top_2grams 0.060951 | top_3grams 0.078589 | top_4grams 0.010717 | frac_chars_dupe_5grams 0.336459 | dupe_6grams 0.230409 | dupe_7grams 0.187765 | dupe_8grams 0.132842 | dupe_9grams 0.126814 | dupe_10grams 0.099129
frac_chars_replacement_symbols 0 | frac_chars_digital 0.005232 | frac_chars_whitespace 0.316213 | frac_chars_alphabet 0.831652 | frac_chars_comments 0.095311 | frac_chars_string_length 0.109359 | frac_chars_long_word_length 0.009746 | frac_chars_hex_words 0
size_file_byte 7,827 | num_lines 201 | num_chars_line_max 119 | num_chars_line_mean 38.940299
cate_xml_start 0 | frac_lines_dupe_lines 0.089655 | cate_autogen 0 | frac_lines_long_string 0.006897 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.006897
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.02069 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.062069 | frac_lines_simplefunc 0 | score_lines_no_logic 0.096552 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 9
hexsha: 2696d944b45b7b26bd7dbbe253779f41871a415a | size: 7,779 | ext: py | lang: Python
repo: Popcorn05/IslandGen | path: islandGen.py | head: a06821c1db8f33befb1fb3db32fd2e18d323a23a | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
# Import libraries
import random
import os
import noise
import numpy
import math
import sys
from chunks import Chunks as chk
from PIL import Image
import subprocess
from scipy.misc import toimage
import threading

random.seed(os.urandom(6))

# Delete old chunks
filelist = [f for f in os.listdir("world/") if f.endswith(".chunk")]  # Delete previous world files
for f in filelist:
    os.remove(os.path.join("world/", f))

# Functions
def percentChance(chance):
    n = random.randrange(101)
    if (100 - n) < chance:
        return True
    else:
        return False

def mapVal(inp, inpMin, inpMax, outMin, outMax):
    return (inp - inpMin) * (outMax - outMin) / (inpMax - inpMin) + outMin
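# Worked example for mapVal: a mid-gray gradient pixel of 128 on the 0..255
# scale maps to (128 - 0) * (1 - (-0.05)) / (255 - 0) + (-0.05) ≈ 0.477 on the
# -0.05..1 scale used when the circular gradient is applied below.
# Note also that percentChance draws n from randrange(101), i.e. 0..100, so
# percentChance(c) is True with probability roughly c/101; for fractional
# chances such as percentChance(0.01) only n == 100 passes (about 0.99%, far
# more than 0.01%).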
def createCircleGrad(gridSize):  # Obsolete
    # Create circular gradient (Obsolete)
    center_x, center_y = gridSize // 2, gridSize // 2  # Define centre
    circle_grad = numpy.zeros((gridSize, gridSize))  # Create array
    for y in range(gridSize):  # Loop array
        for x in range(gridSize):
            distx = abs(x - center_x)  # Get distance from centre on x and y
            disty = abs(y - center_y)
            dist = math.sqrt(distx * distx + disty * disty)  # Get the actual distance from centre (pythag)
            circle_grad[y][x] = dist
    max_grad = numpy.max(circle_grad)
    circle_grad = circle_grad / max_grad  # This is some weird math that I don't quite understand but it works
    circle_grad -= 0.5
    circle_grad *= 2.0
    circle_grad = -circle_grad
    for y in range(gridSize):  # More weird math, I think it's just amplifying anything that is above 0
        for x in range(gridSize):
            if circle_grad[y][x] > 0:
                circle_grad[y][x] *= 20
    max_grad = numpy.max(circle_grad)
    circle_grad = circle_grad / max_grad  # For some reason it's lowered again
    return circle_grad

# Colours
dwaterCol = [54, 137, 245]
waterCol = [67, 146, 245]
dsandCol = [224, 214, 164]
sandCol = [247, 232, 176]
rockCol = [209, 209, 209]
grassCol = [37, 170, 77]
dgrassCol = [34, 161, 63]
treeCol = [10, 122, 42]
mountCol = [74, 62, 36]
mountRockCol = [56, 48, 30]
snowCol = [245, 254, 255]

# Control Variables
a = sys.argv
if len(a) > 1:
    gridSize = int(a[1])
    scale = float(a[2])
    octaves = int(a[3])
    persistance = float(a[4])
    lacunarity = float(a[5])
    thres = float(a[6])
else:
    gridSize = 1024  # Side length
    scale = 250.0
    octaves = 6
    persistance = 0.5
    lacunarity = 2.0
    thres = 0.08

# Generate base noise, apply gradient
im = Image.open("gradient/circle_grad.png")
circle_grad = im.convert("L")
main = numpy.zeros((gridSize, gridSize))  # Init arrays
mainNoise = numpy.zeros_like(main)
seed = random.randint(0, 200)  # Gen seed
for y in range(gridSize):
    for x in range(gridSize):
        main[y][x] = noise.pnoise2(y/scale, x/scale, octaves=octaves, persistence=persistance, lacunarity=lacunarity, repeatx=gridSize, repeaty=gridSize, base=seed)  # Set noise
        mainNoise[y][x] = (main[y][x] * mapVal(circle_grad.getpixel((round((1024/gridSize)*x), round((1024/gridSize)*y))), 0, 255, -0.05, 1))  # Apply gradient to noise
        if mainNoise[y][x] > 0:
            mainNoise[y][x] *= 20  # Amplify
max_grad = numpy.max(mainNoise)
mainNoise = mainNoise / max_grad  # Weird even-out math thing

# Lay base
display = numpy.zeros((gridSize//16, gridSize//16) + (16, 16) + (3,))
processed = numpy.zeros((gridSize//16, gridSize//16), dtype=bool)
passOver = numpy.zeros((gridSize//16, gridSize//16), dtype=bool)
import time
start = time.time()
for cy in range(gridSize//16):
    for cx in range(gridSize//16):
        print(str(cy) + " " + str(cx))
        if processed[cy][cx] == False:
            processed[cy][cx] = True
            for y in range(16):
                for x in range(16):
                    m = mainNoise[y + (16*cy)][x + (16*cx)]  # Set iterator to value of main array and check if it meets certain thresholds to set colours
                    if m < thres + 0.015:
                        m = dwaterCol
                    elif m < thres + 0.11:
                        m = waterCol
                    elif m < thres + 0.12:
                        m = dsandCol
                        passOver[cy][cx] = True
                    elif m < thres + 0.15:
                        m = sandCol
                        passOver[cy][cx] = True
                    elif m < thres + 0.28:
                        m = grassCol
                        passOver[cy][cx] = True
                    elif m < thres + 0.46:
                        m = dgrassCol
                        passOver[cy][cx] = True
                    elif m < thres + 0.78:
                        m = mountCol
                        passOver[cy][cx] = True
                    elif m < thres + 1.0:
                        m = snowCol
                        passOver[cy][cx] = True
                    display[cy][cx][y][x] = m

# Second pass (Natural features)
featSeed = random.randint(0, 100)  # Generate seed
for cy in range(gridSize//16):
    for cx in range(gridSize//16):
        if passOver[cy][cx] == True:
            for y in range(16):
                for x in range(16):
                    m = display[cy][cx][y][x]
                    p = noise.pnoise2((y + (cy * 16))/(scale/2.5), (x + (cx * 16))/(scale/2.5), octaves=10, persistence=0.55, lacunarity=1.55, repeatx=gridSize, repeaty=gridSize, base=featSeed)  # Get pond noise
                    if all(m == grassCol) or all(m == dsandCol) or all(m == sandCol):  # If light grass or beach, generate pond
                        if p > 0.17:
                            if p < 0.25:
                                m = sandCol
                            elif p < 1.0:
                                m = waterCol
                    display[cy][cx][y][x] = m

# Third pass (Structures)
def addTree(arr, cx, cy, x, y, inpScale):
    arr[cy][cx][y][x] = treeCol
    n = y
    while n < y + inpScale:  # Loop through tree size (Only creates a plus sign)
        arr[cy][cx][min(n + 1, 15)][x] = treeCol
        n += 1
    n = y
    while n > y - inpScale:
        arr[cy][cx][max(n - 1, 0)][x] = treeCol
        n -= 1
    n = x
    while n < x + inpScale:
        arr[cy][cx][y][min(n + 1, 15)] = treeCol
        n += 1
    n = x
    while n > x - inpScale:
        arr[cy][cx][y][max(n - 1, 0)] = treeCol
        n -= 1

def addRock(arr, cx, cy, x, y, inpScale, c):
    arr[cy][cx][y][x] = c
    arr[cy][cx][min(y + random.randint(0, 1), 15)][x] = c  # Random whether one is placed; if 0 is generated the origin is painted over
    arr[cy][cx][max(y - random.randint(0, 1), 0)][x] = c
    arr[cy][cx][y][min(x + random.randint(0, 1), 15)] = c
    arr[cy][cx][y][max(x - random.randint(0, 1), 0)] = c

structScale = int(scale // 200)
for cy in range(gridSize//16):
    for cx in range(gridSize//16):
        if passOver[cy][cx] == True:
            for y in range(16):
                for x in range(16):  # Place rocks on beach and mountain
                    m = display[cy][cx][y][x]
                    if all(m == sandCol):
                        if percentChance(2) == True:
                            addRock(display, cx, cy, x, y, structScale, rockCol)
                    elif all(m == grassCol):
                        if percentChance(5) == True:
                            addTree(display, cx, cy, x, y, structScale)
                    elif all(m == dgrassCol):
                        if percentChance(20) == True:
                            addTree(display, cx, cy, x, y, structScale)
                    elif all(m == mountCol):
                        if percentChance(0.01) == True:
                            addRock(display, cx, cy, x, y, structScale, mountRockCol)

# Save
for cy in range(gridSize//16):
    for cx in range(gridSize//16):
        chk.writeChunk(cx, cy, display)

# Display
toimage(chk.readChunkArray(gridSize, display)).show()
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 33.530172 | max_line_length 202 | alphanum_fraction 0.549556
num_words 1,085 | num_chars 7,779 | mean_word_length 3.913364 | frac_words_unique 0.257143
frac_chars_top_2grams 0.02261 | top_3grams 0.049458 | top_4grams 0.03203 | frac_chars_dupe_5grams 0.311116 | dupe_6grams 0.233867 | dupe_7grams 0.197833 | dupe_8grams 0.19171 | dupe_9grams 0.131418 | dupe_10grams 0.131418
frac_chars_replacement_symbols 0 | frac_chars_digital 0.054748 | frac_chars_whitespace 0.319064 | frac_chars_alphabet 0.746838 | frac_chars_comments 0.118396 | frac_chars_string_length 0.006449 | frac_chars_long_word_length 0.003518 | frac_chars_hex_words 0
size_file_byte 7,779 | num_lines 232 | num_chars_line_max 203 | num_chars_line_mean 33.530172
cate_xml_start 0 | frac_lines_dupe_lines 0.287293 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.027624 | cate_var_zero false | frac_lines_pass 0.049724 | frac_lines_import 0.066298 | frac_lines_simplefunc 0.005525 | score_lines_no_logic 0.099448 | frac_lines_print 0.005525
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 10
hexsha: 26978b08939270913183c7dd0c609cfa2e52874f | size: 4,363 | ext: py | lang: Python
repo: dmitryvinn/ReAgent | path: reagent/gym/tests/test_gym_replay_buffer.py | head: f98825b9d021ec353a1f9087840a05fea259bf42 | licenses: ["BSD-3-Clause"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count: 1,156 (events 2019-10-02T12:15:31.000Z to 2022-03-31T16:01:27.000Z) | max_issues_count: 448 (events 2019-10-03T13:40:52.000Z to 2022-03-28T07:49:15.000Z) | max_forks_count: 214 (events 2019-10-13T13:28:33.000Z to 2022-03-24T04:11:52.000Z)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import logging

import numpy.testing as npt
from reagent.core.parameters import ProblemDomain
from reagent.gym.envs import Gym
from reagent.gym.envs.wrappers.simple_minigrid import SimpleObsWrapper
from reagent.gym.utils import create_df_from_replay_buffer
from reagent.preprocessing.sparse_to_dense import PythonSparseToDenseProcessor
from reagent.test.base.horizon_test_base import HorizonTestBase

logger = logging.getLogger(__name__)

class TestEnv(SimpleObsWrapper):
    """
    Wrap Gym environment in TestEnv to save the MiniGrid's
    observation, action, reward and terminal in a list so that
    we can check if replay buffer is working correctly
    """

    def __init__(self, env):
        self.env = env
        self.action_space = self.env.action_space
        # mdp_id, sequence_number, state, action, reward, terminal
        self.sart = []
        self.mdp_id = -1
        self.sequence_number = 0

    def seed(self, *args, **kwargs):
        return self.env.seed(*args, **kwargs)

    def reset(self, **kwargs):
        self.mdp_id += 1
        self.sequence_number = 0
        res = self.env.reset(**kwargs)
        self.sart.append([self.mdp_id, self.sequence_number, res, None, None, None])
        return res

    def step(self, action):
        res = self.env.step(action)
        (
            _,
            _,
            last_state,
            last_action,
            last_reward,
            last_terminal,
        ) = self.sart[-1]
        assert (
            last_state is not None
            and last_action is None
            and last_reward is None
            and last_terminal is None
        )
        next_state, reward, terminal, _ = res
        self.sart[-1][3] = action
        self.sart[-1][4] = reward
        self.sart[-1][5] = terminal
        self.sequence_number += 1
        self.sart.append(
            [self.mdp_id, self.sequence_number, next_state, None, None, None]
        )
        return res

class TestGymReplayBuffer(HorizonTestBase):
    def test_create_df_from_replay_buffer(self):
        env_name = "MiniGrid-Empty-5x5-v0"
        env = Gym(env_name=env_name)
        state_dim = env.observation_space.shape[0]
        # Wrap env in TestEnv
        env = TestEnv(env)
        problem_domain = ProblemDomain.DISCRETE_ACTION
        DATASET_SIZE = 1000
        multi_steps = None
        DS = "2021-09-16"

        # Generate data
        df = create_df_from_replay_buffer(
            env=env,
            problem_domain=problem_domain,
            desired_size=DATASET_SIZE,
            multi_steps=multi_steps,
            ds=DS,
            shuffle_df=False,
        )
        self.assertEqual(len(df), DATASET_SIZE)

        # Check data
        preprocessor = PythonSparseToDenseProcessor(list(range(state_dim)))
        for idx, row in df.iterrows():
            df_mdp_id = row["mdp_id"]
            env_mdp_id = str(env.sart[idx][0])
            self.assertEqual(df_mdp_id, env_mdp_id)

            df_seq_num = row["sequence_number"]
            env_seq_num = env.sart[idx][1]
            self.assertEqual(df_seq_num, env_seq_num)

            df_state = preprocessor.process([row["state_features"]])[0][0].numpy()
            env_state = env.sart[idx][2]
            npt.assert_array_equal(df_state, env_state)

            df_action = row["action"]
            env_action = str(env.sart[idx][3])
            self.assertEqual(df_action, env_action)

            df_terminal = row["next_action"] == ""
            env_terminal = env.sart[idx][5]
            self.assertEqual(df_terminal, env_terminal)
            if not df_terminal:
                df_reward = float(row["reward"])
                env_reward = float(env.sart[idx][4])
                npt.assert_allclose(df_reward, env_reward)

                df_next_state = preprocessor.process([row["next_state_features"]])[0][
                    0
                ].numpy()
                env_next_state = env.sart[idx + 1][2]
                npt.assert_array_equal(df_next_state, env_next_state)

                df_next_action = row["next_action"]
                env_next_action = str(env.sart[idx + 1][3])
                self.assertEqual(df_next_action, env_next_action)
            else:
                del env.sart[idx + 1]
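The sart list above is the ground truth the generated DataFrame is checked against; the bookkeeping follows this shape (illustrative values, not from a real run):

# Shape of the env.sart rows:
#   after reset():   [0, 0, s0, None, None, None]
#   after step(a0):  [0, 0, s0, a0, r0, done0], [0, 1, s1, None, None, None]
# step() fills action/reward/terminal into the previous row (indices 3..5),
# then appends a fresh row holding the next state.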
Quality signals (columns are qsc_code_*_quality_signal unless noted)
avg_line_length 33.821705 | max_line_length 86 | alphanum_fraction 0.60165
num_words 543 | num_chars 4,363 | mean_word_length 4.593002 | frac_words_unique 0.26151
frac_chars_top_2grams 0.020048 | top_3grams 0.036087 | top_4grams 0.017642 | frac_chars_dupe_5grams 0.179631 | dupe_6grams 0.092221 | dupe_7grams 0.056135 | dupe_8grams 0.056135 | dupe_9grams 0.032879 | dupe_10grams 0
frac_chars_replacement_symbols 0 | frac_chars_digital 0.014469 | frac_chars_whitespace 0.303003 | frac_chars_alphabet 0.805656 | frac_chars_comments 0.082054 | frac_chars_string_length 0.02993 | frac_chars_long_word_length 0.005282 | frac_chars_hex_words 0
size_file_byte 4,363 | num_lines 128 | num_chars_line_max 87 | num_chars_line_mean 34.085938
cate_xml_start 0 | frac_lines_dupe_lines 0.060606 | cate_autogen 0 | frac_lines_long_string 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.10101
qsc_codepython_*: cate_ast 1 | frac_lines_func_ratio 0.050505 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.080808 | frac_lines_simplefunc 0.010101 | score_lines_no_logic 0.181818 | frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective 1 | hits 0
Record 11
hexsha: 269a18ede77adaabe0e01c16057d910f3519fa89 | size: 30,573 | ext: py | lang: Python
repo: viadee/eric | path: depparse.py | head: 680508cc5bf2d322638c6cf2c466a06c3c1f33d4 | licenses: ["BSD-3-Clause-Clear", "Apache-2.0", "CC0-1.0", "BSD-3-Clause"] (identical for the max_stars, max_issues and max_forks variants)
max_stars_count: 4 (events 2020-04-07T07:05:02.000Z to 2020-09-23T14:23:16.000Z) | max_issues_count: null | max_forks_count: 1 (events 2021-12-27T03:00:44.000Z, min and max)
import pickle
import stanza
import test_stuff
from datetime import datetime
from dictionary import cd, dictionary, nlp_dictionary, ph_outcome, ph_key, ph_value, ph_dvalue, ph_subject
import eric_nlp
#does not do preprocessing
def depparse(sentences, pipeline):
output = ["OUTPUT:\n"]
roots = dict()
for sentence in sentences:
print(f"parsing sentence: '{sentence}'")
doc = pipeline(sentence)
#get max_width for pretty printing
max_width_word = 0
for word in sentence.split():
width = len(word)
if width > max_width_word:
max_width_word = width
append_data = []
for sent in doc.sentences:
sentence_words = ""
root = ""
max_width_deprel = 0
for word in sent.words:
if len(word.deprel) > max_width_deprel:
max_width_deprel = len(word.deprel)
for word in sent.words:
if word.head == 0:
root = word.text
append_data.append(f'id: {word.id}\tword: {word.text.ljust(max_width_word)}\tlemma: {word.lemma.ljust(max_width_word)}\tupos: {word.upos}\txpos: {word.xpos.ljust(3)}\thead id: {word.head}\thead: {sent.words[word.head-1].text.ljust(max_width_word) if word.head > 0 else "root".ljust(max_width_word)}\tdeprel: {word.deprel.ljust(max_width_deprel)}\tfeats: {word.feats}')
sentence_words += f"{word.text} "
#console and/or txt-file output
append_data.append("="*47 + "\n")
output.append(sentence_words)
output.append(f"Root: {root}")
output.extend(append_data)
if root.lower() in roots.keys():
roots[root.lower()] += 1
else:
roots[root.lower()] = 1
roots = {key: val for key, val in sorted(roots.items(), key=lambda item: item[1], reverse=True)}
print(output)
return output, roots
def init_stanza(lang):
print(f"loading stanza pipeline for language '{lang}'")
stanza.download(lang)
stanza_pipeline = stanza.Pipeline(lang=lang, processors="tokenize,mwt,pos,lemma,depparse")
print("successfully loaded stanza pipeline")
return stanza_pipeline
def init_stanza_from_pickle(filename):
with open(filename, "rb") as f:
stanza_pipeline = pickle.load(f)
return stanza_pipeline
'''
creates a matrix with:
columns: roots
rows: count how often that root occurs for a function
'''
def create_roots_matrix(roots, file_name, csv_sep = ";", empty_cell = "0"):
file_lines = []
first_line = f"{empty_cell}"
first = True
for root, functions in roots.items():
line = f"{root}"
tmp = [x["id"] for x in nlp_dictionary]
tmp.append("none")
for fct_id in tmp:
if first:
first_line += f"{csv_sep}{fct_id}"
if fct_id in functions.keys():
count = functions[fct_id]
else:
count = empty_cell
line += f"{csv_sep}{count}"
if first:
file_lines.append(first_line)
first = False
file_lines.append(line)
test_stuff.list_to_file(file_lines, file_name)
#all_roots is a dict from words to another dict from function ids to ints
#roots is expected to be a dict from words to ints
def extend_roots(all_roots, roots, fct_id):
for k, v in roots.items():
if k in all_roots.keys():
if fct_id in all_roots[k].keys():
print(f"DUPLICATE FUNCTION IN ROOTS: {fct_id} ; {k} ; {v}")
else:
all_roots[k][fct_id] = v
else:
print(f"adding new word: {k} from {fct_id} ;; {v}")
all_roots[k] = {fct_id: v}
return all_roots
#attempt 1: how many nodes do they share, regardless of node depth
def tree_compare_bad(tree1, tree2):
if len(tree1.words) < len(tree2.words):
small = tree1
big = tree2
else:
small = tree2
big = tree1
in_common = 0
used_ids = []
for leaf_s in small.words:
found_leaf_id = ""
for leaf_b in big.words:
if leaf_s.deprel == leaf_b.deprel and leaf_b.id not in used_ids:
found_leaf_id = leaf_b.id
break
if found_leaf_id:
in_common += 1
used_ids.append(found_leaf_id)
percentage = in_common * 100.0 / len(small.words)
return in_common, percentage
def tree_compare_bad_again(tree1, tree2):
bad_id = "0"
if len(tree1.words) < len(tree2.words):
small = tree1
big = tree2
else:
small = tree2
big = tree1
similar_counter = 0
used_ids = []
for word_b in big.words:
found_id = bad_id
for word_s in small.words:
if word_b.lemma == word_s.lemma and word_b.deprel == word_s.deprel and word_b.head == word_s.head and word_s.id not in used_ids:
found_id = word_s.id
if found_id != bad_id:
similar_counter += 1
used_ids.append(found_id)
percentage = similar_counter * 100.0 / len(small.words)
return similar_counter, percentage
#a tree is a list of dictionarys. every dictionary represents a word of the sentence. key-value-pairs are the attributes of that word.
def tree_compare(t1, t2):
return tree_compare_bad_again(t1, t2)
def get_word(wanted_id, words):
if wanted_id == "0":
return "root"
for word in words:
if word.id == wanted_id:
return word
return ""
'''
takes a tuple as in "deprel" in dictionary.nlp_dictionary.
returns list of tuples. if master_tuple was a simple tuple, the list only contains that tuple
if master_tuple has lists as elements, these get split so that every tuple in the returned list has only strings as elements
Example:
in: (["predict", "divinate"], "obl", ["data", "person"])
out: [
("predict", "obl", "data"),
("predict", "obl", "person"),
("divinate", "obl", "data"),
("divinate", "obl", "person")
]
note: returning list has x elements with x being the product of all three lengths. (here 2*1*2 = 4)
'''
def generate_sub_tuples(master_tuple):
ret_val = []
element_0 = master_tuple[0] if isinstance(master_tuple[0], list) else [master_tuple[0]]
element_1 = master_tuple[1] if isinstance(master_tuple[1], list) else [master_tuple[1]]
element_2 = master_tuple[2] if isinstance(master_tuple[2], list) else [master_tuple[2]]
for e_0 in element_0:
for e_1 in element_1:
for e_2 in element_2:
tpl = (e_0, e_1, e_2)
ret_val.append(tpl)
return ret_val
'''
takes a word-object of a depparse-word and a string element from a tuple (not a list-element. use generate_sub_tuples() first)
checks if dictionary.cd (by default "#") is in tuple_element. If so, it extracts which attribute (i.e. in front of "#") is wanted.
then returns the corresponding attribute value of word_object and the part right of "#" in tuple_element
if "#" was not in tuple_element, it returns tuple_element as it is and the default attribute of word_object
also needs an eric, to invoke replacement of placeholders
'''
def get_comparison_attributes(word_object, tuple_element, eric, default="text"):
#if word_object is a root_word, it will be a dictionary, as root words don't exist and are constructed synthetically in the function get_mother()
if isinstance(word_object, dict):
if cd in tuple_element:
splitted = tuple_element.split(cd)
ret_word_attribute = word_object[splitted[0]]
ret_tuple_attribute = splitted[1]
else:
ret_word_attribute = word_object[default]
ret_tuple_attribute = tuple_element
else:
if cd in tuple_element:
splitted = tuple_element.split(cd)
ret_word_attribute = getattr(word_object, splitted[0])
ret_tuple_attribute = splitted[1]
else:
ret_word_attribute = getattr(word_object, default)
ret_tuple_attribute = tuple_element
ret1, ret2 = replace_depparse_placeholders(ret_word_attribute, ret_tuple_attribute, eric)
return ret1, ret2
'''
word_attribute should be from the user input, tuple_attribute one element of a tuple from the depparse templates in dictionary.nlp_dictionary
it's called attribute, not element because it should only be called at the end of get_comparison_attributes() which extracts attributes from word objects (e.g. the lemma, upos or deprel, etc.)
word_attribute needs to be included even though it will not have any placeholders. In the case, that "<outcome>" is in tuple_attribute, word_attribute needs to be checked
if it is a different form of the possible outcomes. This gets checked via the eric.model_columns["class"]["phrasings"] dict which has all possible outcomes as keys (here "survived" and "died")
and stores different forms of those as the values of that dict as list. Here ["survive", "survives"] and ["die", "dies"].
'''
def replace_depparse_placeholders(word_attribute, tuple_attribute, eric):
ret_word_attribute, ret_tuple_attribute = word_attribute, tuple_attribute
if ret_tuple_attribute == ph_outcome:
if eric.placeholders[ph_outcome]:
ret_tuple_attribute = eric.placeholders[ph_outcome]
elif ret_tuple_attribute == ph_key:
is_in_placeholders = False
for k in eric.placeholders[ph_key].keys():
if k.lower() == ret_word_attribute.lower():
is_in_placeholders = True
break
if is_in_placeholders:
ret_tuple_attribute = ret_word_attribute
elif ret_tuple_attribute == ph_value:
is_in_placeholders = False
for v in eric.placeholders[ph_key].values():
if v and v.lower() == ret_word_attribute.lower():
is_in_placeholders = True
break
if is_in_placeholders:
ret_tuple_attribute = ret_word_attribute
return ret_word_attribute, ret_tuple_attribute
replace_depparse_placeholders("", "", "")
#looks for the head/mother node of word in tree and returns it (or a representing dictionary if head is root).
#returns dict since root is not really represented in the word objects of depparse
def get_mother(word, tree):
if word.head == 0:
return {
"id": "0",
"text": "root",
"lemma": "root",
"upos": None,
"xpos": None,
"head": None,
"deprel": None
}
else:
return tree.words[word.head-1]
#takes a depparse tree t and goes through the depparse tree templates in dictionary.nlp_dictionary
#returns a list of tuples (fct_id, tree template) with a tuple for every found match.
def get_matching_dictionary_trees(tree, eric):
mother_index = 0
deprel_index = 1
child_index = 2
all_matches = []
# test_stuff.logger(f"{tab*1}DADICT: {nlp_dictionary}")
for d in nlp_dictionary:
#test_stuff.logger(f"/////: {d['id'].upper()} ://///")
for depparse_template in d["depparse"]:
#test_stuff.logger(f"{tab*1}template: {depparse_template}")
used_words = [] #already matched words. saved to not use them twice
template_match = True #stays true unless at least one tuple in the demplate does not match
match_sub_tuples = [] #stores the sub_tuples that matched in this template. So when a total match is achieved, the used subtuples can be viewed
#if a depparse template is an empty list, it would always match, so skip it. this should never happen, if dictionary was created properly, but just to be safe
if len(depparse_template) == 0:
continue
for template_tuple in depparse_template:
#test_stuff.logger(f"{tab*2}tuple: {template_tuple}")
tuple_correct = False
sub_tuples = generate_sub_tuples(template_tuple)
for sub_tuple in sub_tuples:
#test_stuff.logger(f"{tab*3}sub_tuple: {sub_tuple[mother_index]}, {sub_tuple[deprel_index]}, {sub_tuple[child_index]}")
sub_tuple_correct = False
for word in tree.words:
if word.id in used_words:
#test_stuff.logger(f"{tab*4}{word.text.upper()}: >>>skipped<<<")
continue
#test_stuff.logger(f"{tab*4}{word.text.upper()}: id: {word.id} :: text: {word.text} :: lemma: {word.lemma} :: upos: {word.upos} :: xpos: {word.xpos} :: feats: {word.feats} :: head: {word.head} :: deprel: {word.deprel} :: misc: {word.misc}")
#the following get generated over function to use different attributes of the words (see function for more info)
child_val, tuple_child_val = get_comparison_attributes(word, sub_tuple[child_index], eric)
deprel_val, tuple_deprel_val = get_comparison_attributes(word, sub_tuple[deprel_index], eric, default="deprel")
#test_stuff.logger(f"{tab*5}vals: {child_val},{tuple_child_val}, {deprel_val}, {tuple_deprel_val}")
child_matched = True if child_val.lower() == tuple_child_val.lower() else False
deprel_matched = True if deprel_val.lower() == tuple_deprel_val.lower() else False
#just to not look up the mother if the match already failed
if child_matched and deprel_matched:
mother = get_mother(word, tree)
mother_val, tuple_mother_val = get_comparison_attributes(mother, sub_tuple[mother_index], eric)
mother_matched = True if mother_val.lower() == tuple_mother_val.lower() else False
else:
mother_matched = False
#if all three categories are a match, the subtuple is a match
if child_matched and deprel_matched and mother_matched:
used_words.append(word.id)
sub_tuple_correct = True
break #no need to match the other words. match next tuple instead
#if one of the sub_tuples is correct it's a match for the whole tuple, so no need to match the others
if sub_tuple_correct:
match_sub_tuples.append(sub_tuple)
tuple_correct = True
break
#if one tuple in a template does not match, the whole template does not match, so no need to go on
if not tuple_correct:
template_match = False
break
#collect all template matches
if template_match:
tmp = (d["id"], match_sub_tuples)
all_matches.append(tmp)
#returns a list of tuples with two elements each: 1st fct_id, 2nd the tree template that matched, i.e. a list of tuples
#largest template tree will be element 0
if eric.prioritise_negation:
ret_val = prioritise_negation(all_matches)
else:
ret_val = sorted(all_matches, key=lambda item: len(item[1]), reverse=True)
return ret_val
#expects a list of tuples with two elements each: 1st fct_id, 2nd the tree template that matched, i.e. a list of tuples
#that list should represent a ranking from most likely (lowest index) to least likely (highest index)
#it then goes through all templates and sorts them into templates that contain a lemma:not and those that do not
#then creates a ranking again for both, separately
#then, both lists get concatenated with the negated tuples at the lower indices. So a short but negated template will have priority over a longer, non-negated one
#returns that list
def prioritise_negation(templates_list):
negated_tuples = []
non_negated_tuples = []
for template in templates_list:
negated = False
for tpl in template[1]:
head = tpl[0]
child = tpl[2]
if isinstance(head, list):
if f"lemma{cd}not" in head or "not" in head:
negated = True
break
else:
if f"lemma{cd}not" == head or "not" == head:
negated = True
break
if isinstance(child, list):
if f"lemma{cd}not" in child or "not" in child:
negated = True
break
else:
if f"lemma{cd}not" == child or "not" == child:
negated = True
break
if negated:
negated_tuples.append(template)
else:
non_negated_tuples.append(template)
negated_tuples = sorted(negated_tuples, key=lambda item: len(item[1]), reverse=True)
non_negated_tuples = sorted(non_negated_tuples, key=lambda item: len(item[1]), reverse=True)
ranked_list = negated_tuples + non_negated_tuples
return ranked_list
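#Hedged usage sketch (not part of the original pipeline): the template tuples below
#are invented and only their shape matters. It shows a short negated template
#outranking a longer, non-negated one after prioritise_negation.
def _prioritise_negation_demo():
    matches = [
        ("whatif", [("root", "root", "change"), ("change", "obj", "age"), ("age", "nummod", "30")]),
        ("why-not", [("root", "root", "predict"), ("predict", "advmod", "not")]),
    ]
    ranked = prioritise_negation(matches)
    #expected: ["why-not", "whatif"], the negated match first despite being shorter
    return [fct_id for fct_id, _ in ranked]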
#tree is a depparse tree like t1/t2 in tree_compare(t1, t2)
def dictionary_templates_test(tree):
#indices of tuples in templates
tmother = 0 #mother node
tdeprel = 1 #dependency relation
tchild = 2 #child node
root = ""
for x in tree.words:
if x.head == 0:
root = x
break
if not root:
test_stuff.logger("no root found:")
test_stuff.logger(tree.words)
#test_stuff.logger("Testing Tree:")
for d in nlp_dictionary:
test_stuff.logger(f"MATCHING TO {d['id']}")
if "depparse" not in d.keys():
continue
for dep_template in d["depparse"]:
correct_tupel_counter = 0 #if correct match, correct_tupel_counter should be equal to the number of elements in dep_template
#test_stuff.logger(f"\t\t template {template_counter}")
for tup in dep_template:
found_mother = False
found_child = False
found_deprel = False
#test_stuff.logger(f"\t\t\t{tup}")
                child_is_list = isinstance(tup[tchild], list)
                deprel_is_list = isinstance(tup[tdeprel], list)
if tup[tmother] == "root":
root_correct = False
if child_is_list:
if root.text in tup[tchild]:
root_correct = True
elif root.text == tup[tchild]:
root_correct = True
#else:
#test_stuff.logger(f"\t\t\t\t {root.text} != {tup[tmother]}")
if root_correct:
found_mother = True
found_child = True
found_deprel = True
else:
#see if you find current tuple in t
for word in tree.words:
#check if word is a child node
if child_is_list:
if word.text in tup[tchild]:
found_child = True
else:
if word.text == tup[tchild]:
found_child = True
#check if mother and deprel match
#mother is a dictionary, just like a word
                        mother = get_word(f"{word.head}", tree.words)
                        mother_text = mother if isinstance(mother, str) else mother.text
                        if mother_text == tup[tmother]:
                            found_mother = True
                            #check if deprel matches
                            if deprel_is_list:
                                if word.deprel in tup[tdeprel]:
                                    found_deprel = True
                            elif word.deprel == tup[tdeprel]:
                                found_deprel = True
if found_mother and found_deprel and found_child:
break
if found_mother and found_deprel and found_child:
#test_stuff.logger("\t\t\t\t\t Tupel correct!")
correct_tupel_counter += 1
if correct_tupel_counter == len(dep_template):
#test_stuff.logger(f"///Found match ({d['id']}): {dep_template}\n")
return f"///Found match: {dep_template}\n"
            else:
                #test_stuff.logger(f"NO MATCH. mother: {found_mother}, deprel: {found_deprel}, child: {found_child}")
                pass
'''
example shape of a depparse template:
("root", "root", "predicted"),
("predicted", "nsubj:pass", f"upos{category_tag}NOUN")
'''
def sentence_similarity(sent1, sent2, pipeline):
t1 = pipeline(sent1).sentences[0]
t2 = pipeline(sent2).sentences[0]
total, percent = tree_compare(t1, t2)
return total, percent
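#Hedged usage sketch: requires the English stanza model to be downloaded already;
#the two sentences are arbitrary examples.
def _sentence_similarity_demo():
    sp = init_stanza("en")
    return sentence_similarity("Why did you predict this outcome?", "Why this prediction?", sp)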
def print_depparsed_sentences(sentences, language="en", pipeline=""):
if not pipeline:
pipeline = init_stanza(language)
if isinstance(sentences, str):
sentences = [sentences]
output, _ = depparse(sentences, pipeline)
for i, o in enumerate(output):
print(f"{i}: {o}")
def debug_depparsed_sentences_to_console():
pipeline = init_stanza("de")
eric = eric_nlp.Eric_nlp()
sentence_list = ["Used sentences:"]
print("Please provide input:")
while True:
# for usr_in in whiletrue:
usr_in = input()
if not usr_in:
print("no input given")
continue
elif usr_in.lower() in ["exit", "exit()", "quit", "quit()", "end", "end()"]:
break
sentence_list.append(usr_in)
preprocessed = eric.preprocessing(usr_in, "usr_input")
print(f"preprocessed: {preprocessed}")
out, _ = depparse([preprocessed], pipeline)
root = ""
for o in out:
if "id: 0" in o:
finder = "word: "
ender = "lemma: "
index = o.find(finder) + len(finder)
index_end = o.find(ender)
root = o[index:index_end].strip()
if not root:
root = "root not found"
print(f"Root: {root}")
for o in out[3:]:
print(o)
print("Goodbye")
for sent in sentence_list:
print(sent)
def main():
    debug_depparsed_sentences_to_console()
quit()
input_language = "en"
stanza_pipeline = init_stanza(input_language)
eric = eric_nlp.Eric_nlp()
input_path = "data\\"
input_files = [f"{input_path}umfrage_input_{x}_cleaned.txt" for x in range(1,5)]
input_files.append(f"{input_path}manually_added.txt")
output_path = "output\\depparse\\data_analysis\\"
roots_out_file = f"{output_path}roots.csv"
input_accumulated = test_stuff.merge_input_files(input_files)#{x["id"]: x["key_sentences"] for x in nlp_dictionary}
input_accumulated = list(set(input_accumulated))
input_accumulated_as_dict = {}
for x in input_accumulated:
if x[0] in input_accumulated_as_dict.keys():
input_accumulated_as_dict[x[0]].append(x[1])
else:
input_accumulated_as_dict[x[0]] = [x[1]]
all_roots = dict() #keys are root words and the values are dicts where the keys are the function_id
for fct_id, unpreprocessed_sentences in input_accumulated_as_dict.items():
preprocessed_sentences = [eric.preprocessing(x, "usr_input") for x in unpreprocessed_sentences]
dep_output, roots = depparse(preprocessed_sentences, stanza_pipeline)
preface = [f"{v}: {k}" for k, v in roots.items()]
#extend all_roots
all_roots = extend_roots(all_roots, roots, fct_id)
all_output = ["Used Input:"] + input_files + ["\n"] + preface + dep_output
for o in all_output:
print(o)
create_roots_matrix(all_roots, roots_out_file, empty_cell="")
print(all_roots)
#for infi in input_files:
# input_data =
# test_input = [x[1] for x in test_stuff.read_input_from_file(f[0])]
# test_output = depparse("en", test_input)
# test_stuff.list_to_file(test_output, f[1])
def read_sentences_from_output(output_file):
stop_words = ["OUTPUT:", "Root:", "id:"]
file_lines = test_stuff.get_file_lines(output_file)
sentences = list()
for line in file_lines:
if line != "" and not line[0].isdigit() and line[0] != "=":
splitted = line.split()
if splitted[0] not in stop_words:
sentences.append(line)
return list(set(sentences))
'''
if you thought of new sentences while analysing the output and just depparsed them over the debug console and included them in the output_file,
this function will help. It can read your originally used input again, then the output file, compare sentences and store all new ones, i.e. the manually analysed sentences, in a new input_file.
Also, it will then overwrite the output file to update the root counts.
'''
def update_depparse_output(input_files, output_file_overwrite, passed_fct_id, output_file_new_sentences="data\\manually_added.txt", sp=""):
#input_accumulated.extend([("why", "Why did you predict this outcome?"), ("why", "Why did you predict the outcome?")])
#1 get all three as dictionaries {passed_fct_id: list of sentences}
#1.1 originally used input
lines = test_stuff.merge_input_files(input_files)
lines = list(set(lines))
input_accumulated = convert_input_tuples_to_dict(lines)
#1.2 modified output
lines = read_sentences_from_output(output_file_overwrite)
output_accumulated = {passed_fct_id: lines}
#1.3 existing manually added sentences
lines = test_stuff.merge_input_files([output_file_new_sentences])
lines = list(set(lines))
manual_accumulated = convert_input_tuples_to_dict(lines)
    #2 look for sentences in output_accumulated that do not exist in input_accumulated and append these to manual_accumulated if they do not already exist there
eric = eric_nlp.Eric_nlp()
for fct_id, sentences in output_accumulated.items():
if fct_id in input_accumulated.keys():
preprocessed_inputs = [eric.preprocessing(x, "usr_input") for x in input_accumulated[fct_id]]
for sent in sentences:
sentence = eric.preprocessing(sent, "usr_input")
if sentence not in preprocessed_inputs:
if fct_id in manual_accumulated.keys():
if sentence not in manual_accumulated[fct_id]:
manual_accumulated[fct_id].append(sentence)
else:
manual_accumulated[fct_id] = [sentence]
        else:
            #all sentences are new here, so preprocess and add each one
            for sent in sentences:
                sentence = eric.preprocessing(sent, "usr_input")
                if fct_id in manual_accumulated.keys():
                    if sentence not in manual_accumulated[fct_id]:
                        manual_accumulated[fct_id].append(sentence)
                else:
                    manual_accumulated[fct_id] = [sentence]
#4 write manual_accumulated to data\\manually_added.txt (or sth else, if argument was given)
    out = []
for fct_id, sentences in manual_accumulated.items():
out.append(f"[{fct_id}]")
out.extend(sentences)
out.append("")
test_stuff.list_to_file(out, output_file_new_sentences)
#5 update the output file
#5.1 get all sentences for fct_id from manually_added.txt and the input files
if not sp:
sp = init_stanza("en")
all_sentences = []
if passed_fct_id in manual_accumulated.keys():
all_sentences.extend(manual_accumulated[passed_fct_id])
if passed_fct_id in input_accumulated.keys():
all_sentences.extend(input_accumulated[passed_fct_id])
all_sentences = [eric.preprocessing(x, "usr_input") for x in all_sentences]
out, roots = depparse(all_sentences, sp)
preface = [f"{v}: {k}" for k, v in roots.items()]
all_out = preface + out
test_stuff.list_to_file(all_out, output_file_overwrite)
def convert_input_tuples_to_dict(input_tuples):
ret_val = dict()
for fct_id, sentence in input_tuples:
if fct_id in ret_val.keys():
if sentence not in ret_val[fct_id]:
ret_val[fct_id].append(sentence)
else:
ret_val[fct_id] = [sentence]
return ret_val
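#Small made-up illustration: duplicate (fct_id, sentence) tuples collapse, so each
#function id maps to its unique sentences.
def _convert_input_tuples_demo():
    tuples = [("why", "Why this outcome?"), ("why", "Why this outcome?"), ("whatif", "What if age was 30?")]
    return convert_input_tuples_to_dict(tuples)
    #-> {"why": ["Why this outcome?"], "whatif": ["What if age was 30?"]}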
def test_some_sentences():
sp = init_stanza("en")
sentences = []
words = ["more", "less", "lower", "higher", "greater"]
more = [f"what if fare was {x} than 300 instead" for x in words]
sentences.extend(more)
more = [f"what if age was {x} than 44 instead" for x in words]
sentences.extend(more)
more = [f"what if age was {x} 44" for x in ["over", "under"]]
sentences.extend(more)
more = [f"what if age was {x}" for x in words]
sentences.extend(more)
out, _ = depparse(sentences, sp)
for o in out:
print(o)
if __name__ == "__main__":
#main()
debug_depparsed_sentences_to_console()
quit()
lines = test_stuff.read_input_from_file("data\\wrongly_accused.txt")
sentences = [x[1] for x in lines]
for s in sentences:
print(s)
print("//////////")
sp = init_stanza("en")
out, root = depparse(sentences, sp)
test_stuff.list_to_file(out, "output\\depparse\\wrongly_accused_out.txt")
quit()
#test_some_sentences()
for d in nlp_dictionary:
print(d["id"])
try:
x = d['depparse'][0]
print("\t---")
except Exception as e:
print("\tNOTHING")
sp = init_stanza("en")
input_files = [f"data\\umfrage_input_{x}_cleaned.txt" for x in range(1,5)]
fct = "whatif-gl"
update_depparse_output(input_files, f"output\\depparse\\{fct}.txt", fct, "data\\manually_added.txt", sp=sp)
# ====== file: ch03/simple_cbow_pytorch.py | repo: tomy-0000/deep-learning-from-scratch-2 | license: MIT ======
# coding: utf-8
import torch.nn as nn
class SimpleCBOW(nn.Module):
def __init__(self, vocab_size, hidden_size):
super(SimpleCBOW, self).__init__()
V, H = vocab_size, hidden_size
self.in_layer = nn.Linear(V, H, bias=False)
self.out_layer = nn.Linear(H, V, bias=False)
self.loss_layer = nn.CrossEntropyLoss()
def forward(self, contexts, target):
h0 = self.in_layer(contexts[:, 0])
h1 = self.in_layer(contexts[:, 1])
h = (h0 + h1) * 0.5
score = self.out_layer(h)
loss = self.loss_layer(score, target)
return loss
@property
def word_vecs(self):
return self.in_layer.weight.detach().numpy().T
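# Hedged usage sketch (shapes inferred from forward() above): contexts are one-hot
# rows of shape (batch, 2, vocab_size); targets are class indices.
if __name__ == '__main__':
    import torch
    V, H = 7, 5
    model = SimpleCBOW(V, H)
    contexts = torch.eye(V)[torch.tensor([[0, 2], [1, 3]])]  # (2, 2, V) one-hot pairs
    target = torch.tensor([1, 2])
    loss = model(contexts, target)
    loss.backward()  # gradients flow into in_layer and out_layer
    print(loss.item(), model.word_vecs.shape)  # word_vecs is (V, H)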
# ====== file: modules/input_output.py | repo: nicolasying/WordNet-Embeddings | license: MIT ======
# coding=utf-8
#! /usr/bin/env python3.4
"""
MIT License
Copyright (c) 2018 NLX-Group
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code reads wordnet data and index files
data_file_reader(file_name):
extract data from wordnet data files saved in "data/input" directory
output is
1- a dictionary with
key = synsetoffsets
data = (synsetWrds, synsetConnections, synsetRelationTypes, connectedSynsetPos, gloss)
2- and offset_list
Chakaveh.saedi@di.fc.ul.pt
"""
import os, sys
import numpy as np
from progressbar import ProgressBar, Percentage, Bar
def data_file_reader(file_name, lang):
print(" Working on " + file_name)
if lang == "Dutch":
path = os.getcwd() + '/data/input/Dutch_wnet/'
elif lang == "Portuguese":
path = os.getcwd() + '/data/input/Portuguese_wnet/'
else:
path = os.getcwd() + '/data/input/English_wnet/'
fl = open(path + file_name)
src = fl.readlines()
fl.close()
file_data = {}
offset_list = []
all_word = set()
amb_word = set()
for lineNum in range(len(src)):
dataLine = src[lineNum]
if dataLine[0:2] == " ": #or " 000 " in dataLine: # comments or synset with no relations
continue
else:
synsetWrds = []
synsetConnections = []
synsetRelationTypes = []
connectedSynsetPos = []
dataLineParts = dataLine.split(" ")
wrdCnt = int(dataLineParts[3], 16)
indx = 4
for i in range(wrdCnt):
synsetWrds.append(dataLineParts[indx])
"""
if dataLineParts[indx] not in all_word:
all_word.add(dataLineParts[indx])
else:
amb_word.add(dataLineParts[indx])
"""
indx += 2
connCnt = int(dataLineParts[indx])
indx += 1
for i in range(connCnt):
synsetRelationTypes.append(dataLineParts[indx])
indx += 1
synsetConnections.append(dataLineParts[indx])
indx += 1
connectedSynsetPos.append(dataLineParts[indx])
indx += 1
# the next field is 0000 or 000
indx += 1
gloss = dataLine.split("|")[1]
gloss = gloss.replace("\n","")
gloss = gloss.replace("'","''")
data = (synsetWrds, synsetConnections, synsetRelationTypes, connectedSynsetPos, gloss)
file_data.update({dataLineParts[0]:data})
offset_list.append(dataLineParts[0])
#if dataLineParts[0] in synsetConnections:
# print(" self loop", dataLineParts[0])
#print("number of extracted words: ", len(all_word), ", ", len(amb_word), "of which are ambiguous")
return file_data, offset_list
def emb_writer(emb_matrix, word_list, dim, iter, feature_name, for_WSD, main_path):
try:
if emb_matrix == []:
print("no changes was made to the previously saved file")
else:
out_file = open(main_path + "embeddings_" + iter + ".txt", "w")
out_file.write("%d %d\n" % (len(word_list), dim))
if "pyspark" not in str(type(emb_matrix)):
if dim > len(emb_matrix[0]):
dim = len(emb_matrix[0])
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(word_list))
for i in pbar(range(len(word_list))):
if for_WSD:
wrd = word_list[i].split("\t")[0]
else:
wrd = word_list[i]
emb = ""
for j in range(dim):
emb += str(emb_matrix[i][j]) + " "
emb += "\n"
emb = emb.replace(" \n", "\n")
out_file.write(wrd + " " + emb)
else:
i = 0
for row in emb_matrix.collect():
wrd = word_list[i].split("\t")[0]
i += 1
emb = row.asDict()
out_file.write(wrd + " " + str(emb[feature_name]).replace("[","").replace("]","").replace(","," ") + "\n")
out_file.close()
print("\n-------------------------------------------------------------")
print("Vector Embeddings are created and saved in \data\output folder")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("Unexpected error:", exc_value)
def array_writer(matrix, fname, type, main_path):
try:
print (" Saving %s data into a file"%(fname))
path = main_path + fname
if type == "txt":
np.savetxt(path, matrix)
else:
np.save(path, matrix)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("Unexpected error:", exc_value)
print(" COULDN'T SAVE THE %s FILE"%(fname))
def array_loader(fname, main_path):
path = main_path + fname + ".npy"
mat_data = np.load(path)
return(mat_data)
def info_writer(dim,wrd_cnt, non_zero, for_WSD, main_path):
path = main_path + 'last_run_info'
info = open(path,"w")
info.write("dim: %d\n" % (dim[0]))
info.write("for_WSD: %s\n" % (str(for_WSD)))
info.write("wrd_cnt: %d\n" % (wrd_cnt))
info.write("non_zero: %d\n" % (non_zero))
info.close()
def info_reader(main_path):
path = main_path+'last_run_info'
info = open(path)
data = info.readlines()
info.close()
dim = data[0].split(" ")[1].replace("\n","")
for_WSD = data[1].split(" ")[1].replace("\n","")
if for_WSD == "True":
for_WSD = True
else:
for_WSD = False
wrd_cnt = data[2].split(" ")[1].replace("\n","")
non_zero = data[3].split(" ")[1].replace("\n","")
return dim, for_WSD, wrd_cnt,non_zero
def log_writer(log, description, only_one_word, only_once, equal_weight, for_WSD, accepted_rel, iter, vec_dim):
try:
log.write("Only one word from each synset: %s \n" %(only_one_word))
log.write("Only one sense of each word: %s\n" %(only_once))
log.write("Equal weight for different relation types: %s\n" %(str(equal_weight)))
log.write("Different vectors for each sense of ambiguous words: %s \n" %(str(for_WSD)))
log.write("Accepted relations: %s \n" %(str(accepted_rel)))
log.write("Random walk method (infinite or itterative): %s \n" %(iter))
log.write("Vector dimension: %d\n" % (vec_dim))
if description != "":
log.write("Description: %s\n" % (description))
log.write("\n-----------------------------\n")
except:
print(" COULDN'T UPDATE THE LOG FILE")
# ====== file: dqc/utils/safeops.py | repo: Jaikinator/dqc | license: Apache-2.0 ======
import math
import torch
from typing import Union, Optional, Tuple
from dqc.utils.datastruct import ZType
eps = 1e-12
########################## safe operations ##########################
def safepow(a: torch.Tensor, p: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
if torch.any(a < 0):
raise RuntimeError("safepow only works for positive base")
base = torch.sqrt(a * a + eps * eps) # soft clip
return base ** p
def safenorm(a: torch.Tensor, dim: int, eps: float = 1e-15) -> torch.Tensor:
# calculate the 2-norm safely
return torch.sqrt(torch.sum(a * a + eps * eps, dim=dim))
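# Hedged illustration of why the eps soft clip matters: the gradient of a plain
# 2-norm is NaN at zero, while safenorm stays finite there.
def _safenorm_grad_demo():
    a = torch.zeros(3, requires_grad=True)
    safenorm(a, dim=0).backward()
    return a.grad  # finite (all zeros) instead of NaN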
########################## occupation number gradients ##########################
def occnumber(a: ZType,
n: Optional[int] = None,
dtype: torch.dtype = torch.double,
device: torch.device = torch.device('cpu')) -> torch.Tensor:
# returns the occupation number (maxed at 1) where the total sum of the
# output equals to a with length of the output is n
def _get_floor_and_ceil(aa: Union[int, float]) -> Tuple[int, int]:
# get the ceiling and flooring of aa
if isinstance(aa, int):
ceil_a: int = aa
floor_a: int = aa
        else:  # float
ceil_a = int(math.ceil(aa))
floor_a = int(math.floor(aa))
return floor_a, ceil_a
if isinstance(a, torch.Tensor):
assert a.numel() == 1
floor_a, ceil_a = _get_floor_and_ceil(a.item())
else: # int or float
floor_a, ceil_a = _get_floor_and_ceil(a)
# get the length of the tensor output
if n is None:
nlength = ceil_a
else:
nlength = n
assert nlength >= ceil_a, "The length of occupation number must be at least %d" % ceil_a
if isinstance(a, torch.Tensor):
res = _OccNumber.apply(a, floor_a, ceil_a, nlength, dtype, device)
else:
res = _construct_occ_number(a, floor_a, ceil_a, nlength, dtype=dtype, device=device)
return res
def _construct_occ_number(a: float, floor_a: int, ceil_a: int, nlength: int,
dtype: torch.dtype, device: torch.device) -> torch.Tensor:
res = torch.zeros(nlength, dtype=dtype, device=device)
res[:floor_a] = 1
if ceil_a > floor_a:
res[ceil_a - 1] = a - floor_a
return res
class _OccNumber(torch.autograd.Function):
@staticmethod
def forward(ctx, a: torch.Tensor, # type: ignore
floor_a: int, ceil_a: int, nlength: int,
dtype: torch.dtype, device: torch.device) -> torch.Tensor:
res = _construct_occ_number(float(a.item()), floor_a, ceil_a, nlength, dtype=dtype, device=device)
ctx.ceil_a = ceil_a
return res
@staticmethod
def backward(ctx, grad_res: torch.Tensor): # type: ignore
grad_a = grad_res[ctx.ceil_a - 1]
return (grad_a,) + (None,) * 5
########################## other tensor ops ##########################
def safe_cdist(a: torch.Tensor, b: torch.Tensor, add_diag_eps: bool = False,
diag_inf: bool = False):
# returns the L2 pairwise distance of a and b
# a: (*BA, na, ndim)
# b: (*BB, nb, ndim)
# returns: (*BAB, na, nb)
square_mat = a.shape[-2] == b.shape[-2]
dtype = a.dtype
device = a.device
ab = a.unsqueeze(-2) - b.unsqueeze(-3) # (*BAB, na, nb, ndim)
# add the diagonal with a small eps to safeguard from nan
if add_diag_eps:
if not square_mat:
raise ValueError("Enabling add_diag_eps for non-square result matrix is invalid")
ab = ab + torch.eye(ab.shape[-2], dtype=dtype, device=device).unsqueeze(-1) * eps
ab = ab.norm(dim=-1) # (*BAB, na, nb)
# replace the diagonal with infinite (usually used for coulomb matrix)
if diag_inf:
if not square_mat:
raise ValueError("Enabling diag_inf for non-square result matrix is invalid")
infdiag = torch.eye(ab.shape[-1], dtype=dtype, device=device)
idiag = infdiag.diagonal()
idiag[:] = float("inf")
ab = ab + infdiag
return ab
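# Hedged usage sketch: pairwise distances of four random 3-D points. diag_inf
# yields the form the comment above associates with a coulomb matrix.
def _safe_cdist_demo():
    a = torch.randn(4, 3)
    d = safe_cdist(a, a, add_diag_eps=True)  # diagonal kept differentiable via eps
    d_inf = safe_cdist(a, a, diag_inf=True)  # diagonal replaced with +inf
    return d, d_inf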
# ====== file: extra_envs/extra_envs/envs/point.py | repo: Fanshaoliu/safe_rl | license: MIT ======
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class PointEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 30}
def __init__(self, mass=1., target_dist=5., xlim=2.5, cost_smoothing=0.):
self.mass = mass
self.dt = 0.1
self.target_dist = target_dist
self.world_width = 1.5*2*target_dist
self.max_speed = 2.
self.lim = np.array([xlim, self.world_width])
high_state = np.array([self.world_width, self.world_width, 1., 1.],
dtype=np.float32)
self.action_space = spaces.Box(low=-1., high=1., shape=(2,), dtype=np.float32)
self.observation_space = spaces.Box(low=-high_state, high=high_state,
dtype=np.float32)
self.reward_range = (-1., 1.)
self.cost_smoothing = cost_smoothing
self.seed()
self.state = None
self.viewer = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
posn = self.np_random.uniform(low=-0.1, high=0.1, size=2)
self.state = np.concatenate([posn, [0., 0.]]).astype(np.float32)
return np.array(self.state)
def get_state(self):
return np.array(self.state)
def step(self, a):
a = np.squeeze(a)
a = np.clip(a, self.action_space.low[0], self.action_space.high[0])
pos, vel = self.state[:2], self.state[2:]
rew = self.state[-2:].dot([-self.state[1], self.state[0]])
rew /= (1. + np.abs(np.linalg.norm(self.state[:2]) - self.target_dist))
# Normalizing to range [-1, 1]
rew /= self.max_speed*self.target_dist
# State update
pos += vel*self.dt + a*self.dt**2 / (2*self.mass)
vel += a*self.dt/self.mass
# Ensure agent is within reasonable range
vel[np.isclose(vel, 0)] = 0.
# Clip speed, if necessary
speed = np.linalg.norm(self.state[-2:])
if speed > self.max_speed:
self.state[-2:] *= self.max_speed / speed
done = (np.abs(pos) > self.lim).any() # constraint violation
distance = self.dist_to_unsafe()
cost = (float(distance == 0.)
if self.cost_smoothing == 0.
else max(0, 1 - distance/self.cost_smoothing))
info = dict(cost=cost, distance=distance)
return np.array(self.state), rew, done, info
def dist_to_unsafe(self):
return max(0, self.signed_dist_to_unsafe())
def signed_dist_to_unsafe(self):
return min(self.lim[0] - self.state[0], self.lim[0] + self.state[0],
self.lim[1] - self.state[1], self.lim[1] + self.state[1])
def render(self, mode='human'):
viewer_size = 500
center, scale = viewer_size // 2, viewer_size / self.world_width
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(viewer_size, viewer_size)
self.ring_trans = rendering.Transform((viewer_size/2, viewer_size/2))
self.ring = rendering.make_circle(self.target_dist*scale, res=100, filled=False)
self.ring.set_color(0., 0.8, 0.)
self.ring.add_attr(self.ring_trans)
self.viewer.add_geom(self.ring)
self.left_boundary = rendering.Line(start=(center - scale*self.lim[0], 0),
end=(center - scale*self.lim[0],
viewer_size))
self.left_boundary.set_color(0.8, 0., 0.)
self.viewer.add_geom(self.left_boundary)
self.right_boundary = rendering.Line(start=(center + scale*self.lim[0], 0),
end=(center + scale*self.lim[0],
viewer_size))
self.right_boundary.set_color(0.8, 0., 0.)
self.viewer.add_geom(self.right_boundary)
self.agent = rendering.make_circle(scale*0.1, res=100)
self.agent_trans = rendering.Transform((viewer_size/2, viewer_size/2))
self.agent.add_attr(self.agent_trans)
self.viewer.add_geom(self.agent)
if self.state is None:
return None
posn = self.state[:2]
self.agent_trans.set_translation(center + scale*posn[0], center + scale*posn[1])
return self.viewer.render(return_rgb_array=(mode == 'rgb_array'))
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
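# Hedged usage sketch following the standard gym API this class implements:
# a short rollout with random actions.
if __name__ == '__main__':
    env = PointEnv()
    obs = env.reset()
    for _ in range(10):
        obs, rew, done, info = env.step(env.action_space.sample())
        if done:
            break
    env.close()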
# ====== file: factom_core/blocks/entry_credit_block.py | repo: sourcery-ai-bot/factom-core | license: MIT ======
import hashlib
import struct
from dataclasses import dataclass, field
from typing import Dict, List, Union
from factom_core.block_elements.balance_increase import BalanceIncrease
from factom_core.block_elements.chain_commit import ChainCommit
from factom_core.block_elements.entry_commit import EntryCommit
from factom_core.utils import varint
from .directory_block import DirectoryBlock
ECIDTypes = Union[ChainCommit, EntryCommit, int]
@dataclass
class EntryCreditBlockHeader:
CHAIN_ID = bytes.fromhex("000000000000000000000000000000000000000000000000000000000000000c")
body_hash: bytes
prev_header_hash: bytes
prev_full_hash: bytes
height: int
expansion_area: bytes
object_count: int
body_size: int
def __post_init__(self):
# TODO: value assertions
pass
def marshal(self) -> bytes:
buf = bytearray()
buf.extend(EntryCreditBlockHeader.CHAIN_ID)
buf.extend(self.body_hash)
buf.extend(self.prev_header_hash)
buf.extend(self.prev_full_hash)
buf.extend(struct.pack(">I", self.height))
buf.extend(varint.encode(len(self.expansion_area)))
buf.extend(self.expansion_area)
buf.extend(struct.pack(">Q", self.object_count))
buf.extend(struct.pack(">Q", self.body_size))
return bytes(buf)
@classmethod
def unmarshal(cls, raw: bytes):
h, data = EntryCreditBlockHeader.unmarshal_with_remainder(raw)
assert len(data) == 0, "Extra bytes remaining!"
return h
@classmethod
def unmarshal_with_remainder(cls, raw: bytes):
chain_id, data = raw[:32], raw[32:]
assert chain_id == EntryCreditBlockHeader.CHAIN_ID
body_hash, data = data[:32], data[32:]
prev_header_hash, data = data[:32], data[32:]
prev_full_hash, data = data[:32], data[32:]
height, data = struct.unpack(">I", data[:4])[0], data[4:]
header_expansion_size, data = varint.decode(data)
header_expansion_area, data = (
data[:header_expansion_size],
data[header_expansion_size:],
)
object_count, data = struct.unpack(">Q", data[:8])[0], data[8:]
body_size, data = struct.unpack(">Q", data[:8])[0], data[8:]
return (
EntryCreditBlockHeader(
body_hash=body_hash,
prev_header_hash=prev_header_hash,
prev_full_hash=prev_full_hash,
height=height,
expansion_area=header_expansion_area,
object_count=object_count,
body_size=body_size,
),
data,
)
@dataclass
class EntryCreditBlockBody:
objects: Dict[int, List[ECIDTypes]] = field(default_factory=dict)
def __post_init__(self):
# TODO: value assertions
pass
def marshal(self):
buf = bytearray()
for minute, objects in self.objects.items():
for o in objects:
if isinstance(o, int):
buf.append(0x00)
buf.append(o)
elif isinstance(o, ChainCommit):
buf.append(ChainCommit.ECID)
buf.extend(o.marshal())
elif isinstance(o, EntryCommit):
buf.append(EntryCommit.ECID)
buf.extend(o.marshal())
elif isinstance(o, BalanceIncrease):
buf.append(BalanceIncrease.ECID)
buf.extend(o.marshal())
else:
raise ValueError("Invalid ECID type!")
buf.append(0x01)
buf.append(minute)
return bytes(buf)
@classmethod
def unmarshal(cls, raw: bytes, object_count: int):
body, data = cls.unmarshal_with_remainder(raw, object_count)
assert len(data) == 0, "Extra bytes remaining!"
return body
@classmethod
def unmarshal_with_remainder(cls, raw: bytes, object_count: int):
data = raw
objects = {} # map of minute --> objects array
current_minute_objects = []
for _ in range(object_count):
ecid, data = data[0], data[1:]
if ecid == 0x00:
server_index, data = data[0], data[1:]
current_minute_objects.append(server_index)
elif ecid == 0x01:
minute, data = data[0], data[1:]
objects[minute] = current_minute_objects
current_minute_objects = []
elif ecid == ChainCommit.ECID:
chain_commit, data = (
data[: ChainCommit.BITLENGTH],
data[ChainCommit.BITLENGTH :],
)
chain_commit = ChainCommit.unmarshal(chain_commit)
current_minute_objects.append(chain_commit)
elif ecid == EntryCommit.ECID:
entry_commit, data = (
data[: EntryCommit.BITLENGTH],
data[EntryCommit.BITLENGTH :],
)
entry_commit = EntryCommit.unmarshal(entry_commit)
current_minute_objects.append(entry_commit)
elif ecid == BalanceIncrease.ECID:
balance_increase, data = BalanceIncrease.unmarshal_with_remainder(data)
current_minute_objects.append(balance_increase)
else:
raise ValueError
return EntryCreditBlockBody(objects=objects), data
def construct_header(self, prev_header_hash: bytes, prev_full_hash: bytes, height: int) -> EntryCreditBlockHeader:
object_count = 0
for object_list in self.objects.values():
object_count += len(object_list) + 1
marshalled_body = self.marshal()
return EntryCreditBlockHeader(
body_hash=hashlib.sha256(marshalled_body).digest(),
prev_header_hash=prev_header_hash,
prev_full_hash=prev_full_hash,
height=height,
expansion_area=b"",
object_count=object_count,
body_size=len(marshalled_body),
)
@dataclass
class EntryCreditBlock:
header: EntryCreditBlockHeader
body: EntryCreditBlockBody
_cached_header_hash: bytes = None
def __post_init__(self):
# TODO: value assertions
pass
@property
def header_hash(self):
if self._cached_header_hash is not None:
return self._cached_header_hash
self._cached_header_hash = hashlib.sha256(self.header.marshal()).digest()
return self._cached_header_hash
@property
def full_hash(self):
return hashlib.sha256(self.marshal()).digest()
def marshal(self):
"""Marshals the directory block according to the byte-level representation shown at
https://github.com/FactomProject/FactomDocs/blob/master/factomDataStructureDetails.md#entry-credit-block
Data returned does not include contextual metadata, such as timestamp or the pointer to the
next entry-credit block.
"""
buf = bytearray()
buf.extend(self.header.marshal())
buf.extend(self.body.marshal())
return bytes(buf)
@classmethod
def unmarshal(cls, raw: bytes):
"""Returns a new EntryCreditBlock object, unmarshalling given bytes according to:
https://github.com/FactomProject/FactomDocs/blob/master/factomDataStructureDetails.md#entry-credit-block
Useful for working with a single ecblock out of context, pulled directly from a factomd database for instance.
EntryCreditBlock created will not include contextual metadata, such as timestamp or the pointer to the
next entry-credit block.
"""
block, data = cls.unmarshal_with_remainder(raw)
assert len(data) == 0, "Extra bytes remaining!"
return block
@classmethod
def unmarshal_with_remainder(cls, raw: bytes):
header, data = EntryCreditBlockHeader.unmarshal_with_remainder(raw)
body, data = EntryCreditBlockBody.unmarshal_with_remainder(data, header.object_count)
return EntryCreditBlock(header=header, body=body), data
def add_context(self, directory_block: DirectoryBlock):
pass
def to_dict(self):
return {
"header_hash": self.header_hash.hex(),
"body_hash": self.header.body_hash.hex(),
"prev_header_hash": self.header.prev_header_hash.hex(),
"prev_full_hash": self.header.prev_full_hash.hex(),
"height": self.header.height,
"expansion_area": self.header.expansion_area.hex(),
"object_count": self.header.object_count,
"body_size": self.header.body_size,
"objects": {
minute: [o if type(o) is int else o.to_dict() for o in objects]
for minute, objects in self.body.objects.items()
},
}
def __str__(self):
return "{}(height={})".format(self.__class__.__name__, self.header.height)
# ====== file: alpinelib/aws/aws_lambda.py | repo: nbcnews/alpinelib | license: MIT ======
import boto3
from .. import logging
logger = logging.getFormattedLogger()
lambda_client = boto3.client('lambda', region_name='us-west-2')
def invoke(function_name, message):
try:
response = lambda_client.invoke(
FunctionName=function_name,
InvocationType='Event',
Payload=message
)
return response
except Exception as e:
logger.exception("Failed to invoke lambda {}.".format(function_name))
raise e
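# Hedged usage sketch: the function name and payload below are placeholders;
# boto3's invoke() accepts bytes or a JSON string for Payload.
if __name__ == '__main__':
    import json
    print(invoke('my-function', json.dumps({'hello': 'world'})))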
# ====== file: test/test_lazy.py | repo: sixty-north/python-transducers | license: MIT ======
import unittest
from transducer.functional import compose
from transducer.lazy import transduce
from transducer.transducers import (mapping, filtering, taking, dropping_while, distinct)
class TestComposedTransducers(unittest.TestCase):
def test_chained_transducers(self):
result = transduce(transducer=compose(
mapping(lambda x: x*x),
filtering(lambda x: x % 5 != 0),
taking(6),
dropping_while(lambda x: x < 15),
distinct()),
iterable=range(20))
expected = [16, 36, 49]
for r, e in zip(result, expected):
self.assertEqual(r, e)
if __name__ == '__main__':
unittest.main()
# ====== file: cmake_pc_hooks/cppcheck.py | repo: Takishima/cmake-pre-commit-hooks | license: Apache-2.0 ======
# -*- coding: utf-8 -*-
# Copyright 2021 Damien Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper script for cppcheck."""
import sys
from pathlib import Path
from ._utils import Command
class CppcheckCmd(Command):
"""Class for the cppcheck command."""
command = "cppcheck"
lookbehind = "Cppcheck "
def __init__(self, args):
"""Initialize a CppcheckCmd object."""
super().__init__(self.command, self.lookbehind, args)
self.parse_args(args)
# quiet for stdout purposes
self.add_if_missing(["-q"])
# make cppcheck behave as expected for pre-commit
self.add_if_missing(["--error-exitcode=1"])
# Enable all of the checks
self.add_if_missing(["--enable=all"])
# Force location of compile database
self.add_if_missing([f'--project={Path(self.build_dir, "compile_commands.json")}'])
def _parse_output(self, result):
"""
Parse output and check whether some errors occurred.
Args:
result (namedtuple): Result from calling a command
Returns:
False if no errors were detected, True in all other cases.
"""
# Useless error see https://stackoverflow.com/questions/6986033
useless_error_part = "Cppcheck cannot find all the include files"
result.stderr = [line for line in result.stderr.splitlines(keepends=True) if useless_error_part not in line]
return result.returncode != 0
def main(argv=None):
"""
Run command.
Args:
argv (:obj:`list` of :obj:`str`): list of arguments
"""
if argv is None:
argv = sys.argv
cmd = CppcheckCmd(argv)
cmd.run()
if __name__ == "__main__":
main()
# ====== file: apps/boards/apps.py | repo: julianwachholz/thefarland | license: WTFPL ======
from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete
from . import signals
class BoardsAppConfig(AppConfig):
name = 'apps.boards'
def ready(self):
Board = self.get_model('Board')
Thread = self.get_model('Thread')
Post = self.get_model('Post')
post_save.connect(signals.thread_post_save, sender=Thread)
post_save.connect(signals.post_post_save, sender=Post)
post_delete.connect(signals.thread_post_delete, sender=Thread)
# ====== file: danmu.py | repo: wjhtime/douyu_danmu_python | license: MIT ======
'''
Uses the Douyu danmu (bullet-comment) API
to grab the danmu of a given douyu.tv room
'''
import multiprocessing
import socket
import time
import re
import signal
# Create a socket connection to the Douyu API server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostbyname("openbarrage.douyutv.com")
port = 8601
client.connect((host, port))
# Regular expressions for extracting danmu content and usernames
danmu_re = re.compile(b'txt@=(.+?)/cid@')
username_re = re.compile(b'nn@=(.+?)/txt@')
def send_req_msg(msgstr):
    '''Build and send a request that conforms to the Douyu API'''
msg = msgstr.encode('utf-8')
data_length = len(msg) + 8
code = 689
    # Build the protocol header
msgHead = int.to_bytes(data_length, 4, 'little') \
+ int.to_bytes(data_length, 4, 'little') + \
int.to_bytes(code, 4, 'little')
client.send(msgHead)
sent = 0
while sent < len(msg):
tn = client.send(msg[sent:])
sent = sent + tn
def DM_start(roomid):
    # Build the login/authorization request
msg = 'type@=loginreq/roomid@={}/\0'.format(roomid)
send_req_msg(msg)
    # Build the request to join the danmu message group
msg_more = 'type@=joingroup/rid@={}/gid@=-9999/\0'.format(roomid)
send_req_msg(msg_more)
while True:
        # Data returned by the server
data = client.recv(1024)
        # Use the re module to find the sender's username and the message content
danmu_username = username_re.findall(data)
danmu_content = danmu_re.findall(data)
if not data:
break
else:
for i in range(0, len(danmu_content)):
try:
                    # Print the message; index with i so every match in the packet is shown
                    print('[{}]:{}'.format(danmu_username[i].decode(
                        'utf8'), danmu_content[i].decode(encoding='utf8')))
except:
continue
def keeplive():
    '''
    Keep-alive: send a heartbeat request every 15 seconds
    '''
while True:
msg = 'type@=keeplive/tick@=' + str(int(time.time())) + '/\0'
send_req_msg(msg)
        print('Sent heartbeat packet')
time.sleep(15)
def logout():
    '''
    Disconnect from the Douyu server
    and shut down the processes
    '''
msg = 'type@=logout/'
send_req_msg(msg)
    print('Logged out of the server')
def signal_handler(signal, frame):
    '''
    Catch the ctrl+c signal, i.e. signal.SIGINT.
    The handler:
    logs out of the Douyu server and
    terminates the child processes
    '''
p1.terminate()
p2.terminate()
logout()
print('Bye')
if __name__ == '__main__':
    #room_id = input('Enter room ID: ')
# lpl
room_id = 288016
    # Install the signal handler
signal.signal(signal.SIGINT, signal_handler)
    # Start the danmu and heartbeat processes
p1 = multiprocessing.Process(target=DM_start, args=(room_id,))
p2 = multiprocessing.Process(target=keeplive)
p1.start()
p2.start()
# ====== file: 03_spider_douyin/spider_douyin.py | repo: theThreeKingdom/python-exercises | license: Apache-2.0 ======
# -*- coding: utf-8 -*-
# @Time : 2020/4/1 0:48
# @Author : Nixin
# @Email : nixin@foxmail.com
# @File : spider_douyin.py
# @Software: PyCharm
import requests, re, sys, os, time, random, socket
import http.client
from bs4 import BeautifulSoup
def get_html(url, data=None):
header = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
timeout = random.choice(range(80, 100))
while True:
try:
response = requests.get(url, headers=header, timeout=timeout)
response.encoding = 'utf-8'
break
except socket.timeout as e:
print(e)
time.sleep(random.choice(range(20, 60)))
except socket.error as e:
print(e)
time.sleep(random.choice(range(20, 60)))
except http.client.BadStatusLine as e:
print(e)
time.sleep(random.choice(range(30, 60)))
except http.client.IncompleteRead as e:
print(e)
time.sleep(random.choice(range(20, 60)))
# print(response.text)
return response.text
def download_douyin(num, url):
rsp = get_html(url)
patt = 'playAddr: "(.*?)",'
play = re.compile(patt).findall(rsp)[0].replace("playwm", "play")
if not play.startswith('http'):
return 0
print(type(play))
print("url="+play)
header = {
'Accept': '*/*',
'Accept-Encoding': 'identity;q=1, *;q=0',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
}
res = requests.get(play, stream=True, headers=header)
path = 'E:/nixin/douyin/video/20200419/'
if not os.path.exists(path):
os.makedirs(path)
    pathinfo = 'E:/nixin/douyin/video/20200419/%d.mp4' % num  # %d formats an integer, %s a string
total_size = int(res.headers['Content-Length'])
    print('Total size of the video:', total_size)
temp_size = 0
if res.status_code == 200:
with open(pathinfo, 'wb') as file:
# file.write(res.content)
            # print(pathinfo + ' finished downloading')
            # For streaming downloads, iter_content() is the recommended way to read the
            # body: chunk_size=1024 fetches the stream in pieces and writes each piece to disk
for chunk in res.iter_content(chunk_size=1024):
if chunk:
temp_size += len(chunk)
file.write(chunk)
                    file.flush()  # flush the buffer
                    # progress bar: start
                    done = int(50 * temp_size / total_size)
                    # print('percent:', done)
                    # \r moves the cursor back to the start of the line, so
                    # stdout redraws the same line and the bar updates in place
                    sys.stdout.write("\r[%s%s] %d%%" % (
                        '█' * done, ' ' * (50 - done), 100 * temp_size / total_size) + " file: " + pathinfo + " downloaded")
                    sys.stdout.flush()  # flush the buffer
                    # progress bar: end
    print('\n')  # newline after each finished file
return 1
pass
def batch_download_douyin(start, pathtxt):
with open(pathtxt) as f:
        f_url_list = f.readlines()  # readlines() returns a list
for a in f_url_list:
print(a.strip())
if download_douyin(start, a.strip()) > 0:
start += 1
time.sleep(random.choice(range(3, 6)))
pass
if __name__ == '__main__':
# download_douyin(56, "https://v.douyin.com/3wV6PQ")
batch_download_douyin(80, "E:/nixin/douyin/video/20200419/1.txt")
pass
# ====== file: teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/logcat/pagefactory/logcat_template_path.py | repo: zhangyin2088/Teamcat | license: Apache-2.0 ======
#coding=utf-8
'''
Created on 2015-10-10
@author: Devuser
'''
class LogcatPagePath(object):
left_nav_template_path="home/home_left_nav.html"
logger_page_path="logcat/logcat_index.html"
logger_list_page="logcat/logcat_list_page.html"
logger_list_controll="logcat/logcat_loger_list_controll.html"
logger_content_container="logcat/logcat_logger_content.html"
class LogcatCommonPath(object):
logger_log_js="common/logcat_log.js"
# ====== file: icepll.py | repo: carlosedp/fusesoc-generators | license: MIT ======
#!/usr/bin/python
from fusesoc.capi2.generator import Generator
import subprocess
class IcepllGenerator(Generator):
def run(self):
fin = self.config.get('freq_in', 12)
fout = self.config.get('freq_out', 60)
module = self.config.get('module', False)
filename = self.config.get('filename', 'pll.v' if module else 'pll.vh')
args = ['icepll', '-f', filename, '-i', str(fin), '-o', str(fout)]
if module:
args.append('-m')
rc = subprocess.call(args)
if rc:
exit(1)
self.add_files([{filename : {'file_type' : 'verilogSource',
'is_include_file' : not module}}])
g = IcepllGenerator()
g.run()
g.write()
# ====== file: BUNKURO/BUNKURO.py | repo: kantoku-code/Fusion360_BUNKURO | license: MIT ======
# Author-kantoku
# Description - splits the design per component and creates clones!
# Fusion360API Python
import adsk.core
import traceback
try:
from . import config
from .apper import apper
from .commands.BUNKUROCore import BUNKUROCore
# Create our addin definition object
my_addin = apper.FusionApp(config.app_name, config.company_name, False)
my_addin.root_path = config.app_path
my_addin.add_command(
'ぶんくろ',
BUNKUROCore,
{
'cmd_description': 'コンポーネント毎に分割してクローン作るよ!',
'cmd_id': 'bunkuro',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'UtilityPanel',
'cmd_resources': 'BUNKURO',
'command_visible': True,
'command_promoted': False,
'create_feature': False,
}
)
except:
app = adsk.core.Application.get()
ui = app.userInterface
if ui:
ui.messageBox('Initialization: {}'.format(traceback.format_exc()))
def run(context):
my_addin.run_app()
def stop(context):
my_addin.stop_app()
| 23.021739
| 75
| 0.634561
| 109
| 1,059
| 5.972477
| 0.541284
| 0.053763
| 0.043011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003827
| 0.259679
| 1,059
| 45
| 76
| 23.533333
| 0.826531
| 0.097262
| 0
| 0
| 0
| 0
| 0.205047
| 0.045216
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.16129
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26c07cd4c709d13692e520d5fa627ce985733c5a
| 3,172
|
py
|
Python
|
sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py
|
MachineLP/SFC_models
|
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
|
[
"Apache-2.0"
] | 21
|
2016-11-03T12:30:50.000Z
|
2022-03-24T06:54:14.000Z
|
sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py
|
MachineLP/SFC_models
|
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
|
[
"Apache-2.0"
] | 1
|
2019-04-02T02:01:27.000Z
|
2019-04-07T21:07:10.000Z
|
sfc_models/examples/scripts/deprecated/ex20170108_model_PC.py
|
MachineLP/SFC_models
|
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
|
[
"Apache-2.0"
] | 12
|
2016-11-03T12:30:57.000Z
|
2021-09-14T23:08:23.000Z
|
"""
ex20170108_model_PC.py
Create Model PC (Godley & Lavoie Chapter 4).
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sfc_models.examples.Quick2DPlot import Quick2DPlot
from sfc_models.models import *
from sfc_models.sector import Market
from sfc_models.sector_definitions import Household, Treasury, CentralBank, TaxFlow, FixedMarginBusiness, DepositMarket, \
MoneyMarket
def main():
# Create model, which holds all entities
mod = Model()
# Create first country - Canada. (This model only has one country.)
can = Country(mod, 'CA', 'Canada')
# Create sectors
tre = Treasury(can, 'TRE', 'Treasury')
cb = CentralBank(can, 'CB', 'Central Bank')
hh = Household(can, 'HH', 'Household')
# A literally non-profit business sector
bus = FixedMarginBusiness(can, 'BUS', 'Business Sector')
# Create the linkages between sectors - tax flow, markets - labour ('LAB'), goods ('GOOD')
tax = TaxFlow(can, 'TF', 'TaxFlow', .2)
labour = Market(can, 'LAB', 'Labour market')
goods = Market(can, 'GOOD', 'Goods market')
# Add the financial markets
# GOV -> issuing sector
mm = MoneyMarket(can)
dep = DepositMarket(can)
# --------------------------------------------
# Financial asset demand equations
# Need to call this before we set the financial asset demand functions
mod._GenerateFullSectorCodes()
# Need the full variable name for 'F' in household
hh_F = hh.GetVariableName('F')
hh.AddVariable('DEM_MON', 'Demand for Money', '0.5 * ' + hh_F)
hh.AddVariable('DEM_DEP', 'Demand for deposits', '0.5 * ' + hh_F)
# -----------------------------------------------------------------
# Need to set the exogenous variables
# Government demand for Goods ("G" in economist symbology)
mod.AddExogenous('TRE', 'DEM_GOOD', '[20.,] * 105')
mod.AddExogenous('DEP', 'r', '[0.0,] * 5 + [0.04]*100')
mod.AddInitialCondition('HH', 'F', 80.)
# Build the model
# Output is put into two files, based on the file name passed into main() ['out_SIM_Machine_Model']
# (1) [out_YYY]_log.txt: Log file
# (2) [out_YYY].py: File that solves the system of equations
mod.MaxTime = 100
eqns = mod._main_deprecated('out_ex20170108_model_PC')
# Only import after the file is created (which is unusual).
import out_ex20170108_model_PC as SFCmod
obj = SFCmod.SFCModel()
obj.main()
obj.WriteCSV('out_ex20170108_model_PC.csv')
Quick2DPlot(obj.t[1:], obj.GOOD_SUP_GOOD[1:], 'Goods supplied (national production Y)')
Quick2DPlot(obj.t[1:], obj.HH_F[1:], 'Household Financial Assets (F)')
if __name__ == '__main__':
main()
| 39.65
| 122
| 0.669294
| 428
| 3,172
| 4.86215
| 0.474299
| 0.028832
| 0.024988
| 0.015377
| 0.01826
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029092
| 0.187264
| 3,172
| 79
| 123
| 40.151899
| 0.778123
| 0.482661
| 0
| 0
| 0
| 0
| 0.213003
| 0.03096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.147059
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26c5a0a8bb014c980c7a75f56eb95838d11757a4
| 2,287
|
py
|
Python
|
qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py
|
knktc/qingcloud-cli
|
2be8bba43e08bd7a76e1326ece871386cc9b5b55
|
[
"Apache-2.0"
] | 11
|
2015-05-27T19:52:36.000Z
|
2021-04-15T09:07:39.000Z
|
qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py
|
knktc/qingcloud-cli
|
2be8bba43e08bd7a76e1326ece871386cc9b5b55
|
[
"Apache-2.0"
] | 7
|
2017-07-19T05:05:03.000Z
|
2019-04-25T07:18:04.000Z
|
qingcloud/cli/iaas_client/actions/cluster/deploy_app_version.py
|
knktc/qingcloud-cli
|
2be8bba43e08bd7a76e1326ece871386cc9b5b55
|
[
"Apache-2.0"
] | 19
|
2016-03-15T07:31:47.000Z
|
2021-07-26T09:31:33.000Z
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.iaas import constants as const
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DeployAppVersionAction(BaseAction):
action = const.ACTION_DEPLOY_APP_VERSION
command = 'deploy-app-version'
usage = '%(prog)s -v <version_id> -c <conf> [-d <debug>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-v', '--version_id', dest='version_id',
action='store', type=str, default=None,
help='the ID of the application version you want to deploy.')
parser.add_argument('-c', '--conf', dest='conf',
action="store", type=str, default=None,
help='the JSON-format config string used to create the cluster')
parser.add_argument('-d', '--debug', dest='debug',
action="store", type=int, default=0,
help='whether to enable debug mode [0 or 1]')
@classmethod
def build_directive(cls, options):
if options.version_id is None:
print('error: version_id should be specified.')
return None
if options.conf is None:
print('error: conf should be specified.')
return None
directive = {
"version_id": options.version_id,
"conf": options.conf,
"debug": options.debug}
return directive
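# Hedged usage sketch (hypothetical values; assumes the qingcloud imports
# above resolve): what build_directive returns for parsed options.
from types import SimpleNamespace
opts = SimpleNamespace(version_id='appv-xxxxxxxx', conf='{"cluster_name": "demo"}', debug=0)
print(DeployAppVersionAction.build_directive(opts))
# -> {'version_id': 'appv-xxxxxxxx', 'conf': '{"cluster_name": "demo"}', 'debug': 0}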
| 40.122807
| 91
| 0.567118
| 262
| 2,287
| 4.885496
| 0.496183
| 0.054688
| 0.039844
| 0.025
| 0.098438
| 0.05625
| 0.05625
| 0.05625
| 0
| 0
| 0
| 0.006399
| 0.24836
| 2,287
| 56
| 92
| 40.839286
| 0.73822
| 0.35024
| 0
| 0.133333
| 0
| 0
| 0.246939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.366667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26c6baf54f78e9c92b1e52fb48aafcc91b720d02
| 1,409
|
py
|
Python
|
server/getSert.py
|
sviridovt/WIE
|
9af6d3dff7e774f5e332e6c77eadde815d4c375d
|
[
"MIT"
] | 1
|
2021-09-03T11:36:02.000Z
|
2021-09-03T11:36:02.000Z
|
server/getSert.py
|
sviridovt/WIE
|
9af6d3dff7e774f5e332e6c77eadde815d4c375d
|
[
"MIT"
] | null | null | null |
server/getSert.py
|
sviridovt/WIE
|
9af6d3dff7e774f5e332e6c77eadde815d4c375d
|
[
"MIT"
] | 1
|
2021-09-03T11:36:04.000Z
|
2021-09-03T11:36:04.000Z
|
# allows importing the RSA lib from a different dir
import sys
# inserts path to access RSA encryption lib
# sys.path.insert(0, '../RSAEncryption')
import socket
import json
from libs.communication import sendEncrypted, recvEncrypted  # sendData/readData are redefined locally below
from libs.RSAKeys import readPrivateKey
from libs.EncryptedSocket import EncryptedSocket
from libs.settings import *
HOST = '127.0.0.1'
PORT = 4444
printDebug = True
SSID = "SecureCanes"
def readData(conn):
    packetFile = open("packetText.txt", mode='a+')
    chunks = []
    while True:
        mess = conn.recv(512).decode('utf-8')
        packetFile.write(mess)  # keep an on-disk log of what was received
        chunks.append(mess)
        if len(mess) < 512:
            break
    packetFile.close()
    # the original re-read from the 'a+' handle, whose cursor sits at EOF
    # after writing, so read() returned ''; return the buffered text instead
    serverData = ''.join(chunks)
    return serverData
# sending data
def sendData(conn, data):
    dataFile = open("sendData.txt", mode='a+')
    dataFile.write(data)  # keep an on-disk log of what was sent
    dataFile.close()
    sent = 0  # was referenced before assignment in the original
    # send directly from the in-memory string in 512-character chunks
    for start in range(0, len(data), 512):
        packet = data[start:start + 512]
        conn.send(packet.encode('utf-8'))
        sent += len(packet)
    return sent
def renewCert(pubKey, SSID):
# Encrypted Sockets
s = EncryptedSocket(HOST, PORT)
# send SSID
s.send(SSID)
# receive certificate
cert = s.read()
fl = open(CERT_FILE, 'w+')
fl.write(cert)
s.close()
| 21.029851
| 79
| 0.675656
| 185
| 1,409
| 5.140541
| 0.432432
| 0.033649
| 0.050473
| 0.056782
| 0.115668
| 0.050473
| 0
| 0
| 0
| 0
| 0
| 0.023873
| 0.197303
| 1,409
| 66
| 80
| 21.348485
| 0.816976
| 0.178141
| 0
| 0.232558
| 0
| 0
| 0.058515
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.162791
| 0
| 0.27907
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26c71f804645b9d738d4394f797c6533de859d14
| 7,085
|
py
|
Python
|
code/billiard_game_multi_ball.py
|
ifsheldon/billiard_game
|
1ce13d39158734efd76e617bba2bb319d5498c3f
|
[
"BSD-2-Clause"
] | null | null | null |
code/billiard_game_multi_ball.py
|
ifsheldon/billiard_game
|
1ce13d39158734efd76e617bba2bb319d5498c3f
|
[
"BSD-2-Clause"
] | null | null | null |
code/billiard_game_multi_ball.py
|
ifsheldon/billiard_game
|
1ce13d39158734efd76e617bba2bb319d5498c3f
|
[
"BSD-2-Clause"
] | null | null | null |
import taichi as ti
import numpy as np
from functools import partial
from itertools import combinations
from billiard_game_dual_ball import normalize_vector, two_ball_collides, calc_next_pos_and_velocity, \
calc_after_collision_velocity, rectify_positions_in_collision, rectify_positions_and_velocities
# Constants
WHITE = 0xFFFFFF
RED = 0xFF0000
GREEN = 0x00FF00
BLUE = 0x0000FF
# wc for world space x[0.0, ratio], y[0.0, 1.0]
# sc for screen space [0.0, 1.0]^2
# Constant parameters
RESOLUTION = (1230, 750)
RATIO = RESOLUTION[0] / RESOLUTION[1] # x/y
FPS = 60
CUE_BALL_IDX = 0
STICK_LENGTH_SC = 0.1
DRAG_COEFFICIENT = 0.03
G = 9.8
CUE_BALL_MAX_SPEED_WC = 1.0
BALL_PIXEL_RADIUS = 10
HOLE_PIXEL_RADIUS = 15
num_balls = 1
# Derived parameters
ball_radius_wc = BALL_PIXEL_RADIUS / RESOLUTION[1]
hole_radius_wc = HOLE_PIXEL_RADIUS / RESOLUTION[1]
x_begin_wc = 0.0
x_end_wc = RATIO
y_begin_wc = 0.0
y_end_wc = 1.0
def score(hole_center_positions, ball_position):
# Don't care now
diff = hole_center_positions - ball_position.reshape(1, 2)
square_dist = (diff ** 2).sum(axis=-1)
radii_square_sum = (0.8 * ball_radius_wc + hole_radius_wc) ** 2
return np.any(square_dist <= radii_square_sum)
def place_balls_wc(span_wc, offset_wc):
# No need now
ball_pos_wc = np.zeros((num_balls, 2))
for i in range(num_balls):
ball_i_pos_wc = np.random.rand(2) * span_wc + offset_wc
if i != CUE_BALL_IDX:
while two_ball_collides(ball_pos_wc[CUE_BALL_IDX], ball_i_pos_wc, ball_radius_wc):
ball_i_pos_wc = np.random.rand(2) * span_wc + offset_wc
ball_pos_wc[i] = ball_i_pos_wc
return ball_pos_wc
if __name__ == "__main__":
ti.init(ti.cpu)
print("Press A to kick the cue ball")
wc_to_sc_multiplier = np.array([1 / RATIO, 1])  # transform to [0,1]^2 screen space
sc_to_wc_multiplier = np.array([RATIO, 1])
virtual_bound_x = np.array([ball_radius_wc, x_end_wc - ball_radius_wc])
virtual_bound_y = np.array([ball_radius_wc, y_end_wc - ball_radius_wc])
dx_wc = x_end_wc / 2.
dy_wc = y_end_wc / 2.
hole_pos_x = np.arange(3) * dx_wc
hole_pos_y = np.arange(3) * dy_wc
hole_pos_x, hole_pos_y = np.meshgrid(hole_pos_x, hole_pos_y)
hole_center_positions_wc = np.stack([hole_pos_x, hole_pos_y], axis=-1).reshape(-1, 2) # (3, 3, 2) -> (9, 2)
hole_center_positions_wc = np.delete(hole_center_positions_wc, 4, axis=0)
hole_center_positions_sc = hole_center_positions_wc * wc_to_sc_multiplier.reshape(1, 2)
ball_velocities_wc = np.zeros((num_balls, 2))
ball_visible = np.ones(num_balls, dtype=bool)
span_wc = np.array([virtual_bound_x[1] - virtual_bound_x[0], virtual_bound_y[1] - virtual_bound_y[0]])
offset_wc = np.array([virtual_bound_x[0], virtual_bound_y[0]])
ball_pos_wc = place_balls_wc(span_wc, offset_wc)
gui = ti.GUI("billiard_game_multi_ball", RESOLUTION)
gui.fps_limit = FPS
delta_t = 1.0 / FPS
boundary_begin_wc = np.array([
[x_begin_wc, y_begin_wc],
[x_begin_wc, y_begin_wc],
[x_end_wc, y_end_wc],
[x_end_wc, y_end_wc]
])
boundary_end_wc = np.array([
[x_end_wc, y_begin_wc],
[x_begin_wc, y_end_wc],
[x_end_wc, y_begin_wc],
[x_begin_wc, y_end_wc]
])
# a convenient partial function of rectify_positions_and_velocities
rectify_pv = partial(rectify_positions_and_velocities,
virtual_bound_x[0], virtual_bound_x[1],
virtual_bound_y[0], virtual_bound_y[1])
ball_pairs = list(combinations(range(num_balls), 2))
ball_color_indices = np.ones(num_balls)
ball_color_indices[CUE_BALL_IDX] = 0
ball_colors = [WHITE, RED]
while gui.running:
gui.clear(GREEN)
hit_ball = gui.get_event(ti.GUI.PRESS) and gui.is_pressed("a")
cue_ball_pos_sc = ball_pos_wc[CUE_BALL_IDX] * wc_to_sc_multiplier
# in the current setting, the mouse is available only when all balls are stationary
if np.allclose((ball_velocities_wc ** 2).sum(-1), 0., rtol=0.001, atol=0.001) and ball_visible[CUE_BALL_IDX]:
rod_dir_sc, length = normalize_vector(gui.get_cursor_pos() - cue_ball_pos_sc)
rod_line = rod_dir_sc * min(STICK_LENGTH_SC, length)
gui.line(cue_ball_pos_sc, cue_ball_pos_sc + rod_line, radius=2)
if hit_ball:
ball_velocities_wc[CUE_BALL_IDX] = (rod_dir_sc * sc_to_wc_multiplier) \
* CUE_BALL_MAX_SPEED_WC * (min(STICK_LENGTH_SC,
length) / STICK_LENGTH_SC) # modify the speed with a multiplier dependent on the distance between mouse and the cue ball
# for i in range(num_balls): # for each ball, if score() returns True, set this ball invisible
# # Not care now
# if score(hole_center_positions_wc, ball_pos_wc[i]):
# ball_visible[i] = False
# ball_velocities_wc[i] = 0.
# No need to care about this in verilog
gui.lines(begin=boundary_begin_wc, end=boundary_end_wc, radius=2)
gui.circles(ball_pos_wc[ball_visible] * wc_to_sc_multiplier.reshape(1, 2),
radius=BALL_PIXEL_RADIUS,
palette=ball_colors,
palette_indices=ball_color_indices[ball_visible])
gui.circles(hole_center_positions_sc, radius=HOLE_PIXEL_RADIUS, color=0)
gui.show()
for i in range(num_balls): # unroll this loop for the two ball case
if not ball_visible[i]:
continue
next_pos_wc, next_velocity_wc = calc_next_pos_and_velocity(ball_pos_wc[i], ball_velocities_wc[i],
delta_t, DRAG_COEFFICIENT, G)
next_pos_wc, next_velocity_wc = rectify_pv(next_pos_wc, next_velocity_wc)
ball_pos_wc[i] = next_pos_wc
ball_velocities_wc[i] = next_velocity_wc
for ball_i, ball_j in ball_pairs: # only one iteration for the two ball case, since we have only one pair
if not ball_visible[ball_i] or not ball_visible[ball_j]:
continue
ball_i_pos_wc = ball_pos_wc[ball_i]
ball_j_pos_wc = ball_pos_wc[ball_j]
if two_ball_collides(ball_i_pos_wc, ball_j_pos_wc, ball_radius_wc):
ball_i_pos_wc, ball_j_pos_wc = rectify_positions_in_collision(ball_i_pos_wc, ball_j_pos_wc,
ball_radius_wc)
ball_i_v_wc = ball_velocities_wc[ball_i]
ball_j_v_wc = ball_velocities_wc[ball_j]
ball_i_v_wc, ball_j_v_wc = calc_after_collision_velocity(ball_i_pos_wc, ball_j_pos_wc,
ball_i_v_wc, ball_j_v_wc)
ball_velocities_wc[ball_i] = ball_i_v_wc
ball_velocities_wc[ball_j] = ball_j_v_wc
| 44.006211
| 203
| 0.651941
| 1,119
| 7,085
| 3.701519
| 0.18588
| 0.047803
| 0.03042
| 0.021729
| 0.355867
| 0.257605
| 0.146548
| 0.103573
| 0.060357
| 0.049252
| 0
| 0.023709
| 0.261821
| 7,085
| 160
| 204
| 44.28125
| 0.76826
| 0.117572
| 0
| 0.096774
| 0
| 0
| 0.009794
| 0.003854
| 0
| 0
| 0.005138
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.040323
| 0
| 0.072581
| 0.008065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26c8199913901f96201fe9b8091ee36c1351a53e
| 347
|
py
|
Python
|
examples/prompt.py
|
nelice/bullet
|
aafec4d0ca8f628d2be9b0667c50477929c2cca7
|
[
"MIT"
] | 1
|
2021-03-22T07:55:30.000Z
|
2021-03-22T07:55:30.000Z
|
examples/prompt.py
|
nelice/bullet
|
aafec4d0ca8f628d2be9b0667c50477929c2cca7
|
[
"MIT"
] | null | null | null |
examples/prompt.py
|
nelice/bullet
|
aafec4d0ca8f628d2be9b0667c50477929c2cca7
|
[
"MIT"
] | null | null | null |
from bullet import Bullet, Prompt, Check, Input, YesNo
from bullet import styles
cli = Prompt(
[
Bullet("Choose from a list: ", **styles.Example),
Check("Choose from a list: ", **styles.Example),
Input("Who are you? "),
YesNo("Are you a student? ")
],
spacing = 2
)
result = cli.launch()
print(result)
| 23.133333
| 57
| 0.599424
| 44
| 347
| 4.727273
| 0.5
| 0.096154
| 0.153846
| 0.144231
| 0.269231
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0.003906
| 0.262248
| 347
| 15
| 58
| 23.133333
| 0.808594
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26cacd8b2394e2ededf66d1f7ced4b0560e95348
| 594
|
py
|
Python
|
src/volume_0/0011_Drawing_Lots.py
|
DaikiShimada/aoj-exercise
|
dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803
|
[
"MIT"
] | null | null | null |
src/volume_0/0011_Drawing_Lots.py
|
DaikiShimada/aoj-exercise
|
dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803
|
[
"MIT"
] | null | null | null |
src/volume_0/0011_Drawing_Lots.py
|
DaikiShimada/aoj-exercise
|
dd4b70d4fd64aa28bc4cc75f5cdb8d02ea796803
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
def amida(w, side_bar):
result = []
side_bar.reverse()
for x in range(1, w+1):
status = x
for bar in side_bar:
if status == bar[0]:
status = bar[1]
elif status == bar[1]:
status = bar[0]
result.append(status)
return result
def main():
W = int(input())
N = int(input())
side_bar = [tuple(map(int, input().split(','))) for line in range(N)]
result = amida(W, side_bar)
for r in result:
print(r)
if __name__ == '__main__':
main()
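# Hedged usage sketch (comment only, not part of the original submission):
# with width 2 and a single horizontal bar joining lines 1 and 2, the two
# lots simply swap places:
#   amida(2, [(1, 2)])  ->  [2, 1]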
| 21.214286
| 73
| 0.503367
| 82
| 594
| 3.487805
| 0.414634
| 0.122378
| 0.06993
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017995
| 0.345118
| 594
| 27
| 74
| 22
| 0.717224
| 0.035354
| 0
| 0
| 0
| 0
| 0.015762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.181818
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26cbd6df4059d6dbdf0c29f052b92ccdc1a7a881
| 1,832
|
py
|
Python
|
mglg/util/profiler.py
|
aforren1/mglg
|
a9b703e109a66377dd404929fc0b13ccc12b5214
|
[
"MIT"
] | null | null | null |
mglg/util/profiler.py
|
aforren1/mglg
|
a9b703e109a66377dd404929fc0b13ccc12b5214
|
[
"MIT"
] | 9
|
2019-08-05T21:11:09.000Z
|
2021-11-18T18:19:33.000Z
|
mglg/util/profiler.py
|
aforren1/mglg
|
a9b703e109a66377dd404929fc0b13ccc12b5214
|
[
"MIT"
] | null | null | null |
from timeit import default_timer
import numpy as np
class Profiler:
__slots__ = ('active', 'gpuquery', 't0',
'cpubuffer', 'gpubuffer', 'counter',
'_size', 'worst_cpu', 'worst_gpu')
def __init__(self, gpu=False, ctx=None, buffer_size=200):
self.active = False
self.gpuquery = None
if gpu and ctx is not None:
self.gpuquery = ctx.query(time=True)
self.cpubuffer = np.zeros(buffer_size, dtype='f4')
self.gpubuffer = np.zeros(buffer_size, dtype='f4')
self._size = buffer_size
self.counter = 0
self.worst_cpu = 0
self.worst_gpu = 0
def begin(self):
if self.active:
if self.gpuquery:
self.gpuquery.mglo.begin()
self.t0 = default_timer()
def end(self):
t1 = default_timer()
if self.active:
if self.gpuquery:
self.gpuquery.mglo.end()
if self.counter < self._size:
self.worst_gpu = 0
self.worst_cpu = 0
cpu_time = (t1 - self.t0) * 1000 # ms
self.cpubuffer[self.counter % self._size] = cpu_time
self.worst_cpu = cpu_time if cpu_time > self.worst_cpu else self.worst_cpu
if self.gpuquery:
gpu_time = self.gpuquery.elapsed/1000000.0 # ms
self.gpubuffer[self.counter % self._size] = gpu_time
self.worst_gpu = gpu_time if gpu_time > self.worst_gpu else self.worst_gpu
self.counter += 1
def reset(self):
self.cpubuffer[:] = 0
self.gpubuffer[:] = 0
self.counter = 0
self.worst_cpu = 0
self.worst_gpu = 0
def __enter__(self):
self.begin()
return self
def __exit__(self, *args):
self.end()
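# Hedged usage sketch (CPU-only; no moderngl context, so GPU timing is off):
prof = Profiler(buffer_size=10)
prof.active = True  # profiling is opt-in via this flag
with prof:
    sum(range(100000))  # the work being timed
print(prof.counter, prof.worst_cpu)  # one sample recorded, worst CPU time in ms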
| 31.586207
| 90
| 0.555131
| 229
| 1,832
| 4.222707
| 0.240175
| 0.111686
| 0.074457
| 0.040331
| 0.322647
| 0.229576
| 0.229576
| 0.171665
| 0.171665
| 0.084798
| 0
| 0.027341
| 0.341157
| 1,832
| 57
| 91
| 32.140351
| 0.773819
| 0.002729
| 0
| 0.265306
| 0
| 0
| 0.037281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0
| 0.040816
| 0
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26cf29a0e44e798901be0b42a84cea83caaf14fe
| 364
|
py
|
Python
|
plugins/rain.py
|
xditya/PikaBotPlugins
|
2c5c52716158cd8964220bcc71fa383ccaf1210a
|
[
"Apache-2.0"
] | 2
|
2021-02-16T05:35:41.000Z
|
2021-05-25T16:59:47.000Z
|
plugins/rain.py
|
xditya/PikaBotPlugins
|
2c5c52716158cd8964220bcc71fa383ccaf1210a
|
[
"Apache-2.0"
] | null | null | null |
plugins/rain.py
|
xditya/PikaBotPlugins
|
2c5c52716158cd8964220bcc71fa383ccaf1210a
|
[
"Apache-2.0"
] | 2
|
2021-02-07T03:09:40.000Z
|
2021-05-25T16:59:59.000Z
|
#Originally created By KingMars ✅ Rain Sequence 2 {Updated}
from telethon import events
import asyncio
from collections import deque
@ItzSjDude(outgoing=True, pattern=r"km_rain2")
async def _(event):
if event.fwd_from:
return
deq = deque(list("☁️⛈Ř/~\İŇ🌬⚡🌪"))
for _ in range(100):
await asyncio.sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
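# Hedged sketch of the rotation effect (runnable without Telethon): each
# rotate(1) shifts every character one step to the right, so the string
# appears to scroll across the repeatedly edited message.
demo = deque("abc")
demo.rotate(1)
print("".join(demo))  # -> cab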
| 22.75
| 59
| 0.717033
| 60
| 364
| 4.416667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.142857
| 364
| 15
| 60
| 24.266667
| 0.798077
| 0.159341
| 0
| 0
| 0
| 0
| 0.065574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26cfb507f5245413925f5d6ffbbfcea4aa484298
| 6,126
|
py
|
Python
|
plot.py
|
lizzieayton/PrimordialOozebot
|
1e330b1ac6f27bd167734ad6c6ecff70f816986a
|
[
"MIT"
] | null | null | null |
plot.py
|
lizzieayton/PrimordialOozebot
|
1e330b1ac6f27bd167734ad6c6ecff70f816986a
|
[
"MIT"
] | null | null | null |
plot.py
|
lizzieayton/PrimordialOozebot
|
1e330b1ac6f27bd167734ad6c6ecff70f816986a
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import csv
import statistics
import math
plt.title('Population Diversity')
plt.ylabel('Diversity Score')
plt.xlabel('Iteration Number')
random = []
randombars = []
rmin = []
rmax = []
hill = []
hillbars = []
hmin = []
hmax = []
evo = []
emin = []
emax = []
evobars = []
cross = []
crossbars = []
cmin = []
cmax = []
numRuns = 5
numIterations = 100000000
sqrtRuns = math.sqrt(numRuns)
iterationDataRandom = []
iterationDataHill = []
iterationDataEvo = []
iterationDataCross = []
indicesToPlot = [10, 15, 20, 25]
index = 60
while indicesToPlot[-1] < numIterations:
indicesToPlot.append(index)
index = int(index * 1.02)
indicesToPlot[-1] = numIterations - 1
#xtiks = []
#for i in range(10):
# xtiks.append(int(numIterations / 5 * i))
#plt.xticks(xtiks)
for i in range(1, numRuns + 1):
iterationDataRandom.append({})
iterationDataHill.append({})
iterationDataEvo.append({})
iterationDataCross.append({})
with open('rand' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0])
val = float(vals[1])
while index < len(indicesToPlot) - 1 and indicesToPlot[index + 1] < iteration:
index += 1
iterationDataRandom[-1][indicesToPlot[index]] = val
with open('hill' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0])
val = float(vals[2])
while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration:
index += 1
iterationDataHill[-1][indicesToPlot[index]] = val
with open('evo' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0]) * 100
val = float(vals[2])
while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration:
index += 1
iterationDataEvo[-1][indicesToPlot[index]] = val
with open('ed' + str(i) + '.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
index = 0
for row in reversed(list(reader)):
vals = row[0].split(',')
iteration = int(vals[0])
val = float(vals[2])
while index < len(indicesToPlot) - 1 and indicesToPlot[index] < iteration:
index += 1
iterationDataCross[-1][indicesToPlot[index]] = val
print("Done reading data")
unifiedRandom = []
unifiedHill = []
unifiedEvo = []
unifiedCross = []
index = 0
for iteration in indicesToPlot:
currentRandom = []
currentHill = []
currentEvo = []
currentCross = []
unifiedRandom.append(currentRandom)
unifiedHill.append(currentHill)
unifiedEvo.append(currentEvo)
unifiedCross.append(currentCross)
for run in range(numRuns):
valRandom = -1
if iteration in iterationDataRandom[run]:
valRandom = iterationDataRandom[run][iteration]
else:
# unchanged
valRandom = unifiedRandom[-2][run]
currentRandom.append(valRandom)
valHill = -1
if iteration in iterationDataHill[run]:
valHill = iterationDataHill[run][iteration]
else:
# unchanged
valHill = unifiedHill[-2][run]
currentHill.append(valHill)
valEvo = -1
if iteration in iterationDataEvo[run]:
valEvo = iterationDataEvo[run][iteration]
else:
#unchanged
valEvo = unifiedEvo[-2][run]
currentEvo.append(valEvo)
valCross = -1
if iteration in iterationDataCross[run]:
valCross = iterationDataCross[run][iteration]
else:
#unchanged
valCross = unifiedCross[-2][run]
currentCross.append(valCross)
randomAverage = statistics.mean(currentRandom)
randomError = statistics.stdev(currentRandom) / sqrtRuns
random.append(randomAverage)
randombars.append(randomError)
hillAverage = statistics.mean(currentHill)
hillError = statistics.stdev(currentHill) / sqrtRuns
hill.append(hillAverage)
hillbars.append(hillError)
evoAverage = statistics.mean(currentEvo)
evoError = statistics.stdev(currentEvo) / sqrtRuns
evo.append(evoAverage)
evobars.append(evoError)
crossAverage = statistics.mean(currentCross)
crossError = statistics.stdev(currentCross) / sqrtRuns
cross.append(crossAverage)
crossbars.append(crossError)
for i in range(len(random)):
rmin.append(random[i] - randombars[i])
rmax.append(random[i] + randombars[i])
hmin.append(hill[i] - hillbars[i])
hmax.append(hill[i] + hillbars[i])
emin.append(evo[i] - evobars[i])
emax.append(evo[i] + evobars[i])
cmin.append(cross[i] - crossbars[i])
cmax.append(cross[i] + crossbars[i])
print("Done processing data")
plt.xscale('log')
#plt.yscale('log')
#plt.plot(indicesToPlot, random, color='blue', linewidth=1, label='Random Search')
plt.plot(indicesToPlot, hill, color='green', linewidth=1, label='Parallel Hill Climb')
plt.plot(indicesToPlot, evo, color='red', linewidth=1, label='Weighted Selection')
plt.plot(indicesToPlot, cross, color='blue', linewidth=1, label='Parental Replacement')
plt.fill_between(indicesToPlot, hmin, hmax, facecolor='green', lw=0, alpha=0.5)
plt.fill_between(indicesToPlot, emin, emax, facecolor='red', lw=0, alpha=0.5)
plt.fill_between(indicesToPlot, cmin, cmax, facecolor='blue', lw=0, alpha=0.5)
#plt.fill_between(indicesToPlot, rmin, rmax, facecolor='blue', lw=0, alpha=0.5)
plt.legend(loc='best')
plt.savefig('diversityp.png', dpi=500)
plt.show()
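# Hedged sketch (hypothetical numbers) of the error-band arithmetic used
# above: each shaded band is the mean +/- the standard error, i.e. the
# sample stdev divided by sqrt(number of runs).
demo_runs = [0.52, 0.47, 0.50, 0.49, 0.55]
demo_err = statistics.stdev(demo_runs) / math.sqrt(len(demo_runs))
print(statistics.mean(demo_runs) - demo_err, statistics.mean(demo_runs) + demo_err)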
| 30.939394
| 90
| 0.623572
| 658
| 6,126
| 5.799392
| 0.218845
| 0.037736
| 0.011792
| 0.014675
| 0.334382
| 0.269392
| 0.245807
| 0.245807
| 0.223008
| 0.19392
| 0
| 0.019503
| 0.238328
| 6,126
| 197
| 91
| 31.096447
| 0.798328
| 0.049625
| 0
| 0.216561
| 0
| 0
| 0.039752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025478
| 0
| 0.025478
| 0.012739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26cfea22c43edc42786c9199d503d77927f66e4d
| 1,918
|
py
|
Python
|
python/obra_hacks/backend/commands.py
|
brandond/obra-hacks
|
df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3
|
[
"Apache-2.0"
] | null | null | null |
python/obra_hacks/backend/commands.py
|
brandond/obra-hacks
|
df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3
|
[
"Apache-2.0"
] | null | null | null |
python/obra_hacks/backend/commands.py
|
brandond/obra-hacks
|
df451c6c6cd78b48f6e32bbd102a8e8a6bd77cb3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from datetime import date
import click
from .data import DISCIPLINE_MAP
from .outputs import OUTPUT_MAP
@click.command()
@click.option('--discipline', type=click.Choice(DISCIPLINE_MAP.keys()), required=True)
@click.option('--output', type=click.Choice(sorted(OUTPUT_MAP.keys())), default='text')
@click.option('--scrape/--no-scrape', default=True)
@click.option('--debug/--no-debug', default=False)
def cli(discipline, output, scrape, debug):
log_level = 'DEBUG' if debug else 'INFO'
logging.basicConfig(level=log_level, format='%(levelname)s:%(module)s.%(funcName)s:%(message)s')
# Import these after setting up logging otherwise we don't get logs
from .scrapers import clean_events, scrape_year, scrape_new, scrape_parents, scrape_recent
from .upgrades import confirm_pending_upgrades, recalculate_points, print_points, sum_points
from .rankings import calculate_race_ranks
from .models import db
with db.atomic('IMMEDIATE'):
if scrape:
# Scrape the past six years plus the current year of results
cur_year = date.today().year
for year in range(cur_year - 6, cur_year + 1):
scrape_year(year, discipline)
scrape_parents(year, discipline)
clean_events(year, discipline)
# Load in anything new
scrape_new(discipline)
# Check for updates to anything touched in the last three days
scrape_recent(discipline, 3)
# Calculate points from new data
if recalculate_points(discipline, incremental=False):
calculate_race_ranks(discipline, incremental=False)
sum_points(discipline)
confirm_pending_upgrades(discipline)
# Finally, output data
print_points(discipline, output)
if __name__ == '__main__':
cli()
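# Hedged usage sketch (hypothetical invocation; the module path is taken
# from this record's file path, and 'cyclocross' must be a key of
# DISCIPLINE_MAP, so it is illustrative only):
#   python -m obra_hacks.backend.commands --discipline cyclocross --no-scrape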
| 34.25
| 100
| 0.684567
| 239
| 1,918
| 5.301255
| 0.451883
| 0.034728
| 0.023678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003313
| 0.213243
| 1,918
| 55
| 101
| 34.872727
| 0.836315
| 0.142336
| 0
| 0
| 0
| 0
| 0.08369
| 0.029933
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.294118
| 0
| 0.323529
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26d2a8925926b05405485ed3b4fa01550942c26f
| 657
|
py
|
Python
|
join_json.py
|
ryavorsky/med_robo
|
56f8d2067921ef7208166380e50af0600c10032a
|
[
"CC0-1.0"
] | null | null | null |
join_json.py
|
ryavorsky/med_robo
|
56f8d2067921ef7208166380e50af0600c10032a
|
[
"CC0-1.0"
] | null | null | null |
join_json.py
|
ryavorsky/med_robo
|
56f8d2067921ef7208166380e50af0600c10032a
|
[
"CC0-1.0"
] | null | null | null |
import json
with open('bibliography.json', 'r', encoding='utf-8') as bib_data:
bib = sorted(json.load(bib_data), key=lambda d: d['ID'])
with open('abstracts.json', 'r', encoding='utf-8') as tex_data:
tex = sorted(json.load(tex_data), key=lambda d: d['ID'])
ID1 = [b['ID'] for b in bib]
ID2 = [t['ID'] for t in tex]
for i in range(len(ID1)):
bib[i]['reference'] = tex[i]['title']
bib[i]['abstract'] = tex[i]['abstract']
print('Done')
with open('med_robo_papers.json', 'w', encoding='utf-8') as res_file:
res_file.write(json.dumps(bib, indent=4, ensure_ascii=False, sort_keys=True))
res_file.close()
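# Hedged sketch (hypothetical data, not in the original) of the positional
# join above: both lists are sorted by 'ID' first, so index i addresses the
# same record in each.
demo_bib = sorted([{'ID': 2}, {'ID': 1}], key=lambda d: d['ID'])
demo_tex = sorted([{'ID': 1, 'title': 'T1'}, {'ID': 2, 'title': 'T2'}], key=lambda d: d['ID'])
for i in range(len(demo_bib)):
    demo_bib[i]['reference'] = demo_tex[i]['title']
print([b['reference'] for b in demo_bib])  # -> ['T1', 'T2']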
| 28.565217
| 82
| 0.614916
| 110
| 657
| 3.572727
| 0.463636
| 0.061069
| 0.091603
| 0.10687
| 0.183206
| 0.183206
| 0
| 0
| 0
| 0
| 0
| 0.01306
| 0.18417
| 657
| 22
| 83
| 29.863636
| 0.720149
| 0
| 0
| 0
| 0
| 0
| 0.175079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26d8d630adbf36e69e2b1f614c164c0bdbf94301
| 7,563
|
py
|
Python
|
pizzerias/pizzerias_search.py
|
LiushaHe0317/pizzerias_block_search
|
16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b
|
[
"MIT"
] | null | null | null |
pizzerias/pizzerias_search.py
|
LiushaHe0317/pizzerias_block_search
|
16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b
|
[
"MIT"
] | null | null | null |
pizzerias/pizzerias_search.py
|
LiushaHe0317/pizzerias_block_search
|
16dd7fb20b1a29a4f16b28ac7e5a84b30f7f9a7b
|
[
"MIT"
] | null | null | null |
from typing import Sequence
import numpy
class PizzeriasSearcher:
"""
This object takes the size of the city and number of shops, and construct the matrices each shop delivery can cover
and number of delivery for each cell in the city. It can also computes number of delivery for a given cell,
maximum of number of delivery, and a sequence of cell coordinates which have the maximum.
:param n_of_block: An integer which indicates the size of the city.
:param shop_covers: A sequence of sequences, each sequence contains a tuple of two integers representing the
coordinate of a pizzerias shop and an integer representing the distance the shop could cover.
"""
def __init__(self, n_of_block: int, shop_covers: Sequence):
self.n_of_block = n_of_block
self.shop_covers = shop_covers
def each_shop_matrix(self, shop_loc: Sequence):
"""
This method takes the location of a shop and the dimensionality of the city, and converts it to a 2D ``numpy.ndarray``
that marks the whole area the shop's delivery service can cover.
:param shop_loc: A sequence containing a tuple of two integers which indicate the coordinates on x- and y- axis and
an integer which indicates the farthest distance a delivery guy can go.
:return: A 2D ``numpy.ndarray``.
"""
(x_initial, y_initial), r = shop_loc
matrix = numpy.zeros([self.n_of_block, self.n_of_block])
# convert x, y coordinates
x_center = x_initial - 1 # in numpy, x axis = 1
y_center = self.n_of_block - y_initial # in numpy, y axis = 0
# create a list of x or y coordinate which indicates the cells the shop could cover
x_list = [x for x in range(x_center-r, x_center+r+1) if x >= 0 and x < self.n_of_block]
# y_list = [y for y in range(y_center-r, y_center+r+1) if y >= 0 and y <= n_of_block-1]
for d1 in x_list:
high_bound = y_center + r - numpy.abs(d1 - x_center) + 1
low_bound = y_center - r + numpy.abs(d1 - x_center)
matrix[low_bound:high_bound, d1] = 1
return matrix
def area_matrix(self, loc: Sequence, radius: int):
"""
This method takes a tuple of coordinates and a radius, construct a sub-matrix of the city matrix accordingly.
:param loc: A tuple of integers.
:param radius: An integer.
:return: A 2D ``numpy.ndarray``.
"""
x_initial, y_initial = loc
if y_initial < 0 or x_initial > self.n_of_block or x_initial < 0 or y_initial > self.n_of_block:
raise ValueError('The location is out of city range.')
else:
y_center = self.n_of_block - y_initial
x_center = x_initial - 1
low0 = y_center - radius if y_center - radius >= 0 else 0
high0 = y_center + radius + 1 if y_center + radius + 1 <= self.n_of_block else self.n_of_block
left1 = x_center - radius if x_center - radius >= 0 else 0
right1 = x_center + radius + 1 if x_center + radius + 1 <= self.n_of_block else self.n_of_block
return self.pizzerias_matrix[low0: high0, left1: right1]
def maximum_in_matrix(self, matrix=None):
"""
This method returns the maximum a city block could have.
:param matrix: A ``numpy.ndarray``.
:return: An integer.
"""
if isinstance(matrix, numpy.ndarray):
return int(numpy.amax(matrix))
elif matrix is None:
return int(numpy.amax(self.pizzerias_matrix))
else:
raise Exception('Accept numpy.ndarray only!')
def max_locations(self, matrix=None, d0_start=0, d1_start=0):
"""
This method returns the set of cells that have the maximum.
:param matrix: A ``numpy.ndarray``.
:param d0_start: An integer.
:param d1_start: An integer.
:return: A set of tuples.
"""
if matrix is None:
d0, d1 = numpy.where(self.pizzerias_matrix == numpy.amax(self.pizzerias_matrix))
return {(x + 1, self.n_of_block - d0[i]) for i, x in enumerate(d1)}
elif isinstance(matrix, numpy.ndarray):
d0, d1 = numpy.where(matrix == numpy.amax(matrix))
return {(x + 1 + d1_start, self.n_of_block - (d0[i] + d0_start)) for i, x in enumerate(d1)}
else:
raise Exception('Accept numpy.ndarray only!')
@property
def no_of_pizzeriass(self):
"""
This method returns the total number of shops in the city.
"""
return len(self.shop_covers)
@property
def pizzerias_matrix(self):
"""
This method returns a matrix indicating the whole picture of pizzerias delivery services.
"""
p_matrix = numpy.zeros([self.n_of_block, self.n_of_block])
for shop_loc in self.shop_covers:
p_matrix += self.each_shop_matrix(shop_loc)
return p_matrix
def check_location(self, home_loc: Sequence, report=False):
"""
This method takes a tuple of two integers which indicate the coordinate of a given home location.
:param home_loc: A tuple of integers.
:return: The number of deliveries at the current location.
"""
num = self.pizzerias_matrix[self.n_of_block - home_loc[1], home_loc[0] - 1]
if report:
if num == 0:
print("Unfortunately, there is no delivery service in your current location.")
else:
print(f'Cool, {int(num)} pizzerias could cover your current location.')
return num
def check_area(self, loc: Sequence, radius: int, report=False):
"""
This method takes a location coordinate and a radius and searches the delivery services around the specified area.
:param loc: A tuple of integers.
:param radius: An integer.
:param report: A boolean that indicates whether or not print a report.
:return:
- A sub-matrix of the pizzerias matrix which is created in terms of specified range.
- A maximum in this area.
- A set of cells that have maximum.
"""
matrix = self.area_matrix(loc, radius)
x_initial, y_initial = loc
y_center = self.n_of_block - y_initial
x_center = x_initial - 1
low0 = y_center - radius if y_center - radius >= 0 else 0
left1 = x_center - radius if x_center - radius >= 0 else 0
maximum = self.maximum_in_matrix(matrix)
max_set = self.max_locations(matrix=matrix, d0_start=low0, d1_start=left1)
if report:
print(f"In the given area, there are {len(max_set)} areas where {maximum} Pizzerias delivery service "
f"can cover, they are: ", max_set)
return matrix, maximum, max_set
def check_city(self, report=False):
"""
This method returns the matrix, the maximum and a set of maximum tuple of cells.
:param report: A boolean indicating whether or not print report.
:return:
- The pizzerias matrix.
- A maximum in this the pizzerias matrix.
- A set of cells that have maximum.
"""
if report:
print(f"There are {len(self.max_locations())} area(s) where {self.maximum_in_matrix()} Pizzerias can cover, "
f"they are: ", self.max_locations())
return self.pizzerias_matrix, self.maximum_in_matrix(), self.max_locations()
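# Hedged usage sketch (hypothetical data): a 5x5 city with two shops,
# ((3, 3), 2) meaning a shop at block (3, 3) delivering up to 2 blocks away.
searcher = PizzeriasSearcher(5, [((3, 3), 2), ((1, 1), 1)])
print(searcher.check_location((3, 3)))  # deliveries covering block (3, 3)
print(searcher.max_locations())         # blocks with the best coverage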
| 42.728814
| 123
| 0.627793
| 1,092
| 7,563
| 4.203297
| 0.14652
| 0.014379
| 0.038344
| 0.049673
| 0.301961
| 0.216776
| 0.185839
| 0.157081
| 0.135948
| 0.106754
| 0
| 0.012885
| 0.291948
| 7,563
| 177
| 124
| 42.728814
| 0.844258
| 0.365331
| 0
| 0.2875
| 0
| 0.0125
| 0.101429
| 0.012218
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.025
| 0
| 0.3
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26da85c2640497939b911d5705595d7671906491
| 1,158
|
py
|
Python
|
tests/test_stats.py
|
janjaappape/pastas
|
521b27efd921e240df0717038f8389d62099b8ff
|
[
"MIT"
] | 252
|
2017-01-25T05:48:53.000Z
|
2022-03-31T17:46:37.000Z
|
tests/test_stats.py
|
janjaappape/pastas
|
521b27efd921e240df0717038f8389d62099b8ff
|
[
"MIT"
] | 279
|
2017-02-14T10:59:01.000Z
|
2022-03-31T09:17:37.000Z
|
tests/test_stats.py
|
janjaappape/pastas
|
521b27efd921e240df0717038f8389d62099b8ff
|
[
"MIT"
] | 57
|
2017-02-14T10:26:54.000Z
|
2022-03-11T14:04:48.000Z
|
import numpy as np
import pandas as pd
import pastas as ps
def acf_func(**kwargs):
index = pd.to_datetime(np.arange(0, 100, 1), unit="D", origin="2000")
data = np.sin(np.linspace(0, 10 * np.pi, 100))
r = pd.Series(data=data, index=index)
acf_true = np.cos(np.linspace(0.0, np.pi, 11))[1:]
acf = ps.stats.acf(r, lags=np.arange(1.0, 11.), min_obs=1, **kwargs).values
return acf, acf_true
def test_acf_rectangle():
acf, acf_true = acf_func(bin_method="rectangle")
assert abs((acf - acf_true)).max() < 0.05
def test_acf_gaussian():
acf, acf_true = acf_func(bin_method="gaussian")
assert abs((acf - acf_true)).max() < 0.05
def test_runs_test():
"""
http://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm
True Z-statistic = 2.69
Read NIST test data
"""
data = pd.read_csv("tests/data/nist.csv")
test, _ = ps.stats.runs_test(data)
assert test[0] - 2.69 < 0.02
def test_stoffer_toloi():
res = pd.Series(index=pd.date_range(start=0, periods=1000, freq="D"),
data=np.random.rand(1000))
_, pval = ps.stats.stoffer_toloi(res)
assert pval > 1e-10
| 27.571429
| 79
| 0.638169
| 195
| 1,158
| 3.651282
| 0.415385
| 0.058989
| 0.070225
| 0.036517
| 0.162921
| 0.162921
| 0.162921
| 0.089888
| 0.089888
| 0.089888
| 0
| 0.063441
| 0.196891
| 1,158
| 41
| 80
| 28.243902
| 0.702151
| 0.092401
| 0
| 0.08
| 0
| 0
| 0.040896
| 0
| 0
| 0
| 0
| 0
| 0.16
| 1
| 0.2
| false
| 0
| 0.12
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26db23f57ee2cf9c420d9e5404d2b60d7671991a
| 320
|
py
|
Python
|
venv/lib64/python3.8/site-packages/tld/registry.py
|
nrfkhira/dnx-engine
|
99a326d83058bcfe54a0f455672d90637fe753c6
|
[
"MIT"
] | null | null | null |
venv/lib64/python3.8/site-packages/tld/registry.py
|
nrfkhira/dnx-engine
|
99a326d83058bcfe54a0f455672d90637fe753c6
|
[
"MIT"
] | null | null | null |
venv/lib64/python3.8/site-packages/tld/registry.py
|
nrfkhira/dnx-engine
|
99a326d83058bcfe54a0f455672d90637fe753c6
|
[
"MIT"
] | null | null | null |
import warnings
from .base import Registry
__author__ = "Artur Barseghyan"
__copyright__ = "2013-2021 Artur Barseghyan"
__license__ = "MPL-1.1 OR GPL-2.0-only OR LGPL-2.1-or-later"
__all__ = ("Registry",)
warnings.warn(
"The `Registry` class is moved from `tld.registry` to `tld.base`.",
DeprecationWarning,
)
| 24.615385
| 71
| 0.721875
| 45
| 320
| 4.777778
| 0.666667
| 0.139535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 0.146875
| 320
| 12
| 72
| 26.666667
| 0.736264
| 0
| 0
| 0
| 0
| 0.1
| 0.49375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26ddb52d2be72d7d4dbeca2609c7ac5ce525625e
| 2,091
|
py
|
Python
|
SingleIRdetection/get_data.py
|
biqute/QTLab2122
|
4d53d4c660bb5931615d8652e698f6d689a4dead
|
[
"MIT"
] | 3
|
2021-11-30T18:41:11.000Z
|
2021-12-12T12:27:14.000Z
|
SingleIRdetection/get_data.py
|
biqute/QTLab2122
|
4d53d4c660bb5931615d8652e698f6d689a4dead
|
[
"MIT"
] | null | null | null |
SingleIRdetection/get_data.py
|
biqute/QTLab2122
|
4d53d4c660bb5931615d8652e698f6d689a4dead
|
[
"MIT"
] | null | null | null |
from instruments import VNA_handler, Fridge_handler
import os
import time
from datetime import date, datetime
today = date.today()
d1 = today.strftime("_%d_%m")
directory = "data"+d1
dir_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),directory)
if not os.path.isdir(dir_path):
try:
os.mkdir(directory)
except:
pass
VNA_lab=VNA_handler()
Fridge=Fridge_handler()
temps=[]
freqs1=[]
freqs2=[]
r = Fridge.execute("C3")
file_log = open(directory + "\\log.txt", "w")
def log_sensori():
file_log.write(f"\n{datetime.now():%H:%M:%S}")
for i in range(0, 36):
file_log.write(f"\n\tsens({i}): {Fridge.get_T(i)}")
with open('temperatures_gap.txt', encoding='utf-8') as file:
for line in file:
line = line.replace('\n', '')
temps.append(int(line))
with open('frequency_ranges_gap_1.txt', encoding='utf-8') as file:
for line in file:
line = line.replace('\n', '')
splitted = [float(x) for x in line.split('\t')]
freqs1.append(splitted)
with open('frequency_ranges_gap_2.txt', encoding='utf-8') as file:
for line in file:
line = line.replace('\n', '')
splitted = [float(x) for x in line.split('\t')]
freqs2.append(splitted)
for T in temps:
try:
print("Set temp: " + str(T))
print(f"{datetime.now():%H:%M:%S}\tsens_1:{Fridge.get_T(1)}\tsens_2:{Fridge.get_T(2)}\tsens_3:{Fridge.get_T(3)}\tG1: {Fridge.get_T(14)}\tG2: {Fridge.get_T(15)}")
log_sensori()
time.sleep(10)
Fridge.wait_for_T(T)
if T >= 200:
freqs = freqs2
else:
freqs = freqs1
for idx,f in enumerate(freqs):
file_name=str(T)+'mK_range'+str(idx+1)+'.txt'
print("Set freqs: " + str(f[0]) + " - "+ str(f[1]))
VNA_lab.set_sweep_freq(f[0],f[1])
VNA_lab.inst.write('AVERREST;')
time.sleep(40)
VNA_lab.save_sweep_data(directory + '\\' + file_name, 'polar')
except:
pass
log_sensori()
Fridge.set_T(0)
log_sensori()
file_log.close()
| 27.155844
| 169
| 0.595887
| 317
| 2,091
| 3.772871
| 0.328076
| 0.045151
| 0.050167
| 0.037625
| 0.265886
| 0.175585
| 0.175585
| 0.175585
| 0.175585
| 0.175585
| 0
| 0.026038
| 0.228599
| 2,091
| 76
| 170
| 27.513158
| 0.715437
| 0
| 0
| 0.274194
| 0
| 0.016129
| 0.182209
| 0.10043
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0.032258
| 0.064516
| 0
| 0.080645
| 0.048387
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26ddc48f78a12f6195556b4fffb431166aa3a248
| 1,356
|
py
|
Python
|
repos.py
|
gigamonkey/git-utils
|
ac26ccab836b276fb7061167b4b2dc2a6bd87e66
|
[
"BSD-3-Clause"
] | null | null | null |
repos.py
|
gigamonkey/git-utils
|
ac26ccab836b276fb7061167b4b2dc2a6bd87e66
|
[
"BSD-3-Clause"
] | 1
|
2021-05-04T19:45:16.000Z
|
2021-05-04T19:45:16.000Z
|
repos.py
|
gigamonkey/git-utils
|
ac26ccab836b276fb7061167b4b2dc2a6bd87e66
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
Get a json dump of all the repos belonging to a GitHub org or user.
"""
import json
import os
import sys
from functools import reduce
import requests
url = "https://api.github.com/graphql"
token = os.environ["GITHUB_TOKEN"]
headers = {"Authorization": "bearer {}".format(token)}
FIELDS = [
"name",
"description",
"sshUrl",
"isArchived",
"isFork",
"isPrivate",
"pushedAt",
]
def query(who, after):
args = f'first:100, after:"{after}"' if after else "first:100"
fields = " ".join(FIELDS)
return f'query {{ organization(login: "{who}") {{ repositories({args}) {{ edges {{ cursor node {{{fields} defaultBranchRef {{ name }} }} }} }} }} }}'
def maybe_get(top, *path):
return reduce(lambda d, k: None if d is None else d.get(k), path, top)
def node(edge):
n = edge["node"]
return {
**{f: n.get(f) for f in FIELDS},
"defaultBranch": maybe_get(n, "defaultBranchRef", "name"),
}
if __name__ == "__main__":
who = sys.argv[1]
edges = True
after = None
while edges:
r = requests.post(url, json={"query": query(who, after)}, headers=headers)
edges = json.loads(r.text)["data"]["organization"]["repositories"]["edges"]
for e in edges:
print(json.dumps(node(e)))
after = edges[-1]["cursor"]
| 22.229508
| 153
| 0.597345
| 175
| 1,356
| 4.565714
| 0.508571
| 0.020025
| 0.032541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008604
| 0.228614
| 1,356
| 60
| 154
| 22.6
| 0.755258
| 0.065634
| 0
| 0
| 0
| 0.025641
| 0.303415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.128205
| 0.025641
| 0.282051
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26de76c7a526dbcb257d0562f65b8f5f56302812
| 994
|
py
|
Python
|
tfLego/logger/BasicLogger.py
|
FrancescoSaverioZuppichini/tfLego
|
485653eff6d3b8c6677b600a4e0d3623c844749f
|
[
"MIT"
] | null | null | null |
tfLego/logger/BasicLogger.py
|
FrancescoSaverioZuppichini/tfLego
|
485653eff6d3b8c6677b600a4e0d3623c844749f
|
[
"MIT"
] | null | null | null |
tfLego/logger/BasicLogger.py
|
FrancescoSaverioZuppichini/tfLego
|
485653eff6d3b8c6677b600a4e0d3623c844749f
|
[
"MIT"
] | null | null | null |
class BasicLogger:
def __init__(self):
self.loss_history = []
self.accuracy_history = []
self.val_loss_history = []
self.val_accuracy_history = []
self.initialise()
def initialise(self):
self.total_loss = 0
self.total_accuracy = 0
self.current = 0
def log_batch(self, loss, outputs, accuracy, *args, **kwargs):
self.current += 1
self.total_loss += loss
self.total_accuracy += accuracy
def log_epoch(self, i, X, is_val=False, *args, **kwargs):
loss = self.total_loss / len(X)
accuracy = self.total_accuracy / len(X)
if(is_val):
self.val_loss_history.append(loss)
self.val_accuracy_history.append(accuracy)
else:
self.loss_history.append(loss)
self.accuracy_history.append(accuracy)
print('EPOCH: {0}. AVG Loss: {1:0.4f} Acc: {2:0.4f}'.format(i,loss, accuracy))
self.initialise()
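# Hedged usage sketch (hypothetical numbers): two batches, then one epoch.
logger = BasicLogger()
batches = [None, None]  # log_epoch only uses len(X), so placeholders suffice
logger.log_batch(loss=0.9, outputs=None, accuracy=0.5)
logger.log_batch(loss=0.7, outputs=None, accuracy=0.6)
logger.log_epoch(0, batches)  # prints: EPOCH: 0. AVG Loss: 0.8000 Acc: 0.5500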
| 23.116279
| 86
| 0.585513
| 122
| 994
| 4.557377
| 0.278689
| 0.097122
| 0.070144
| 0.064748
| 0.089928
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01567
| 0.293763
| 994
| 42
| 87
| 23.666667
| 0.776353
| 0
| 0
| 0.076923
| 0
| 0.038462
| 0.044355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0
| 0
| 0.192308
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
26e3cb56bf5c43ffe1ebc53ce33bf565445ae974
| 6,107
|
py
|
Python
|
FGMabiotic.py
|
tjscott214/long-term-conflict-with-1nFGM
|
1c701e83c71ebe21fbc1192ca3d523a000614819
|
[
"MIT"
] | 2
|
2019-09-13T13:46:33.000Z
|
2020-05-14T17:21:09.000Z
|
FGMabiotic.py
|
tjscott214/long-term-conflict-with-1nFGM
|
1c701e83c71ebe21fbc1192ca3d523a000614819
|
[
"MIT"
] | null | null | null |
FGMabiotic.py
|
tjscott214/long-term-conflict-with-1nFGM
|
1c701e83c71ebe21fbc1192ca3d523a000614819
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
### This program simulates Fisher's geometric model with abiotic change equal to fixations during conflict simulations (from FGMconflict.py) ###
### python3 FGMabiotic.py -help for input options ###
### Written by Trey J Scott 2018 ###
### python --version ###
### Python 3.5.2 :: Anaconda 4.2.0 (x86_64) ###
# Import programs
import random
import numpy as np
from scipy.spatial import distance as dist
from scipy.stats import norm
import scipy.stats as stats
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import scipy.special as spc
from itertools import groupby
### FUNCTIONS ###
# Function to generate random mutations with a specified average size
def generate_random_vector():
if distribution == 'uniform':
radial = np.random.uniform(0,uni)
if distribution == 'chi':
radial = np.random.chisquare(n)
if distribution == 'exponential':
radial = np.random.exponential(expo)
if distribution == 'normal':
radial = abs(np.random.normal(0, sd_1d))
vector = np.array(radial * (-1)**random.randint(1,2))
return radial, vector
# Gaussian fitness function
def fitness_function(distance,d):
return np.exp(-(d*(distance**Q)))
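# Hedged worked example (comment only, not in the original): with Q = 2 and
# d = 0.5, a mutation at distance 1.0 from the optimum has fitness
# exp(-0.5) ~= 0.6065, i.e. fitness decays as a Gaussian in distance.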
# Calculates probability of fixation for new mutations
def calculate_u(new_distance, old_distance, N = 'infinite', denominator = 0.5):
fitness_new = fitness_function(new_distance, denominator)
fitness_old = fitness_function(old_distance, denominator)
s_coefficient = (fitness_new/fitness_old) - 1
if N == 'infinite':
probability_of_fixation = (1 - np.exp(-2*s_coefficient))
elif N > 0:
probability_of_fixation = ((1 - np.exp(-2*s_coefficient))/(1 - np.exp(-4*s_coefficient*N)))
return probability_of_fixation, s_coefficient
# Function that simulates adaptation to a moving optimum with Fisher's geometric model
def abiotic_change(position, optimum, mut_list, samp):
counter = 0
distance_to_optimum = dist.euclidean(position, optimum)
moving_optimum = optimum
for d in range(0,len(mut_list)):
moving_optimum = moving_optimum + (mut_list[d])*((-1)**(random.randint(1,2)))
distance_to_optimum = dist.euclidean(position, moving_optimum)
mutation_size, vector = generate_random_vector()
future_position = position + vector
new_dist_to_optimum = dist.euclidean(future_position, moving_optimum)
u, s = calculate_u(new_dist_to_optimum, distance_to_optimum, N_1,d1)
if random.random() <= u:
mutation_fitness = vector
position = future_position
distance_to_optimum = dist.euclidean(position, moving_optimum)
if counter >= burn_in:
output.write(str(d) + ',' + str(samp) + ',' + str(position[0]) + ',' + str(s) + ',' + str(mutation_size) + ',' + str(fitness_function(distance_to_optimum,d1)) + ',Abiotic Change,Fixed\n')
else:
if counter >= burn_in:
output.write(str(d) + ',' + str(samp) + ',' + str(position[0]) + ',' + str(s) + ',' + str(mutation_size)+ ',' + str(fitness_function(distance_to_optimum,d1)) + ',Abiotic Change,Unfixed\n')
counter += 1
# Runs simulations multiple times
def run_simulations(position, num_samples):
df = pd.read_csv(shake_file)
optimum = np.array([(-(1/d1)*np.log(r))**(1/Q)])
master_mut_list = df.groupby('Population')['Mutation'].apply(list)[1]
index = 0
for sample in range(num_samples):
mut_list = master_mut_list[index:index + m]
abiotic_change(position, optimum, mut_list, sample)
index += m
output.close()
### SET ARGUMENTS
ap = argparse.ArgumentParser()
ap.add_argument('-x', '--samples', help = 'number of resamples', type = int)
ap.add_argument('-p', '--population_size1', help = 'population size for one population', type = int)
ap.add_argument('-pp', '--population_size2', help = 'population size for second population', type = int)
ap.add_argument('-m', '--mutations', help = 'mutation distribution for mutation vectors')
ap.add_argument('-q', '--Q', help = 'changes Q parameter in fitness function', type = float)
ap.add_argument('-z', '--attempts', help = 'number of generations per walk', type = int)
ap.add_argument('-c', '--init_fit', help = 'changes the distance optimal values by a factor of the input value', type = float)
ap.add_argument('-r', '--rate', help = 'mutation rate for population 1', type = int)
ap.add_argument('-b', '--burn_in', help = 'define burn in period for equilibrium', type = int)
ap.add_argument('-a', '--ave_mut', help = 'average mutation norm', type = float)
ap.add_argument('-d', '--selection', help = 'Adjust strength of selection', type = float)
ap.add_argument('-mut', '--changes', help = 'mutation file for moving optimum', type = str)
args = ap.parse_args()
# get arguments
if args.samples:
samples = args.samples
else:
samples = 500
# Define initial position and optima
position1 = np.zeros(1)
position = position1
position2 = position1
if args.init_fit:
r = 1-args.init_fit
else:
r = 1-0.2
# Set average norm size for mutations
if args.ave_mut:
average_mutation = args.ave_mut
else:
average_mutation = 0.1
# Get population sizes
# Population 1
if args.population_size1:
N_1 = 10**(args.population_size1)
else:
N_1 = 'infinite'
# Population 2
if args.population_size2:
N_2 = 10**(args.population_size2)
else:
N_2 = 'infinite'
# Get distributions
# Mutation distribution (default is normal)
if args.mutations:
distribution = args.mutations
else:
distribution = 'normal'
# Number of mutations
if args.attempts:
m = args.attempts
else:
m = 50000
# Get mutation rate
if args.rate:
rate = args.rate
else:
rate = 1
# Calculate normalization factor (used in mutation function)
sd_1d = average_mutation*((np.pi)**(1/2))/(2**(1/2))
uni = 2*average_mutation
expo = average_mutation
if args.burn_in:
burn_in = args.burn_in
else:
burn_in = 0
if args.Q:
Q = args.Q
q_string = 'Q_' + str(Q) + '_'
else:
Q = 2
q_string = ''
if args.selection:
d1 = args.selection
else:
d1 = 0.5
if args.changes:
    shake_file = args.changes[:-7] + 'mut.csv'
else:
    ap.error('a mutation file (-mut/--changes) is required to build the moving optimum')
# Open output file
output = open('abiotic_data.csv', 'w')
output.write('Iteration,Simulation,z,s,Mutation Size,Fitness,Population,Status\n')
### Run simulations
run_simulations(position, samples)
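A quick way to sanity-check the selection math above is to reproduce the fixation-probability formulas in isolation. The sketch below is standalone and illustrative (the function names are mine, not the script's); it assumes the same Gaussian fitness function with Q = 2 and selection strength d = 0.5, matching the script's defaults.

import numpy as np

def fitness(distance, d=0.5, Q=2):
    # Gaussian fitness declining with distance from the optimum
    return np.exp(-d * distance**Q)

def fixation_probability(new_dist, old_dist, N=None, d=0.5):
    # selection coefficient of the mutant relative to the resident
    s = fitness(new_dist, d) / fitness(old_dist, d) - 1
    if N is None:
        # infinite-population limit, as when the script gets N == 'infinite'
        return 1 - np.exp(-2 * s)
    return (1 - np.exp(-2 * s)) / (1 - np.exp(-4 * s * N))

# a mutation that moves the phenotype toward the optimum fixes more often
assert fixation_probability(0.5, 1.0) > fixation_probability(1.0, 0.5)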
| 34.117318 | 192 | 0.7159 | 890 | 6,107 | 4.767416 | 0.252809 | 0.014141 | 0.036766 | 0.016969 | 0.185246 | 0.13811 | 0.098515 | 0.098515 | 0.074476 | 0.055621 | 0 | 0.018951 | 0.144588 | 6,107 | 178 | 193 | 34.308989 | 0.793262 | 0.150811 | 0 | 0.119403 | 0 | 0 | 0.155902 | 0.012683 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037313 | false | 0 | 0.074627 | 0.007463 | 0.134328 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26e5678c410804c82e1a66c1a1c30cc2e8b118d5 | 873 | py | Python | epdif.py | cvasqxz/rpi-epd | b7921190dd84b1187364902f0e3059cba5a1973f | ["MIT"] | null | null | null | epdif.py | cvasqxz/rpi-epd | b7921190dd84b1187364902f0e3059cba5a1973f | ["MIT"] | null | null | null | epdif.py | cvasqxz/rpi-epd | b7921190dd84b1187364902f0e3059cba5a1973f | ["MIT"] | null | null | null |
import spidev
import RPi.GPIO as GPIO
import time
import yaml
with open("config.yml", 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
# Pin definition
RST_PIN = cfg['pinout']['RST_PIN']
DC_PIN = cfg['pinout']['DC_PIN']
CS_PIN = cfg['pinout']['CS_PIN']
BUSY_PIN = cfg['pinout']['BUSY_PIN']
# SPI device, bus = 0, device = 0
SPI = spidev.SpiDev(0, 0)
def epd_digital_write(pin, value):
GPIO.output(pin, value)
def epd_digital_read(pin):
    return GPIO.input(pin)
def epd_delay_ms(delaytime):
time.sleep(delaytime / 1000.0)
def spi_transfer(data):
SPI.writebytes(data)
def epd_init():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(RST_PIN, GPIO.OUT)
GPIO.setup(DC_PIN, GPIO.OUT)
GPIO.setup(CS_PIN, GPIO.OUT)
GPIO.setup(BUSY_PIN, GPIO.IN)
SPI.max_speed_hz = 2000000
SPI.mode = 0b00
    return 0
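For context, a typical caller initializes the interface once and then pulses the reset line before sending commands to the panel. This is a hypothetical sketch (the 200 ms timings are illustrative, not taken from this module) and assumes the module is importable as epdif on a Raspberry Pi with SPI enabled:

import epdif
from epdif import RST_PIN, epd_digital_write, epd_delay_ms

if epdif.epd_init() == 0:
    # pulse the reset line low, then high, to restart the panel
    epd_digital_write(RST_PIN, epdif.GPIO.LOW)
    epd_delay_ms(200)
    epd_digital_write(RST_PIN, epdif.GPIO.HIGH)
    epd_delay_ms(200)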
| 21.292683 | 46 | 0.683849 | 143 | 873 | 4.013986 | 0.412587 | 0.041812 | 0.083624 | 0.073171 | 0.099303 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027624 | 0.170676 | 873 | 40 | 47 | 21.825 | 0.765193 | 0.052692 | 0 | 0 | 0 | 0 | 0.075243 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0.137931 | 0.034483 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26e616bae86ed51b35013c799f67005f184552f2 | 2,469 | py | Python | main.py | amankumarjsr/BinanceDataScrapper | e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14 | ["Apache-2.0"] | null | null | null | main.py | amankumarjsr/BinanceDataScrapper | e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14 | ["Apache-2.0"] | null | null | null | main.py | amankumarjsr/BinanceDataScrapper | e3d56c4bd274a8e472de1fbe1c9603c9e94e1d14 | ["Apache-2.0"] | null | null | null |
from datetime import date
from unicodedata import name
from urllib import request
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
import datetime
import os
import zipfile
import glob
CoinName= input('Enter the coin name: ').upper()
duration= input('Enter the duration of data you want(1m,1h,2h): ').lower()
start_date= input ('Enter the date (dd-mm-yyyy): ')
end_date= input('Enter the end date (dd-mm-yyyy): ')
coin= requests.get('https://data.binance.vision/?prefix=data/spot/daily/klines/')
ucoin= bs(coin.content , 'html.parser')
start = datetime.datetime.strptime(start_date, "%d-%m-%Y")
end = datetime.datetime.strptime(end_date, "%d-%m-%Y")
date_generated = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days)]
date_list=[]
for date in date_generated:
x=date.strftime("%Y-%m-%d")
date_list.append(x)
file_name_list= []
cols=['opening time', 'opening price','highest price','lowest price','closing price','volume','closing time','turnover','number of transactions','active buy volume','NA','NAN']
for item in date_list:
try:
file_name=(f'{CoinName}-{duration}-{item}.zip')
download_mainurl= (f'https://data.binance.vision/data/spot/daily/klines/{CoinName}/{duration}/{CoinName}-{duration}-{item}.zip')
download= requests.get(download_mainurl, allow_redirects= True)
        print(f'Scraping data for {item}')
with open(file_name, 'wb') as f:
f.write(download.content)
with zipfile.ZipFile(file_name, 'r') as zip_ref:
zip_ref.extractall('C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data')
file_name_list.append(file_name+'.csv')
os.remove(file_name)
    except Exception:
        print(f'skipped {item}')
continue
data_dir = 'C:/Users/rocka/Desktop/Practice python/Binance data scrapper/data'
frames = [pd.read_csv(os.path.join(data_dir, file), names=cols)
          for file in os.listdir(data_dir) if file.endswith('.csv')]
# DataFrame.append was removed in pandas 2.0; build the master frame with concat
master_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame(columns=cols)
master_df.to_csv(f'{CoinName}-{duration}-master file.csv', index=False)
for file in os.listdir(data_dir):
    if file.endswith('.csv'):
        os.remove(os.path.join(data_dir, file))
print('Data scraped successfully!!!')
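Binance kline files store open and close times as millisecond epoch integers, so a typical follow-up step is converting them before analysis. A minimal sketch, assuming a master file produced by the script above (the coin and duration in the file name are illustrative):

import pandas as pd

df = pd.read_csv('BTCUSDT-1m-master file.csv')
# kline timestamps are milliseconds since the Unix epoch
df['opening time'] = pd.to_datetime(df['opening time'], unit='ms')
df['closing time'] = pd.to_datetime(df['closing time'], unit='ms')
print(df[['opening time', 'closing price']].head())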
| 34.291667 | 177 | 0.681247 | 349 | 2,469 | 4.73639 | 0.355301 | 0.033878 | 0.033273 | 0.054446 | 0.253479 | 0.215971 | 0.215971 | 0.215971 | 0.215971 | 0.215971 | 0 | 0.002438 | 0.169299 | 2,469 | 71 | 178 | 34.774648 | 0.80351 | 0 | 0 | 0.081633 | 0 | 0.020408 | 0.389237 | 0.089695 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.204082 | 0 | 0.204082 | 0.061224 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26e61f306df9220c42f34738c067514777287317 | 19,370 | py | Python | api/api.py | geoai-lab/GeoAnnotator | 6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3 | ["MIT"] | 1 | 2022-02-14T20:43:41.000Z | 2022-02-14T20:43:41.000Z | api/api.py | geoai-lab/GeoAnnotator | 6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3 | ["MIT"] | null | null | null | api/api.py | geoai-lab/GeoAnnotator | 6d5ee22888571f5ffefdb1d2f2455eaa9e5054f3 | ["MIT"] | null | null | null |
from flask import Flask, jsonify, request, session,redirect, url_for
import bcrypt
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from sqlalchemy.exc import IntegrityError
import os
from sqlalchemy.orm import load_only
from flask_bcrypt import Bcrypt
import urllib.parse
from itertools import groupby
from operator import attrgetter
import json
from flask_cors import CORS, cross_origin
from flask_session import Session
import redis
from werkzeug.utils import secure_filename
from datetime import datetime, timedelta, timezone
from models import db, tweet_database, User, LoginForm, Project, Submission, CompareSubmission
from dotenv import load_dotenv
from flask_login import LoginManager, login_required, login_user, current_user, logout_user
from sqlalchemy.orm import sessionmaker
import pandas as pd
import requests
from sqlalchemy.types import String, DateTime
import io
load_dotenv()
app = Flask(__name__, static_folder="../build", static_url_path='/')
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///HarveyTwitter.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config["SECRET_KEY"] = "6236413AA53537DE57D1F6931653B"
app.config['SQLALCHEMY_ECHO'] = True
app.config['SESSION_TYPE'] = "filesystem"  # TODO: move session storage to Redis; the filesystem backend is buggy here
app.config['SESSION_USE_SIGNER'] = True
# app.config['SESSION_COOKIE_NAME']
# app.config['SESSION_COOKIE_DOMAIN']
# app.config['SESSION_COOKIE_SECURE'] = True  # restrict the session cookie to HTTPS
bcrypt = Bcrypt(app)  # password hashing helper for the app
CORS(app, supports_credentials=True)
server_session = Session(app)
db.__init__(app)
with app.app_context():
db.create_all()
login_manager = LoginManager()
login_manager.init_app(app)
with app.app_context():
    # Before the app starts serving, the commands below run once.
    # Use the commented paths when running: gunicorn3 -w 3 GeoAnnotator.api:app
optionsData = jsonify(json.load(open('../../createProjectOptions.json'))) # 'GeoAnnotator/api/createProjectOptions.json'
configurationsData = json.load(open('../../configuration_data.json')) # 'GeoAnnotator/api/configuration_data.json'
@login_manager.user_loader
def load_user(user_id):
"""
Loads current user data
---
"""
return User.query.filter_by(id=user_id).first()
@app.route('/')
def index():
"""
Initialization of flask object
---
return:
returns an index.html object built by react's build file.
"""
return app.send_static_file("index.html")
@app.route("/@me", methods = ["GET"]) # might need to change
def get_current_user():
"""
User session data is retrieved through this callback.
---
GET:
description: Get session data
security:
- Session Token
responses:
200:
content:
User/json
"""
    if not session.get("project_name"):
return jsonify({"error": "did not select project"}), 401
if not current_user.is_authenticated:
return jsonify({"error": "Unauthorized"}), 401
return jsonify({
"id": str(current_user.id),
"email": current_user.email,
"username": current_user.username,
"projectName":session["project_name"]
}),200
@app.route("/login", methods=["POST"])
def login():
"""
Function that handles login of user
---
POST:
description: Add new user in the session
responses:
200:
description:
                Successfully logs the user into the session.
401:
description:
User entered wrong username/password that does not match any data on the database.
"""
    email = request.json["email"]
    password = request.json["password"]
    project_name = request.json["project"]
    session["project_name"] = project_name
    # authenticate with the credentials parsed from the JSON body
    user = User.query.filter_by(email=email).first()
    if user is None:
        return jsonify({"error": "Wrong Email/Password"}), 401
    if not bcrypt.check_password_hash(user.password, password):
        return jsonify({"error": "Wrong Email/Password"}), 401
login_user(user)
return jsonify({
"id": str(user.id),
"email": user.email
}),200
@app.route("/logout", methods=["POST"])
@login_required
def logout():
"""
Function that handles logout of user
---
POST:
        description: remove the current user from the session
responses:
200:
description:
                Successfully logs the user out of the session.
"""
logout_user() # flask logout library
return redirect("/", code=200) # successful log out will redirect to the homepage
@app.route("/createprojects", methods=["GET"])
@login_required
def create():
"""
Function that returns state geojson at the create projects page.
---
GET:
        data: optionsData
        responses:
            200:
                description:
                    Successfully returns the project-creation options.
"""
return optionsData, 200
@app.route("/project+descriptions", methods=["GET"])
def project_descriptions():
"""
Function that returns data from the project database that are not deleted by the user.
---
GET:
responses:
200:
data:
{"project-name": <Project.project_name>, "geo_json":<Project.geo_json>}
"""
projects = Project.query.filter_by(isDeleted = 0).all()
print(projects)
list_of_projects = []
for project in projects:
list_of_projects.append({"project-name": project.project_name, "geo_json": project.geo_json})
return jsonify(list_of_projects), 200
@app.route("/createproject-submit", methods=["POST"])
@login_required
def createproject_submission():
"""
Creation of a new project
---
POST:
description: adds a new project item onto the Projects table of the database
responses:
200:
description:
new project added
409:
description:
* if the project name given already exists within the database
"""
projectName = request.json["Project Name"]
mapLayers = request.json["map-layers"]
project_exists = Project.query.filter_by(project_name = projectName).first() is not None
if(project_exists):
return jsonify({"error": "project already exists"}), 409
session['project_name'] = projectName
new_project = Project(project_name = projectName, geo_json = mapLayers, isDeleted = 0 )
db.session.add(new_project)
db.session.commit()
return jsonify({"success": "project created"}), 200
@app.route("/register", methods=["POST"])
def register_user():
"""
By registering a new user in the database, you may add new user data to the database.
---
POST:
description: Add new user in the database
responses:
200:
description:
new username and password are added onto the database.
409:
description:
* if the username used to register already exists in the database
* if the password entered and the password retyped do not match
"""
email = request.json["email"]
password = request.json["password"]
retype = request.json["retypepassword"]
username = request.json["username"]
user_exists = User.query.filter_by(email=email).first() is not None
if user_exists:
return jsonify({"error": "User already exists"}), 409
elif password != retype:
return jsonify({"error":"password do not match"}), 409
hashed_password = bcrypt.generate_password_hash(password)
new_user = User(email=email, username=username ,password=hashed_password)
db.session.add(new_user)
db.session.commit()
return jsonify({
"id": str(new_user.id),
"email": new_user.email
}), 200
@app.route('/comparison', methods =['GET'])
@login_required
def compare_data():
"""
Obtain information for the comparative page.
When the user who is the resolver requests data to compare,
this method must deliver data that the resolver has not resolved previously.
That would be the value of the notYet_submitted variable.
---
GET:
responses:
200:
data:
list of data that the resolver can compare and resolve
format:
{
text:<tweet_database.text>,
submission_id:<Submission.submission_id>,
annotation:<Submission.annotation>,
username:<Submission.username>,
projectGeojson:<Project.geo_json>,
tweetid:<tweet_database.id>,
userid:<Submission.userid>
}
where current_user=Submission.id values are not in current_user=CompareSubmission.id values
"""
project_name = session["project_name"]
to_send_data = []
alreadySubmitted_ids = [idvid for subid in CompareSubmission.query.filter_by(userid = current_user.id).options(load_only(CompareSubmission.submissionid_1, CompareSubmission.submissionid_2)).all() for idvid in [subid.submissionid_1,subid.submissionid_2]]
# need to change the tweet id here later on
# grab submissions you haven't looked at yet
notYet_submitted = Submission.query.filter_by(project_name= project_name).filter(Submission.submission_id.notin_(alreadySubmitted_ids)) \
.join(tweet_database, Submission.tweetid == tweet_database.id) \
.join(Project, Submission.project_name == project_name) \
.filter_by(project_name = project_name).add_columns(tweet_database.text, Submission.submission_id, Submission.annotation,Submission.username, Project.geo_json, tweet_database.id, Submission.userid)
df = pd.DataFrame(notYet_submitted, columns = ["SubmissionObject","text","submission_id","annotation","username","geo_json","id","userid"]).astype(str)
to_iterate =None # grab the first group of unique IDS
# an alternate to implementing the for loop below is by doing df.grouby('id',sort=False).first()
for name, group in df.groupby('id',sort=False):
to_iterate = group
break
for index,filtered_submission in to_iterate.iterrows(): # each group is a tweet set
to_send_data.append({"text": filtered_submission.text,
"submission_id": str(filtered_submission.submission_id),
"annotation": json.loads(filtered_submission.annotation)["annotation"],
"username":filtered_submission.username,
"projectGeojson": json.loads(filtered_submission.geo_json),
"tweetid":str(filtered_submission.id),
"userid":str(filtered_submission.userid)})
return jsonify(to_send_data), 200
@app.route('/api-grab/<tweetid>', methods=['GET'])
@login_required
def app_data(tweetid):
"""
    Obtain information for the annotation page.
    When the user who is the annotator requests data to annotate,
    this method must deliver data that the annotator has not annotated previously.
---
@param:
tweetid: Grab the data in the database where Tweet_database.id == tweetid if this parameter exists.
---
GET:
responses:
200:
data:
data that the annotator can annotate
format:
{
id:<tweet_database.id>,
content:<tweet_data.text>,
neuro_result: Model rest api data,
project_description:{label:<Project.project_name>,geo_json:<Project.geo_json>}
}
409:
description:
                * If the model prediction link did not yield any results (e.g. the response from the UB servers is not 200)
* If there is no project in session
"""
submissions_exists = Submission.query.filter_by(userid = current_user.id) is not None
if(submissions_exists): # if User already annotated data before, find data that the user has not annotated before and return that
tweet_ids = [ids.tweetid for ids in Submission.query.filter_by(userid = current_user.id, project_name = session["project_name"]).options(load_only(Submission.tweetid)).all()]
tweets = tweet_database.query.filter_by(projectName = session["project_name"]).filter(tweet_database.id.notin_(tweet_ids)).first()
else: # It's the user's first time annotating, therefore pick the first tweet in the database
tweets = tweet_database.query.filter_by(projectName = session["project_name"]).first()
if(tweetid != 'any'):
tweets = tweet_database.query.filter_by(id = str(tweetid)).first()
content = tweets.text
project_name = session["project_name"]
if project_name: # if the session has a project, then query the project GeoJson
project_json = Project.query.filter_by(project_name = project_name).first()
else: # Since users must first register a project before signing in, this is extremely unlikely to occur.
return jsonify({"error": "No Project on session"}), 409
urlEncoded = urllib.parse.quote(tweets.text) #encode the text content of a tweet so that it may be converted into a url format
toRequestModel = "{}={}".format(configurationsData['modelLink'],urlEncoded) # Using the model url link from configuration.json, get a request using the URLencoded method.
response = requests.get(toRequestModel)
if response.status_code != 200:
# If the model url link does not return a response of 200, send a 409 since we do not have model prediction data.
# Cases of where the code fires here is when the servers at the University at Buffalo are down.
return jsonify({"error": "Rest Api Model unable to grab data"}), 409
neuro_results_json = response.json()['annotation'] # data from the response
toSend = {'id': str(tweets.id),
'content': content,
'neuro_result':neuro_results_json,
'project_description': {"label":project_json.project_name, "geo_json": json.loads(project_json.geo_json)}}
return jsonify(toSend), 200
@app.route('/uploadfile', methods=['POST'])
@login_required
def uploading_textFile():
"""
This method is related to the create project part,
since if a user submits twitter data, it must first
go via this method to be preprocessed and stored in the database.
---
POST:
responses:
200:
description:
The data from tweets has been successfully preprocessed and should now be available in the database.
401:
description:
* Preprocessing failed due to data format.
"""
try:
projectName = request.form['projectName'] #The name of the project on which the user wishes to upload new tweets
project_exists = Project.query.filter_by(project_name = projectName).first() is not None
if project_exists: # if the project name already exists, then tell the user
return jsonify({"error":"Project Name Already Exists"}), 401
file = request.files['file']
df = pd.read_json(file.stream.read().decode("UTF8"), lines=True, encoding="utf8")[['text','id','created_at']]
df['projectName'] = projectName
dtype={"text": String(),"id":String(), "created_at":DateTime(), "projectName":String()}
rowsAffected = df.to_sql(name = 'TwitterDataSet',con = db.engine, index = False, if_exists='append',dtype=dtype) # upload onto the database
    except Exception as e:  # if anything above fails, log the error type, source file, and line number
print(
type(e).__name__, # TypeError
__file__, # /tmp/example.py
e.__traceback__.tb_lineno # 2
)
return jsonify({"error": "File Upload Fail"}), 401
return jsonify({"success": "Upload Complete"}), 200
@app.route('/deleteproject', methods=['POST'])
@login_required
def deleting_projects():
"""
    Sets the isDeleted flag to 1 on each selected Project row.
    Rows flagged with 1 are no longer shown to the user, since they
    requested that those projects be removed.
---
POST:
responses:
200:
description:
                Project data has successfully been deleted/hidden from the user's view
"""
projects = request.json['projects'] # contains a list of projects that the user desires to get deleted
queried_projects = Project.query.filter(Project.project_name.in_(projects))
for query in queried_projects: # we replace the value with 1
query.isDeleted = 1
db.session.commit()
return jsonify({"success": "Upload Complete"}), 200
@app.route('/api/submit', methods=['POST'])
@login_required
def submission():
"""
This method handles the event when a user submits an annotation.
---
POST:
responses:
200:
description:
adds a new row value in the Submission table on the HarveyTwitter.db
"""
json_object = request.json
tweetid =json_object["tweetid"]
project = session["project_name"]
highlight = json_object["highlight"]
spatial_footprint = json_object["spatial-footprint"]
timestamp = json_object["timestamp"]
category = json_object["category"]
annotation = json.dumps({"annotation": {
"highlight": highlight ,
"spatial-footprint": spatial_footprint,
"category": category
}})
new_submission = Submission(userid = current_user.id, tweetid = tweetid, project_name = project,
timestamp = timestamp, annotation = annotation, username = current_user.username)
db.session.add(new_submission)
db.session.commit()
return jsonify("Success"), 200
@app.route('/compare/submit', methods=['POST'])
@login_required
def compare_submission():
"""
When a resolver submits a resolution from the compare submissions page, this method handles the event.
---
POST:
responses:
200:
description:
adds a new row value in the compare-submission table on the HarveyTwitter.db
"""
json_object = request.json
userId1 = json_object['submission-userid-1']
userId2 = json_object['submission-userid-2']
submissionid1 = json_object['submissionid-1']
submissionid2 = json_object['submissionid-2']
choosenId = json_object['choosing-correct-submission']
CurrentUserId = current_user.id
new_submission = CompareSubmission(userid = CurrentUserId,
submission_userid_1 = userId1,
submission_userid_2 = userId2,
submissionid_1 = submissionid1,
submissionid_2 = submissionid2,
choosing_correct_submission = choosenId)
db.session.add(new_submission)
db.session.commit()
return jsonify("Success"), 200
if __name__ == '__main__':
app.run(host='0.0.0.0')
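A minimal client-side sketch of the register/login flow against this API, assuming the server is running locally on Flask's default port (the credentials and project name are made up):

import requests

BASE = 'http://localhost:5000'
session = requests.Session()  # keeps the session cookie across calls

session.post(f'{BASE}/register', json={
    'email': 'annotator@example.com', 'username': 'annotator',
    'password': 'secret', 'retypepassword': 'secret'})
session.post(f'{BASE}/login', json={
    'email': 'annotator@example.com', 'password': 'secret',
    'project': 'demo-project'})
print(session.get(f'{BASE}/@me').json())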
| 40.10352 | 257 | 0.663087 | 2,345 | 19,370 | 5.348827 | 0.202985 | 0.034202 | 0.01451 | 0.011481 | 0.188153 | 0.147333 | 0.124292 | 0.097106 | 0.072471 | 0.06697 | 0 | 0.013129 | 0.237119 | 19,370 | 483 | 258 | 40.10352 | 0.835691 | 0.356995 | 0 | 0.167331 | 0 | 0 | 0.138887 | 0.020331 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059761 | false | 0.039841 | 0.099602 | 0 | 0.262948 | 0.015936 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26ed69ff9590d721e4368e521015afe41d5f9df5 | 2,536 | py | Python | samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py | vgvoleg/gst-video-analytics | 7e4006551f38334bc59b2ef3d205273d07d40ce4 | ["MIT"] | null | null | null | samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py | vgvoleg/gst-video-analytics | 7e4006551f38334bc59b2ef3d205273d07d40ce4 | ["MIT"] | null | null | null | samples/people_on_stairs/classify_overspeeding/classify_overspeeding.py | vgvoleg/gst-video-analytics | 7e4006551f38334bc59b2ef3d205273d07d40ce4 | ["MIT"] | 1 | 2020-05-14T15:30:03.000Z | 2020-05-14T15:30:03.000Z |
from os.path import join, realpath
from os import listdir, environ
import shlex
import subprocess
import pickle
import json
MODEL_PATH = ("/root/Projects/models/intel/person-detection-retail-0013/FP32"
"/person-detection-retail-0013.xml")
DATASET_PATH = "/root/Projects/train/"
ALPHA = 0.1
ALPHA_HW = 0.01
RES_PATH = ("/root/Projects/gst-video-analytics-0.7.0/samples/"
"people_on_stairs/classify_overspeeding/res.json")
SVM_PATH = '/root/Projects/models/overspeed_classify/SVM_Classifier_without_interval.sav'
CLASSIFY_PIPELINE_TEMPLATE = """gst-launch-1.0 filesrc \
location={} \
! decodebin ! videoconvert ! video/x-raw,format=BGRx ! gvadetect \
model={} ! queue \
! gvaspeedometer alpha={} alpha-hw={} interval=0.03333333 \
! gvapython module={} class=OverspeedClassifier arg=[\\"{}\\"] \
! fakesink sync=false"""
class OverspeedClassifier():
def __init__(self, out_path=RES_PATH):
self.velocities = []
self._result_path = out_path
self.frames_processed = 0
def process_frame(self, frame):
for region in frame.regions():
for tensor in region.tensors():
if tensor.has_field("velocity"):
self.velocities.append(tensor['velocity'])
self.__updateJSON()
self.frames_processed += 1
def __updateJSON(self):
with open(self._result_path, "w") as write_file:
json.dump(self.velocities,
write_file, indent=4, sort_keys=True)
def __dump_data(self):
with open(self._result_path, "a") as write_file:
write_file.write("{} \n".format(self.velocities))
if __name__ == "__main__":
svclassifier = pickle.load(open(SVM_PATH, 'rb'))
for file_name in listdir(DATASET_PATH):
if file_name.endswith(".mp4"):
video_path = join(DATASET_PATH, file_name)
pipeline_str = CLASSIFY_PIPELINE_TEMPLATE.format(
video_path,
MODEL_PATH,
ALPHA,
ALPHA_HW,
realpath(__file__),
join(DATASET_PATH, file_name.replace('.mp4', '.json'))
)
print(pipeline_str)
proc = subprocess.run(
shlex.split(pipeline_str), env=environ.copy())
if proc.returncode != 0:
print("Error while running pipeline")
exit(-1)
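Note that the loaded SVM is never applied in the loop above. A plausible follow-up step would classify each clip from the velocities dumped to res.json; the two-value feature layout below is an assumption for illustration, not necessarily the layout the saved classifier was trained on (SVM_PATH and RES_PATH as defined in this script):

import json
import pickle
import numpy as np

svclassifier = pickle.load(open(SVM_PATH, 'rb'))
velocities = json.load(open(RES_PATH))
if velocities:
    # hypothetical features: mean and peak speed over the clip
    features = np.array([[np.mean(velocities), np.max(velocities)]])
    print(svclassifier.predict(features))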
| 32.101266 | 89 | 0.615536 | 295 | 2,536 | 5.047458 | 0.444068 | 0.021491 | 0.042982 | 0.02955 | 0.065816 | 0.034923 | 0 | 0 | 0 | 0 | 0 | 0.019459 | 0.270505 | 2,536 | 78 | 90 | 32.512821 | 0.785405 | 0 | 0 | 0 | 0 | 0 | 0.27011 | 0.132098 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063492 | false | 0 | 0.15873 | 0 | 0.238095 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26f1b913f1ee12f1e92139c51f5d8c9e44276d06 | 4,335 | py | Python | pymockserver/client.py | MXWest/py-mockserver | cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc | ["MIT"] | 3 | 2018-06-14T19:44:05.000Z | 2020-12-14T04:33:21.000Z | pymockserver/client.py | MXWest/py-mockserver | cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc | ["MIT"] | 4 | 2020-02-01T16:20:18.000Z | 2021-03-23T14:43:54.000Z | pymockserver/client.py | MXWest/py-mockserver | cd0783aac2e5c1b8a021c29a4c70ef5414b7f7cc | ["MIT"] | 2 | 2020-02-01T16:25:50.000Z | 2021-03-23T13:06:25.000Z |
import requests
import json
class Client(object):
"""Client to connect to the mockserver"""
def __init__(self, host='localhost', port=1080):
"""
Class initialization
:param str host: host of the mockserver
:param int port: port of the mockserver
"""
self.host = host
self.port = port
self.headers = {
'Content-Type': 'application/json'
}
def _get_url(self):
"""Get full URL of the mockserver
:return str url of the mockserver
"""
return 'http://{}:{}'.format(self.host, self.port)
    def expectation(self, request, response, times=None):
        """Create an expectation on the mockserver

        :param request: httpRequest object
        :param response: httpResponse object
        :param times: Times object (optional)
        """
data = {
'httpRequest': request.dict(),
'httpResponse': response.dict(),
'times': {
'remainingTimes': 1,
'unlimited': True
}
}
if times:
data['times'] = vars(times)
req = requests.put('{}/expectation'.format(self._get_url()),
json.dumps(data))
return req
def forward(self, request, forward, times=None):
"""create forwarding on mockserver
:param times: times object (optional)
:param request httpRequest object
:param forward httpResponse object
"""
data = {
'httpRequest': request.dict(),
'httpForward': forward.dict(),
'times': {
'remainingTimes': 1,
'unlimited': True
}
}
if times:
data['times'] = vars(times)
req = requests.put('{}/expectation'.format(self._get_url()),
json.dumps(data))
return req
def active_expectations(self):
"""Get list of active expectations
:return Array active expectations
"""
req = requests.put(
'{}/retrieve'.format(self._get_url()), params={'type': 'active_expectations'})
if req.status_code == 200:
try:
return req.json()
except ValueError:
return []
return []
def retrieve_requests(self, request=None):
"""Get all recorded requests
:return Array recorded requests
"""
data = {}
if request:
data = request.dict()
req = requests.put('{}/retrieve'.format(self._get_url()),
params={'type': 'requests'}, data=json.dumps(data))
if req.status_code == 200:
try:
return req.json()
except ValueError:
return []
return []
def verify(self, request, times=None):
"""Verify if a request has been received in specific number of times
:param Request request: Request object to verify
:param Times times: Times object for count. Default=None, count=1
:return Boolean true if verified, false if not
"""
data = {
'httpRequest': request.dict()
}
if times:
data['times'] = vars(times)
else:
data['times'] = {
'count': 1,
'exact': True
}
req = requests.put('{}/verify'.format(self._get_url()),
headers=self.headers,
data=json.dumps(data))
resp = {
'status': 'OK',
'reason': req.content.decode('utf-8'),
'found': None
}
if req.status_code == 202:
resp['reason'] = None
resp['found'] = True
elif req.status_code == 406:
resp['found'] = False
else:
resp['status'] = 'ERROR'
return resp
def reset(self):
"""delete all active expectations and recorded requests"""
requests.put('{}/reset'.format(self._get_url()))
def clear(self, request):
"""Delete active expectation and recorded request
:param Request request: Request to clear
"""
requests.put('{}/clear'.format(self._get_url()), data=request.json())
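A typical round trip with this client, assuming companion Request and Response helper classes that provide the dict() methods used above (their import path is not shown in this file):

client = Client(host='localhost', port=1080)

request = Request(method='GET', path='/hello')      # assumed helper class
response = Response(status_code=200, body='world')  # assumed helper class
client.expectation(request, response)

result = client.verify(request)
print(result['status'], result['found'])

client.reset()  # clear expectations and recorded requests between tests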
| 29.290541 | 90 | 0.514418 | 422 | 4,335 | 5.220379 | 0.251185 | 0.028597 | 0.041307 | 0.05084 | 0.322742 | 0.270086 | 0.218793 | 0.218793 | 0.218793 | 0.218793 | 0 | 0.008029 | 0.367935 | 4,335 | 147 | 91 | 29.489796 | 0.795985 | 0.217762 | 0 | 0.402174 | 0 | 0 | 0.111853 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097826 | false | 0 | 0.032609 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26f481dfc45ad24d352172f8f79006991163fc28 | 5,277 | py | Python | workflow/executors/validation_tasks.py | mettadatalabs1/oncoscape-datapipeline | 9c3209ba88831c3f1c598182c719ce45b4724fff | ["Apache-2.0"] | null | null | null | workflow/executors/validation_tasks.py | mettadatalabs1/oncoscape-datapipeline | 9c3209ba88831c3f1c598182c719ce45b4724fff | ["Apache-2.0"] | null | null | null | workflow/executors/validation_tasks.py | mettadatalabs1/oncoscape-datapipeline | 9c3209ba88831c3f1c598182c719ce45b4724fff | ["Apache-2.0"] | null | null | null |
from validators.validation_configurator import ValidationConfigurator
from pipeline.models import InputFile
class HugoValidator(object):
# hugo_genes_map (Dictionary): a dictionary that has the hugo genes and
# respective aliases. Each entry is db:{gene: Set(aliases),}.
# This is created the first time the class is loaded and is static.
# We use set because alias look up will be O(1) and the overall complexity
# for each row is O(n), yielding a total complexity of O(n^2)
# for an input file. The assumption is that different projects might have
# different gene maps and we want to create the map per project once.
hugo_genes_map = {}
@classmethod
    def populate_hugo_genes_map(cls, mongo_connector, collection):
"""
Populates the hugo_genes_map for a given database.
Args:
mongo_connector (db.mongo_connector.MongoConnector): The mongo
connection holding the db name and the connection to the db
collection: the name of the collection to query
"""
db = mongo_connector.db.name
if db not in HugoValidator.hugo_genes_map:
gene_maps_from_db = mongo_connector.find(query=None,
collection=collection)
gene_maps_local = {}
for gene_map in gene_maps_from_db:
gene_maps_local[gene_map["hugo"]] =\
frozenset(gene_map["symbols"])
HugoValidator.hugo_genes_map[db] = gene_maps_local
print (len(HugoValidator.hugo_genes_map[db]))
@classmethod
def validate_hugo(cls, db, gene_symbol):
"""
Validates if a given gene symbol is a gene name, an alias, or is an
invalid entry.
Args:
db (string): The database in which we want to check
            gene_symbol (string): The gene symbol to check
Returns:
(string, string): A 2 tuple with gene_symbol that was sent and the
parent if it is an alias. If a match, the tuple is (None, gene_symbol).
If invalid, the tuple is (None, None)
"""
gene_valid_status = (None, None)
db_genes_map = HugoValidator.hugo_genes_map[db]
if gene_symbol in db_genes_map:
gene_valid_status = (None, gene_symbol)
else:
for gene in db_genes_map:
if gene_symbol in db_genes_map[gene]:
gene_valid_status = (gene_symbol, gene)
break
return gene_valid_status
def validate_file(input_file_obj):
if not input_file_obj.directory and not input_file_obj.s3_path:
return None
if not input_file_obj.file:
return None
input_file = (input_file_obj.directory
if input_file_obj.directory else input_file_obj.s3_path)
input_file += "/" + input_file_obj.file
# validation_configurator (ValidationConfigurator)
validation_configurator = ValidationConfigurator(input_file_obj.datatype)
with open(input_file, "r") as file_to_validate:
header = file_to_validate.readline().strip("\n")
        # header row: gene sample1 sample2 sample3
# valid_samples(list(dictionary): A list of dictionary to store all the
# valid rows for a given sample. The dictionary has sample as the key
# and a dictionary with 2 lists, one for valid values and other for
# the genes. The values and genes are 1-1 meaning value[0] corresponds
# to the value of the first gene for the sample. If we have an invalid
# value, then we will not store the gene for the sample.
# todo: add documentation link to the datastructure.
valid_samples = [{"sample": sample, "values":[],"genes":[],}
for sample in header.split("\t")[1:]]
print (valid_samples[-1])
for line in file_to_validate:
line_tokens = line.strip("\n").split("\t")
gene = line_tokens[0]
hugo_validation = HugoValidator.validate_hugo("tcga", gene)
gene_valid = False
if hugo_validation[1]:
# the gene is alias if first token is not None else valid
gene_valid = "alias" if hugo_validation[0] else "valid"
enumerated_tokens = enumerate(line_tokens[1:])
# parse rest of the line only for valid genes
for idx,line_token in enumerated_tokens:
# the element is valid
is_valid, value = validation_configurator.validate(
line_token)
if is_valid:
# the index refers to the sample location in valid_samples.
# append the gene and the value at the end
valid_samples[idx]["genes"].append(gene)
valid_samples[idx]["values"].append(value)
                        # TODO: this has to change - if there is one invalid entry,
                        # the whole sample should change.
                        # TODO: handle NULL values (default is NA); put this in job_config.
                        # TODO: use sklearn.decomposition.PCA for PCA.
input_file_obj.valid_samples = valid_samples
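Putting the two classes together: the gene map must be populated once per database before validate_hugo is called. A minimal sketch, assuming an already-constructed MongoConnector and a 'genemaps' collection (the collection name is illustrative):

HugoValidator.populate_hugo_genes_map(mongo_connector, 'genemaps')
alias, parent = HugoValidator.validate_hugo('tcga', 'TP53')
if parent is None:
    print('invalid symbol')
elif alias is None:
    print('exact match:', parent)
else:
    print(alias, 'is an alias of', parent)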
| 49.783019 | 83 | 0.617965 | 688 | 5,277 | 4.5625 | 0.273256 | 0.04014 | 0.038229 | 0.031857 | 0.079325 | 0.01784 | 0.01784 | 0.01784 | 0 | 0 | 0 | 0.00499 | 0.316468 | 5,277 | 105 | 84 | 50.257143 | 0.865262 | 0.379761 | 0 | 0.070175 | 0 | 0 | 0.020421 | 0 | 0 | 0 | 0 | 0.009524 | 0 | 1 | 0.052632 | false | 0 | 0.035088 | 0 | 0.175439 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26f602e46a5eecf3c443505b6bc8ba0c321a760e | 1,290 | py | Python | pytglib/api/types/input_message_video_note.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | ["MIT"] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/input_message_video_note.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | ["MIT"] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/input_message_video_note.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | ["MIT"] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class InputMessageVideoNote(Object):
"""
A video note message
Attributes:
ID (:obj:`str`): ``InputMessageVideoNote``
Args:
video_note (:class:`telegram.api.types.InputFile`):
Video note to be sent
thumbnail (:class:`telegram.api.types.inputThumbnail`):
Video thumbnail, if available
duration (:obj:`int`):
Duration of the video, in seconds
length (:obj:`int`):
Video width and height; must be positive and not greater than 640
Returns:
InputMessageContent
Raises:
:class:`telegram.Error`
"""
ID = "inputMessageVideoNote"
def __init__(self, video_note, thumbnail, duration, length, **kwargs):
self.video_note = video_note # InputFile
self.thumbnail = thumbnail # InputThumbnail
self.duration = duration # int
self.length = length # int
@staticmethod
def read(q: dict, *args) -> "InputMessageVideoNote":
video_note = Object.read(q.get('video_note'))
thumbnail = Object.read(q.get('thumbnail'))
duration = q.get('duration')
length = q.get('length')
return InputMessageVideoNote(video_note, thumbnail, duration, length)
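Constructing the type directly, with None standing in for real InputFile and InputThumbnail objects (purely illustrative):

note = InputMessageVideoNote(video_note=None, thumbnail=None,
                             duration=30, length=360)
print(note.ID)        # inputMessageVideoNote
print(note.duration)  # 30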
| 28.666667 | 77 | 0.615504 | 134 | 1,290 | 5.843284 | 0.41791 | 0.103448 | 0.068966 | 0.05364 | 0.081737 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003215 | 0.276744 | 1,290 | 44 | 78 | 29.318182 | 0.836013 | 0.424806 | 0 | 0 | 0 | 0 | 0.113464 | 0.06354 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26f984eeef056e7ffe65f198d0e3689278e5fc57 | 2,098 | py | Python | aiida_logger/calculations/test_calculations.py | SINTEF/aiida-logger | d97aced2ec8967cb359f488d2218cc3b47c92f6b | ["MIT"] | null | null | null | aiida_logger/calculations/test_calculations.py | SINTEF/aiida-logger | d97aced2ec8967cb359f488d2218cc3b47c92f6b | ["MIT"] | null | null | null | aiida_logger/calculations/test_calculations.py | SINTEF/aiida-logger | d97aced2ec8967cb359f488d2218cc3b47c92f6b | ["MIT"] | null | null | null |
"""
Tests for calculations.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
def test_process(logger_code):
"""
Test running a calculation.
Also checks its outputs.
"""
from aiida.plugins import DataFactory, CalculationFactory
from aiida.engine import run
from aiida.common.extendeddicts import AttributeDict
from aiida_logger.tests import TEST_DIR # pylint: disable=wrong-import-position
# Prepare input parameters
parameters = AttributeDict()
parameters.comment_string = '#'
parameters.labels = True
# Define input files to use
SinglefileData = DataFactory('singlefile')
datafile = SinglefileData(
file=os.path.join(TEST_DIR, 'input_files', 'datafile'))
# Set up calculation
inputs = {
'code': logger_code,
'parameters': DataFactory('dict')(dict=parameters),
'datafiles': {
'datafile': datafile
},
'metadata': {
'options': {
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
'parser_name': 'logger',
'withmpi': False,
'output_filename': 'logger.out'
},
'description': 'Test job submission with the aiida_logger plugin'
},
}
result = run(CalculationFactory('logger'), **inputs)
assert 'data' in result
assert 'metadata' in result
data = result['data']
metadata = result['metadata']
metadata = metadata.get_dict()
assert 'labels' in metadata
assert 'comments' in metadata
assert metadata['labels'] == ['time', 'param1', 'param2', 'param3']
assert metadata['comments'][0] == '# This is an example file'
test_array = np.array([[1.0e+00, 3.0e+00, 4.0e+00, 5.0e+00],
[2.0e+00, 4.0e+00, 5.7e+00, -1.0e-01],
[3.0e+00, 1.0e-03, 1.0e+03, 8.0e-01]])
np.testing.assert_allclose(data.get_array('content'), test_array)
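From the assertions one can reconstruct the rough shape of the input file this test expects: a comment header, a label row, and three whitespace-separated data rows. The sketch below is inferred from the test's expectations, not copied from the repository's input_files/datafile.

# plausible contents of 'input_files/datafile' (reconstructed, illustrative)
example_datafile = """\
# This is an example file
time param1 param2 param3
1.0 3.0 4.0 5.0
2.0 4.0 5.7 -0.1
3.0 0.001 1000.0 0.8
"""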
| 29.138889 | 84 | 0.594376 | 232 | 2,098 | 5.24569 | 0.482759 | 0.023007 | 0.026294 | 0.011504 | 0.016434 | 0.016434 | 0 | 0 | 0 | 0 | 0 | 0.035976 | 0.284557 | 2,098 | 71 | 85 | 29.549296 | 0.774817 | 0.088656 | 0 | 0 | 0 | 0 | 0.183511 | 0.012766 | 0 | 0 | 0 | 0 | 0.145833 | 1 | 0.020833 | false | 0 | 0.166667 | 0 | 0.1875 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
26fdabbca3431190e788d02f52c14a320298b8ac | 9,425 | py | Python | discopy/components/sense/explicit/bert_conn_sense.py | rknaebel/discopy | 5507d656987af2df9e595434a82c0a12bbc713e4 | ["MIT"] | 14 | 2019-04-14T16:10:23.000Z | 2022-03-09T14:56:10.000Z | discopy/components/sense/explicit/bert_conn_sense.py | rknaebel/discopy | 5507d656987af2df9e595434a82c0a12bbc713e4 | ["MIT"] | 15 | 2019-04-15T16:44:40.000Z | 2021-11-23T17:36:41.000Z | discopy/components/sense/explicit/bert_conn_sense.py | rknaebel/discopy | 5507d656987af2df9e595434a82c0a12bbc713e4 | ["MIT"] | 1 | 2020-02-28T23:36:35.000Z | 2020-02-28T23:36:35.000Z |
import json
import logging
import os
from typing import List, Dict
import click
import numpy as np
import tensorflow as tf
from sklearn.metrics import cohen_kappa_score, precision_recall_fscore_support, accuracy_score
from tqdm import tqdm
from discopy.components.component import Component
from discopy.components.connective.base import get_connective_candidates
from discopy.evaluate.conll import evaluate_docs, print_results
from discopy.utils import init_logger
from discopy_data.data.doc import Document
from discopy_data.data.loaders.conll import load_bert_conll_dataset
from discopy_data.data.relation import Relation
logger = logging.getLogger('discopy')
def get_conn_model(in_size, out_size, hidden_size, hidden_size2=256):
x = y = tf.keras.layers.Input(shape=(in_size,), name='connective')
y = tf.keras.layers.Dense(hidden_size, kernel_initializer='lecun_normal', activation='selu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(hidden_size2, kernel_initializer='lecun_normal', activation='selu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(out_size, activation='softmax')(y)
model = tf.keras.models.Model(x, y)
optimizer = tf.keras.optimizers.RMSprop()
model.compile(optimizer, 'sparse_categorical_crossentropy', metrics=[
"accuracy",
])
return model
def get_bert_features(idxs, doc_bert, used_context=0):
idxs = list(idxs)
pad = np.zeros_like(doc_bert[0])
embd = doc_bert[idxs].mean(axis=0)
if used_context > 0:
left = [doc_bert[i] if i >= 0 else pad for i in range(min(idxs) - used_context, min(idxs))]
right = [doc_bert[i] if i < len(doc_bert) else pad for i in range(max(idxs) + 1, max(idxs) + 1 + used_context)]
embd = np.concatenate(left + [embd] + right).flatten()
return embd
def generate_pdtb_features(docs: List[Document], sense_map: Dict[str, int], used_context=0):
features = []
for doc in tqdm(docs):
doc_bert = doc.get_embeddings()
global_id_map = {(s_i, t.local_idx): t.idx for s_i, s in enumerate(doc.sentences) for t in s.tokens}
conns = {tuple(t.idx for t in r.conn.tokens): r.senses[0] for r in doc.get_explicit_relations()}
for sent_i, sentence in enumerate(doc.sentences):
for connective_candidate in get_connective_candidates(sentence):
conn_idxs = tuple(global_id_map[(sent_i, i)] for i, c in connective_candidate)
if conn_idxs in conns:
sense = sense_map.get(conns[conn_idxs])
if not sense:
continue
features.append((get_bert_features(conn_idxs, doc_bert, used_context), sense))
else:
features.append((get_bert_features(conn_idxs, doc_bert, used_context), 0))
x, y = list(zip(*features))
return np.stack(x), np.array(y)
def get_sense_mapping(docs):
sense_map = {
'NoSense': 0,
}
    senses = sorted({s for doc in docs for rel in doc.relations for s in rel.senses})
    i = 1
    for s in senses:
        if s not in sense_map:
            sense_map[s] = i
            i += 1
classes = []
for sense, sense_id in sorted(sense_map.items(), key=lambda x: x[1]):
if len(classes) > sense_id:
continue
classes.append(sense)
return sense_map, classes
class ConnectiveSenseClassifier(Component):
model_name = 'explicit_sense_bert_classifier'
used_features = ['vectors']
def __init__(self, input_dim, used_context: int = 0, hidden_dim: int = 2048):
self.input_dim = input_dim
self.used_context = used_context
self.in_size = input_dim + 2 * used_context * input_dim
self.hidden_dim = hidden_dim
self.sense_map = {}
self.classes = []
self.model = None
self.batch_size = 512
def get_config(self):
return {
'model_name': self.model_name,
'input_dim': self.input_dim,
'hidden_dim': self.hidden_dim,
'used_context': self.used_context,
'sense_map': self.sense_map,
'classes': self.classes,
}
@staticmethod
def from_config(config: dict):
clf = ConnectiveSenseClassifier(config['input_dim'], config['used_context'], config['hidden_dim'])
clf.sense_map = config['sense_map']
clf.classes = config['classes']
return clf
def load(self, path):
self.sense_map = json.load(open(os.path.join(path, self.model_name, 'senses.json'), 'r'))
self.classes = []
for sense, sense_id in sorted(self.sense_map.items(), key=lambda x: x[1]):
if len(self.classes) > sense_id:
continue
self.classes.append(sense)
if not os.path.exists(os.path.join(path, self.model_name)):
raise FileNotFoundError("Model not found.")
self.model = tf.keras.models.load_model(os.path.join(path, self.model_name),
compile=False)
def save(self, path):
if not os.path.exists(path):
os.makedirs(path)
self.model.save(os.path.join(path, self.model_name))
json.dump(self.sense_map, open(os.path.join(path, self.model_name, 'senses.json'), 'w'))
def fit(self, docs_train: List[Document], docs_val: List[Document] = None):
if docs_val is None:
raise ValueError("Validation data is missing.")
self.sense_map, self.classes = get_sense_mapping(docs_train)
self.model = get_conn_model(self.in_size, len(self.sense_map), self.hidden_dim, 128)
self.model.summary()
print(self.sense_map, self.classes)
x_train, y_train = generate_pdtb_features(docs_train, self.sense_map, used_context=self.used_context)
x_val, y_val = generate_pdtb_features(docs_val, self.sense_map, used_context=self.used_context)
self.model.fit(x_train, y_train, validation_data=(x_val, y_val), verbose=1, shuffle=True, epochs=20,
batch_size=self.batch_size,
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.001, patience=7, verbose=0,
restore_best_weights=True),
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=3, verbose=0)
])
def score_on_features(self, x, y):
y_pred = self.model.predict(x, batch_size=self.batch_size).argmax(-1)
logger.info("Evaluation: Connective")
logger.info(" Acc : {:<06.4}".format(accuracy_score(y, y_pred)))
prec, recall, f1, support = precision_recall_fscore_support(y, y_pred, average='macro')
logger.info(" Macro: P {:<06.4} R {:<06.4} F1 {:<06.4}".format(prec, recall, f1))
logger.info(" Kappa: {:<06.4}".format(cohen_kappa_score(y, y_pred)))
def score(self, docs: List[Document]):
if not self.model:
raise ValueError("Score of untrained model.")
x, y = generate_pdtb_features(docs, self.sense_map, used_context=self.used_context)
self.score_on_features(x, y)
def parse(self, doc: Document, relations=None, **kwargs):
if not self.model:
raise ValueError("Score of untrained model.")
relations: List[Relation] = []
doc_bert = doc.get_embeddings()
global_id_map = {(s_i, t.local_idx): t.idx for s_i, s in enumerate(doc.sentences) for t in s.tokens}
for sent_i, sent in enumerate(doc.sentences):
for connective_candidate in get_connective_candidates(sent):
conn_idxs = tuple(global_id_map[(sent_i, i)] for i, c in connective_candidate)
features = get_bert_features(conn_idxs, doc_bert, self.used_context)
pred = self.model.predict(np.expand_dims(features, axis=0)).argmax(-1).flatten()[0]
if pred > 0:
conn_tokens = [sent.tokens[i] for i, c in connective_candidate]
relations.append(Relation(
conn=conn_tokens,
type='Explicit',
senses=[self.classes[pred]]
))
return relations
@click.command()
@click.argument('conll-path')
def main(conll_path):
logger = init_logger()
docs_val = load_bert_conll_dataset(os.path.join(conll_path, 'en.dev'),
cache_dir=os.path.join(conll_path, 'en.dev.bert-base-cased.joblib'))
docs_train = load_bert_conll_dataset(os.path.join(conll_path, 'en.train'),
cache_dir=os.path.join(conll_path, 'en.train.bert-base-cased.joblib'))
clf = ConnectiveSenseClassifier(input_dim=docs_val[0].get_embedding_dim(), used_context=2)
logger.info('Train model')
clf.fit(docs_train, docs_val)
logger.info('Evaluation on TRAIN')
clf.score(docs_train)
logger.info('Evaluation on TEST')
clf.score(docs_val)
# logger.info('Parse one document')
# print(docs_val[0].to_json())
print(clf.parse(docs_val[0], []))
preds = [d.with_relations(clf.parse(d)) for d in docs_val]
print_results(evaluate_docs(docs_val, preds))
if __name__ == "__main__":
main()
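The context window in get_bert_features is easy to verify on a toy input; a small sketch (assuming the function is imported from this module):

import numpy as np

doc_bert = np.arange(20, dtype=float).reshape(5, 4)  # 5 tokens, 4-dim embeddings
# no context: mean over the connective tokens -> shape (4,)
print(get_bert_features([1, 2], doc_bert).shape)
# one token of context on each side -> 3 vectors flattened, shape (12,)
print(get_bert_features([1, 2], doc_bert, used_context=1).shape)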
| 44.042056 | 119 | 0.634589 | 1,295 | 9,425 | 4.408494 | 0.185328 | 0.03223 | 0.023121 | 0.014714 | 0.301279 | 0.267122 | 0.251182 | 0.213698 | 0.196882 | 0.182169 | 0 | 0.01087 | 0.248382 | 9,425 | 213 | 120 | 44.248826 | 0.795031 | 0.006578 | 0 | 0.113514 | 0 | 0 | 0.067628 | 0.012927 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075676 | false | 0 | 0.086486 | 0.005405 | 0.216216 | 0.021622 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f8065cbbdc71ae71f6d602d2671a71b28b0eea4a | 2,057 | py | Python | tools/draw_comparison_head_design_choices.py | twangnh/Calibration_mrcnn | e5f3076cefbe35297a403a753bb57e11503db818 | ["Apache-2.0"] | 87 | 2020-07-24T01:28:39.000Z | 2021-08-29T08:40:18.000Z | tools/draw_comparison_head_design_choices.py | twangnh/Calibration_mrcnn | e5f3076cefbe35297a403a753bb57e11503db818 | ["Apache-2.0"] | 3 | 2020-09-27T12:59:28.000Z | 2022-01-06T13:14:08.000Z | tools/draw_comparison_head_design_choices.py | twangnh/Calibration_mrcnn | e5f3076cefbe35297a403a753bb57e11503db818 | ["Apache-2.0"] | 20 | 2020-09-05T04:37:19.000Z | 2021-12-13T02:25:48.000Z |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
labels = ['AP on bin (0,10)', 'AP on bin (10,100)']
baseline = [0.0, 13.3]
fc2_ncm = [6.0, 18.9]
fc2 = [8.6, 22.0]
fc3_rand = [9.1, 18.8]
fc3_ft = [13.2, 23.1]
x = np.arange(len(labels)) # the label locations
width = 0.15 # the width of the bars
matplotlib.rcParams.update({'font.size': 16})
# plt.rc('ytick', labelsize=10)
fig, ax = plt.subplots()
# rects1 = ax.bar(x - width, baseline, width, label='baseline')
# rects2 = ax.bar(x - width/2, fc2_ncm, width, label='2fc_ncm')
# rects3 = ax.bar(x , baseline, fc2, label='baseline')
# rects4 = ax.bar(x + width/2, fc3_rand, width, label='2fc_ncm')
# rects5 = ax.bar(x + width, fc3_ft, width, label='baseline')
# Set position of bar on X axis
r1 = np.arange(len(labels))
r2 = [x + width for x in r1]
r3 = [x + width for x in r2]
r4 = [x + width for x in r3]
r5 = [x + width for x in r4]
# Make the plot
rects1 = ax.bar(r1, baseline, color='#7f6d5f', width=width, edgecolor='white', label='baseline')
rects2 = ax.bar(r2, fc2_ncm, color='#557f2d', width=width, edgecolor='white', label='2fc_ncm')
rects3 = ax.bar(r3, fc2, width=width, edgecolor='white', label='2fc_rand')
rects4 = ax.bar(r4, fc3_rand, width=width, edgecolor='white', label='3fc_rand')
rects5 = ax.bar(r5, fc3_ft, width=width, edgecolor='white', label='3fc_ft')
ax.set_ylim([0,25])
ax.set_xticks([0.3, 1.3])
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
autolabel(rects4)
autolabel(rects5)
fig.tight_layout()
plt.savefig('head_design_choices.eps', format='eps', dpi=1000)
plt.show()
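The hardcoded xtick positions [0.3, 1.3] are just the centers of each five-bar group: with five bars of width 0.15, the center sits (n_bars - 1) / 2 * width to the right of the group start. A generic way to compute the same positions:

import numpy as np

n_bars, width = 5, 0.15
centers = np.arange(2) + (n_bars - 1) / 2 * width  # -> array([0.3, 1.3])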
| 31.166667 | 96 | 0.6456 | 334 | 2,057 | 3.904192 | 0.347305 | 0.038344 | 0.023006 | 0.092025 | 0.232362 | 0.125767 | 0 | 0 | 0 | 0 | 0 | 0.066548 | 0.181818 | 2,057 | 65 | 97 | 31.646154 | 0.708259 | 0.24842 | 0 | 0 | 0 | 0 | 0.112418 | 0.015033 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.069767 | 0 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f807e6a714508c55a5204cce88f3927910a26a1e | 9,916 | py | Python | src/entry.py | akilmarshall/vash-2 | 5307bc414afba24b235ae0ae9b2583c33ea69b1f | ["MIT"] | null | null | null | src/entry.py | akilmarshall/vash-2 | 5307bc414afba24b235ae0ae9b2583c33ea69b1f | ["MIT"] | null | null | null | src/entry.py | akilmarshall/vash-2 | 5307bc414afba24b235ae0ae9b2583c33ea69b1f | ["MIT"] | null | null | null |
from datetime import datetime
from itertools import count
from tkinter import *
import tkinter.ttk as ttk
from functools import partial
from tkcalendar import DateEntry
from case import COD, CONTRIES, Case, INCIDENT, ORGANIZATION, POLICESTATION, STATES
from db import referred_other_agency
from preview import CasePreview
class CaseEntry(ttk.Frame):
def __init__(self, parent):
        ttk.Frame.__init__(self, parent)
row = count(0, 1)
# first name
r = next(row)
ttk.Label(self, text='First Name').grid(row=r, column=0)
self.first_name = StringVar()
self.fname_entry = ttk.Entry(self, textvariable=self.first_name)
self.fname_entry.grid(row=r, column=1)
# last name
r = next(row)
ttk.Label(self, text='Last name').grid(row=r, column=0)
self.last_name = StringVar()
self.lname_entry = ttk.Entry(self, textvariable=self.last_name)
self.lname_entry.grid(row=r, column=1)
# incident date
r = next(row)
ttk.Label(self, text='Incident Date (m/d/y)').grid(row=r, column=0)
self.date = StringVar()
DateEntry(self, textvariable=self.date).grid(row=r, column=1)
# incident type
self.other_incident_death_label = None
self.other_incident_label = None
def variable_incident_entry(value):
if self.incident.get() == 'death':
self.destroy_other_incident()
self.other_incident_death_label = ttk.Label(self, text='Cause of Death')
self.other_incident_death_label.grid(row=incident_row, column=2)
self.cod = StringVar()
self.cod_combobox = ttk.Combobox(self, textvariable=self.cod)
self.cod_combobox['values'] = [''] + COD
self.cod_combobox.set('')
self.cod_combobox.grid(row=incident_row, column=3)
elif self.incident.get() == 'other':
self.destroy_other_death()
self.other_incident = StringVar()
self.other_incident_label = ttk.Label(self, text='Other Incident')
self.other_incident_label.grid(row=incident_row, column=2)
self.other_incident_entry = ttk.Entry(self, textvariable=self.other_incident)
self.other_incident_entry.grid(row=incident_row, column=3)
else:
self.destroy_other_incident()
self.destroy_other_death()
incident_row = r = next(row)
ttk.Label(self, text='Incident Type').grid(row=r, column=0)
self.incident = StringVar()
incident = ttk.Combobox(self, textvariable=self.incident)
incident.bind('<<ComboboxSelected>>', variable_incident_entry)
incident['values'] = [''] + INCIDENT
incident.set('')
incident.grid(row=r, column=1)
# water related?
r = next(row)
ttk.Label(self, text='Water Related?').grid(row=r, column=0)
self.water_related = StringVar()
ttk.Radiobutton(self, text='True', value=True,
variable=self.water_related).grid(row=r, column=1)
ttk.Radiobutton(self, text='False', value=False,
variable=self.water_related).grid(row=r, column=2)
# party size
r = next(row)
ttk.Label(self, text='Party Size').grid(row=r, column=0)
self.party_size = StringVar()
party_size = ttk.Combobox(self, textvariable=self.party_size)
party_size['values'] = list(range(1, 10))
party_size.set(1)
party_size.grid(row=r, column=1)
# incident location
r = next(row)
ttk.Label(self, text='Incident Location').grid(row=r, column=0)
self.location = StringVar()
ttk.Entry(self, textvariable=self.location).grid(row=r, column=1)
# referred by
self.other_referred_label = None
self.other_referred = StringVar()
def referred_entry(_):
if self.referred.get() == 'other':
self.other_referred_label = ttk.Label(self, text='Other Agency')
self.other_referred_label.grid(row=referred_row, column=2)
self.other_referred_entry = ttk.Entry(self, textvariable=self.other_referred)
self.other_referred_entry.grid(row=referred_row, column=3)
else:
self.destroy_other_referred()
referred_row = r = next(row)
ttk.Label(self, text='Referred by').grid(row=r, column=0)
self.referred = StringVar()
referred = ttk.Combobox(self, textvariable=self.referred)
referred.bind('<<ComboboxSelected>>', referred_entry)
referred['values'] = [''] + ORGANIZATION
referred.set('')
referred.grid(row=r, column=1)
# police station
r = next(row)
ttk.Label(self, text='Police Station').grid(row=r, column=0)
self.police = StringVar()
police = ttk.Combobox(self, textvariable=self.police)
police['values'] = [''] + POLICESTATION
police.grid(row=r, column=1)
# visitor type
r = next(row)
ttk.Label(self, text='Visitor Type').grid(row=r, column=0)
self.visitor_type = StringVar()
visitor_type = ttk.Combobox(self, textvariable=self.visitor_type)
visitor_type['values'] = ['land', 'cruise']
visitor_type.grid(row=r, column=1)
# country of origin
self.state_label = None
def state_entry(_):
if self.country.get() == 'United States':
# state of origin
self.state = StringVar()
self.state_label = ttk.Label(self, text='State')
self.state_label.grid(row=country_row, column=2)
self.state_combobox = ttk.Combobox(self, textvariable=self.state)
self.state_combobox['values'] = [''] + STATES
self.state_combobox.set('')
self.state_combobox.grid(row=country_row, column=3)
else:
self.destroy_other_state()
country_row = r = next(row)
ttk.Label(self, text='Country').grid(row=r, column=0)
self.country = StringVar()
country = ttk.Combobox(self, textvariable=self.country)
country.bind('<<ComboboxSelected>>', state_entry)
country['values'] = [''] + CONTRIES
country.set('')
country.grid(row=r, column=1)
# case notes
r = next(row)
self.notes = Text(self, height=10)
ttk.Label(self, text='Notes').grid(row=r, column=0)
self.notes.grid(row=r, column=1)
# Buttons
r = next(row)
ttk.Button(self, text='Submit', command=self.submit).grid(
row=r, column=1)
r = next(row)
ttk.Button(self, text='Clear', command=self.clear).grid(
row=r, column=1)
r = next(row)
def destroy_other_incident(self):
if self.other_incident_label is not None:
self.other_incident_label.destroy()
self.other_incident_entry.destroy()
def destroy_other_death(self):
if self.other_incident_death_label is not None:
self.other_incident_death_label.destroy()
self.cod_combobox.destroy()
def destroy_other_referred(self):
if self.other_referred_label is not None:
self.other_referred_label.destroy()
self.other_referred_entry.destroy()
def destroy_other_state(self):
if self.state_label is not None:
self.state_label.destroy()
self.state_combobox.destroy()
def submit(self):
fname = self.first_name.get()
lname = self.last_name.get()
date = datetime.strptime(self.date.get(), '%m/%d/%y')
incident = self.incident.get()
cod = ''
incident_other = ''
if incident == 'death':
cod = self.cod.get()
elif incident == 'other':
incident_other = self.other_incident_entry.get()
party_size = int(self.party_size.get())
location = self.location.get()
        water_related = self.water_related.get() == '1'
referred = self.referred.get()
referred_other = self.other_referred.get() if referred == 'other' else ''
police = self.police.get()
visitor_type = self.visitor_type.get()
country = self.country.get()
state = self.state.get() if country == 'United States' else ''
notes = self.notes.get('1.0', 'end')
case = Case(
fname,
lname,
date,
incident,
incident_other,
cod,
party_size,
location,
water_related,
referred,
referred_other,
police,
visitor_type,
country,
state,
notes
)
CasePreview(self, case)
        # self.clear()  # TODO: needs an async confirmation that the write
        # succeeded before the form can safely be cleared
def clear(self):
        self.first_name.set('')
date = datetime.today()
y, m, d = date.year, date.month, date.day
self.date.set(f'{m}/{d}/{y - 2000}')
self.last_name.set('')
self.incident.set('')
# self.cod.set('')
self.party_size.set(1)
self.location.set('')
self.water_related.set('')
self.referred.set('')
self.police.set('')
self.visitor_type.set('')
self.country.set('')
# self.state.set('')
self.notes.delete('1.0', END)
self.destroy_other_state()
self.destroy_other_referred()
self.destroy_other_death()
self.destroy_other_incident()
if __name__ == '__main__':
root = Tk()
entry = CaseEntry(root)
entry.pack()
root.mainloop()
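The form above leans on one pattern repeatedly: bind '<<ComboboxSelected>>' and create or destroy dependent widgets from the callback. A minimal, self-contained sketch of just that pattern (all names here are illustrative, not from src/entry.py; requires a display):

# Hypothetical standalone sketch of the bind-and-rebuild pattern above.
from tkinter import Tk, StringVar
import tkinter.ttk as ttk

root = Tk()
choice = StringVar()
combo = ttk.Combobox(root, textvariable=choice, values=['', 'other'])
combo.grid(row=0, column=0)
detail_label = None
detail_entry = None

def on_select(_event):
    global detail_label, detail_entry
    if choice.get() == 'other' and detail_label is None:
        # selection needs extra input: build the dependent widgets
        detail_label = ttk.Label(root, text='Details')
        detail_label.grid(row=0, column=1)
        detail_entry = ttk.Entry(root)
        detail_entry.grid(row=0, column=2)
    elif choice.get() != 'other' and detail_label is not None:
        # selection no longer needs them: tear the widgets down
        detail_label.destroy()
        detail_entry.destroy()
        detail_label = detail_entry = None

combo.bind('<<ComboboxSelected>>', on_select)
root.mainloop()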
| 38.583658
| 93
| 0.588846
| 1,188
| 9,916
| 4.763468
| 0.108586
| 0.043294
| 0.038169
| 0.066796
| 0.406256
| 0.251635
| 0.15144
| 0.070507
| 0
| 0
| 0
| 0.007508
| 0.28812
| 9,916
| 256
| 94
| 38.734375
| 0.794163
| 0.031263
| 0
| 0.133641
| 0
| 0
| 0.04444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046083
| false
| 0
| 0.041475
| 0
| 0.092166
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8082f1e3f5f385cac811686714cd680277f4584
| 7,406
|
py
|
Python
|
repro_eval/__main__.py
|
irgroup/repro_eval
|
35a4cf083dbb5f4b29d6ef602a604f0686a537c9
|
[
"MIT"
] | 8
|
2020-10-27T02:11:53.000Z
|
2022-03-02T11:00:10.000Z
|
repro_eval/__main__.py
|
irgroup/repro_eval
|
35a4cf083dbb5f4b29d6ef602a604f0686a537c9
|
[
"MIT"
] | 2
|
2021-01-25T19:59:39.000Z
|
2021-12-07T09:29:01.000Z
|
repro_eval/__main__.py
|
irgroup/repro_eval
|
35a4cf083dbb5f4b29d6ef602a604f0686a537c9
|
[
"MIT"
] | 1
|
2021-04-16T16:21:16.000Z
|
2021-04-16T16:21:16.000Z
|
"""
Use repro_eval from the command line with e.g.
python -m repro_eval -t rpd -q qrel_orig -r orig_b rpd_b
python -m repro_eval -t rpd -q qrel_orig -r orig_b orig_a rpd_b rpd_a
python -m repro_eval -t rpd -m rmse -q qrel_orig -r orig_b rpd_b
python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b rpl_b
python -m repro_eval -t rpl -q qrel_orig qrel_rpl -r orig_b orig_a rpl_b rpl_a
after having installed the Python package.
For other, more specific examples, have a look at the README file.
Depending on the provided parameters and input run files,
evaluation measures will be printed.
"""
import argparse
from repro_eval.Evaluator import RpdEvaluator, RplEvaluator
from repro_eval.util import print_simple_line, print_base_adv
from repro_eval.util import arp
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type')
parser.add_argument('-m', '--measure', nargs='+')
parser.add_argument('-q', '--qrels', nargs='+')
parser.add_argument('-r', '--runs', nargs='+')
args = parser.parse_args()
if args.type in ['rpd', 'reproducibility']:
if len(args.runs) == 4:
rpd_eval = RpdEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=args.runs[1],
run_b_rep_path=args.runs[2],
run_a_rep_path=args.runs[3])
if len(args.runs) == 2:
rpd_eval = RpdEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=None,
run_b_rep_path=args.runs[1],
run_a_rep_path=None)
rpd_eval.trim()
rpd_eval.evaluate()
measure_list = args.measure if args.measure is not None else []
# KTU
if 'ktu' in measure_list or args.measure is None:
ktu = rpd_eval.ktau_union()
print("Kendall's tau Union (KTU)")
print('------------------------------------------------------------------')
for topic, value in ktu.get('baseline').items():
value_adv = ktu.get('advanced').get(topic) if ktu.get('advanced') is not None else None
print_base_adv(topic, 'KTU', value, value_adv)
value_adv = arp(ktu.get('advanced')) if ktu.get('advanced') is not None else None
print_base_adv('ARP', 'KTU', arp(ktu.get('baseline')), value_adv)
print()
# RBO
if 'rbo' in measure_list or args.measure is None:
rbo = rpd_eval.rbo()
print("Rank-biased Overlap (RBO)")
print('------------------------------------------------------------------')
for topic, value in rbo.get('baseline').items():
value_adv = rbo.get('advanced').get(topic) if rbo.get('advanced') is not None else None
print_base_adv(topic, 'RBO', value, value_adv)
value_adv = arp(rbo.get('advanced')) if rbo.get('advanced') is not None else None
print_base_adv('ARP', 'RBO', arp(rbo.get('baseline')), value_adv)
print()
# RMSE
if 'rmse' in measure_list or args.measure is None:
rmse = rpd_eval.rmse()
print("Root mean square error (RMSE)")
print('------------------------------------------------------------------')
for measure, value in rmse.get('baseline').items():
value_adv = rmse.get('advanced').get(measure) if rmse.get('advanced') is not None else None
print_base_adv(measure, 'RMSE', value, value_adv)
print()
# ER
        if ('er' in measure_list or args.measure is None) and len(args.runs) == 4:
print("Effect ratio (ER)")
print('------------------------------------------------------------------')
er = rpd_eval.er()
for measure, value in er.items():
print_simple_line(measure, 'ER', value)
print()
# DRI
        if ('dri' in measure_list or args.measure is None) and len(args.runs) == 4:
print("Delta Relative Improvement (DRI)")
print('------------------------------------------------------------------')
dri = rpd_eval.dri()
for measure, value in dri.items():
print_simple_line(measure, 'DRI', value)
print()
# ttest
if 'ttest' in measure_list or args.measure is None:
pvals = rpd_eval.ttest()
print("Two-tailed paired t-test (p-value)")
print('------------------------------------------------------------------')
for measure, value in pvals.get('baseline').items():
value_adv = pvals.get('advanced').get(measure) if pvals.get('advanced') is not None else None
print_base_adv(measure, 'PVAL', value, value_adv)
print()
if args.type in ['rpl', 'replicability']:
if len(args.runs) == 4:
rpl_eval = RplEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=args.runs[1],
run_b_rep_path=args.runs[2],
run_a_rep_path=args.runs[3],
qrel_rpl_path=args.qrels[1])
if len(args.runs) == 2:
rpl_eval = RplEvaluator(qrel_orig_path=args.qrels[0],
run_b_orig_path=args.runs[0],
run_a_orig_path=None,
run_b_rep_path=args.runs[1],
run_a_rep_path=None,
qrel_rpl_path=args.qrels[1])
rpl_eval.trim()
rpl_eval.evaluate()
measure_list = args.measure if args.measure is not None else []
# ER
        if ('er' in measure_list or args.measure is None) and len(args.runs) == 4:
print("Effect ratio (ER)")
print('------------------------------------------------------------------')
er = rpl_eval.er()
for measure, value in er.items():
print_simple_line(measure, 'ER', value)
print()
# DRI
        if ('dri' in measure_list or args.measure is None) and len(args.runs) == 4:
print("Delta Relative Improvement (DRI)")
print('------------------------------------------------------------------')
dri = rpl_eval.dri()
for measure, value in dri.items():
print_simple_line(measure, 'DRI', value)
print()
# ttest
if 'ttest' in measure_list or args.measure is None:
pvals = rpl_eval.ttest()
print("Two-tailed unpaired t-test (p-value)")
print('------------------------------------------------------------------')
for measure, value in pvals.get('baseline').items():
value_adv = pvals.get('advanced').get(measure) if pvals.get('advanced') is not None else None
print_base_adv(measure, 'PVAL', value, value_adv)
print()
if __name__ == "__main__":
main()
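The CLI above can also be driven programmatically. A minimal sketch using only the constructor keywords and methods that appear in this file; the qrel/run paths are placeholders:

from repro_eval.Evaluator import RpdEvaluator

# Placeholder paths; baseline-only (two-run) variant, as handled above.
rpd_eval = RpdEvaluator(qrel_orig_path='qrel_orig',
                        run_b_orig_path='orig_b',
                        run_a_orig_path=None,
                        run_b_rep_path='rpd_b',
                        run_a_rep_path=None)
rpd_eval.trim()
rpd_eval.evaluate()
print(rpd_eval.rmse().get('baseline'))  # per-measure RMSE of the baseline run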
| 43.309942
| 109
| 0.498785
| 892
| 7,406
| 3.94843
| 0.146861
| 0.045429
| 0.040886
| 0.03322
| 0.754685
| 0.640829
| 0.609597
| 0.609597
| 0.58234
| 0.58234
| 0
| 0.005152
| 0.318526
| 7,406
| 170
| 110
| 43.564706
| 0.692689
| 0.086011
| 0
| 0.554622
| 0
| 0
| 0.171776
| 0.087961
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008403
| false
| 0
| 0.033613
| 0
| 0.042017
| 0.327731
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f809139d6c632c257d27b2da4aee81ff3ca5dcc2
| 2,377
|
py
|
Python
|
main.py
|
juligreen/towerdefense-prototype
|
1cdac58acf697ca856a60dec6533caed17acf656
|
[
"MIT"
] | null | null | null |
main.py
|
juligreen/towerdefense-prototype
|
1cdac58acf697ca856a60dec6533caed17acf656
|
[
"MIT"
] | null | null | null |
main.py
|
juligreen/towerdefense-prototype
|
1cdac58acf697ca856a60dec6533caed17acf656
|
[
"MIT"
] | null | null | null |
import math
from game_objects import Turret, Troop
players = []
class Location:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Lane:
# for this prototype we are going to imagine our lanes as straight lines
def __init__(self, left_start_location: Location, right_start_location: Location):
self.left_start_location = left_start_location
self.right_start_location = right_start_location
def calculate_distance(entity1: Location, entity2: Location) -> float:
# distance between vectors: https://brilliant.org/wiki/distance-formula/
    distance = math.sqrt((entity1.x - entity2.x) ** 2 + (entity1.y - entity2.y) ** 2)
return distance
class Player:
def __init__(self, position: str, location: Location):
self.position = position
self.location = location
self.turrets = []
self.troops = []
        self.enemy_player: 'Player' = None  # assigned in init(); Player() here would recurse forever
self.health = 100
    def add_turret(self, grid_location: Location, strength_level: int):
        turret = Turret(grid_location, strength_level)
self.turrets.append(turret)
def add_troops(self, lane: Lane, count: int, strength_level: int):
troops = []
for _ in range(count):
troop = Troop(lane, strength_level, self.enemy_player.position, self.enemy_player)
troops.append(troop)
self.troops.append(troops)
def turret_fire_check(self):
for turret in self.turrets:
for troop in self.enemy_player.troops:
distance = calculate_distance(turret.location, troop.location)
if distance < turret.range:
turret.attack(troop)
break
def init():
    # placeholder start positions; the original pseudocode indexed into an
    # empty list, which would raise IndexError
    players.append(Player('left', Location(0, 0)))
    players.append(Player('right', Location(100, 0)))
    players[0].enemy_player = players[1]
    players[1].enemy_player = players[0]
init()
while True:
# most of this is pseudocode, as I have no way of handling user input currently
for index, player in enumerate(players):
        if 'player places turret':  # pseudocode: always true until input handling exists
            player.add_turret(Location(1, 1), 1)
        if 'player places troops':  # pseudocode placeholder; lane and counts are arbitrary
            player.add_troops(Lane(Location(0, 0), Location(100, 0)), 1, 1)
for troop in player.troops:
troop.move()
player.turret_fire_check()
        if player.enemy_player.health <= 0:  # a player wins when the enemy's health is gone
print(f'Player {index} won the game!')
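The corrected distance helper is easy to sanity-check with a 3-4-5 right triangle; a self-contained sketch (no game_objects needed):

import math

class _Point:  # stand-in for Location, keeps the sketch self-contained
    def __init__(self, x, y):
        self.x, self.y = x, y

a, b = _Point(0, 0), _Point(3, 4)
d = math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)
print(d)  # 5.0, the 3-4-5 right triangle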
| 30.088608
| 94
| 0.636096
| 296
| 2,377
| 4.945946
| 0.320946
| 0.053279
| 0.040984
| 0.028689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011461
| 0.265881
| 2,377
| 78
| 95
| 30.474359
| 0.827507
| 0.092133
| 0
| 0
| 0
| 0
| 0.03714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145455
| false
| 0
| 0.036364
| 0
| 0.254545
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8094b25e0893a5bce69fe2d108d090003595a0e
| 7,110
|
py
|
Python
|
bib_processing.py
|
GAIGResearch/GAIGResearch.github.io
|
90d0555348ad8f3f500b6480168ad65fa0226dce
|
[
"MIT"
] | null | null | null |
bib_processing.py
|
GAIGResearch/GAIGResearch.github.io
|
90d0555348ad8f3f500b6480168ad65fa0226dce
|
[
"MIT"
] | null | null | null |
bib_processing.py
|
GAIGResearch/GAIGResearch.github.io
|
90d0555348ad8f3f500b6480168ad65fa0226dce
|
[
"MIT"
] | 2
|
2019-07-09T11:08:15.000Z
|
2020-12-04T14:55:00.000Z
|
import os
from pathlib import Path
from difflib import SequenceMatcher
supported_bibtex_types = {"article", "book", "booklet", "inbook", "incollection", "inproceedings", "manual",
"mastersthesis", "misc", "phdthesis", "proceedings", "techreport", "unpublished"}
supported_fields = ["author", "title", "year", "month", "pages", "note",
"journal", "booktitle",
"volume", "number", "series", "edition",
"editor", "publisher", "address",
"howpublished", "type",
"chapter",
"organization", "school", "institution"]
extra_fields = ["doi", "issn", "isbn", "keywords", "abstract", "url", "archivePrefix", "eprint", "timestamp", "biburl",
"bibsource"]
data_path = Path("_data/papers.yml")
bib_path = Path("bibfiles")
year_from = 2017
similarity_threshold = 0.8
def find_all_files(path_to_search):
"""Recursively find all bib files in root path given"""
list_of_files = os.listdir(path_to_search)
all_files = []
# Iterate over all the entries
for e in list_of_files:
# Create full path
full_path = path_to_search / e
# If entry is a directory then get the list of files in this directory
if os.path.isdir(full_path):
all_files = all_files + find_all_files(full_path)
        elif full_path.suffix == ".bib":  # with_suffix() returns a Path, which is always truthy
all_files.append(full_path)
return all_files
def process_entry(entry_to_process):
"""
Turns a string of an entry into a dictionary mapping from fields to field values
:param entry_to_process
:return: dictionary.
"""
dict_entry = {}
entry_lines = entry_to_process.split("\n")
first_line = entry_lines[0].split("=")
entry_type = first_line[0].replace("@", "")
entry_id = first_line[1]
# Type validation
if entry_type.lower() not in supported_bibtex_types:
print("Type " + entry_type + " not supported for bibtex entry " + entry_id)
return dict_entry
dict_entry["id"] = entry_id
dict_entry["type"] = entry_type
# Process the rest of the fields
field_value = "" # Keep this up here to be able to access previous values in case of multi-line field
field = ""
for l in entry_lines:
split_line = l.split("=")
if len(split_line) == 1 and field != "": # No = found on this line, it's a multi-line field
field_value += " " + split_line[0].strip()
dict_entry[field] = field_value.strip()
else:
field = split_line[0].strip()
field_value = split_line[1].strip()
if field.lower() in supported_fields or field.lower() in extra_fields:
if field.lower() == "pages" and "--" not in field_value:
field_value = field_value.replace("-", "--")
dict_entry[field] = field_value
# Try to find pdf of this paper
pdf = find_pdf(entry_id, dict_entry["year"])
dict_entry["pdf"] = str(pdf).lower()
return dict_entry
def find_pdf(entry_id, year):
"""
Returns true if a pdf for this paper exists in the pdf/pub/year directory (must have name as paper ID)
"""
return os.path.isfile("pdf/pub/" + year + "/" + entry_id + ".pdf")
def output_entries(entries):
"""
Prints the given bibtex entries into yaml supported format
"""
with open(data_path.absolute(), 'w+', encoding='utf-8') as wf:
for entry in entries:
if int(entry["year"]) < year_from:
continue
wf.write("- id: " + entry["id"] + "\n")
for e in entry:
if e != "id":
if ":" in entry[e]:
entry[e] = '"' + entry[e] + '"'
wf.write(" " + e + ": " + entry[e] + "\n")
def check_equality(entry1, entry2):
"""
Checks if 2 entries are the same
"""
sim_fields = 0
common_fields = 0
for field1 in entry1:
for field2 in entry2:
if field1 == field2:
common_fields += 1
if similar(entry1[field1], entry2[field2]) >= similarity_threshold:
sim_fields += 1
if common_fields == 0:
return False
if sim_fields / common_fields >= similarity_threshold:
return True
return False
def similar(a, b):
"""
Checks if 2 strings are similar, returns a similarity measure.
"""
return SequenceMatcher(None, a, b).ratio()
def process_yml_entries(lines):
"""
Processes entries in yml format
:param lines: list of lines from yml file to process
:return: list of entries as dictionaries
"""
entry_list = []
entry = {}
ln = 0
for line in lines:
if "- id:" in line or ln == len(lines) - 1: # Starting a new entry
if len(entry) > 0:
entry_list.append(entry)
entry = {}
line = line.replace("\"", "")
if "- id:" in line:
line = line[1:] # Ignore first dash
stripped_line = line.strip()
if stripped_line != "": # Adding to current entry
split_line = stripped_line.split(':')
entry[split_line[0].strip()] = ':'.join(split_line[1:]).strip()
ln += 1
return entry_list
def main():
"""
Main function to process bibtex entries in a given path and output a file in yaml supported format.
"""
# Read in current entries
lines = data_path.read_text(encoding='utf-8').split('\n')
entries = process_yml_entries(lines)
# Find new entries
files = find_all_files(bib_path)
for bibfile in files:
entry = ""
full_pth = Path(bibfile)
lines = full_pth.read_text(encoding='utf-8').split('\n')
line_number = 0
for line in lines:
if "@" in line or line_number == len(lines)-1: # Starting a new entry
if entry != "":
entry = entry.translate({ord(c): None for c in '\\"{}~\'"'})
processed_entry = process_entry(entry)
entries.append(processed_entry)
entry = ""
if "@" in line:
line = line.replace("{", "=")
stripped_line = line.strip()
if stripped_line != "": # Adding to current entry
if stripped_line.endswith(","):
stripped_line = stripped_line[:-1]
entry += stripped_line + "\n"
line_number += 1
# Check for duplication
duplicate_entries = []
for i in range(len(entries)-1):
for j in range(i+1, len(entries)):
if check_equality(entries[i], entries[j]):
print("Duplicate found: " + entries[i]["id"] + " = " + entries[j]["id"])
duplicate_entries.append(j)
duplicate_entries.sort()
for i in range(len(duplicate_entries)):
e = duplicate_entries[i] - i
del entries[e]
# Finally, save entries
output_entries(entries)
if __name__ == "__main__":
main()
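A quick illustration of process_entry() on one pre-cleaned entry, in the same shape main() produces (braces already stripped, one 'field=value' per line). This sketch assumes it runs inside this module; the entry itself is made up:

# Made-up entry; the field names are from supported_fields above.
entry = "@article=smith2020\nauthor=John Smith\ntitle=An Example\nyear=2020"
print(process_entry(entry))
# {'id': 'smith2020', 'type': 'article', 'author': 'John Smith',
#  'title': 'An Example', 'year': '2020', 'pdf': 'false'}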
| 33.696682
| 119
| 0.568636
| 877
| 7,110
| 4.443558
| 0.255416
| 0.020785
| 0.009238
| 0.011547
| 0.084167
| 0.064665
| 0.05594
| 0.042597
| 0.028227
| 0.028227
| 0
| 0.009744
| 0.307173
| 7,110
| 210
| 120
| 33.857143
| 0.781364
| 0.165541
| 0
| 0.1
| 0
| 0
| 0.095337
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.021429
| 0
| 0.142857
| 0.021429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f80a066211d5845a2d19529db9ed13271bcad6dc
| 2,105
|
py
|
Python
|
browser.py
|
7Cortez7/instagram-giveaway-bot
|
43246e3ded06ea3a6cbf2ef20164b229fe90ee0e
|
[
"MIT"
] | null | null | null |
browser.py
|
7Cortez7/instagram-giveaway-bot
|
43246e3ded06ea3a6cbf2ef20164b229fe90ee0e
|
[
"MIT"
] | null | null | null |
browser.py
|
7Cortez7/instagram-giveaway-bot
|
43246e3ded06ea3a6cbf2ef20164b229fe90ee0e
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
import time
import userdata as udata
import random
randomUsers = set()
class Browser:
def __init__(self, link):
self.link = link
self.browser = webdriver.Chrome()
Browser.Instagram(self)
Browser.Login(self)
Browser.goFollowers(self)
def Instagram(self):
self.browser.get(self.link)
time.sleep(2)
def goFollowers(self):
        # find_element_by_* is the Selenium 3 API; these calls were removed in Selenium 4
        self.browser.find_element_by_xpath("//*[@id=\"react-root\"]/section/main/div/header/section/ul/li[2]/a").click()
time.sleep(5)
Browser.scrollDown(self)
followers = self.browser.find_elements_by_css_selector("._7UhW9.xLCgt.qyrsm.KV-D4.se6yk.T0kll")
for follower in followers:
randomUsers.add(follower.text)
print("Çekiliş başlıyor! {totaluser} kişi katılmaya hak kazandı.".format(totaluser = len(randomUsers)))
time.sleep(5)
randomUsersList = list(randomUsers)
print("Kazanan:", random.choice(randomUsersList))
time.sleep(5)
exit()
def scrollDown(self):
jsCode = """
page = document.querySelector(".isgrP");
page.scrollTo(0, page.scrollHeight);
var pageEnd = page.scrollHeight;
return pageEnd;
"""
pageEnd = self.browser.execute_script(jsCode)
while True:
end = pageEnd
time.sleep(1)
pageEnd = self.browser.execute_script(jsCode)
if end == pageEnd:
break
def Login(self):
username = self.browser.find_element_by_name("username")
password = self.browser.find_element_by_name("password")
loginBtn = self.browser.find_element_by_css_selector("#loginForm > div > div:nth-child(3) > button > div")
username.send_keys(udata.username)
password.send_keys(udata.password)
time.sleep(1)
loginBtn.click()
time.sleep(2)
self.browser.get(self.link + udata.username)
time.sleep(2)
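scrollDown() above is a poll-until-stable loop around the page height. The same idea, extracted as a generic sketch with no browser dependency (names here are illustrative):

import time

def wait_until_stable(read_value, interval=1.0):
    """Poll read_value() until two consecutive reads match, then return it."""
    last = read_value()
    while True:
        time.sleep(interval)
        current = read_value()
        if current == last:
            return current
        last = current

# Fake, eventually-constant source standing in for the page height:
values = iter([10, 20, 30, 30])
print(wait_until_stable(lambda: next(values), interval=0.01))  # 30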
| 31.893939
| 121
| 0.59715
| 230
| 2,105
| 5.352174
| 0.443478
| 0.10723
| 0.060926
| 0.071487
| 0.180341
| 0.105605
| 0
| 0
| 0
| 0
| 0
| 0.010731
| 0.291686
| 2,105
| 65
| 122
| 32.384615
| 0.814889
| 0
| 0
| 0.188679
| 0
| 0
| 0.190686
| 0.055882
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0.037736
| 0.075472
| 0
| 0.207547
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f80b2ee49671a1d6b544de429dd777345fa6df27
| 246
|
py
|
Python
|
HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py
|
accidentalgenius09/competitive-programming-solution
|
210746a7928dcd601ad9a735de52cf7135851070
|
[
"MIT"
] | 8
|
2020-08-03T01:53:13.000Z
|
2022-01-09T14:47:58.000Z
|
HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py
|
accidentalgenius09/competitive-programming-solution
|
210746a7928dcd601ad9a735de52cf7135851070
|
[
"MIT"
] | null | null | null |
HackerRank/PythonHackerRankSolutions/Numpy/LinearAlgebra.py
|
accidentalgenius09/competitive-programming-solution
|
210746a7928dcd601ad9a735de52cf7135851070
|
[
"MIT"
] | 4
|
2020-09-29T11:28:53.000Z
|
2021-06-02T15:34:55.000Z
|
'''
Title : Linear Algebra
Subdomain : Numpy
Domain : Python
Author : codeperfectplus
Created : 10 May 2020
'''
import numpy
n=int(input())
a=numpy.array([input().split() for _ in range(n)],float)
print(round(numpy.linalg.det(a),2))
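A worked example for the snippet above: for a 2x2 matrix the determinant is ad - bc, so [[1, 2], [3, 4]] gives 1*4 - 2*3 = -2. The same call without stdin:

import numpy
a = numpy.array([[1, 2], [3, 4]], float)
print(round(numpy.linalg.det(a), 2))  # -2.0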
| 18.923077
| 56
| 0.670732
| 35
| 246
| 4.685714
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034314
| 0.170732
| 246
| 12
| 57
| 20.5
| 0.769608
| 0.46748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f810064772dd89a3265f0776de267483682a707d
| 23,282
|
py
|
Python
|
trtools/dumpSTR/tests/test_dumpSTR.py
|
Kulivox/TRTools
|
ea05f9126f5145405cced8fd85821ce929657b3a
|
[
"MIT"
] | 14
|
2020-04-20T15:38:52.000Z
|
2022-02-07T11:45:23.000Z
|
trtools/dumpSTR/tests/test_dumpSTR.py
|
Kulivox/TRTools
|
ea05f9126f5145405cced8fd85821ce929657b3a
|
[
"MIT"
] | 74
|
2020-03-02T23:34:53.000Z
|
2022-03-21T18:32:10.000Z
|
trtools/dumpSTR/tests/test_dumpSTR.py
|
Kulivox/TRTools
|
ea05f9126f5145405cced8fd85821ce929657b3a
|
[
"MIT"
] | 15
|
2018-10-29T19:41:33.000Z
|
2020-02-21T18:41:51.000Z
|
import argparse
import gzip
import os
import pytest
from ..dumpSTR import *
from trtools.testsupport.utils import assert_same_vcf, assert_same_file
# Set up base argparser
@pytest.fixture
def args(tmpdir):
args = argparse.ArgumentParser()
args.vcf = None
args.vcftype = "auto"
args.out = str(tmpdir / "test")
args.zip = False
args.min_locus_callrate = None
args.min_locus_hwep = None
args.min_locus_het = None
args.max_locus_het = None
args.use_length = False
args.filter_regions = None
args.filter_regions_names = None
args.filter_hrun = False
args.drop_filtered = False
args.hipstr_min_call_DP = None
args.hipstr_max_call_DP = None
args.hipstr_min_call_Q = None
args.hipstr_max_call_flank_indel = None
args.hipstr_max_call_stutter = None
args.hipstr_min_supp_reads = None
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = None
args.gangstr_expansion_prob_total = None
args.gangstr_filter_span_only = False
args.gangstr_filter_spanbound_only = False
args.gangstr_filter_badCI = None
#args.gangstr_require_support = None
args.gangstr_readlen = None
args.gangstr_min_call_DP = None
args.gangstr_max_call_DP = None
args.gangstr_min_call_Q = None
args.advntr_min_call_DP = None
args.advntr_max_call_DP = None
args.advntr_min_spanning = None
args.advntr_min_flanking = None
args.advntr_min_ML = None
args.eh_min_ADFL = None
args.eh_min_ADIR = None
args.eh_min_ADSP = None
args.eh_min_call_LC = None
args.eh_max_call_LC = None
args.popstr_min_call_DP = None
args.popstr_max_call_DP = None
args.popstr_require_support = None
args.num_records = None
args.die_on_warning = False
args.verbose = False
return args
@pytest.fixture
def testDumpSTRdir(vcfdir):
return vcfdir + "/dumpSTR_vcfs"
# Test no such file or directory
def test_WrongFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_non_existent.vcf")
if os.path.exists(fname):
os.remove(fname)
args.vcf = fname
retcode = main(args)
assert retcode==1
# Test a file that already has Filter IDs defined
# that we want to use but that have the wrong number or type.
# Since cyvcf2 currently won't allow us to overwrite them,
# error out
def test_BadPreexistingFields(args, testDumpSTRdir, capsys):
fname = os.path.join(testDumpSTRdir, "bad_preexisting_hrun.vcf")
args.vcf = fname
retcode = main(args)
assert retcode == 1
captured = capsys.readouterr()
assert "HRUN" in captured.err
fname = os.path.join(testDumpSTRdir, "bad_preexisting_het_hwep.vcf")
args.vcf = fname
retcode = main(args)
assert retcode == 1
captured = capsys.readouterr()
assert "HWEP" in captured.err and "HET" in captured.err
fname = os.path.join(testDumpSTRdir, "bad_preexisting_filter_ac_refac.vcf")
args.vcf = fname
retcode = main(args)
assert retcode == 1
captured = capsys.readouterr()
assert ("FILTER" in captured.err and "AC" in captured.err
and "REFAC" in captured.err)
# Test a file that already has a HWE Filter ID defined
# if the field is of the correct type and number, as in this case
# we overwrite it and emit a warning instead of failing
# this allows dumpSTR to be run multiple times in succession
# on the same file
def test_WorrisomePreexistingFilter(args, testDumpSTRdir, capsys):
fname = os.path.join(testDumpSTRdir, "worrisome_preexisting_filter.vcf")
args.vcf = fname
args.min_locus_hwep = 0.5
retcode = main(args)
assert retcode == 0
captured = capsys.readouterr()
assert 'HWE0.5' in captured.err
# Test if basic inputs and threshold filters work for each file
def test_GangSTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_gangstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.gangstr_min_call_DP = 10
args.gangstr_max_call_DP = 20
args.gangstr_min_call_Q = 0.99
args.gangstr_filter_span_only = True
args.gangstr_filter_spanbound_only = True
args.gangstr_filter_badCI = True
#args.gangstr_require_support = 2
args.gangstr_readlen = 100
retcode = main(args)
assert retcode==0
# Test expansion options
args.gangstr_expansion_prob_het = 0.8
retcode = main(args)
assert retcode==0
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = 0.8
retcode = main(args)
assert retcode==0
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = None
args.gangstr_expansion_prob_total = 0.8
retcode = main(args)
assert retcode==0
def test_HipSTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_hipstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.hipstr_min_call_DP = 10
args.hipstr_max_call_DP = 100
args.hipstr_min_call_Q = 0.9
args.hipstr_min_supp_reads = 2
args.hipstr_max_call_flank_indel = 0.05
args.hipstr_max_call_stutter = 0.01
args.vcftype = 'hipstr'
retcode = main(args)
assert retcode==0
def test_AdVNTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_advntr.vcf.gz")
args.vcf = fname
args.num_records = 10
args.advntr_min_call_DP = 10
args.advntr_max_call_DP = 20
args.advntr_min_spanning = 2
args.advntr_min_flanking = 2
args.advntr_min_ML = 0
retcode = main(args)
assert retcode==0
def test_EHFile(args, testDumpSTRdir):
# TODO add EH options
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_eh.sorted.vcf.gz")
args.vcf = fname
args.use_length = True
args.num_records = 10
retcode = main(args)
assert retcode==0
def test_PopSTRFile(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.use_length = True
args.popstr_min_call_DP = 5
args.popstr_max_call_DP = 100
args.popstr_require_support = 2
retcode = main(args)
assert retcode==0
# confirm that producing zipped output doesn't crash
def test_zippedOutput(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_gangstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.gangstr_min_call_DP = 10
args.gangstr_max_call_DP = 20
args.gangstr_min_call_Q = 0.99
args.gangstr_filter_span_only = True
args.gangstr_filter_spanbound_only = True
args.gangstr_filter_badCI = True
#args.gangstr_require_support = 2
args.gangstr_readlen = 100
args.zip = True
retcode = main(args)
assert retcode==0
# Test invalid options
def test_InvalidOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
# HWE
args.min_locus_hwep = -1
retcode = main(args)
assert retcode==1
args.min_locus_hwep = 2
retcode = main(args)
assert retcode==1
# Het
args.min_locus_hwep = None
args.min_locus_het = -1
retcode = main(args)
assert retcode==1
args.min_locus_het = 2
retcode = main(args)
assert retcode==1
args.min_locus_het = None
args.max_locus_het = -1
retcode = main(args)
assert retcode==1
args.max_locus_het = 2
retcode = main(args)
assert retcode==1
args.min_locus_het = 0.5
args.max_locus_het = 0.2
retcode = main(args)
assert retcode==1
# Test locus-level filters
def test_LocusLevel(args, testDumpSTRdir):
tool_files = [
"trio_chr21_hipstr.sorted.vcf.gz",
"trio_chr21_gangstr.sorted.vcf.gz",
"NA12878_chr21_eh.sorted.vcf.gz",
"NA12878_chr21_popstr.sorted.vcf.gz",
"NA12878_chr21_popstr.sorted.vcf.gz",
"NA12878_chr21_advntr.sorted.vcf.gz"
]
for fname in tool_files:
args.vcf = os.path.join(testDumpSTRdir, fname)
args.num_records = 10
args.min_locus_callrate = 0.8
args.min_locus_hwep = 10e-4
args.min_locus_het = 0.1
args.max_locus_het = 0.3
args.use_length = True
args.drop_filtered = False
args.filter_hrun = True
if 'hipstr' in fname:
args.vcftype = 'hipstr'
else:
args.vcftype = 'auto'
assert main(args)==0
args.drop_filtered = True
assert main(args)==0
def test_RegionFilters(args, regiondir, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_gangstr.vcf.gz")
args.vcf = fname
args.num_records = 10
# Correct filters
args.filter_regions = os.path.join(regiondir, "test_regions1.bed.gz")
retcode = main(args)
assert retcode==0
args.filter_regions_names = "test"
retcode = main(args)
assert retcode==0
# Correct filters, multiple regions
args.filter_regions = os.path.join(regiondir, "test_regions1.bed.gz") + "," + os.path.join(regiondir, "test_regions2.bed.gz")
args.filter_regions_names = "test1,test2"
retcode = main(args)
assert retcode==0
# Mismatch between region names and regions
args.filter_regions_names = "test1"
retcode = main(args)
assert retcode==1
# Nonexistent regions file
args.filter_regions = os.path.join(regiondir, "test_nonexistent.bed")
retcode = main(args)
assert retcode==1
# File missing tabix
args.filter_regions = os.path.join(regiondir, "test_regions3.bed.gz")
assert main(args)==1
# File with no chr
args.filter_regions = os.path.join(regiondir, "test_regions4.bed.gz")
assert main(args)==0
args.vcf = os.path.join(testDumpSTRdir, "test_gangstr_nochr.vcf.gz")
assert main(args)==0
def test_InvalidHipstrOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "trio_chr21_hipstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.hipstr_max_call_flank_indel = -1
args.vcftype = 'hipstr'
retcode = main(args)
assert retcode==1
args.hipstr_max_call_flank_indel = None
args.hipstr_max_call_flank_indel = 2
retcode = main(args)
assert retcode==1
args.hipstr_max_call_flank_indel = None
args.hipstr_max_call_stutter = -1
retcode = main(args)
assert retcode==1
args.hipstr_max_call_stutter = 2
retcode = main(args)
assert retcode==1
args.hipstr_max_call_stutter = None
args.hipstr_min_supp_reads = -1
retcode = main(args)
assert retcode==1
args.hipstr_min_supp_reads = None
args.hipstr_min_call_DP = -1
assert main(args)==1
args.hipstr_min_call_DP = None
args.hipstr_max_call_DP = -1
assert main(args)==1
args.hipstr_min_call_DP = 5
args.hipstr_max_call_DP = 2
assert main(args)==1
args.hipstr_min_call_DP = None
args.hipstr_max_call_DP = None
args.hipstr_min_call_Q = -1
assert main(args)==1
args.hipstr_min_call_Q = 2
assert main(args)==1
def test_InvalidGangSTROptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_gangstr.vcf.gz")
args.vcf = fname
args.num_records = 10
args.gangstr_min_call_DP = -1
assert main(args)==1
args.gangstr_min_call_DP = None
args.gangstr_max_call_DP = -1
assert main(args)==1
args.gangstr_min_call_DP = 5
args.gangstr_max_call_DP = 2
assert main(args)==1
args.gangstr_min_call_DP = None
args.gangstr_max_call_DP = None
args.gangstr_min_call_Q = -1
assert main(args)==1
args.gangstr_min_call_Q = 2
assert main(args)==1
args.gangstr_min_call_Q = None
args.gangstr_expansion_prob_het = -1
assert main(args)==1
args.gangstr_expansion_prob_het = 2
assert main(args)==1
args.gangstr_expansion_prob_het = None
args.gangstr_expansion_prob_hom = -1
assert main(args)==1
args.gangstr_expansion_prob_hom = 2
assert main(args)==1
args.gangstr_expansion_prob_hom = None
args.gangstr_expansion_prob_total = -1
assert main(args)==1
args.gangstr_expansion_prob_total = 2
assert main(args)==1
args.gangstr_expansion_prob_total = None
'''
args.gangstr_require_support = -1
assert main(args)==1
args.gangstr_require_support = 2
assert main(args)==1
args.gangstr_readlen = 1
assert main(args)==1
'''
def test_InvalidAdVNTROptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_advntr.vcf.gz")
args.vcf = fname
args.num_records = 10
args.advntr_min_call_DP = -1
assert main(args)==1
args.advntr_min_call_DP = None
args.advntr_max_call_DP = -1
assert main(args)==1
args.advntr_min_call_DP = 5
args.advntr_max_call_DP = 2
assert main(args)==1
args.advntr_min_call_DP = None
args.advntr_max_call_DP = None
args.advntr_min_ML = -1
assert main(args)==1
args.advntr_min_ML = None
args.advntr_min_flanking = -1
assert main(args)==1
args.advntr_min_spanning = -1
assert main(args)==1
"""
def test_InvalidEHOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "test_ExpansionHunter.vcf")
args.vcf = fname
args.num_records = 10
# TODO add once EH is implemented
"""
def test_InvalidPopSTROptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.popstr_min_call_DP = -1
assert main(args)==1
args.popstr_min_call_DP = None
args.popstr_max_call_DP = -1
assert main(args)==1
args.popstr_min_call_DP = 5
args.popstr_max_call_DP = 2
assert main(args)==1
args.popstr_min_call_DP = None
args.popstr_max_call_DP = None
args.popstr_require_support = -1
assert main(args)==1
def test_InvalidGenotyperOptions(args, testDumpSTRdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
args.num_records = 10
args.hipstr_min_call_DP = 10
assert main(args)==1
args.hipstr_min_call_DP = None
args.gangstr_min_call_DP = 10
assert main(args)==1
args.gangstr_min_call_DP = None
    fname = os.path.join(testDumpSTRdir, "trio_chr21_hipstr.sorted.vcf.gz")
args.vcf = fname
args.popstr_min_call_DP = 10
assert main(args)==1
args.popstr_min_call_DP = None
args.advntr_min_call_DP = 10
assert main(args)==1
args.advntr_min_call_DP = None
args.eh_min_call_LC = 5
assert main(args)==1
args.eh_min_call_LC = None
def test_InvalidOutput(capsys, args, testDumpSTRdir, tmpdir):
fname = os.path.join(testDumpSTRdir, "NA12878_chr21_popstr.sorted.vcf.gz")
args.vcf = fname
    # Fail when trying to output inside a nonexistent directory
args.out = str(tmpdir / "notadirectory" / "somefilename")
assert main(args) == 1
# To simulate a permissions issue: fail when trying to write a file in a location
# that is already a directory
capsys.readouterr()
(tmpdir / "foo.vcf").mkdir()
args.out = str(tmpdir / "foo")
assert main(args) == 1
# Make sure we produce a meaningful error message for this issue
assert 'is a directory' in str(capsys.readouterr())
def test_TwoDumpSTRRounds(args, testDumpSTRdir, tmpdir):
args.num_records = 10
fname = os.path.join(testDumpSTRdir, "test_gangstr.vcf.gz")
args.vcf = fname
args.min_locus_callrate = 0
args.zip = True
main(args) # produces DUMPDIR/test.vcf
args.vcf = str(tmpdir / "test.vcf.gz")
args.out = str(tmpdir / "test2")
assert main(args)==0
def test_BrokenVCF(args, testDumpSTRdir):
args.num_records = 10
fname = os.path.join(testDumpSTRdir, "test_broken.vcf.gz")
args.vcf = fname
args.die_on_warning = True
args.verbose = True
assert main(args)==1
"""
These tests run dumpSTR and compare its output
to output that has been generated by a previous version of
dumpSTR and saved in the repo. The results are expected
to be identical.
These tests are too strict and will often break because
dumpSTR output has been intentionally changed
However, the presence of these tests is important because
it should prevent any unexpected changes in output.
If you've reviewed the change in output and find it acceptable,
use trtools/testsupport/sample_vcfs/dumpSTR_vcfs/create_test_files.sh
to regenerate the tests files with the new output.
"""
def test_output_locus_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_hipstr.sorted.vcf.gz'
args.min_locus_callrate = 0.5
args.min_locus_hwep = 0.5
args.min_locus_het = 0.05
args.max_locus_het = 0.45
args.filter_regions_names = 'foo_region'
args.filter_regions = testDumpSTRdir + '/sample_region.bed.gz'
args.vcftype = 'hipstr'
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
# there are also rounding errors with HipSTR field GLDIFF
# that aren't worth worrying about
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/locus_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'},
format_ignore= {'GLDIFF'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/locus_filters' + ext,
ext)
# make sure locus level filters produce the same output when
# --drop-filtered is set
def test_output_drop_filtered(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_hipstr.sorted.vcf.gz'
args.min_locus_callrate = 0.5
args.min_locus_hwep = 0.5
args.min_locus_het = 0.05
args.max_locus_het = 0.45
args.filter_regions_names = 'foo_region'
args.filter_regions = testDumpSTRdir + '/sample_region.bed.gz'
args.vcftype = 'hipstr'
args.drop_filtered = True
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
# there are also rounding errors with HipSTR field GLDIFF
# that aren't worth worrying about
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/drop_filtered.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'},
format_ignore= {'GLDIFF'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/locus_filters' + ext,
ext)
# test advntr call level filters
def test_output_advntr_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/NA12878_chr21_advntr.sorted.vcf.gz'
args.advntr_min_call_DP = 50
args.advntr_max_call_DP = 2000
args.advntr_min_spanning = 1
args.advntr_min_flanking = 20
args.advntr_min_ML = 0.95
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/advntr_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/advntr_filters' + ext,
ext)
# test hipstr call and locus level filters
def test_output_hipstr_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_hipstr.sorted.vcf.gz'
args.filter_hrun = True
args.use_length = True
args.max_locus_het = 0.45
args.min_locus_het = 0.05
args.min_locus_hwep = 0.5
args.hipstr_max_call_flank_indel = 0.05
args.hipstr_max_call_stutter = 0.3
args.hipstr_min_supp_reads = 10
args.hipstr_min_call_DP = 30
args.hipstr_max_call_DP = 200
args.hipstr_min_call_Q = 0.9
args.vcftype = 'hipstr'
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
# there are also rounding errors with HipSTR field GLDIFF
# that aren't worth worrying about
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/hipstr_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'},
format_ignore= {'GLDIFF'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/hipstr_filters' + ext,
ext)
# test gangstr call level filters that don't begin
# with 'expansion' - those are tested on another file
def test_output_gangstr_most_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/trio_chr21_gangstr.sorted.vcf.gz'
args.gangstr_min_call_DP = 10
args.gangstr_max_call_DP = 100
args.gangstr_min_call_Q = 0.9
args.gangstr_filter_span_only = True
args.gangstr_filter_spanbound_only = True
args.gangstr_filter_badCI = True
# args.gangstr_require_support = 10
# args.gangstr_readlen = 150
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/gangstr_filters_most.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/gangstr_filters_most' + ext,
ext)
# test gangstr call level filters that begin with
# 'expansion' - the other gangstr call level filters
# are tested on another file
def test_output_gangstr_expansion_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/test_gangstr.vcf.gz'
args.gangstr_expansion_prob_het = 0.001
args.gangstr_expansion_prob_hom = 0.0005
args.gangstr_expansion_prob_total = 0.001
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/gangstr_filters_expansion.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/gangstr_filters_expansion' + ext,
ext)
# test popstr call level filters
def test_output_popstr_filters(args, testDumpSTRdir):
args.vcf = testDumpSTRdir + '/NA12878_chr21_popstr.sorted.vcf.gz'
args.popstr_min_call_DP = 30
args.popstr_max_call_DP = 200
args.popstr_require_support = 15
args.use_length = True
assert main(args) == 0
# expect changes in precision for HET and HWEP
# that will make them too much of a pain to compare
assert_same_vcf(args.out + '.vcf',
testDumpSTRdir + '/popstr_filters.vcf',
info_ignore = {'AC', 'REFAC', 'HET', 'HWEP'})
for ext in '.samplog.tab', '.loclog.tab':
assert_same_file(args.out + ext,
testDumpSTRdir + '/popstr_filters' + ext,
ext)
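The args fixture above attaches attributes to an ArgumentParser used purely as a bag of fields; argparse.Namespace is the stock container for that trick. A hypothetical, heavily abbreviated sketch (field names shortened, not the full fixture):

import argparse

def make_args(tmp_out):
    # hypothetical, abbreviated version of the fixture above
    return argparse.Namespace(vcf=None, vcftype='auto', out=tmp_out,
                              zip=False, num_records=None, verbose=False)

args = make_args('/tmp/test')
args.vcf = 'example.vcf.gz'  # per-test override, as in the tests above
print(args.vcf, args.out)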
| 34.038012
| 129
| 0.683489
| 3,314
| 23,282
| 4.574532
| 0.102293
| 0.043272
| 0.046174
| 0.037599
| 0.755871
| 0.698153
| 0.63562
| 0.600989
| 0.53529
| 0.49314
| 0
| 0.024362
| 0.22425
| 23,282
| 683
| 130
| 34.087848
| 0.815016
| 0.111717
| 0
| 0.616698
| 0
| 0
| 0.101182
| 0.054055
| 0
| 0
| 0
| 0.002928
| 0.185958
| 1
| 0.055028
| false
| 0
| 0.011385
| 0.001898
| 0.070209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f81075d9a768c275f1cbe075abbbe7e3dce2e3c6
| 2,554
|
py
|
Python
|
src/weekly_contest_251/1946_largest-number-after-mutating-substring.py
|
dongminlee94/leetcode-practice
|
4d33816d66df8ab447087a04b76008f6bec51f23
|
[
"MIT"
] | null | null | null |
src/weekly_contest_251/1946_largest-number-after-mutating-substring.py
|
dongminlee94/leetcode-practice
|
4d33816d66df8ab447087a04b76008f6bec51f23
|
[
"MIT"
] | null | null | null |
src/weekly_contest_251/1946_largest-number-after-mutating-substring.py
|
dongminlee94/leetcode-practice
|
4d33816d66df8ab447087a04b76008f6bec51f23
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
1946. Largest Number After Mutating Substring
https://leetcode.com/problems/largest-number-after-mutating-substring/
Example 1:
Input: num = "132", change = [9,8,5,0,3,6,4,2,6,8]
Output: "832"
Explanation: Replace the substring "1":
- 1 maps to change[1] = 8.
Thus, "132" becomes "832".
"832" is the largest number that can be created, so return it.
Example 2:
Input: num = "021", change = [9,4,3,5,7,2,1,9,0,6]
Output: "934"
Explanation: Replace the substring "021":
- 0 maps to change[0] = 9.
- 2 maps to change[2] = 3.
- 1 maps to change[1] = 4.
Thus, "021" becomes "934".
"934" is the largest number that can be created, so return it.
Example 3:
Input: num = "5", change = [1,4,7,5,3,2,5,6,9,4]
Output: "5"
Explanation: "5" is already the largest number that can be created, so return it.
"""
from typing import List
class Solution:
def maximumNumber1(self, num: str, change: List[int]) -> str:
"""
TC: O(N^2) / SC: O(N)
Time Limit Exceeded
"""
max_num = num
for i in range(len(num)):
changed_num = num[:i] + str(change[int(num[i])]) + num[i + 1 :]
if changed_num >= max_num:
max_num = changed_num
for j in range(1, len(num[i + 1 :]) + 1):
changed_num = (
changed_num[: i + j] + str(change[int(num[i + j])]) + changed_num[i + j + 1 :]
)
if changed_num >= max_num:
max_num = changed_num
else:
break
return max_num
def maximumNumber2(self, num: str, change: List[int]) -> str:
"""
TC: O(N) / SC: O(N)
"""
num_list = list(num)
changed = False
for i in range(len(num_list)):
if change[int(num_list[i])] > int(num_list[i]):
num_list[i] = str(change[int(num_list[i])])
changed = True
            elif changed and change[int(num_list[i])] < int(num_list[i]):
break
return "".join(num_list)
def maximumNumber3(self, num: str, change: List[int]) -> str:
"""
TC: O(N^2) / SC: O(N)
"""
changed = False
for i in range(len(list(num))):
if str(change[int(num[i])]) > num[i]:
num = num[:i] + str(change[int(num[i])]) + num[i + 1 :] # TC: O(N)
changed = True
            elif changed and str(change[int(num[i])]) < num[i]:
break
return num
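Checking the O(N) solution above against the three examples from the problem statement (run in the same module as the Solution class):

s = Solution()
print(s.maximumNumber2("132", [9, 8, 5, 0, 3, 6, 4, 2, 6, 8]))  # "832"
print(s.maximumNumber2("021", [9, 4, 3, 5, 7, 2, 1, 9, 0, 6]))  # "934"
print(s.maximumNumber2("5", [1, 4, 7, 5, 3, 2, 5, 6, 9, 4]))    # "5"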
| 30.047059
| 98
| 0.523101
| 378
| 2,554
| 3.473545
| 0.219577
| 0.04265
| 0.073115
| 0.068545
| 0.555979
| 0.453161
| 0.393755
| 0.323686
| 0.323686
| 0.281036
| 0
| 0.056647
| 0.322631
| 2,554
| 84
| 99
| 30.404762
| 0.702312
| 0.35787
| 0
| 0.305556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.027778
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f812c1ff23e3b82b8ed9c4bca10c6b857649c53a
| 2,358
|
py
|
Python
|
src/qbrobot/util/log.py
|
jucuguru/crypto-robot-basic
|
3addaaff9fb2f41d8e9dcd66bae7ae7f75216704
|
[
"BSD-2-Clause"
] | null | null | null |
src/qbrobot/util/log.py
|
jucuguru/crypto-robot-basic
|
3addaaff9fb2f41d8e9dcd66bae7ae7f75216704
|
[
"BSD-2-Clause"
] | null | null | null |
src/qbrobot/util/log.py
|
jucuguru/crypto-robot-basic
|
3addaaff9fb2f41d8e9dcd66bae7ae7f75216704
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
from qbrobot import qsettings
try:
from util import send_dingding
except ImportError:
DINGDING_CANUSE = False
else:
DINGDING_CANUSE = True
"""
class DingDingLogger
pass all args to logger.method, and call dingding.send_msg()
1. debug message don't send to dingding.
2. only send_msg( message ), can't pass multi args.
"""
class DingDingLogger:
    def __init__(self, logger=None, robot_id=None):
        # robot_id replaces the previously undefined global dingding_robot_id
        self.logger = logger
        self.robot_id = robot_id
def debug(self, msg, *args, **kwargs):
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logger.info(msg, *args, **kwargs)
if DINGDING_CANUSE:
            send_dingding.send_msg(msg, self.robot_id)
def warning(self, msg, *args, **kwargs):
self.logger.warning(msg, *args, **kwargs)
if DINGDING_CANUSE:
            send_dingding.send_msg(msg, self.robot_id)
def error(self, msg, *args, **kwargs):
self.logger.error(msg, *args, **kwargs)
if DINGDING_CANUSE:
            send_dingding.send_msg(msg, self.robot_id)
def log(self, lvl, msg, *args, **kwargs):
self.logger.log(lvl, msg, *args, **kwargs)
if DINGDING_CANUSE:
            send_dingding.send_msg(msg, self.robot_id)
"""
handler = logging.handlers.RotatingFileHandler(str(logFile) + '.LOG', maxBytes = 1024 * 1024 * 500, backupCount = 5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger(str(logFile))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
"""
def setup_custom_logger():
formatter = logging.Formatter(fmt=qsettings.LOG_FORMATTER)
file_name = qsettings.LOG_FILE
#file_name = None
if file_name :
handler = logging.FileHandler( file_name )
else:
handler = logging.StreamHandler()
#handler = logging.StreamHandler()
handler.setFormatter(formatter)
#print('setup_custom_logger', name)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(qsettings.LOG_LEVEL)
return logger
"""
if DINGDING_CANUSE :
print('setup_custom_logger dingding ')
return DingDingLogger( logger )
else:
return logger
"""
| 25.912088
| 116
| 0.651399
| 280
| 2,358
| 5.332143
| 0.275
| 0.046885
| 0.087073
| 0.056932
| 0.320161
| 0.255191
| 0.182853
| 0.182853
| 0.182853
| 0.182853
| 0
| 0.007743
| 0.233249
| 2,358
| 90
| 117
| 26.2
| 0.818031
| 0.035199
| 0
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170732
| false
| 0
| 0.097561
| 0
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f81309425c4d43dc4fcef12218a6de6d14c72768
| 722
|
py
|
Python
|
Country cleaning/Chile/PRT/OfflineRB.py
|
Demonliquid/cars-python-cleaning
|
91c516a33c4522114dc024cfaf04f1c1d594f973
|
[
"MIT"
] | null | null | null |
Country cleaning/Chile/PRT/OfflineRB.py
|
Demonliquid/cars-python-cleaning
|
91c516a33c4522114dc024cfaf04f1c1d594f973
|
[
"MIT"
] | null | null | null |
Country cleaning/Chile/PRT/OfflineRB.py
|
Demonliquid/cars-python-cleaning
|
91c516a33c4522114dc024cfaf04f1c1d594f973
|
[
"MIT"
] | null | null | null |
# %%
import os
import pandas as pd
import numpy as np
import datetime
# %% CARGA DE DATOS
path = r'F:\Trabajo\Promotive\Chile\PRT\7\CSV\3'
os.chdir(path)
files = os.listdir(path)
files
# %%
files_xls = [f for f in files if f[-3:] == 'csv']
files_xls
# %%
columnas = ['PPU', 'MARCA', 'MODELO', 'ANO_FABRICACION', 'NUM_MOTOR', 'NUM_CHASIS', 'VIN']
chile = pd.DataFrame(columns=columnas)
# %%
for f in files_xls:
data = pd.read_csv(f, sep=";", encoding="latin-1")
    chile = pd.concat([chile, data], ignore_index=True, join='outer')
# %%
chile = chile[columnas]
# %%
chile.drop_duplicates(subset="PPU", inplace=True)
# %%
chile.to_csv(r'F:\Trabajo\Promotive\Chile\PRT\Limpio\OfflineRB3.csv')
# %%
chile
# %%
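The suffix filter plus concat loop above can also be written with glob; a sketch reusing the path and columnas defined earlier (assumes at least one matching CSV):

# %%
import glob

files_csv = glob.glob(os.path.join(path, '*.csv'))
frames = [pd.read_csv(f, sep=';', encoding='latin-1') for f in files_csv]
chile_alt = pd.concat(frames, ignore_index=True, join='outer')[columnas]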
| 17.609756
| 90
| 0.65651
| 109
| 722
| 4.256881
| 0.541284
| 0.051724
| 0.038793
| 0.077586
| 0.112069
| 0.112069
| 0
| 0
| 0
| 0
| 0
| 0.008157
| 0.15097
| 722
| 40
| 91
| 18.05
| 0.748777
| 0.060942
| 0
| 0
| 0
| 0
| 0.23988
| 0.134933
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f815471c4b7feac192ccd8f44032afcd4c9605be
| 3,850
|
py
|
Python
|
datasets/lfw_crop.py
|
laoreja/face-identity-transformer
|
5569d93017ad9371deae7e2b35564523c64b501e
|
[
"BSD-3-Clause"
] | 13
|
2020-10-09T07:15:02.000Z
|
2022-03-28T20:51:30.000Z
|
datasets/lfw_crop.py
|
laoreja/face-identity-transformer
|
5569d93017ad9371deae7e2b35564523c64b501e
|
[
"BSD-3-Clause"
] | 2
|
2021-03-03T15:04:51.000Z
|
2021-06-02T03:42:03.000Z
|
datasets/lfw_crop.py
|
laoreja/face-identity-transformer
|
5569d93017ad9371deae7e2b35564523c64b501e
|
[
"BSD-3-Clause"
] | 5
|
2021-03-02T11:44:19.000Z
|
2021-07-09T16:42:02.000Z
|
import os.path as osp
import numpy as np
from PIL import Image
import torch.utils.data as data
import torch
__all__ = ['LFW_CROP']
EXTENSION_FACTOR = 2
class LFW_CROP(data.Dataset):
def __init__(self, train, transform, args):
self.root = osp.join(args.data_root, 'lfw')
self.transform = transform
landmark_path = osp.join(args.data_root, 'lfw_landmark.txt')
with open(landmark_path) as fd:
self.raw_annotations = [line.strip().split() for line in fd.readlines()]
for idx in range(len(self.raw_annotations)):
self.raw_annotations[idx] = self.raw_annotations[idx][0:1] + [
float(item) for item in self.raw_annotations[idx][1:]]
if not args.evaluate:
test_id_indices = set(np.random.choice(len(self.raw_annotations), size=args.test_size, replace=False))
self.raw_annotations = [anno for idx, anno in enumerate(self.raw_annotations) if
idx in test_id_indices]
self.anno_dict = {anno[0]: anno for anno in self.raw_annotations}
bbox_path = osp.join(args.data_root, 'lfw_detection.txt')
self.bbox_dict = {}
with open(bbox_path) as fd:
bbox_lines = [bbox_line.strip().split() for bbox_line in fd.readlines()]
for bbox_line in bbox_lines:
if bbox_line[0] not in self.anno_dict:
continue
oleft = float(bbox_line[1])
oup = float(bbox_line[2])
oright = float(bbox_line[3])
odown = float(bbox_line[4])
width = oright - oleft
new_width = width * EXTENSION_FACTOR
x_margin = (new_width - width) / 2
            y_margin = (new_width - (odown - oup)) / 2  # may need changing: vertical margin reuses the widened width
box_left = max(int(oleft - x_margin), 0)
box_right = min(int(oright + x_margin), 249)
box_up = max(int(oup - y_margin), 0)
box_down = min(int(odown + y_margin), 249)
new_width = box_right - box_left
new_height = box_down - box_up
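            # Re-express the 5 landmarks in the crop's frame, rescaled to the 250x250 output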
for i in range(5):
self.anno_dict[bbox_line[0]][2 * i + 1] = (self.anno_dict[bbox_line[0]][
2 * i + 1] - box_left) / new_width * 250.
self.anno_dict[bbox_line[0]][2 * i + 2] = (self.anno_dict[bbox_line[0]][
2 * i + 2] - box_up) / new_height * 250.
self.bbox_dict[bbox_line[0]] = [box_left,
box_up,
box_right,
box_down]
# extended left, right, up, down
def __len__(self):
return len(self.raw_annotations)
def __getitem__(self, index):
anno = self.anno_dict[self.raw_annotations[index][0]]
img_path = osp.join(self.root, anno[0])
label = 0
landmarks = torch.empty((5, 2), dtype=torch.float32)
for i in range(5):
landmarks[i, 0] = anno[2 * i + 1]
landmarks[i, 1] = anno[2 * i + 2]
img = Image.open(img_path).convert("RGB")
bbox = self.bbox_dict[anno[0]]
img = img.crop((bbox[0], bbox[1], bbox[2], bbox[3]))
if self.transform is not None:
img = self.transform(img)
return img, label, landmarks, img_path
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of imgs: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__str__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
| 37.745098
| 114
| 0.540779
| 508
| 3,850
| 3.852362
| 0.242126
| 0.053143
| 0.101175
| 0.033214
| 0.119571
| 0.086868
| 0.075626
| 0.049055
| 0.049055
| 0
| 0
| 0.023978
| 0.339221
| 3,850
| 101
| 115
| 38.118812
| 0.745283
| 0.012727
| 0
| 0.026667
| 0
| 0
| 0.037388
| 0
| 0.013333
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.066667
| 0.013333
| 0.173333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f816945723bd501f06ebbe8199fa11cd256a3a52
| 1,065
|
py
|
Python
|
test.py
|
JFF-Bohdan/pyimei
|
d881f4a11374d29828867e2de397d1fcc8413d25
|
[
"MIT"
] | 1
|
2021-07-29T17:39:34.000Z
|
2021-07-29T17:39:34.000Z
|
test.py
|
JFF-Bohdan/pyimei
|
d881f4a11374d29828867e2de397d1fcc8413d25
|
[
"MIT"
] | null | null | null |
test.py
|
JFF-Bohdan/pyimei
|
d881f4a11374d29828867e2de397d1fcc8413d25
|
[
"MIT"
] | 3
|
2018-08-07T08:01:01.000Z
|
2020-03-24T17:14:31.000Z
|
from pyimei import ImeiSupport
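# Report whether ImeiSupport considers each IMEI valid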
def checkImeisArray(imeis):
for imei in imeis:
if ImeiSupport.isValid(imei):
print("IMEI: '{}' is valid".format(imei))
else:
print("IMEI '{}' is NOT valid".format(imei))
#testing classes
ImeiSupport.test()
valid_imeis = [
356938035643809,
490154203237518,
"356938035643809"
]
invalid_imeis = [
358065019104263,
"357805023984941",
356938035643801
]
checkImeisArray(valid_imeis)
checkImeisArray(invalid_imeis)
print("Generating independent FAKE imeis...")
RANDOM_IMEIS_QTY = 5
for i in range(RANDOM_IMEIS_QTY):
print("\tfake IMEI[{}] = {}".format(i+1, ImeiSupport.generateNew()))
print("Generating sequental FAKE imeis:")
DEP_RANDOM_IMEIS_QTY = 5
startImei = ImeiSupport.generateNew()
currentImei = startImei
print("start IMEI: {}".format(startImei))
for i in range(DEP_RANDOM_IMEIS_QTY):
currentImei = ImeiSupport.next(currentImei)
print("\tfake IMEI[{}] = {}".format(i+1, currentImei))
print("DONE")
| 23.152174
| 73
| 0.66385
| 113
| 1,065
| 6.141593
| 0.39823
| 0.063401
| 0.080692
| 0.043228
| 0.135447
| 0.135447
| 0.072046
| 0
| 0
| 0
| 0
| 0.111772
| 0.210329
| 1,065
| 46
| 74
| 23.152174
| 0.713436
| 0.014085
| 0
| 0.060606
| 0
| 0
| 0.19602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.030303
| 0
| 0.060606
| 0.242424
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f818d292ca6f1460d6aa1027f16f35e13ba6829c
| 5,441
|
py
|
Python
|
fipomdp/experiments/NYC_experiment.py
|
xbrlej/FiPOMDP
|
b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f
|
[
"MIT"
] | null | null | null |
fipomdp/experiments/NYC_experiment.py
|
xbrlej/FiPOMDP
|
b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f
|
[
"MIT"
] | null | null | null |
fipomdp/experiments/NYC_experiment.py
|
xbrlej/FiPOMDP
|
b7a97aaaf43a43e5ee9b8776c0e7f6d0bb09392f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import logging
import platform
import time
from functools import partial
from statistics import stdev
from typing import List, Tuple, Dict, Union, Any
import psutil
from joblib import Parallel, delayed
from fimdp.objectives import BUCHI
from fipomdp import ConsPOMDP
from fipomdp.energy_solvers import ConsPOMDPBasicES
from fipomdp.experiments.NYC_environment import NYCPOMDPEnvironment
from fipomdp.experiments.UUV_experiment import simulate_observation
from fipomdp.pomcp import OnlineStrategy
from fipomdp.rollout_functions import basic, grid_manhattan_distance, product, consumption_based
def nyc_experiment(computed_cpomdp: ConsPOMDP, computed_solver: ConsPOMDPBasicES, capacity: int, targets: List[int], random_seed: int, logger) -> \
Tuple[int, bool, List[int], List[int], bool, int]:
if computed_cpomdp.belief_supp_cmdp is None or computed_solver.bs_min_levels[BUCHI] is None:
raise AttributeError(f"Given CPOMDP or its solver is not pre computed!")
# SPECIFY ROLLOUT FUNCTION
# rollout_function = basic
# grid_adjusted = partial(grid_manhattan_distance, grid_size=(20, 20), targets=[3, 12, 15])
rollout_function = consumption_based
#
# rollout_product = partial(product, a=10, b=20)
# rollout_function = rollout_product
# -----
# HYPER PARAMETERS
init_energy = capacity
init_obs = computed_cpomdp.state_with_name('42459137')
init_bel_supp = tuple([computed_cpomdp.state_with_name('42459137')])
exploration = 1
rollout_horizon = 100
max_iterations = 100
    actual_horizon = 1000  # number of actions to take
softmax_on = False
# -----
strategy = OnlineStrategy(
computed_cpomdp,
capacity,
init_energy,
init_obs,
init_bel_supp,
targets,
exploration,
rollout_function,
rollout_horizon=rollout_horizon,
random_seed=random_seed,
recompute=False,
solver=computed_solver,
logger=logger,
softmax_on=softmax_on
)
simulated_state = init_bel_supp[0]
path = [simulated_state]
logger.info(f"\nLAUNCHING with max iterations: {max_iterations}\n")
reward = 0
target_hit = False
decision_times = []
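    # Step the online strategy: pick an action, simulate an observation, stop once a target is hit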
for j in range(actual_horizon):
pre_decision_time = time.time()
action = strategy.next_action(max_iterations)
simulated_state, new_obs = simulate_observation(computed_cpomdp, action, simulated_state)
path.append(simulated_state)
reward -= action.cons
if simulated_state in targets:
reward += 1000
target_hit = True
break
strategy.update_obs(new_obs)
decision_times.append(round(time.time() - pre_decision_time))
logger.info(f"\n--------EXPERIMENT FINISHED---------")
logger.info(f"--------RESULTS--------")
logger.info(f"For max iterations: {max_iterations}, target has been reached {target_hit} times.")
logger.info(f"Path of the agent was: {path}")
logger.info(f"Decision times: {decision_times}")
logger.info(f"Decision time average: {sum(decision_times)/len(decision_times)}, standard deviation: {stdev(decision_times)}")
logger.info(f"Target hit: {target_hit}, reward: {reward}")
return max_iterations, target_hit, path, decision_times, target_hit, reward
def log_experiment_with_seed(cpomdp, env, i, log_file_name, solver, targets):
handler = logging.FileHandler(f"./logs/{log_file_name}{i}.log", 'w')
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger = logging.getLogger(f"{i}")
    for old_handler in logger.handlers[:]:  # clear handlers left over from earlier runs
        logger.removeHandler(old_handler)
    logger.addHandler(handler)
logger.level = logging.INFO
logger.info("START")
uname = platform.uname()
logger.info(f"Node name: {uname.node}")
logger.info(f"System: {uname.system}")
logger.info(f"Release: {uname.release}")
logger.info(f"Version: {uname.version}")
logger.info(f"Machine: {uname.machine}")
logger.info(f"Processor: {uname.processor}")
logger.info(f"RAM: {str(round(psutil.virtual_memory().total / (1024.0 ** 3)))} GB")
return nyc_experiment(cpomdp, solver, env.cmdp_env.capacity, targets, i, logger)
def main():
log_file_name = "NYCExperiments" # Change for your needs
logging_level = logging.INFO
# set to INFO (20) for logging to be active, set to DEBUG (10) for details,
# set to 5 for extreme debug
logging.basicConfig(
filename=f"{log_file_name}.log",
filemode="w", # Erase previous log
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging_level,
datefmt="%Y-%m-%d %H:%M:%S",
)
env = NYCPOMDPEnvironment()
cpomdp, targets = env.get_cpomdp()
preprocessing_start = time.time()
cpomdp.compute_guessing_cmdp_initial_state([cpomdp.state_with_name('42459137')])
solver = ConsPOMDPBasicES(cpomdp, [cpomdp.state_with_name('42459137')], env.cmdp_env.capacity, targets)
solver.compute_buchi()
preprocessing_time = round(time.time() - preprocessing_start)
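    # Run 10 seeded experiments in parallel; each writes its own log file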
results = Parallel(n_jobs=10)(
delayed(log_experiment_with_seed)(cpomdp, env, i, log_file_name, solver, targets) for i in range(10))
logging.info(f"RESULTS (): {results}")
print(preprocessing_time)
if __name__ == "__main__":
main()
| 33.58642
| 147
| 0.695093
| 685
| 5,441
| 5.322628
| 0.316788
| 0.043884
| 0.045255
| 0.020845
| 0.105869
| 0.06418
| 0.03017
| 0.03017
| 0.03017
| 0.03017
| 0
| 0.018165
| 0.19059
| 5,441
| 161
| 148
| 33.795031
| 0.809718
| 0.081051
| 0
| 0
| 0
| 0.017699
| 0.178973
| 0.031501
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026549
| false
| 0
| 0.132743
| 0
| 0.176991
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8197ad55d7f3b5e1e727b66b9aaef3047efa623
| 3,317
|
py
|
Python
|
hikcamerabot/services/tasks/video.py
|
CamVipQ/hikvision-camera-bot
|
84afa0a4dc2fc1ebda71b5020520dc1c300cf3b2
|
[
"MIT"
] | 44
|
2019-03-07T00:25:44.000Z
|
2022-02-20T15:57:11.000Z
|
hikcamerabot/services/tasks/video.py
|
CamVipQ/hikvision-camera-bot
|
84afa0a4dc2fc1ebda71b5020520dc1c300cf3b2
|
[
"MIT"
] | 25
|
2019-02-17T13:37:27.000Z
|
2022-03-22T16:11:46.000Z
|
hikcamerabot/services/tasks/video.py
|
CamVipQ/hikvision-camera-bot
|
84afa0a4dc2fc1ebda71b5020520dc1c300cf3b2
|
[
"MIT"
] | 14
|
2019-06-28T05:40:10.000Z
|
2022-03-24T08:05:01.000Z
|
import asyncio
import logging
import os
import time
from addict import Addict
from aiogram.types import Message
from hikcamerabot.config.config import get_result_queue
from hikcamerabot.constants import Event, VideoGifType
from hikcamerabot.utils.utils import format_ts, gen_random_str
class RecordVideoTask:
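    # Output filename templates, keyed by video type: cam id, timestamp, random suffix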
_video_filename = {
VideoGifType.ALERT: '{0}-alert-{1}-{2}.mp4',
VideoGifType.REGULAR: '{0}-{1}-{2}.mp4',
}
_video_type_to_event = {
VideoGifType.ALERT: Event.ALERT_VIDEO,
VideoGifType.REGULAR: Event.RECORD_VIDEOGIF,
}
FILENAME_TIME_FORMAT = '%Y-%b-%d--%H-%M-%S'
def __init__(self, ffmpeg_cmd: str, storage_path: str, conf: Addict,
cam, video_type: str, context: Message = None):
self._log = logging.getLogger(self.__class__.__name__)
self._conf = conf
self._cam = cam
self._bot: 'CameraBot' = cam.bot
self._video_type = video_type
self._file_path = os.path.join(storage_path, self._get_filename())
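        # Final command: the supplied ffmpeg invocation with the output path appended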
self._ffmpeg_cmd_full = f'{ffmpeg_cmd} {self._file_path}'
self._context = context
self._event = self._video_type_to_event[self._video_type]
async def run(self) -> None:
if await self._record():
await self._send_result()
async def _record(self) -> bool:
"""Start Ffmpeg subprocess and return file path and video type."""
self._log.debug('Recording video gif from %s: %s',
self._conf.description, self._ffmpeg_cmd_full)
await self._start_ffmpeg_subprocess()
validated = await self._validate_file()
if not validated:
err_msg = f'Failed to record {self._file_path}'
self._log.error(err_msg)
await self._bot.send_message(
self._context.chat.id,
text=f'{err_msg}.\nEvent type: {self._event}\nCheck logs.',
reply_to_message_id=self._context.message_id if self._context else None,
)
return validated
async def _start_ffmpeg_subprocess(self) -> None:
proc = await asyncio.create_subprocess_shell(self._ffmpeg_cmd_full)
await proc.wait()
async def _validate_file(self) -> bool:
"""Validate recorded file existence and size."""
try:
is_empty = os.path.getsize(self._file_path) == 0
except FileNotFoundError:
self._log.error('Failed to validate %s: File does not exist',
self._file_path)
return False
except Exception:
self._log.exception('Failed to validate %s', self._file_path)
return False
        if is_empty:
            self._log.error('Failed to validate %s: File is empty',
                            self._file_path)
        return not is_empty
async def _send_result(self):
await get_result_queue().put({
'event': self._event,
'video_path': self._file_path,
'cam': self._cam,
'message': self._context
})
def _get_filename(self) -> str:
return self._video_filename[self._video_type].format(
self._cam.id,
format_ts(time.time(), time_format=self.FILENAME_TIME_FORMAT),
gen_random_str())
| 35.666667
| 88
| 0.621948
| 410
| 3,317
| 4.707317
| 0.270732
| 0.037306
| 0.049741
| 0.026425
| 0.080829
| 0.034197
| 0.034197
| 0.034197
| 0
| 0
| 0
| 0.003753
| 0.277058
| 3,317
| 92
| 89
| 36.054348
| 0.801084
| 0
| 0
| 0.052632
| 0
| 0
| 0.104589
| 0.006556
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.118421
| 0.013158
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f81adf96e79c10244b5314e809ea884419299412
| 71,349
|
py
|
Python
|
HyperOXO/hypercube.py
|
drtjc/Hyper
|
83579186d915de603d27b8757dfc5a0f82c6770e
|
[
"MIT"
] | null | null | null |
HyperOXO/hypercube.py
|
drtjc/Hyper
|
83579186d915de603d27b8757dfc5a0f82c6770e
|
[
"MIT"
] | null | null | null |
HyperOXO/hypercube.py
|
drtjc/Hyper
|
83579186d915de603d27b8757dfc5a0f82c6770e
|
[
"MIT"
] | null | null | null |
""" Provides functionalilty for working with celled hypercubes.
Hypercubes are extensions of lines, squares and cubes into higher
dimensions. Celled hypercubes can be thought as a grid or lattice
structure. From this point, hypercubes is used to mean celled
hypercubes.
A hypercube can be described by its dimension and the number of
cells in any dimension. We denote this as h(d, n).
For example: h(2, 3) is a 3x3 grid; h(3, 4) is a 4x4x4 lattice.
A hypercube of dimension d may also be referred to as a d-cube.
A cell's position can be specified in coordinate style.
For example, given h(3, 4) and an agreed ordering of dimension
then some valid coordinates are (1,1,1), (2,1,3) and (4,4,4).
The term m-agonal is short for "m-dimensional diagonal" and can be
thought of as a line of contiguous cells that span m dimensions.
For example, in a 3-cube you would find many 1-agonals, 2-agonals and
3-agonals. A 1-agonal is customarily known as a row, column or pillar.
In another example, if a line of contiguous cells in a 5-cube has the
property that 3 coordinates change, while the others remain constant,
these cells constitute a 3-agonal.
For a given h(d, n), 1 <= m <= d, an m-agonal always has n cells.
The term line is used to refer to any m-agonal in general.
A cell appears in multiple lines, which are referred to as the
scope of the cell, or the scoped lines of the cell.
The combination of lines and scopes is referred to as the structure
of the hypercube.
For a given cell, we define its connected cells as those cells that
appear in the scoped lines of the given cell.
We define a slice as a sub-cube of a hypercube. For example,
consider h(2,3), a 3x3 hypercube. Let the dimensions be denoted as
d1 and d2, respectively, where 1 <= d1, d2 <= 3.
If we consider d1 as rows, and d2 as columns, then the slice that is
the first column is defined by d1 = 1, 2, 3, and d2 = 1. This has the
form h(1, 3).
The slice that is the top left 2x2 corner is defined by d1, d2 = 1, 2.
This has the form h(2, 2).
This module essentially has 2 classes of functions:
1. Those that use a numpy ndarray to implement the underlying
hypercube. These functions have the suffix _np. An array of d dimensions
may be referred to as a d-array
2. Those that do not implement the underlying hypercube but
provide information as coordinates that can be used with
a user-implementation of the hypercube. These functions have
the suffix _coord.
########################################################################
Type annotations are used in this module. In addition to the standard
types defined in the typing module, several aliases are also defined
which can be viewed in the source code.
"""
# numpy (and scipy) don't yet have type annotations
import numpy as np # type: ignore
from scipy.special import comb # type: ignore
import itertools as it
import numbers
import re
from typing import List, Callable, Union, Collection, Tuple, Any, Type, Deque
from typing import DefaultDict, TypeVar, Counter, Dict, Iterable, Generator, Sequence
Cell_coord = Tuple[int, ...]
Cube_np = TypeVar('Cube_np', np.ndarray, np.ndarray) # Cube_np should really be a numpy array representing h(d, n)
Line_np = TypeVar('Line_np', np.ndarray, np.ndarray) # Line_np should really be a 1d numpy array with n elements
Line_coord = List[Cell_coord]
Lines_np = List[Line_np]
Lines_enum_np = Dict[int, Line_np]
Lines_coord = List[Line_coord]
Lines_enum_coord = Dict[int, Line_coord]
Scopes_np = DefaultDict[Cell_coord, Lines_np]
Scopes_coord = DefaultDict[Cell_coord, Lines_coord]
Scopes_enum = DefaultDict[Cell_coord, List[int]]
Scopes = Union[Scopes_np, Scopes_coord, Scopes_enum]
Structure_np = Tuple[Cube_np, Lines_np, Scopes_np]
Structure_enum_np = Tuple[Cube_np, Lines_enum_np, Scopes_enum]
Structure_coord = Tuple[Lines_coord, Scopes_coord]
Structure_enum_coord = Tuple[Lines_enum_coord, Scopes_enum]
Connected_cells = DefaultDict[Cell_coord, List[Cell_coord]]
def num_lines_grouped(d: int, n: int) -> Generator[int, None, None]:
"""
num_lines_grouped(d: int, n: int) -> Generator[int, None, None]:
Calculate the number of lines in a hypercube, grouped by the
number of dimensions spanned.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
The number of lines in a hypercube, grouped by number of
dimensions spanned.
Notes
-----
Consider a hypercube h(d, n).
Let l be the number of lines, then
l = sum{i=1, i=d} [ dCi * n^(d-i) * (2^i)/2 ]
where dCi is 'd choose i'.
Sketch of proof:
Let l_i be the number of i-agonals (lines that span exactly
i dimensions). For example, consider the following square (2-cube):
[[0, 1],
[2, 3]]
The 1-agonals are [0, 1], [2, 3], [0, 2] and [1, 3] and l_1 = 4.
The 2-agonals are [0, 3] and [1, 2] and l_2 = 2.
Hence l = l_1 + l_2 = 6
    It is trivially true that l is the sum of l_i, i.e.,
l = sum{i=1, i=d} l_i
Next we show how l_i can be calculated. Firstly, we argue
that the distinct number of h(i, n) is dCi * n^(d-i).
The number of ways of choosing i dimensions from d is dCi.
For example if d=3 and i=2, then the 3 combinations of
2 dimensions (squares) are (1, 2), (1, 3) and (2, 3).
Given a fixed set of i dimension, the number of remaining dimensions
is d-i, and the number of cells in these dimensions is n^(d-i).
Any one of these cells could be chosen relative to the
fixed i dimensions.
Hence the distinct number of h(i, n) is dCi * n^(d-i).
Finally, for any h(i, n), the number of i-agonals is (2^i)/2.
This is because an i-cube has 2^i corners and a line has 2 corners.
Hence l_i = dCi * n^(d-i) * (2^i)/2 and thus:
l = sum{i=1, i=d} [ dCi * n^(d-i) * (2^i)/2 ]
Examples
--------
>>> list(num_lines_grouped(2, 3))
[6, 2]
>>> list(num_lines_grouped(3, 4))
[48, 24, 4]
"""
for i in range(1, d + 1):
yield comb(d, i, True) * (n ** (d - i)) * (2 ** (i - 1))
def num_lines(d: int, n: int) -> int:
"""
num_lines(d: int, n: int) -> int:
Calculate the number of lines in a hypercube.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
The number of lines in a hypercube.
See Also
--------
num_lines_grouped
Notes
-----
There are two ways to calculate the number of lines:
1. Call the function num_lines_grouped and sum the number of lines
spanning each dimension.
2. Directly, using the formula:
((n+2)**d-n**d)/2
Sketch of proof:
Embed the n**d hypercube in an (n+2)**d hypercube which extends one
cell further in each dimension. Then each winning line in the n**d
hypercube terminates in exactly two "border" cells of the enlarged
hypercube, and these two borders are unique to that line. Moreover,
every border cell is at the end of a line, so that (n+2)**d border
cells are in two-to-one correspondence with the winning lines.
    (See Hypercube Tic-Tac-Toe: Solomon W. Golomb and Alfred W. Hales)
Examples
--------
>>> num_lines(2, 3)
8
>>> num_lines(3, 4)
76
"""
    # return sum(list(num_lines_grouped(d, n)))
    return ((n + 2) ** d - n ** d) // 2  # integer arithmetic avoids float precision loss for large d
def get_diagonals_np(hc: Cube_np) -> Generator[Line_np, None, None]:
"""
get_diagonals_np(hc: Cube_np) -> Generator[Line_np, None, None]:
Calculate the d-agonals of a d-cube h(d, n).
Parameters
----------
hc
A d-cube whose d-agonals are to be calculated
Yields
-------
        numpy.ndarray views of the d-agonals of `hc`.
Notes
-----
The number of corners of `hc` is 2^d. The number of d-agonals
is 2^d / 2 since two connecting corners form a line.
Examples
--------
>>> import numpy as np
>>> hc = np.arange(8).reshape(2, 2, 2)
>>> hc
array([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]])
>>> diagonals = list(get_diagonals_np(hc))
>>> diagonals
[array([0, 7]), array([1, 6]), array([4, 3]), array([5, 2])]
>>> hc[0, 0, 0] = 99
>>> diagonals
[array([99, 7]), array([1, 6]), array([4, 3]), array([5, 2])]
"""
# The function is recursive. How it works is best shown by example.
# 1d: hc = [0, 1] then the diagonal is also [0, 1].
# 2d: hc = [[0, 1],
# [2, 3]]
# The numpy diagonal method gives the main diagonal = [0, 3], a 1d array
# which is recursively passed to the function.
# To get the opposite diagonal we first use the numpy flip function to
# reverse the order of the elements along the given dimension, 0 in this case.
# This gives [[2, 3],
# 0, 1]]
# The numpy diagonal method gives the main diagonal = [2, 1], a 1d array
# which is recursively passed to the function.
# 3d: hc = [[[0, 1],
# [2, 3]],
# [[4, 5],
# [6, 7]]]
# The numpy diagonal method gives the main diagonals in the 3rd dimension
# as rows.
# [[0, 6],
# [1, 7]]
# Note that the diagonals of this array are [0, 7] and [6, 1] which are
# retrieved by a recurive call to the function.
# We now have 2 of the 4 3-agonals of the orginal 3-cube hc.
# To get the opposite 3-agonals we first use the numpy flip function which
# gives
# [[[4, 5],
# [6, 7]],
# [[0, 1],
# [2, 3]]]
# and a call to the numpy diagonal method gives
# [[4, 2],
# [5, 3]]
# The diagonals of this array are [4, 3] and [2, 5]
# We now have all four 3-agonals of the original 3-cube hc.
if hc.ndim == 1:
yield hc
else:
yield from get_diagonals_np(hc.diagonal())
yield from get_diagonals_np(np.flip(hc, 0).diagonal())
def get_lines_grouped_np(hc: Cube_np) -> Generator[Lines_np, None, None]:
"""
get_lines_grouped_np(hc: Cube_np) ->
Generator[Lines_np, None, None]:
Generate the lines of a hypercube, grouped by the number of
dimensions spanned.
Parameters
----------
hc
The hypercube whose lines are to be calculated
Yields
-------
numpy.ndarray views of the lines in `hc`, grouped by the
numbers of dimensions spanned.
See Also
--------
get_lines_i_np
Examples
--------
>>> import numpy as np
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_grouped_np(hc))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3])],
[array([0, 3]), array([2, 1])]]
>>> hc[0, 0] = 99
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[array([99, 2]), array([1, 3]), array([99, 1]), array([2, 3])],
[array([99, 3]), array([2, 1])]]
"""
for i in range(hc.ndim):
yield from get_lines_i_np(hc, i)
def get_lines_i_np(hc: Cube_np, i: int) -> Generator[Lines_np, None, None]:
"""
get_lines_i_np(hc: Cube_np, i: int) ->
Generator[Lines_np, None, None]:
Generates the lines of a hypercube that span the specified
number of dimensions.
Parameters
----------
hc
The hypercube whose lines are to be calculated
i
        The returned lines span `i` + 1 dimensions
Yields
-------
numpy.ndarray views of the lines in `hc` that span
        `i` + 1 dimensions.
See Also
--------
num_lines_grouped
Notes
-----
    The notes section for the function num_lines_grouped provides a
    sketch of a constructive proof for the number of lines in a
    hypercube. This has been used to implement this function.
Examples
--------
>>> import numpy as np
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_i_np(hc, 0))
>>> lines
[[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3])]]
>>> lines = list(get_lines_i_np(hc, 1))
>>> lines
[[array([0, 3]), array([2, 1])]]
>>> hc[0, 0] = 99
>>> lines
[[array([99, 3]), array([2, 1])]]
"""
d = hc.ndim
n = hc.shape[0]
lines = []
# loop over all possible combinations of i dimensions
for i_comb in it.combinations(range(d), r = i + 1):
# a cell could be in any position in the other dimensions
other_d = set(range(d)) - set(i_comb)
for cell in it.product(range(n), repeat = d - i - 1):
# take a slice of selected i dimensions given a cell
sl = slice_ndarray(hc, other_d, cell)
# get all possible lines from slice
lines.extend(list(get_diagonals_np(sl)))
yield lines
def get_lines_np(hc: Cube_np) -> Generator[Line_np, None, None]:
"""
get_lines_np(hc: Cube_np) -> Generator[Line_np, None, None]:
Returns the lines in a hypercube
Parameters
----------
hc
The hypercube whose lines are to be calculated
Yields
-------
numpy.ndarray views of the lines in `hc`.
See Also
--------
get_lines_grouped_np
Examples
--------
>>> import numpy as np
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_np(hc))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> len(lines)
6
>>> hc[0, 0] = 99
>>> lines #doctest: +NORMALIZE_WHITESPACE
[array([99, 2]), array([1, 3]), array([99, 1]), array([2, 3]),
array([99, 3]), array([2, 1])]
"""
grouped = get_lines_grouped_np(hc)
flat = (x for y in grouped for x in y)
    yield from flat  # 'return flat' would also work, but 'yield from' makes the generator explicit
def get_scopes_np(lines: Lines_np, d: int) -> Scopes_np:
"""
get_scopes_np(lines: Lines_np, d: int) -> Scopes_np:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_np(hc) where hc is of the
        form np.arange(n ** d, dtype = intxx).reshape([n] * d).
        That is, hc is populated with the values 0,1,2,...,n^d - 1.
dim
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to the coordinates of each cell in
the hypercube. For each cell key, the value is the cell's
scope - a list of numpy.ndarray views that are lines containing
the cell.
See Also
--------
get_lines_np
Notes
-----
The implementation of this function uses np.unravel_index, and
    relies upon the lines parameter being generated from an array
populated with values 0,1,2,...
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = list(get_lines_np(hc))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> scopes = get_scopes_np(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])],
(0, 1): [array([1, 3]), array([0, 1]), array([2, 1])],
(1, 0): [array([0, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]),
((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]),
((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])]
>>> hc[0, 0] = 99
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([99, 2]), array([99, 1]), array([99, 3])],
(0, 1): [array([1, 3]), array([99, 1]), array([2, 1])],
(1, 0): [array([99, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([99, 3])]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([99, 2]), array([99, 1]), array([99, 3])]),
((0, 1), [array([1, 3]), array([99, 1]), array([2, 1])]),
((1, 0), [array([99, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([99, 3])])]
"""
n = lines[0].size
shape = [n] * d
scopes: Scopes_np = DefaultDict(list)
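    # Each line stores cell values 0,1,2,...; unravel each value back to its coordinates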
for line in lines:
for j in range(n):
cell = np.unravel_index(line[j], shape)
scopes[cell].append(line)
return scopes
def structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_np:
"""
structure_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) ->
Structure_np:
Return a hypercube, its lines, and the scopes of its cells.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
zeros
If true, all values in array are 0, else they are 0,1,2,...
OFFSET
        The number of cells is n^d. If this is greater than
(2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32)
as the dtype of the numpy array.
Returns
-------
The hypercube (as a numpy array), its lines, and the scopes of
its cells.
See Also
--------
get_lines_np
get_scopes_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> struct = structure_np(2, 2)
>>> struct[0]
array([[0, 0],
[0, 0]])
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
[array([0, 0]), array([0, 0]), array([0, 0]), array([0, 0]),
array([0, 0]), array([0, 0])]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 0]), array([0, 0]), array([0, 0])],
(0, 1): [array([0, 0]), array([0, 0]), array([0, 0])],
(1, 0): [array([0, 0]), array([0, 0]), array([0, 0])],
(1, 1): [array([0, 0]), array([0, 0]), array([0, 0])]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 0]), array([0, 0]), array([0, 0])]),
((0, 1), [array([0, 0]), array([0, 0]), array([0, 0])]),
((1, 0), [array([0, 0]), array([0, 0]), array([0, 0])]),
((1, 1), [array([0, 0]), array([0, 0]), array([0, 0])])]
>>> struct = structure_np(2, 2, False)
>>> struct[0]
array([[0, 1],
[2, 3]])
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
[array([0, 2]), array([1, 3]), array([0, 1]), array([2, 3]),
array([0, 3]), array([2, 1])]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [array([0, 2]), array([0, 1]), array([0, 3])],
(0, 1): [array([1, 3]), array([0, 1]), array([2, 1])],
(1, 0): [array([0, 2]), array([2, 3]), array([2, 1])],
(1, 1): [array([1, 3]), array([2, 3]), array([0, 3])]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [array([0, 2]), array([0, 1]), array([0, 3])]),
((0, 1), [array([1, 3]), array([0, 1]), array([2, 1])]),
((1, 0), [array([0, 2]), array([2, 3]), array([2, 1])]),
((1, 1), [array([1, 3]), array([2, 3]), array([0, 3])])]
"""
    # number of cells is n^d. If this is greater than (2^31 - OFFSET - 1)
# then we use int64. This is because the get_scopes
# function populates the arrays with values 0,1,2, ...
dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32
hc = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines = list(get_lines_np(hc))
scopes = get_scopes_np(lines, d)
if zeros:
hc.fill(0)
return (hc, lines, scopes)
def get_lines_enum_np(hc: Cube_np) -> Lines_enum_np:
"""
get_lines_enum_np(hc: Cube_np) -> Lines_enum_np
    Returns enumerated lines of a hypercube
Parameters
----------
hc
The hypercube whose lines are to be calculated
Returns
-------
Enumerated numpy.ndarray views of the lines in `hc`.
See Also
--------
get_lines_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = get_lines_enum_np(hc)
>>> pprint(lines) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
"""
    return dict(enumerate(get_lines_np(hc)))
def get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum:
"""
get_scopes_enum_np(lines: Lines_enum_np, d: int) -> Scopes_enum:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_enum_np(hc) where hc is of the
form np.arange(n ** d, dtype = intxx).reshape([n] * d).
That is, hc is populated with the values 0,1,2,...,n^d - 1.
dim
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to each cell coordinates of the
hypercube. For each cell key, the value is the cell's
scope - a list of line enumerations that are lines containing
the cell.
See Also
--------
get_lines_enum_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> hc = np.arange(4).reshape(2, 2)
>>> hc
array([[0, 1],
[2, 3]])
>>> lines = get_lines_enum_np(hc)
>>> pprint(lines) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(lines.items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
>>> scopes = get_scopes_enum_np(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
"""
n = lines[0].size
shape = [n] * d
scopes: Scopes_enum = DefaultDict(list)
for idx, line in lines.items():
for j in range(n):
cell = np.unravel_index(line[j], shape)
scopes[cell].append(idx)
return scopes
def structure_enum_np(d: int, n: int, zeros: bool = True, OFFSET: int = 0) -> Structure_enum_np:
"""
structure_enum_np(d: int, n: int, zeros: bool = True,
OFFSET: int = 0) ->
Structure_enum_np:
    Return a hypercube, its enumerated lines, and the scopes of
    its cells.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
zeros
If true, all values in array are 0, else they are 0,1,2,...
    OFFSET
        The number of cells is n^d. If this is greater than
(2^31 - OFFSET - 1) then we use np.int64 (instead of np.int32)
as the dtype of the numpy array.
Returns
-------
A tuple containing the hypercube, its enumerated lines, and the
scopes of its cells.
See Also
--------
get_lines_enum_np
get_scopes_enum_np
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> struct = structure_enum_np(2, 2)
>>> struct[0]
array([[0, 0],
[0, 0]])
>>> pprint(struct[1]) #doctest: +SKIP
{0: array([0, 0]), 1: array([0, 0]), 2: array([0, 0]),
3: array([0, 0]), 4: array([0, 0]), 5: array([0, 0])}
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 0])), (1, array([0, 0])), (2, array([0, 0])),
(3, array([0, 0])), (4, array([0, 0])), (5, array([0, 0]))]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
>>> struct = structure_enum_np(2, 2, False)
>>> struct[0]
array([[0, 1],
[2, 3]])
>>> pprint(struct[1]) #doctest: +SKIP
{0: array([0, 2]), 1: array([1, 3]), 2: array([0, 1]),
3: array([2, 3]), 4: array([0, 3]), 5: array([2, 1])}
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[(0, array([0, 2])), (1, array([1, 3])), (2, array([0, 1])),
(3, array([2, 3])), (4, array([0, 3])), (5, array([2, 1]))]
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]), ((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]), ((1, 1), [1, 3, 4])]
"""
    # number of cells is n^d. If this is greater than (2^31 - OFFSET - 1)
    # then we use int64. This is because the get_scopes
# function populates the arrays with values 0,1,2, ...
dtype = np.int64 if n ** d > 2 ** 31 - OFFSET - 1 else np.int32
hc = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines = get_lines_enum_np(hc)
scopes = get_scopes_enum_np(lines, d)
if zeros:
hc.fill(0)
return (hc, lines, scopes)
def connected_cells_np(lines: Lines_enum_np, scopes: Scopes_enum, d: int) -> Connected_cells:
"""
connected_cells_np(lines: Lines_enum_np,
scopes: Scopes_enum, d: int) -> Connected_cells:
Calculate the connected cells for a cube.
Parameters
----------
lines
The enumerated lines of the hypercube
scopes
The enumerated scopes of the hypercube
Returns
------
        A dictionary with keys being cell coordinates and values the
connected cell coordinates.
See Also
--------
structure_enum_np
Examples
--------
>>> from pprint import pprint
>>> d = 2
>>> n = 3
>>> struct = structure_enum_np(d, n, False)
>>> struct[1] #doctest: +NORMALIZE_WHITESPACE
{0: array([0, 3, 6]),
1: array([1, 4, 7]),
2: array([2, 5, 8]),
3: array([0, 1, 2]),
4: array([3, 4, 5]),
5: array([6, 7, 8]),
6: array([0, 4, 8]),
7: array([6, 4, 2])}
>>> pprint(struct[2]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 3, 6],
(0, 1): [1, 3],
(0, 2): [2, 3, 7],
(1, 0): [0, 4],
(1, 1): [1, 4, 6, 7],
(1, 2): [2, 4],
(2, 0): [0, 5, 7],
(2, 1): [1, 5],
(2, 2): [2, 5, 6]})
>>> sorted(struct[2].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 3, 6]),
((0, 1), [1, 3]),
((0, 2), [2, 3, 7]),
((1, 0), [0, 4]),
((1, 1), [1, 4, 6, 7]),
((1, 2), [2, 4]),
((2, 0), [0, 5, 7]),
((2, 1), [1, 5]),
((2, 2), [2, 5, 6])]
>>> connected_cells = connected_cells_np(struct[1], struct[2], d)
>>> pprint(connected_cells) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)],
(0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1),
(2, 2), (0, 2)],
(1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)],
(1, 1): [(0, 1),
(1, 2),
(0, 0),
(0, 2),
(2, 1),
(2, 0),
(2, 2),
(1, 0),
(1, 1)],
(1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)],
(2, 0): [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2),
(1, 0), (1, 1)],
(2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)],
(2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1),
(2, 2), (0, 2)]})
>>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]),
((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]),
((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]),
((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0),
(2, 2), (1, 0), (1, 1)]),
((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]),
((2, 0), [(0, 0), (0, 2), (2, 1), (2, 0), (2, 2), (1, 0), (1, 1)]),
((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]),
((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])]
"""
n = lines[0].size
shape = [n] * d
connected_cells: Connected_cells = DefaultDict(list)
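    # For each cell, gather every cell on its scoped lines, then dedupe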
for cell, lines_enums in scopes.items():
for line_enum in lines_enums:
for j in range(n):
cc = np.unravel_index(lines[line_enum][j], shape)
connected_cells[cell].append(cc)
connected_cells[cell] = list(set(connected_cells[cell]))
return connected_cells
def get_diagonals_coord(d: int, n: int) -> Generator[Line_coord, None, None]:
"""
get_diagonals_coord(d: int, n: int) ->
Generator[Line_coord, None, None]:
    Calculate the coordinates of the d-agonals of h(d, n).
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
        Coordinates of the d-agonals of h(d, n).
Notes
-----
The number of corners of h(d, n) is 2^d. The number of d-agonals
is 2^d / 2 since two connecting corners form a line.
Examples
--------
>>> diags = get_diagonals_coord(2, 3)
>>> list(diags)
[[(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)]]
"""
# comments below use an example with h(2, 3)
# get an iterator of all corners. E.g.: (0,0), (0,2), (2,0), (2,2)
corners_all = it.product([0, n - 1], repeat = d)
# restrict to corners with 0 as first coordinate. E.g.: (0,0), (0,2)
corners_0 = [corner for corner in corners_all if corner[0] == 0]
for corner in corners_0:
# create the diagonals for each corner
diagonal: Line_coord = []
diagonal.append(corner) # add corner as first cell in diagonal
# add rest of diagonal
for i in range(1, n):
# find next cell. Start by decrementing coords.
# E.g.: (0,0) -> (-1,-1); (0,2) -> (-1,1)
# E.g.: (0,0) -> (-2,-2); (0,2) -> (-2,0)
tmp = tuple(c - i for c in corner)
# Take absolute values of coords.
# E.g.: (-1,-1) -> (1,1); (-1,1) -> (1,1)
# E.g.: (-2,-2) -> (2,2); (-2,0) -> (2,0)
coords = tuple(abs(t) for t in tmp)
diagonal.append(coords)
yield diagonal
def get_lines_grouped_coord(d: int, n: int) -> Generator[Lines_coord, None, None]:
"""
get_lines_grouped_coord(d: int, n: int) ->
Generator[Lines_coord, None, None]:
Generate the lines of a hypercube, h(d, n), grouped by the
number of dimensions spanned.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
lines (as coordinates) in h(d, n).
See Also
--------
get_lines_i_coord
Examples
--------
>>> lines = list(get_lines_grouped_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)]], [[(0, 0), (1, 1)], [(0, 1), (1, 0)]]]
"""
for i in range(d):
yield from get_lines_i_coord(d, n, i)
def get_lines_i_coord(d: int, n: int, i: int) -> Generator[Lines_coord, None, None]:
"""
get_lines_i_coord(d: int, n: int, i: int) ->
Generator[Lines_coord, None, None]:
Generates the lines of a hypercube that span the specified number
of dimensions
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
i
        The returned lines span `i` + 1 dimensions
Yields
-------
        Lines in h(d, n) that span `i` + 1 dimensions.
See Also
--------
num_lines_grouped
Notes
-----
The notes section for the function num_lines_grouped provides a
sketch of a constructive proof for the number of lines in a
hypercube. This has been used to implement this function.
Examples
--------
>>> lines = list(get_lines_grouped_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)]], [[(0, 0), (1, 1)], [(0, 1), (1, 0)]]]
"""
lines = []
diagonals = list(get_diagonals_coord(i + 1, n))
# loop over all possible combinations of i dimensions
for i_comb in it.combinations(range(d), r = i + 1):
# a cell could be in any position in the other dimensions
other_d = set(range(d)) - set(i_comb)
for cell in it.product(range(n), repeat = d - i - 1):
diags: Lines_coord = []
for diagonal in diagonals:
diag = []
for c in diagonal:
diag.append(insert_into_tuple(c, other_d, cell))
diags.append(diag)
lines.extend(diags)
yield lines
def get_lines_coord(d: int, n: int) -> Generator[Line_coord, None, None]:
"""
get_lines_coord(d: int, n: int) ->
Generator[Line_coord, None, None]:
Returns the lines in a hypercube
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Yields
-------
Lines in h(d, n).
See Also
--------
get_lines_grouped_coord
Examples
--------
>>> lines = list(get_lines_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)], [(0, 0), (1, 1)], [(0, 1), (1, 0)]]
>>> len(lines)
6
"""
grouped = get_lines_grouped_coord(d, n)
flat = (x for y in grouped for x in y)
    yield from flat  # 'return flat' would also work, but 'yield from' makes the generator explicit
def get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord:
"""
get_scopes_coord(lines: Lines_coord, d: int) -> Scopes_coord:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_coord(d, n).
dim
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to the coordinates of each cell in
the hypercube. For each cell key, the value is the cell's
scope - a list of coordinates that are lines containing
the cell.
See Also
--------
get_lines_coord
Examples
--------
>>> from pprint import pprint
>>> lines = list(get_lines_coord(2, 2))
>>> lines #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)],
[(0, 1), (1, 1)],
[(0, 0), (0, 1)],
[(1, 0), (1, 1)],
[(0, 0), (1, 1)],
[(0, 1), (1, 0)]]
>>> scopes = get_scopes_coord(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]],
(0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]],
(1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]],
(1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]),
((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]),
((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]),
((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])]
"""
n = len(lines[0])
scopes: Scopes_coord = DefaultDict(list)
cells = it.product(range(n), repeat = d) # get all possible cells
for cell in cells:
for line in lines:
if cell in line:
scopes[cell].append(line)
return scopes
def structure_coord(d: int, n: int) -> Structure_coord:
"""
structure_coord(d: int, n: int) -> Structure_coord:
Return lines, and the scopes of its cells, for h(d, n)
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
Lines, and the scopes of its cells, for h(d, n)
See Also
--------
get_lines_coord
get_scopes_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_coord(2, 2)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
[[(0, 0), (1, 0)], [(0, 1), (1, 1)], [(0, 0), (0, 1)],
[(1, 0), (1, 1)], [(0, 0), (1, 1)], [(0, 1), (1, 0)]]
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]],
(0, 1): [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]],
(1, 0): [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]],
(1, 1): [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [[(0, 0), (1, 0)], [(0, 0), (0, 1)], [(0, 0), (1, 1)]]),
((0, 1), [[(0, 1), (1, 1)], [(0, 0), (0, 1)], [(0, 1), (1, 0)]]),
((1, 0), [[(0, 0), (1, 0)], [(1, 0), (1, 1)], [(0, 1), (1, 0)]]),
((1, 1), [[(0, 1), (1, 1)], [(1, 0), (1, 1)], [(0, 0), (1, 1)]])]
"""
lines = list(get_lines_coord(d, n))
scopes = get_scopes_coord(lines, d)
return (lines, scopes)
def get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord:
"""
get_lines_enum_coord(d: int, n: int) -> Lines_enum_coord:
Returns enumerated lines of a hypercube
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
    Returns
-------
Enumerated lines in h(d, n).
See Also
--------
get_lines_coord
Examples
--------
>>> lines = get_lines_enum_coord(2, 2)
>>> lines #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
"""
    return dict(enumerate(get_lines_coord(d, n)))
def get_scopes_enum_coord(lines: Lines_enum_coord, d: int) -> Scopes_enum:
"""
get_scopes_enum_coord(lines: Lines_enum_coord, d: int) ->
Scopes_enum:
Calculate the scope of each cell in a hypercube
Parameters
----------
lines
The returned value from get_lines_enum_coord(d, n).
dim
The dimension of the hypercube that was used to
generate `lines`.
Returns
-------
A dictionary with keys equal to each cell coordinates of the
hypercube. For each cell key, the value is the cell's
scope - a list of line enumerations that are lines containing
the cell.
See Also
--------
get_lines_enum_coord
Examples
--------
>>> from pprint import pprint
>>> lines = get_lines_enum_coord(2, 2)
>>> lines #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
>>> scopes = get_scopes_enum_coord(lines, 2)
>>> pprint(scopes) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(scopes.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]),
((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]),
((1, 1), [1, 3, 4])]
"""
n = len(lines[0])
scopes: Scopes_enum = DefaultDict(list)
cells = it.product(range(n), repeat = d) # get all possible cells
for cell in cells:
for idx, line in lines.items():
if cell in line:
scopes[cell].append(idx)
return scopes
def structure_enum_coord(d: int, n: int) -> Structure_enum_coord:
"""
structure_enum_coord(d: int, n: int) ->
Structure_enum_coord:
Return enumerated lines, and the scopes of its cells, for h(d, n)
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
Enumerated lines, and the scopes of its cells, for h(d, n)
See Also
--------
get_lines_enum_coord
get_scopes_enum_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_enum_coord(2, 2)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0)],
1: [(0, 1), (1, 1)],
2: [(0, 0), (0, 1)],
3: [(1, 0), (1, 1)],
4: [(0, 0), (1, 1)],
5: [(0, 1), (1, 0)]}
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 2, 4],
(0, 1): [1, 2, 5],
(1, 0): [0, 3, 5],
(1, 1): [1, 3, 4]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 2, 4]),
((0, 1), [1, 2, 5]),
((1, 0), [0, 3, 5]),
((1, 1), [1, 3, 4])]
"""
lines = get_lines_enum_coord(d, n)
scopes = get_scopes_enum_coord(lines, d)
return (lines, scopes)
def connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum) -> Connected_cells:
"""
connected_cells_coord(lines: Lines_enum_coord, scopes: Scopes_enum)
-> Connected_cells:
Calculate the connected cells for a cube.
Parameters
----------
lines
The enumerated lines of the hypercube
scopes
The enumerated scopes of the hypercube
Returns
------
        A dictionary with keys being cell coordinates and values the
connected cell coordinates.
See Also
--------
structure_enum_coord
Examples
--------
>>> from pprint import pprint
>>> struct = structure_enum_coord(2, 3)
>>> struct[0] #doctest: +NORMALIZE_WHITESPACE
{0: [(0, 0), (1, 0), (2, 0)],
1: [(0, 1), (1, 1), (2, 1)],
2: [(0, 2), (1, 2), (2, 2)],
3: [(0, 0), (0, 1), (0, 2)],
4: [(1, 0), (1, 1), (1, 2)],
5: [(2, 0), (2, 1), (2, 2)],
6: [(0, 0), (1, 1), (2, 2)],
7: [(0, 2), (1, 1), (2, 0)]}
>>> pprint(struct[1]) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [0, 3, 6],
(0, 1): [1, 3],
(0, 2): [2, 3, 7],
(1, 0): [0, 4],
(1, 1): [1, 4, 6, 7],
(1, 2): [2, 4],
(2, 0): [0, 5, 7],
(2, 1): [1, 5],
(2, 2): [2, 5, 6]})
>>> sorted(struct[1].items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [0, 3, 6]),
((0, 1), [1, 3]),
((0, 2), [2, 3, 7]),
((1, 0), [0, 4]),
((1, 1), [1, 4, 6, 7]),
((1, 2), [2, 4]),
((2, 0), [0, 5, 7]),
((2, 1), [1, 5]),
((2, 2), [2, 5, 6])]
>>> connected_cells = connected_cells_coord(*struct)
>>> pprint(connected_cells) #doctest: +SKIP
defaultdict(<class 'list'>,
{(0, 0): [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(0, 1): [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)],
(0, 2): [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1),
(2, 2), (0, 2)],
(1, 0): [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)],
(1, 1): [(0, 1),
(1, 2),
(0, 0),
(0, 2),
(2, 1),
(2, 0),
(2, 2),
(1, 0),
(1, 1)],
(1, 2): [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)],
(2, 0): [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2),
(1, 0), (0, 2)],
(2, 1): [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)],
(2, 2): [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1),
(2, 2), (0, 2)]})
>>> sorted(connected_cells.items()) #doctest: +NORMALIZE_WHITESPACE
[((0, 0), [(0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((0, 1), [(0, 1), (0, 0), (2, 1), (1, 1), (0, 2)]),
((0, 2), [(1, 2), (0, 1), (0, 0), (2, 0), (1, 1), (2, 2), (0, 2)]),
((1, 0), [(1, 2), (0, 0), (2, 0), (1, 0), (1, 1)]),
((1, 1), [(0, 1), (1, 2), (0, 0), (0, 2), (2, 1), (2, 0),
(2, 2), (1, 0), (1, 1)]),
((1, 2), [(1, 2), (0, 2), (2, 2), (1, 0), (1, 1)]),
((2, 0), [(0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (1, 0), (0, 2)]),
((2, 1), [(0, 1), (2, 1), (2, 0), (2, 2), (1, 1)]),
((2, 2), [(1, 2), (0, 0), (2, 1), (2, 0), (1, 1), (2, 2), (0, 2)])]
"""
connected_cells: Connected_cells = DefaultDict(list)
for cell, lines_enums in scopes.items():
for line_enum in lines_enums:
connected_cells[cell].extend(lines[line_enum])
connected_cells[cell] = list(set(connected_cells[cell]))
return connected_cells
def get_scope_cell_coord(d: int, n: int, cell: Cell_coord) -> Generator[Line_coord, None, None]:
"""
get_scope_cell_coord(d: int, n: int, cell: Cell_coord) ->
Generator[Line_coord, None, None]:
Calculate the scope for a cell.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
cell
The cell whose scope is to be calculated
Yields
------
Lines that form the scope of `cell`.
See Also
--------
get_scopes_coord
Notes
-----
The scope for a specific cell can also be found by calling
get_scopes_coord and indexing with the cell. get_scopes_coord
calculates the scope for every cell and stores this in a dictionary.
get_scope_cell_coord only calculates the scope for the
specified cell.
Examples
--------
>>> d = 3
>>> n = 4
>>> list(get_scope_cell_coord(d, n, (1,2,3))) # doctest: +NORMALIZE_WHITESPACE
[[(0, 2, 3), (1, 2, 3), (2, 2, 3), (3, 2, 3)],
[(1, 0, 3), (1, 1, 3), (1, 2, 3), (1, 3, 3)],
[(1, 2, 0), (1, 2, 1), (1, 2, 2), (1, 2, 3)],
[(0, 3, 3), (1, 2, 3), (2, 1, 3), (3, 0, 3)]]
"""
# loop over the numbers of dimensions
for i in range(d):
# for each combination of i dimensions
for i_comb in it.combinations(range(d), r = i + 1):
            # increment cell coordinates along all potential lines
incr = it.product([-1, 1], repeat = i + 1)
seen: Line_coord = []
for j in incr:
# store potential lines. Could use a list but deque
# makes it clear we are moving "up and down" the line
d_line: Deque[Cell_coord] = Deque((cell,))
# since we are moving "up and down" we don't need
# to move "down and up" as well
j_neg = tuple(-x for x in list(j))
if j_neg not in seen:
seen.append(j)
for k in range(1, n):
jk = tuple(x * k for x in list(j)) # size of increments
# record cells positions of increments
d_line.appendleft(increment_cell_coord(cell, i_comb, jk))
d_line.append(increment_cell_coord(cell, i_comb, jk, False))
# some calculated cells will simply not be part of the board
line = remove_invalid_cells_coord(n, list(d_line))
# we only want lines that are winning lines
if len(line) == n:
yield line
def scopes_size(scopes: Scopes) -> Counter:
"""
scopes_size(scopes: Scopes) -> Counter:
Calculate the different scope lengths.
Parameters
----------
scopes
Dictionary of cells (keys) and their scopes
Returns
-------
        Counter of scope lengths (keys) and their frequencies (values).
See Also
--------
get_scopes_np
get_scopes_coord
Examples
--------
>>> import numpy as np
>>> scopes = structure_np(2, 3)[2]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_enum_np(2, 3)[2]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_coord(2, 3)[1]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
>>> scopes = structure_enum_coord(2, 3)[1]
>>> scopes_size(scopes) == Counter({2: 4, 3: 4, 4: 1})
True
"""
return Counter([len(scope) for scope in scopes.values()])
def scopes_size_cell(scopes: Scopes) -> DefaultDict[int, List[Cell_coord]]:
"""
scopes_size_cell(scopes: Scopes) ->
DefaultDict[int, List[Cell_coord]]:
Group cells by length of their scope.
Parameters
----------
scopes
Dictionary of cells (keys) and their scopes
Returns
-------
    Dictionary of scope lengths (keys) and the list of cells with
    scopes of that length.
See Also
--------
get_scopes_np
get_scopes_coord
get_scopes_enum
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> scopes = structure_np(2, 3)[2]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(1, 0), (0, 1), (2, 1), (1, 2)],
3: [(0, 0), (2, 0), (0, 2), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(1, 0), (0, 1), (2, 1), (1, 2)]),
(3, [(0, 0), (2, 0), (0, 2), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_enum_np(2, 3)[2]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(1, 0), (0, 1), (2, 1), (1, 2)],
3: [(0, 0), (2, 0), (0, 2), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(1, 0), (0, 1), (2, 1), (1, 2)]),
(3, [(0, 0), (2, 0), (0, 2), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_coord(2, 3)[1]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(0, 1), (1, 0), (1, 2), (2, 1)],
3: [(0, 0), (0, 2), (2, 0), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(0, 1), (1, 0), (1, 2), (2, 1)]),
(3, [(0, 0), (0, 2), (2, 0), (2, 2)]),
(4, [(1, 1)])]
>>> scopes = structure_enum_coord(2, 3)[1]
>>> pprint(scopes_size_cell(scopes)) #doctest: +SKIP
defaultdict(<class 'list'>,
{2: [(0, 1), (1, 0), (1, 2), (2, 1)],
3: [(0, 0), (0, 2), (2, 0), (2, 2)],
4: [(1, 1)]})
>>> sorted(scopes_size_cell(scopes).items()) #doctest: +NORMALIZE_WHITESPACE
[(2, [(0, 1), (1, 0), (1, 2), (2, 1)]),
(3, [(0, 0), (0, 2), (2, 0), (2, 2)]),
(4, [(1, 1)])]
"""
scopes_size_cell: DefaultDict[int, List[Cell_coord]] = DefaultDict(list)
for cell, scope in scopes.items():
scopes_size_cell[len(scope)].append(cell)
return scopes_size_cell
####################################################################################################
# The following 3 functions are for displaying a hypercube in a terminal.
# It is assumed that a numpy ndarray has been used to represent the hypercube
def display_np(hc: Cube_np, display_cell: Callable[[Any], Tuple[str, str, str]] = None, ul = False) -> str:
"""
display_np(hc: Cube_np, display_cell: Callable[[Any],
Tuple[str, str, str]] = None, ul = False) ->
str:
Construct a string to display the hypercube in the terminal.
Parameters
----------
hc
The hypercube to be displayed
display_cell
        A callback function called with the value of each cell.
        It returns a tuple of three strings: the character/string to
        be displayed, and the formatting to apply before and after it
        (typically ANSI color sequences). See Examples for how colors
        are specified.
If display_cell is not provided, the cell value is displayed.
ul
display_np calls itself recursively (see Notes). This parameter
is used to track whether a cell is on the bottom row of a
        2-d array. It has no direct impact when the user calls
        display_np unless the array is 1-d, in which case it determines
        whether cell values are underlined when displayed.
Returns
-------
A string that can be printed to the terminal to display the
hypercube.
See Also
--------
underline
join_multiline
Notes
-----
The '|' character is used to represent the board horizontally.
Cell contents are underlined in order to represent the board
vertically. For example, the character 'X' is underlined to
    give 'X̲'. This function is recursive; it starts with the hypercube
    and keeps removing dimensions until it reaches a single cell, which
    can be given a string value. We are trying to display d dimensions in
two dimensions. To do this, odd dimensions are
shown horizontally; even dimensions are shown vertically.
Examples
--------
>>> import numpy as np
>>> from pprint import pprint
>>> def dc(v: Any) -> Tuple[str, str, str]:
...
... # define colors - could also use colorama module
... # red foreground + yellow background
... pre_fmt = '\033[31;43m'
... post_fmt = '\033[0m' # removes color settings
...
... if v > 0:
... return 'X', pre_fmt, post_fmt
... elif v < 0:
... return 'O', pre_fmt, post_fmt
... else:
... return ' ', '', ''
>>> d = 3
>>> n = 3
>>> hc = np.zeros((n,) * d, dtype = int)
>>> hc[0, 0, 0] = 1
>>> hc[1, 1, 1] = -1
>>> disp = display_np(hc, dc)
>>> print(disp) #doctest: +SKIP
X̲|_|_ _|_|_ _|_|_
_|_|_ _|O̲|_ _|_|_
| | | | | |
"""
if hc.size == 1: # hc is a single cell
if display_cell is None:
s, pre_fmt, post_fmt = str(hc), '', ''
else:
s, pre_fmt, post_fmt = display_cell(hc)
        # underline displayed string (to represent board structure) unless
# string is in the bottom row of array
if ul:
s = '_' * len(s) if s.isspace() else underline(s)
return pre_fmt + s + post_fmt
# hc is not a single cell
d = hc.ndim
# break the array into sub arrays along the first dimension
sub_hc = [hc[i] for i in range(hc.shape[0])]
    # construct a string for each sub array
sub_hc_str = []
for c, a in enumerate(sub_hc):
if d == 2 and c == len(sub_hc) - 1:
# sub arr is 2-dimensional and last row - don't underline
ul = False
elif d != 1:
ul = True
sub_hc_str.append(display_np(a, display_cell, ul))
# join the sub strings
if d % 2 == 0: # even number of dimensions - display down the screen
if d == 2:
            return '\n'.join(sub_hc_str)
else:
            sp = '\n' + '\n' * (int((d / 2) ** 1.5) - 1) # increase space between higher dimensions
return sp.join(sub_hc_str)
else: # odd number of dimensions - display across the screen
if d == 1:
return '|'.join(sub_hc_str)
else:
return join_multiline(sub_hc_str, ' ' + ' ' * int((d - 2) ** 1.5) + ' ', False)
def underline(s: str, alpha_only = True) -> str:
"""
underline(s: str, alpha_only = True) -> str
Underlines a string.
Parameters
----------
    s
        The string to be underlined
    alpha_only
        If True (the default), only alphabetic characters are underlined
Returns
-------
An underlined string
Notes
-----
The code appears only to work properly with alphabetic characters.
Examples
--------
>>> underline('X')
'X̲'
>>> underline('XX')
'X̲X̲'
>>> underline('1')
'1'
>>> underline('1', False)
'1̲'
"""
try:
if alpha_only:
s_ = ""
            for ch in str(s):
                if ch.isalpha():
                    s_ = s_ + ch + "\u0332"
                else:
                    s_ = s_ + ch
            return s_
        else:
            return ''.join([ch + "\u0332" for ch in str(s)])
    except Exception:
return s
def join_multiline(iter: Iterable[str], divider: str = ' ', divide_empty_lines: bool = False,
fill_value: str = '_') -> str:
"""
join_multiline(iter: Iterable[str], divider: str = ' ',
divide_empty_lines: bool = False,
fill_value: str = '_') -> str
Join multiline string line by line.
Parameters
----------
iter
An iterable of multiline (or single line) strings
divider
String to divide the corresponding lines in each iterable
divide_empty_lines
If the corresponding line in each iterable is blank, then
determines if the lines are still divided by divider, or
divided by ''.
fill_value
If the number of lines in each multiline string in iter
differs, then fill_value is used to fill in values of the
shorter strings.
Returns
-------
The joined string.
Examples
--------
    >>> # note that newline has to be escaped to work in the doctest
    >>> # examples below
>>> ml_1 = 'AA\\nMM\\nXX'
>>> ml_2 = 'BB\\nNN\\nYY'
>>> ml_3 = 'CC\\nOO\\nZZ'
>>> ml = join_multiline([ml_1, ml_2, ml_3])
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA BB CC
MM NN OO
XX YY ZZ
>>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
MM_NN_OO
XX_YY_ZZ
>>> ml_3 = 'CC\\nOO'
>>> ml = join_multiline([ml_1, ml_2, ml_3], fill_value = '@')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA BB CC
MM NN OO
XX YY @
>>> ml_1 = 'AA\\n\\nMM'
>>> ml_2 = 'BB\\n\\nNN'
>>> ml_3 = 'CC\\n\\nZZ'
>>> ml = join_multiline([ml_1, ml_2, ml_3], divider = '_')
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
<BLANKLINE>
MM_NN_ZZ
>>> ml = join_multiline([ml_1, ml_2, ml_3], '_', True)
>>> print(ml) #doctest: +NORMALIZE_WHITESPACE
AA_BB_CC
__
MM_NN_ZZ
"""
# for each multiline block, split into individual lines
spl = [x.split('\n') for x in iter]
# create list of tuples with tuple i containing line i from each multiline block
    tl = list(it.zip_longest(*spl, fillvalue = fill_value))
if divide_empty_lines:
st = [divider.join(t) for t in tl]
else:
st = []
for t in tl:
if all([not x.strip() for x in t]):
st.append('')
else:
st.append(divider.join(t))
# finally, join each string separated by a new line
return '\n'.join(st)
####################################################################################################
####################################################################################################
# The following functions are helper functions
def slice_ndarray(arr: Cube_np, dims: Collection[int], coords: Collection[int]) -> Cube_np:
"""
slice_ndarray(arr: Cube_np, dims: Collection[int],
coords: Collection[int]) ->
Cube_np:
Returns a slice of a hypercube.
Parameters
----------
arr
The hypercube to be sliced
dims
The dims to slice along
coords
The coordinates corresponding to the dimensions being sliced
Returns
-------
A view of a slice of `arr`.
Raises
------
ValueError
If length of `dims` is not equal to length of `coords`
Examples
--------
>>> import numpy as np
>>> arr = np.arange(8).reshape(2, 2, 2)
>>> arr
array([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]])
>>> slice_ndarray(arr, (0,), (0,))
array([[0, 1],
[2, 3]])
>>> slice_ndarray(arr, (1, 2), (0, 0))
array([0, 4])
"""
    # create a list of slice objects, one for each dimension of the array
    # Note: slice(None) is the same as ":". E.g. arr[:, 4] == arr[slice(None), 4]
sl: List[Union[slice, int]] = [slice(None)] * arr.ndim
if len(dims) != len(coords):
raise ValueError("dims and coords must be of the same length")
for dim, coord in zip(dims, coords):
sl[dim] = coord
return arr[tuple(sl)]
def insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]], val: Any) -> Tuple[int, ...]:
"""
insert_into_tuple(tup: Tuple, pos: Union[int, Collection[int]],
val: Any) ->
Tuple[int, ...]:
Insert values into a tuple.
Parameters
----------
tup
the tuple into which values are to be inserted
pos
The positions into which values are to be inserted
val
The values corresponding to the positions in `pos`
Returns
-------
A copy of `tup` with values inserted.
Raises
------
ValueError
If length of `pos` is not equal to length of `val`
Examples
--------
>>> tup = (0, 1, 2, 3)
>>> pos = (5, 1)
>>> val = (9, 8)
>>> insert_into_tuple(tup, pos, val)
(0, 8, 1, 2, 3, 9)
>>> insert_into_tuple(tup, (), ())
(0, 1, 2, 3)
"""
tl = list(tup)
if isinstance(pos, int):
tl.insert(pos, val)
else:
if len(pos) != len(val):
raise ValueError("pos and val must be of the same length")
if len(pos) == 0:
return tup
        # sort pos from low to high; sort val correspondingly
stl = list(zip(*sorted(zip(pos, val))))
for p, v in zip(stl[0], stl[1]):
tl.insert(p, v)
return tuple(tl)
def increment_cell_coord(cell: Cell_coord, pos: Sequence[int], incr: Sequence[int], add: bool = True) -> Cell_coord:
"""
increment_cell_coord(cell: Cell_coord, pos: Sequence[int],
incr: Sequence[int], add: bool = True) ->
Cell_coord:
Increments coordinates of a cell.
Parameters
----------
cell
the cell which will have coordinates incremented
pos
The coordinates which are to be incremented
incr
The increment values at the specified coordinates
add
        If True, the increments are added; otherwise they are subtracted
Returns
-------
A copy of `cell` with incremented coordinates.
Raises
------
ValueError
        If length of `pos` is not equal to length of `incr`
Examples
--------
>>> cell = (1, 2, 1)
>>> pos = (0, 2)
>>> incr = (1, -1)
>>> increment_cell_coord(cell, pos, incr)
(2, 2, 0)
>>> increment_cell_coord(cell, pos, incr, False)
(0, 2, 2)
"""
if len(pos) != len(incr):
raise ValueError("pos and incr must be of the same length")
if len(pos) == 0:
return cell
cl = list(cell)
for i in range(len(pos)):
if add:
cl[pos[i]] += incr[i]
else:
cl[pos[i]] -= incr[i]
return tuple(cl)
def str_to_tuple(d: int, n: int, cell: str, offset: int = 1) -> Cell_coord:
"""
str_to_tuple(d: int, n: int, cell: str, offset: int = 1) ->
Cell_coord:
    Returns cell coordinates provided as a string as a tuple
    of integers.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
cell
Cell coordinates specified as a string (see Notes).
Will accept a non-string argument which will be cast
to a string.
offset
idx offset - typically 0 or 1.
Raises
------
ValueError
        1. If digits are not separated and n is greater than 9
        2. An incorrect number of coordinates is provided
        3. One or more coordinates are not valid
Notes
-----
    If the string is all digits, each digit is assumed to be a
    coordinate. If non-digit characters are provided, they are assumed
    to separate coordinates.
Returns
-------
A tuple containing the cell coordinates.
Examples
--------
>>> d = 3
>>> n = 3
>>> str_to_tuple(d, n, '123')
(0, 1, 2)
>>> str_to_tuple(d, n, '012', offset = 0)
(0, 1, 2)
>>> str_to_tuple(d, n, '1,2::3')
(0, 1, 2)
>>> str_to_tuple(d, n, 123)
(0, 1, 2)
>>> str_to_tuple(d, n, '12')
Traceback (most recent call last):
...
ValueError: Incorrect number of coordinates provided
>>> str_to_tuple(d, n, '125')
Traceback (most recent call last):
...
ValueError: One or more coordinates are not valid
>>> d = 3
>>> n = 10
>>> str_to_tuple(d, n, '123') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Board is too big for each dimension to be specified
by single digit
"""
cell = str(cell)
# check to see if there are any non-digits
nd = re.findall(r'\D+', cell)
if len(nd) == 0:
if n > 9:
raise ValueError("Board is too big for each dimension to be specified by single digit")
else:
tup = tuple(int(coord) - offset for coord in cell)
else: # there are non-digits, use these as separators
tup = tuple(int(coord) - offset for coord in re.findall(r'\d+', cell))
    # check that the correct number of coordinates was specified
if len(tup) != d:
raise ValueError("Incorrect number of coordinates provided")
# check that each coordinate is valid
if all(t in range(n) for t in tup):
return tup
else:
raise ValueError("One or more coordinates are not valid")
def remove_invalid_cells_coord(n: int, line: Line_coord) -> Line_coord:
    """
    remove_invalid_cells_coord(n: int, line: Line_coord) -> Line_coord
Remove cells that do not have valid coordinates.
Parameters
----------
n
The number of cells in any dimension
line
        List of tuples representing cell coordinates (possibly invalid)
    Returns
    -------
    List of tuples representing valid cell coordinates
Examples
--------
>>> n = 3
>>> line = [(1, 2, 0), (-1, 0, 3), (0, 1, 2), (1, 2, 3)]
>>> remove_invalid_cells_coord(n, line)
[(1, 2, 0), (0, 1, 2)]
"""
rl = []
for cell in line:
if all(coord in range(n) for coord in cell):
rl.append(cell)
return rl
####################################################################################################
# used in internal testing
def _lines_np_coord_check(d: int, n: int) -> bool:
"""
_lines_np_coord_check(d: int, n: int) -> bool
Checks if lines_np and lines_coord give the same lines.
Parameters
----------
d
The number of dimensions of the hypercube
n
The number of cells in any dimension
Returns
-------
True if lines_np and lines_coord give the same lines.
False otherwise.
See Also
--------
get_lines_np
get_lines_coord
Notes
-----
This function is a private function used in testing.
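    Examples
    --------
    A quick consistency check on a small board (illustrative only, so
    skipped during doctest runs):
    >>> _lines_np_coord_check(2, 3) #doctest: +SKIP
    True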
"""
dtype = np.int64 if n ** d > 2 ** 31 else np.int32
arr = np.arange(n ** d, dtype = dtype).reshape([n] * d)
lines_np = get_lines_np(arr)
lines_coord = get_lines_coord(d, n)
t_np = [tuple(sorted(l.tolist())) for l in lines_np] # type: ignore
t_coord = [tuple(sorted([arr[c] for c in l])) for l in lines_coord]
return set(t_np) == set(t_coord)
| 30.374202
| 116
| 0.510575
| 10,336
| 71,349
| 3.442047
| 0.064725
| 0.016246
| 0.010962
| 0.007196
| 0.63364
| 0.590213
| 0.56025
| 0.541333
| 0.518088
| 0.502909
| 0
| 0.060476
| 0.31238
| 71,349
| 2,348
| 117
| 30.387138
| 0.66454
| 0.704887
| 0
| 0.31348
| 0
| 0
| 0.021765
| 0
| 0.003135
| 0
| 0
| 0
| 0
| 1
| 0.106583
| false
| 0
| 0.021944
| 0
| 0.22884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f81e6f765fb2c951a1b3a358bc3ab07fe69f4752
| 11,140
|
py
|
Python
|
simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py
|
IMSY-DKFZ/simpa
|
b8bddcf43a4bff2564f0ec208dc511b82e49bfb4
|
[
"MIT"
] | 3
|
2022-03-14T15:40:09.000Z
|
2022-03-20T02:34:25.000Z
|
simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py
|
jgroehl/simpa
|
e56f0802e5a8555ee8bb139dd4f776025e7e9267
|
[
"MIT"
] | 3
|
2022-03-18T07:19:12.000Z
|
2022-03-30T12:15:19.000Z
|
simpa_tests/manual_tests/acoustic_forward_models/KWaveAcousticForwardConvenienceFunction.py
|
IMSY-DKFZ/simpa
|
b8bddcf43a4bff2564f0ec208dc511b82e49bfb4
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
from simpa.core.device_digital_twins import SlitIlluminationGeometry, LinearArrayDetectionGeometry, PhotoacousticDevice
from simpa import perform_k_wave_acoustic_forward_simulation
from simpa.core.simulation_modules.reconstruction_module.reconstruction_module_delay_and_sum_adapter import \
reconstruct_delay_and_sum_pytorch
from simpa import MCXAdapter, ModelBasedVolumeCreationAdapter, \
GaussianNoise
from simpa.utils import Tags, Settings, TISSUE_LIBRARY
from simpa.core.simulation import simulate
from simpa.io_handling import load_data_field
import numpy as np
from simpa.utils.path_manager import PathManager
from simpa_tests.manual_tests import ManualIntegrationTestClass
import matplotlib.pyplot as plt
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class KWaveAcousticForwardConvenienceFunction(ManualIntegrationTestClass):
"""
    This class tests the convenience function for acoustic forward simulation.
    It first creates a volume and runs an optical forward simulation.
    Then the function itself is tested.
    Lastly, the generated time series data is reconstructed to check whether everything worked.
"""
def setup(self):
"""
Runs a pipeline consisting of volume creation and optical simulation. The resulting hdf5 file of the
        simple test volume is saved at the SAVE_PATH location defined in the path_config.env file.
"""
self.path_manager = PathManager()
self.VOLUME_TRANSDUCER_DIM_IN_MM = 75
self.VOLUME_PLANAR_DIM_IN_MM = 20
self.VOLUME_HEIGHT_IN_MM = 25
self.SPACING = 0.25
self.RANDOM_SEED = 4711
self.VOLUME_NAME = "TestKWaveAcousticForwardConvenienceFunction_" + str(self.RANDOM_SEED)
np.random.seed(self.RANDOM_SEED)
# These parameters set the general properties of the simulated volume
self.general_settings = {
Tags.RANDOM_SEED: self.RANDOM_SEED,
Tags.VOLUME_NAME: self.VOLUME_NAME,
Tags.SIMULATION_PATH: self.path_manager.get_hdf5_file_save_path(),
Tags.SPACING_MM: self.SPACING,
Tags.DIM_VOLUME_Z_MM: self.VOLUME_HEIGHT_IN_MM,
Tags.DIM_VOLUME_X_MM: self.VOLUME_TRANSDUCER_DIM_IN_MM,
Tags.DIM_VOLUME_Y_MM: self.VOLUME_PLANAR_DIM_IN_MM,
Tags.WAVELENGTHS: [700]
}
self.settings = Settings(self.general_settings)
self.settings.set_volume_creation_settings({
Tags.SIMULATE_DEFORMED_LAYERS: True,
Tags.STRUCTURES: self.create_example_tissue()
})
self.settings.set_optical_settings({
Tags.OPTICAL_MODEL_NUMBER_PHOTONS: 1e7,
Tags.OPTICAL_MODEL_BINARY_PATH: self.path_manager.get_mcx_binary_path(),
Tags.OPTICAL_MODEL: Tags.OPTICAL_MODEL_MCX,
Tags.ILLUMINATION_TYPE: Tags.ILLUMINATION_TYPE_PENCIL,
Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE: 50,
Tags.MCX_ASSUMED_ANISOTROPY: 0.9
})
self.settings["noise_model"] = {
Tags.NOISE_MEAN: 0.0,
Tags.NOISE_STD: 0.4,
Tags.NOISE_MODE: Tags.NOISE_MODE_ADDITIVE,
Tags.DATA_FIELD: Tags.DATA_FIELD_INITIAL_PRESSURE,
Tags.NOISE_NON_NEGATIVITY_CONSTRAINT: True
}
self.device = PhotoacousticDevice(device_position_mm=np.array([self.VOLUME_TRANSDUCER_DIM_IN_MM/2,
self.VOLUME_PLANAR_DIM_IN_MM/2,
0]))
self.device.set_detection_geometry(LinearArrayDetectionGeometry(device_position_mm=
self.device.device_position_mm, pitch_mm=0.25,
number_detector_elements=200))
self.device.add_illumination_geometry(SlitIlluminationGeometry(slit_vector_mm=[100, 0, 0]))
# run pipeline including volume creation and optical mcx simulation
self.pipeline = [
ModelBasedVolumeCreationAdapter(self.settings),
MCXAdapter(self.settings),
GaussianNoise(self.settings, "noise_model")
]
def teardown(self):
os.remove(self.settings[Tags.SIMPA_OUTPUT_PATH])
def perform_test(self):
simulate(self.pipeline, self.settings, self.device)
self.test_convenience_function()
def test_convenience_function(self):
# load initial pressure
initial_pressure = load_data_field(self.path_manager.get_hdf5_file_save_path() + "/" +
self.VOLUME_NAME + ".hdf5",
Tags.DATA_FIELD_INITIAL_PRESSURE, wavelength=700)
image_slice = np.s_[:, 40, :]
self.initial_pressure = np.rot90(initial_pressure[image_slice], -1)
# define acoustic settings and run simulation with convenience function
acoustic_settings = {
Tags.ACOUSTIC_SIMULATION_3D: True,
Tags.ACOUSTIC_MODEL_BINARY_PATH: self.path_manager.get_matlab_binary_path(),
Tags.KWAVE_PROPERTY_ALPHA_POWER: 0.00,
Tags.KWAVE_PROPERTY_SENSOR_RECORD: "p",
Tags.KWAVE_PROPERTY_PMLInside: False,
Tags.KWAVE_PROPERTY_PMLSize: [31, 32],
Tags.KWAVE_PROPERTY_PMLAlpha: 1.5,
Tags.KWAVE_PROPERTY_PlotPML: False,
Tags.RECORDMOVIE: False,
Tags.MOVIENAME: "visualization_log",
Tags.ACOUSTIC_LOG_SCALE: True,
Tags.MODEL_SENSOR_FREQUENCY_RESPONSE: False
}
time_series_data = perform_k_wave_acoustic_forward_simulation(initial_pressure=self.initial_pressure,
detection_geometry=self.device.
get_detection_geometry(),
speed_of_sound=1540, density=1000,
alpha_coeff=0.0)
# reconstruct the time series data to compare it with initial pressure
self.settings.set_reconstruction_settings({
Tags.RECONSTRUCTION_MODE: Tags.RECONSTRUCTION_MODE_PRESSURE,
Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION: True,
Tags.RECONSTRUCTION_BMODE_METHOD: Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM,
Tags.DATA_FIELD_SPEED_OF_SOUND: 1540,
Tags.SPACING_MM: 0.25,
Tags.SENSOR_SAMPLING_RATE_MHZ: 40,
})
self.reconstructed = reconstruct_delay_and_sum_pytorch(
time_series_data.copy(), self.device.get_detection_geometry(), self.settings)
def visualise_result(self, show_figure_on_screen=True, save_path=None):
        '''Plot initial pressure and reconstructed image volume for manual comparison.'''
plt.subplot(2, 2, 1)
plt.title("Initial Pressure Pipeline")
plt.imshow(self.initial_pressure)
plt.subplot(2, 2, 2)
plt.title("Reconstructed Image Pipeline")
plt.imshow(np.rot90(self.reconstructed, -1))
plt.tight_layout()
if show_figure_on_screen:
plt.show()
else:
if save_path is None:
save_path = ""
            plt.savefig(save_path + "TestKWaveConvenienceFunction.png")
plt.close()
def create_example_tissue(self):
"""
This is a very simple example script of how to create a tissue definition.
It contains a muscular background, an epidermis layer on top of the muscles
and a blood vessel.
"""
background_dictionary = Settings()
background_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.constant(1e-10, 1e-10, 1.0)
background_dictionary[Tags.STRUCTURE_TYPE] = Tags.BACKGROUND
muscle_dictionary = Settings()
muscle_dictionary[Tags.PRIORITY] = 1
muscle_dictionary[Tags.STRUCTURE_START_MM] = [0, 0, 0]
muscle_dictionary[Tags.STRUCTURE_END_MM] = [0, 0, 100]
muscle_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.constant(0.05, 100, 0.9)
muscle_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
muscle_dictionary[Tags.ADHERE_TO_DEFORMATION] = True
muscle_dictionary[Tags.STRUCTURE_TYPE] = Tags.HORIZONTAL_LAYER_STRUCTURE
vessel_1_dictionary = Settings()
vessel_1_dictionary[Tags.PRIORITY] = 3
vessel_1_dictionary[Tags.STRUCTURE_START_MM] = [self.VOLUME_TRANSDUCER_DIM_IN_MM/2,
0, 10]
vessel_1_dictionary[Tags.STRUCTURE_END_MM] = [
self.VOLUME_TRANSDUCER_DIM_IN_MM/2, self.VOLUME_PLANAR_DIM_IN_MM, 10]
vessel_1_dictionary[Tags.STRUCTURE_RADIUS_MM] = 3
vessel_1_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.blood()
vessel_1_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
vessel_1_dictionary[Tags.ADHERE_TO_DEFORMATION] = False
vessel_1_dictionary[Tags.STRUCTURE_TYPE] = Tags.CIRCULAR_TUBULAR_STRUCTURE
vessel_2_dictionary = Settings()
vessel_2_dictionary[Tags.PRIORITY] = 3
vessel_2_dictionary[Tags.STRUCTURE_START_MM] = [self.VOLUME_TRANSDUCER_DIM_IN_MM/2 - 10,
0, 5]
vessel_2_dictionary[Tags.STRUCTURE_END_MM] = [
self.VOLUME_TRANSDUCER_DIM_IN_MM/2 - 10, self.VOLUME_PLANAR_DIM_IN_MM, 5]
vessel_2_dictionary[Tags.STRUCTURE_RADIUS_MM] = 2
vessel_2_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.blood()
vessel_2_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
vessel_2_dictionary[Tags.ADHERE_TO_DEFORMATION] = False
vessel_2_dictionary[Tags.STRUCTURE_TYPE] = Tags.CIRCULAR_TUBULAR_STRUCTURE
epidermis_dictionary = Settings()
epidermis_dictionary[Tags.PRIORITY] = 8
epidermis_dictionary[Tags.STRUCTURE_START_MM] = [0, 0, 1]
epidermis_dictionary[Tags.STRUCTURE_END_MM] = [0, 0, 1.1]
epidermis_dictionary[Tags.MOLECULE_COMPOSITION] = TISSUE_LIBRARY.epidermis()
epidermis_dictionary[Tags.CONSIDER_PARTIAL_VOLUME] = True
epidermis_dictionary[Tags.ADHERE_TO_DEFORMATION] = True
epidermis_dictionary[Tags.STRUCTURE_TYPE] = Tags.HORIZONTAL_LAYER_STRUCTURE
tissue_dict = Settings()
tissue_dict[Tags.BACKGROUND] = background_dictionary
tissue_dict["muscle"] = muscle_dictionary
tissue_dict["epidermis"] = epidermis_dictionary
tissue_dict["vessel_1"] = vessel_1_dictionary
tissue_dict["vessel_2"] = vessel_2_dictionary
return tissue_dict
if __name__ == '__main__':
test = KWaveAcousticForwardConvenienceFunction()
test.run_test(show_figure_on_screen=False)
| 48.859649
| 119
| 0.668223
| 1,262
| 11,140
| 5.559429
| 0.248019
| 0.063854
| 0.049173
| 0.023945
| 0.295468
| 0.236745
| 0.172891
| 0.100912
| 0.043615
| 0.043044
| 0
| 0.021028
| 0.26149
| 11,140
| 227
| 120
| 49.07489
| 0.831773
| 0.107989
| 0
| 0.017241
| 0
| 0
| 0.024221
| 0.007735
| 0
| 0
| 0
| 0.004405
| 0
| 1
| 0.034483
| false
| 0
| 0.068966
| 0
| 0.114943
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f81ea939afded2dfd41116deec7708196341c5d1
| 10,881
|
py
|
Python
|
oc_ocdm/counter_handler/filesystem_counter_handler.py
|
arcangelo7/oc_ocdm
|
128d062ce9d858024aafd26d7d238c7a26cc8914
|
[
"0BSD"
] | 1
|
2020-12-17T15:33:01.000Z
|
2020-12-17T15:33:01.000Z
|
oc_ocdm/counter_handler/filesystem_counter_handler.py
|
arcangelo7/oc_ocdm
|
128d062ce9d858024aafd26d7d238c7a26cc8914
|
[
"0BSD"
] | 26
|
2021-01-08T08:32:23.000Z
|
2022-03-29T10:01:40.000Z
|
oc_ocdm/counter_handler/filesystem_counter_handler.py
|
arcangelo7/oc_ocdm
|
128d062ce9d858024aafd26d7d238c7a26cc8914
|
[
"0BSD"
] | 3
|
2021-04-16T08:44:44.000Z
|
2022-02-15T11:09:22.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from __future__ import annotations
import os
from shutil import copymode, move
from tempfile import mkstemp
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import BinaryIO, Tuple, List, Dict
from oc_ocdm.counter_handler.counter_handler import CounterHandler
class FilesystemCounterHandler(CounterHandler):
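    """
    A counter handler that persists integer counters in plain-text files.

    Each file stores one counter per line. Lines are padded with a
    trailing character so that all lines share the same fixed width,
    which lets a counter be addressed directly at the byte offset
    (line_number - 1) * line_length. (This summary is a descriptive
    addition inferred from the implementation below.)
    """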
initial_line_len: int = 3
trailing_char: str = " "
def __init__(self, info_dir: str) -> None:
if info_dir is None or len(info_dir) <= 0:
raise ValueError("info_dir parameter is required!")
if info_dir[-1] != os.sep:
info_dir += os.sep
self.info_dir: str = info_dir
self.datasets_dir: str = info_dir + 'datasets' + os.sep
self.short_names: List[str] = ["an", "ar", "be", "br", "ci", "de", "id", "pl", "ra", "re", "rp"]
self.metadata_short_names: List[str] = ["di"]
self.info_files: Dict[str, str] = {key: ("info_file_" + key + ".txt")
for key in self.short_names}
self.prov_files: Dict[str, str] = {key: ("prov_file_" + key + ".txt")
for key in self.short_names}
def set_counter(self, new_value: int, entity_short_name: str, prov_short_name: str = "",
identifier: int = 1) -> None:
if new_value < 0:
raise ValueError("new_value must be a non negative integer!")
if prov_short_name == "se":
file_path: str = self.get_prov_path(entity_short_name)
else:
file_path: str = self.get_info_path(entity_short_name)
self._set_number(new_value, file_path, identifier)
def read_counter(self, entity_short_name: str, prov_short_name: str = "", identifier: int = 1) -> int:
if prov_short_name == "se":
file_path: str = self.get_prov_path(entity_short_name)
else:
file_path: str = self.get_info_path(entity_short_name)
return self._read_number(file_path, identifier)[0]
def increment_counter(self, entity_short_name: str, prov_short_name: str = "", identifier: int = 1) -> int:
if prov_short_name == "se":
file_path: str = self.get_prov_path(entity_short_name)
else:
file_path: str = self.get_info_path(entity_short_name)
return self._add_number(file_path, identifier)
def get_info_path(self, short_name: str) -> str:
return self.info_dir + self.info_files[short_name]
def get_prov_path(self, short_name: str) -> str:
return self.info_dir + self.prov_files[short_name]
def get_metadata_path(self, short_name: str, dataset_name: str) -> str:
return self.datasets_dir + dataset_name + os.sep + 'metadata_' + short_name + '.txt'
def __initialize_file_if_not_existing(self, file_path: str):
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
if not os.path.isfile(file_path):
with open(file_path, "wb") as file:
first_line: str = self.trailing_char * (self.initial_line_len - 1) + "\n"
file.write(first_line.encode("ascii"))
def _read_number(self, file_path: str, line_number: int) -> Tuple[int, int]:
if line_number <= 0:
raise ValueError("line_number must be a positive non-zero integer number!")
self.__initialize_file_if_not_existing(file_path)
cur_number: int = 0
cur_line_len: int = 0
try:
with open(file_path, "rb") as file:
cur_line_len = self._get_line_len(file)
line_offset = (line_number - 1) * cur_line_len
file.seek(line_offset)
line = file.readline(cur_line_len).decode("ascii")
cur_number = int(line.rstrip(self.trailing_char + "\n"))
except ValueError:
cur_number = 0
except Exception as e:
print(e)
return cur_number, cur_line_len
def _add_number(self, file_path: str, line_number: int = 1) -> int:
if line_number <= 0:
raise ValueError("line_number must be a positive non-zero integer number!")
self.__initialize_file_if_not_existing(file_path)
cur_number, cur_line_len = self._read_number(file_path, line_number)
cur_number += 1
cur_number_len: int = len(str(cur_number)) + 1
if cur_number_len > cur_line_len:
self._increase_line_len(file_path, new_length=cur_number_len)
cur_line_len = cur_number_len
with open(file_path, "r+b") as file:
line_offset: int = (line_number - 1) * cur_line_len
file.seek(line_offset)
line: str = str(cur_number).ljust(cur_line_len - 1, self.trailing_char) + "\n"
file.write(line.encode("ascii"))
file.seek(-cur_line_len, os.SEEK_CUR)
self._fix_previous_lines(file, cur_line_len)
return cur_number
def _set_number(self, new_value: int, file_path: str, line_number: int = 1) -> None:
if new_value < 0:
raise ValueError("new_value must be a non negative integer!")
if line_number <= 0:
raise ValueError("line_number must be a positive non-zero integer number!")
self.__initialize_file_if_not_existing(file_path)
cur_line_len = self._read_number(file_path, line_number)[1]
cur_number_len: int = len(str(new_value)) + 1
if cur_number_len > cur_line_len:
self._increase_line_len(file_path, new_length=cur_number_len)
cur_line_len = cur_number_len
with open(file_path, "r+b") as file:
line_offset: int = (line_number - 1) * cur_line_len
file.seek(line_offset)
line: str = str(new_value).ljust(cur_line_len - 1, self.trailing_char) + "\n"
file.write(line.encode("ascii"))
file.seek(-cur_line_len, os.SEEK_CUR)
self._fix_previous_lines(file, cur_line_len)
@staticmethod
def _get_line_len(file: BinaryIO) -> int:
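        # Scan byte-by-byte from the start of the file until the first
        # newline to determine the fixed width shared by every line.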
cur_char: str = file.read(1).decode("ascii")
count: int = 1
        while len(cur_char) == 1 and cur_char != "\0":
cur_char = file.read(1).decode("ascii")
count += 1
if cur_char == "\n":
break
# Undo I/O pointer updates
file.seek(0)
        if cur_char == "":  # read(1) returns b'' at end-of-file, never None
raise EOFError("Reached end-of-file without encountering a line separator!")
elif cur_char == "\0":
raise ValueError("Encountered a NULL byte!")
else:
return count
def _increase_line_len(self, file_path: str, new_length: int = 0) -> None:
if new_length <= 0:
raise ValueError("new_length must be a positive non-zero integer number!")
with open(file_path, "rb") as cur_file:
if self._get_line_len(cur_file) >= new_length:
raise ValueError("Current line length is greater than new_length!")
fh, abs_path = mkstemp()
with os.fdopen(fh, "wb") as new_file:
with open(file_path, "rt", encoding="ascii") as old_file:
for line in old_file:
number: str = line.rstrip(self.trailing_char + "\n")
new_line: str = str(number).ljust(new_length - 1, self.trailing_char) + "\n"
new_file.write(new_line.encode("ascii"))
# Copy the file permissions from the old file to the new file
copymode(file_path, abs_path)
# Replace original file
os.remove(file_path)
move(abs_path, file_path)
@staticmethod
def _is_a_valid_line(buf: bytes) -> bool:
string: str = buf.decode("ascii")
return (string[-1] == "\n") and ("\0" not in string[:-1])
def _fix_previous_lines(self, file: BinaryIO, line_len: int) -> None:
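        # Walk backwards one fixed-width line at a time, rewriting any
        # malformed line (e.g. left behind by an interrupted resize) as
        # an empty padded line; stop at the first line that is valid.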
if line_len < self.initial_line_len:
raise ValueError("line_len should be at least %d!" % self.initial_line_len)
while file.tell() >= line_len:
file.seek(-line_len, os.SEEK_CUR)
buf: bytes = file.read(line_len)
if self._is_a_valid_line(buf) or len(buf) < line_len:
break
else:
file.seek(-line_len, os.SEEK_CUR)
fixed_line: str = (self.trailing_char * (line_len - 1)) + "\n"
file.write(fixed_line.encode("ascii"))
file.seek(-line_len, os.SEEK_CUR)
def set_metadata_counter(self, new_value: int, entity_short_name: str, dataset_name: str) -> None:
if new_value < 0:
raise ValueError("new_value must be a non negative integer!")
if dataset_name is None:
raise ValueError("dataset_name must be provided!")
if entity_short_name not in self.metadata_short_names:
raise ValueError("entity_short_name is not a known metadata short name!")
file_path: str = self.get_metadata_path(entity_short_name, dataset_name)
return self._set_number(new_value, file_path, 1)
def read_metadata_counter(self, entity_short_name: str, dataset_name: str) -> int:
if dataset_name is None:
raise ValueError("dataset_name must be provided!")
if entity_short_name not in self.metadata_short_names:
raise ValueError("entity_short_name is not a known metadata short name!")
file_path: str = self.get_metadata_path(entity_short_name, dataset_name)
return self._read_number(file_path, 1)[0]
def increment_metadata_counter(self, entity_short_name: str, dataset_name: str) -> int:
if dataset_name is None:
raise ValueError("dataset_name must be provided!")
if entity_short_name not in self.metadata_short_names:
raise ValueError("entity_short_name is not a known metadata short name!")
file_path: str = self.get_metadata_path(entity_short_name, dataset_name)
return self._add_number(file_path, 1)
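# Illustrative usage sketch (not part of the original module). The
# directory name below is a hypothetical example; entity short names
# come from FilesystemCounterHandler.short_names.
#
#   handler = FilesystemCounterHandler("./counters")
#   handler.set_counter(41, "br")
#   handler.increment_counter("br")  # -> 42
#   handler.read_counter("br")       # -> 42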
| 42.503906
| 111
| 0.637074
| 1,531
| 10,881
| 4.246244
| 0.15872
| 0.041994
| 0.048454
| 0.020766
| 0.566067
| 0.518843
| 0.487156
| 0.451469
| 0.427934
| 0.408553
| 0
| 0.006504
| 0.265233
| 10,881
| 255
| 112
| 42.670588
| 0.806629
| 0.082621
| 0
| 0.391304
| 0
| 0
| 0.095544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097826
| false
| 0
| 0.038043
| 0.016304
| 0.217391
| 0.005435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f81fb7d0b255f47fb45c7a694f335756c5c2bb24
| 3,823
|
py
|
Python
|
backend_app/serializers.py
|
ilveroluca/backend
|
91b80b154c4e1e45587797cc41bf2b2b75c23e68
|
[
"MIT"
] | null | null | null |
backend_app/serializers.py
|
ilveroluca/backend
|
91b80b154c4e1e45587797cc41bf2b2b75c23e68
|
[
"MIT"
] | null | null | null |
backend_app/serializers.py
|
ilveroluca/backend
|
91b80b154c4e1e45587797cc41bf2b2b75c23e68
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from backend_app import models
class AllowedPropertySerializer(serializers.ModelSerializer):
class Meta:
model = models.AllowedProperty
fields = '__all__'
# exclude = ['id']
class DatasetSerializer(serializers.ModelSerializer):
class Meta:
model = models.Dataset
fields = ['id', 'name', 'path', 'task_id']
write_only_fields = ['name', 'path', 'task_id'] # Only for post
class InferenceSerializer(serializers.ModelSerializer):
project_id = serializers.IntegerField()
class Meta:
model = models.Inference
fields = ['project_id', 'modelweights_id', 'dataset_id']
# exclude = ['stats']
class InferenceSingleSerializer(serializers.ModelSerializer):
project_id = serializers.IntegerField()
image_url = serializers.URLField()
class Meta:
model = models.Inference
exclude = ['stats', 'dataset_id', 'logfile']
# write_only_fields = ['modelweights_id', 'image_url', 'project_id']
class ModelSerializer(serializers.ModelSerializer):
class Meta:
model = models.Model
fields = ['id', 'name', 'location', 'task_id']
class ModelWeightsSerializer(serializers.ModelSerializer):
class Meta:
model = models.ModelWeights
fields = ['id', 'name', 'celery_id', "model_id", "dataset_id", "pretrained_on"]
read_only_fields = ['location', 'celery_id', 'logfile']
write_only_fields = ['id']
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = models.Project
fields = '__all__'
# fields = ['id', 'name', 'task_id', 'modelweights_id', 'inference_id']
# exclude = ['task', 'modelweights']
class PropertyListSerializer(serializers.ModelSerializer):
class Meta:
model = models.Property
# fields = ['id', 'name']
fields = '__all__'
class PropertyTrainSerializer(serializers.ModelSerializer):
value = serializers.CharField()
class Meta:
model = models.Property
fields = ['id', 'name', 'value']
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = models.Task
fields = '__all__'
class TrainSerializer(serializers.Serializer):
dataset_id = serializers.IntegerField()
model_id = serializers.IntegerField()
project_id = serializers.IntegerField()
properties = PropertyTrainSerializer(many=True)
weights_id = serializers.IntegerField(allow_null=True)
class TrainingSettingSerializer(serializers.ModelSerializer):
class Meta:
model = models.TrainingSetting
fields = '__all__'
# exclude = ['id']
class StopProcessSerializer(serializers.Serializer):
process_id = serializers.UUIDField()
# RESPONSES SERIALIZERS
class GeneralResponse(serializers.Serializer):
result = serializers.CharField()
class GeneralErrorResponse(serializers.Serializer):
result = serializers.CharField()
error = serializers.CharField()
class InferenceResponseSerializer(serializers.Serializer):
result = serializers.CharField()
process_id = serializers.UUIDField()
class OutputsResponse(serializers.Serializer):
outputs = serializers.ListField(
child=serializers.ListField(
child=serializers.ListField(child=serializers.Field(), min_length=2, max_length=2)))
class TrainResponse(serializers.Serializer):
result = serializers.CharField()
process_id = serializers.UUIDField()
class StatusStatusResponse(serializers.Serializer):
process_type = serializers.CharField()
process_status = serializers.CharField()
process_data = serializers.CharField()
class StatusResponse(serializers.Serializer):
result = serializers.CharField()
status = StatusStatusResponse()
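# Illustrative usage sketch (not part of the original module): the
# plain response serializers validate simple payloads, e.g.
#
#   s = GeneralResponse(data={"result": "ok"})
#   s.is_valid()  # -> True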
| 27.905109
| 96
| 0.698666
| 347
| 3,823
| 7.512968
| 0.244957
| 0.109705
| 0.059072
| 0.084388
| 0.416187
| 0.303797
| 0.125815
| 0.092827
| 0.06214
| 0.06214
| 0
| 0.000649
| 0.193565
| 3,823
| 136
| 97
| 28.110294
| 0.844956
| 0.074549
| 0
| 0.373494
| 0
| 0
| 0.064626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.024096
| 0
| 0.674699
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8207cbc88a40509eaabe2f12c2e9fb96d02736a
| 1,154
|
py
|
Python
|
app/cvp.py
|
ekiminatorn/murmur-rest
|
594060264cd6ea594d5c07f40163782946f48eb2
|
[
"Unlicense",
"MIT"
] | 73
|
2015-01-08T19:58:36.000Z
|
2022-01-25T20:44:07.000Z
|
app/cvp.py
|
ekiminatorn/murmur-rest
|
594060264cd6ea594d5c07f40163782946f48eb2
|
[
"Unlicense",
"MIT"
] | 34
|
2015-01-08T19:52:34.000Z
|
2022-03-15T08:36:30.000Z
|
app/cvp.py
|
ekiminatorn/murmur-rest
|
594060264cd6ea594d5c07f40163782946f48eb2
|
[
"Unlicense",
"MIT"
] | 33
|
2015-01-08T19:22:40.000Z
|
2022-01-19T06:28:37.000Z
|
"""
cvp.py
Functions for generating CVP feeds.
:copyright: (C) 2014 by github.com/alfg.
:license: MIT, see README for more details.
"""
def cvp_player_to_dict(player):
"""
Convert a player object from a Tree to a CVP-compliant dict.
"""
return {
"session": player.session,
"userid": player.userid,
"name": player.name,
"deaf": player.deaf,
"mute": player.mute,
"selfDeaf": player.selfDeaf,
"selfMute": player.selfMute,
"suppress": player.suppress,
"onlinesecs": player.onlinesecs,
"idlesecs": player.idlesecs
}
def cvp_chan_to_dict(channel):
"""
Convert a channel from a Tree object to a CVP-compliant dict, recursively.
"""
return {
"id": channel.c.id,
"parent": channel.c.parent,
"name": channel.c.name,
"description": channel.c.description,
"channels": [cvp_chan_to_dict(c) for c in channel.children],
"users": [cvp_player_to_dict(p) for p in channel.users],
"position": channel.c.position,
"temporary": channel.c.temporary,
"links": channel.c.links
}
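# Illustrative sketch (not part of the original module): any object
# exposing the expected attributes can be converted, e.g. using
# types.SimpleNamespace as a stand-in for a Murmur Tree player.
#
#   from types import SimpleNamespace
#   p = SimpleNamespace(session=1, userid=5, name="alf", deaf=False,
#                       mute=False, selfDeaf=False, selfMute=False,
#                       suppress=False, onlinesecs=120, idlesecs=3)
#   cvp_player_to_dict(p)["name"]  # -> 'alf'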
| 26.837209
| 78
| 0.604853
| 140
| 1,154
| 4.9
| 0.385714
| 0.081633
| 0.03207
| 0.043732
| 0.055394
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004695
| 0.261698
| 1,154
| 42
| 79
| 27.47619
| 0.800469
| 0.230503
| 0
| 0.08
| 0
| 0
| 0.147059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8238013e026edf0a1b82a52242ee8f202d32c83
| 693
|
py
|
Python
|
func.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | null | null | null |
func.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | null | null | null |
func.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | 1
|
2021-02-06T04:28:17.000Z
|
2021-02-06T04:28:17.000Z
|
def cyl(h, r):
area_cyl = 2 * 3.14 * r * h
return(area_cyl)
def con(r, l):
area_con = 3.14 * r * l
return(area_con)
def final_price(cost):
tax = 0.18 * cost
re_price = cost + tax
return(re_price)
print("Enter Values of cylindrical part of tent ")
h = float(input("Height : "))
r = float(input("radius : "))
csa_cyl = cyl(h, r)
l = float(input("Enter slant height "))
csa_con = con(r, l)
canvas_area = csa_cyl + csa_con
print("Area of canvas = ", canvas_area, " m^2")
unit_price = float(input("Enter cost of 1 m^2 "))
total_price = unit_price * canvas_area
print("Total cost of canvas before tax ",total_price)
print("Inluding tax"+ str(final_price(total_price)))
| 28.875
| 53
| 0.658009
| 121
| 693
| 3.603306
| 0.322314
| 0.018349
| 0.022936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023297
| 0.194805
| 693
| 23
| 54
| 30.130435
| 0.758065
| 0
| 0
| 0
| 0
| 0
| 0.235209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0
| 0
| 0.136364
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f823c6094a403ab6a62faccb2e76b2e2b2d997a0
| 1,282
|
py
|
Python
|
pymoku/plotly_support.py
|
manekawije/Liquid
|
284991ceca70ec3fcd0cca7e19f4100463600a6c
|
[
"MIT"
] | null | null | null |
pymoku/plotly_support.py
|
manekawije/Liquid
|
284991ceca70ec3fcd0cca7e19f4100463600a6c
|
[
"MIT"
] | null | null | null |
pymoku/plotly_support.py
|
manekawije/Liquid
|
284991ceca70ec3fcd0cca7e19f4100463600a6c
|
[
"MIT"
] | null | null | null |
# Plotly integration for the Moku:Lab Datalogger
# Copyright 2016 Liquid Instruments Pty. Ltd.
from pymoku import InvalidOperationException
def stream_init(moku, uname, api_key, str_id1, str_id2, npoints=100, mode='lines', line={}):
line = ';'.join([ '='.join(i) for i in list(line.items())])
settings = [
('plotly.uname', uname),
('plotly.api_key', api_key),
('plotly.strid1', str_id1),
('plotly.strid2', str_id2),
('plotly.displaysize', str(npoints)),
('plotly.mode', mode),
('plotly.line', line),
]
moku._set_properties(settings)
def stream_url(moku):
return moku._get_property_single('plotly.url')
def plot_frame(dataframe, uname=None, api_key=None, mode='lines', line={}):
try:
import plotly.plotly as ply
import plotly.tools as ptls
from plotly.graph_objs import Scatter, Layout, Data, Figure
except ImportError:
raise InvalidOperationException("Please install the Python plotly bindings")
if uname and api_key:
ply.sign_in(uname, api_key)
c1 = dataframe.ch1
c2 = dataframe.ch2
x = list(range(len(c1)))
t1 = Scatter(x=x, y=c1, mode=mode, line=line)
t2 = Scatter(x=x, y=c2, mode=mode, line=line)
layout = Layout(title="Moku:Lab Frame Grab")
data = Data([t1, t2])
fig = Figure(data=data, layout=layout)
return ply.plot(fig)
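# Illustrative usage sketch (not part of the original module): any
# object exposing `ch1` and `ch2` sequences can be plotted; the
# credentials below are placeholders.
#
#   from types import SimpleNamespace
#   frame = SimpleNamespace(ch1=[0.0, 1.0, 0.5], ch2=[0.5, 0.0, 1.0])
#   url = plot_frame(frame, uname="demo", api_key="...")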
| 26.163265
| 92
| 0.705148
| 190
| 1,282
| 4.652632
| 0.447368
| 0.040724
| 0.024887
| 0.022624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021878
| 0.144306
| 1,282
| 49
| 93
| 26.163265
| 0.783956
| 0.070203
| 0
| 0
| 0
| 0
| 0.146218
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.151515
| 0.030303
| 0.30303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f82c17e0d48a8946b94491663089d67afc63ece3
| 1,185
|
py
|
Python
|
tracpro/msgs/migrations/0005_inboxmessage.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 5
|
2015-07-21T15:58:31.000Z
|
2019-09-14T22:34:00.000Z
|
tracpro/msgs/migrations/0005_inboxmessage.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 197
|
2015-03-24T15:26:04.000Z
|
2017-11-28T19:24:37.000Z
|
tracpro/msgs/migrations/0005_inboxmessage.py
|
rapidpro/tracpro
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
[
"BSD-3-Clause"
] | 10
|
2015-03-24T12:26:36.000Z
|
2017-02-21T13:08:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orgs', '0013_auto_20150715_1831'),
('contacts', '0004_auto_20150324_1024'),
('msgs', '0004_message_pollrun'),
]
operations = [
migrations.CreateModel(
name='InboxMessage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rapidpro_message_id', models.IntegerField()),
('text', models.CharField(max_length=640, null=True)),
('archived', models.BooleanField(default=False)),
('created_on', models.DateTimeField(null=True)),
('delivered_on', models.DateTimeField(null=True)),
('sent_on', models.DateTimeField(null=True)),
('contact_from', models.ForeignKey(related_name='inbox_messages', to='contacts.Contact')),
('org', models.ForeignKey(related_name='inbox_messages', verbose_name='Organization', to='orgs.Org')),
],
),
]
| 38.225806
| 118
| 0.599156
| 113
| 1,185
| 6.035398
| 0.566372
| 0.046921
| 0.092375
| 0.109971
| 0.244868
| 0.117302
| 0
| 0
| 0
| 0
| 0
| 0.045249
| 0.254008
| 1,185
| 30
| 119
| 39.5
| 0.726244
| 0.017722
| 0
| 0
| 0
| 0
| 0.203959
| 0.039587
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f831926e75acbe42ce6d5e5261d3946d9b9dfea1
| 1,176
|
py
|
Python
|
_example/xor_embedded/make.py
|
backwardn/go-tflite
|
30f5e2a268d2eb053f758636609c5c379a3016b5
|
[
"MIT"
] | 3
|
2020-01-09T02:57:30.000Z
|
2020-07-17T15:56:50.000Z
|
_example/xor_embedded/make.py
|
backwardn/go-tflite
|
30f5e2a268d2eb053f758636609c5c379a3016b5
|
[
"MIT"
] | null | null | null |
_example/xor_embedded/make.py
|
backwardn/go-tflite
|
30f5e2a268d2eb053f758636609c5c379a3016b5
|
[
"MIT"
] | null | null | null |
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from tensorflow.lite.python import lite
X_train = np.array([[0.0, 0.0],
[1.0, 0.0],
[0.0, 1.0],
[1.0, 1.0]])
Y_train = np.array([0.0,
1.0,
1.0,
0.0])
model = Sequential()
output_count_layer0 = 2
model.add(
Dense(
output_count_layer0,
input_shape=(2, ),
activation='sigmoid')) # Need to specify input shape for input layer
output_count_layer1 = 1
model.add(Dense(output_count_layer1, activation='linear'))
model.compile(
loss='mean_squared_error', optimizer=RMSprop(), metrics=['accuracy'])
BATCH_SIZE = 4
history = model.fit(
X_train, Y_train, batch_size=BATCH_SIZE, epochs=3600, verbose=1)
X_test = X_train
Y_test = Y_train
score = model.evaluate(X_test, Y_test, verbose=0)
model.save('xor_model.h5')
converter = lite.TFLiteConverter.from_keras_model_file('xor_model.h5')
tflite_model = converter.convert()
open('public/xor_model.tflite', 'wb').write(tflite_model)
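# Illustrative check (not part of the original script): the trained
# Keras model should approximate XOR on the training inputs, e.g.
#   print(model.predict(X_train).round())  # expected ~ [[0.], [1.], [1.], [0.]]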
| 30.947368
| 75
| 0.662415
| 170
| 1,176
| 4.394118
| 0.405882
| 0.026774
| 0.024096
| 0.016064
| 0.128514
| 0.026774
| 0
| 0
| 0
| 0
| 0
| 0.043243
| 0.213435
| 1,176
| 37
| 76
| 31.783784
| 0.764324
| 0.036565
| 0
| 0.058824
| 0
| 0
| 0.077807
| 0.020336
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.147059
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f835c7244c8f288b00b860e6cef6f64c28c3ea69
| 473
|
py
|
Python
|
app/sso/user/models.py
|
ChristianKreuzberger/django-oauth-sso
|
b019e2e8232ae141b50b8270e79e0617e24f54bb
|
[
"MIT"
] | null | null | null |
app/sso/user/models.py
|
ChristianKreuzberger/django-oauth-sso
|
b019e2e8232ae141b50b8270e79e0617e24f54bb
|
[
"MIT"
] | null | null | null |
app/sso/user/models.py
|
ChristianKreuzberger/django-oauth-sso
|
b019e2e8232ae141b50b8270e79e0617e24f54bb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
"""
Extends the basic django user model with a longer first and last name
"""
first_name = models.CharField(
_("first name"),
max_length=128,
blank=True
)
last_name = models.CharField(
_("last name"),
max_length=128,
blank=True
)
| 21.5
| 73
| 0.649049
| 58
| 473
| 5.155172
| 0.551724
| 0.100334
| 0.12709
| 0.107023
| 0.167224
| 0.167224
| 0
| 0
| 0
| 0
| 0
| 0.017341
| 0.268499
| 473
| 21
| 74
| 22.52381
| 0.846821
| 0.145877
| 0
| 0.285714
| 0
| 0
| 0.049096
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f837af8b513ac4ce60f3ce335c72f8849a0bd813
| 1,710
|
py
|
Python
|
src/fusanet_utils/features/base.py
|
fusa-project/fusa-net-utils
|
b8740c67c0c789889b7abce477c894d77c70a20c
|
[
"MIT"
] | null | null | null |
src/fusanet_utils/features/base.py
|
fusa-project/fusa-net-utils
|
b8740c67c0c789889b7abce477c894d77c70a20c
|
[
"MIT"
] | null | null | null |
src/fusanet_utils/features/base.py
|
fusa-project/fusa-net-utils
|
b8740c67c0c789889b7abce477c894d77c70a20c
|
[
"MIT"
] | null | null | null |
import logging
from abc import ABC, abstractmethod
from os.path import isfile, splitext
import pathlib
import torch
from .waveform import get_waveform
logger = logging.getLogger(__name__)
class Feature(ABC):
def __init__(self, params):
self.params = params
super().__init__()
@abstractmethod
def compute(self, waveform: torch.Tensor):
pass
def create_path(self, waveform_path: pathlib.Path) -> pathlib.Path:
feature_name = type(self).__name__
file_name = waveform_path.stem + "_" + feature_name + ".pt"
for k, part in enumerate(waveform_path.parts[::-1]):
if part == 'datasets':
break
pre_path = pathlib.Path(*waveform_path.parts[:-(k+1)])
pos_path = pathlib.Path(*waveform_path.parts[-k:-1])
(pre_path / "features" / pos_path).mkdir(parents=True, exist_ok=True)
return pre_path / "features" / pos_path / file_name
def write_to_disk(self, waveform_path: str, global_normalizer = None) -> None:
feature_path = self.create_path(pathlib.Path(waveform_path))
if not feature_path.exists() or self.params["overwrite"]:
logger.debug(f"Writing features for {waveform_path}")
waveform = get_waveform(waveform_path, self.params, global_normalizer)
feature = self.compute(waveform)
torch.save(feature, feature_path)
def read_from_disk(self, waveform_path: str) -> torch.Tensor:
feature_path = self.create_path(pathlib.Path(waveform_path))
if feature_path.exists():
return torch.load(feature_path)
else:
raise FileNotFoundError("Feature file not found")
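# Illustrative subclass sketch (not part of the original module): a
# concrete Feature only needs to implement `compute`, e.g.
#
#   class Energy(Feature):
#       def compute(self, waveform: torch.Tensor):
#           # mean signal energy as a single-value feature
#           return (waveform ** 2).mean()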
| 35.625
| 83
| 0.657895
| 210
| 1,710
| 5.090476
| 0.338095
| 0.12348
| 0.084191
| 0.086062
| 0.241347
| 0.157156
| 0.157156
| 0.157156
| 0.093545
| 0.093545
| 0
| 0.002297
| 0.236257
| 1,710
| 47
| 84
| 36.382979
| 0.816233
| 0
| 0
| 0.054054
| 0
| 0
| 0.055588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0.027027
| 0.162162
| 0
| 0.378378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f838fea76677e89d488005a23aab7f853eac184d
| 11,397
|
py
|
Python
|
app.py
|
KendraObika/Froggit
|
3734d74de6b7febabb6c1645b61e42928203cf63
|
[
"MIT"
] | null | null | null |
app.py
|
KendraObika/Froggit
|
3734d74de6b7febabb6c1645b61e42928203cf63
|
[
"MIT"
] | null | null | null |
app.py
|
KendraObika/Froggit
|
3734d74de6b7febabb6c1645b61e42928203cf63
|
[
"MIT"
] | null | null | null |
"""
Primary module for Froggit
This module contains the main controller class for the Froggit application. There
is no need for any additional classes in this module. If you need more classes, 99%
of the time they belong in either the lanes module or the models module. If you are
unsure about where a new class should go, post a question on Piazza.
Kendra Obika kao78
December 20 2020
"""
from consts import *
from game2d import *
from level import *
import introcs
from kivy.logger import Logger
# PRIMARY RULE: Froggit can only access attributes in level.py via getters/setters
# Froggit is NOT allowed to access anything in lanes.py or models.py.
class Froggit(GameApp):
"""
The primary controller class for the Froggit application
This class extends GameApp and implements the various methods necessary for
processing the player inputs and starting/running a game.
Method start begins the application.
Method update either changes the state or updates the Level object
Method draw displays the Level object and any other elements on screen
Because of some of the weird ways that Kivy works, you SHOULD NOT create an
initializer __init__ for this class. Any initialization should be done in
the start method instead. This is only for this class. All other classes
behave normally.
Most of the work handling the game is actually provided in the class Level.
Level should be modeled after subcontrollers.py from lecture, and will have
its own update and draw method.
The primary purpose of this class is managing the game state: when is the
game started, paused, completed, etc. It keeps track of that in a hidden
attribute
Attribute view: The game view, used in drawing (see examples from class)
Invariant: view is an instance of GView and is inherited from GameApp
Attribute input: The user input, used to control the frog and change state
Invariant: input is an instance of GInput and is inherited from GameApp
"""
# HIDDEN ATTRIBUTES
# Attribute _state: The current state of the game (taken from consts.py)
# Invariant: _state is one of STATE_INACTIVE, STATE_LOADING, STATE_PAUSED,
# STATE_ACTIVE, STATE_CONTINUE, or STATE_COMPLETE
#
# Attribute _level: The subcontroller for a level, managing the frog and obstacles
# Invariant: _level is a Level object or None if no level is currently active
#
# Attribute _title: The title of the game
# Invariant: _title is a GLabel, or None if there is no title to display
#
# Attribute _text: A message to display to the player
# Invariant: _text is a GLabel, or None if there is no message to display
# LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
# Attribute _cover: A background underneath text to display to the player
# Invariant: _cover is a GLabel, or None if there is no text to display
# DO NOT MAKE A NEW INITIALIZER!
# THREE MAIN GAMEAPP METHODS
def start(self):
"""
Initializes the application.
This method is distinct from the built-in initializer __init__ (which
you should not override or change). This method is called once the
game is running. You should use it to initialize any game specific
attributes.
This method should make sure that all of the attributes satisfy the
given invariants. When done, it sets the _state to STATE_INACTIVE and
creates both the title (in attribute _title) and a message (in attribute
_text) saying that the user should press a key to play a game.
"""
        # no assert statements needed because this method has no parameters
        # initialize the game-specific attributes
self._level = None
self._title = None
self._text = None
self._cover = None
self._state = STATE_INACTIVE
        # _state was just set to STATE_INACTIVE, so the invariants on _text
        # and _title hold trivially; now create the title and the start message
self._title = GLabel(text="FROGGIT",font_name=ALLOY_FONT,font_size=\
ALLOY_LARGE,x=self.width//2,y=self.height//1.75,linecolor="dark green")
self._text = GLabel(text="PRESS 'S' TO START",font_name=ALLOY_FONT,\
font_size=ALLOY_MEDIUM,x=self.width//2,y=self.height//2.5)
def update(self, dt):
"""
Updates the game objects each frame.
It is the method that does most of the work. It is NOT in charge of
playing the game. That is the purpose of the class Level. The primary
        purpose of this method is to determine the current state, and -- if the
game is active -- pass the input to the Level object _level to play the
game.
As part of the assignment, you are allowed to add your own states.
However, at a minimum you must support the following states:
STATE_INACTIVE, STATE_LOADING, STATE_ACTIVE, STATE_PAUSED,
STATE_CONTINUE, and STATE_COMPLETE. Each one of these does its own
        thing and might even need its own helper. We describe these below.
STATE_INACTIVE: This is the state when the application first opens.
It is a paused state, waiting for the player to start the game. It
displays the title and a simple message on the screen. The application
remains in this state so long as the player never presses a key.
STATE_LOADING: This is the state that creates a new level and shows it on
the screen. The application switches to this state if the state was
STATE_INACTIVE in the previous frame, and the player pressed a key.
This state only lasts one animation frame (the amount of time to load
the data from the file) before switching to STATE_ACTIVE. One of the
key things about this state is that it resizes the window to match the
level file.
STATE_ACTIVE: This is a session of normal gameplay. The player can
move the frog towards the exit, and the game will move all obstacles
(cars and logs) about the screen. All of this should be handled inside
of class Level (NOT in this class). Hence the Level class should have
an update() method, just like the subcontroller example in lecture.
STATE_PAUSED: Like STATE_INACTIVE, this is a paused state. However,
the game is still visible on the screen.
STATE_CONTINUE: This state restores the frog after it was either killed
or reached safety. The application switches to this state if the state
was STATE_PAUSED in the previous frame, and the player pressed a key.
This state only lasts one animation frame before switching to STATE_ACTIVE.
STATE_COMPLETE: The wave is over (all lives are lost or all frogs are safe),
        and the game is either won or lost.
You are allowed to add more states if you wish. Should you do so, you should
describe them here.
Parameter dt: The time in seconds since last update
Precondition: dt is a number (int or float)
"""
if self._state == STATE_INACTIVE and self.input.is_key_down('s'):
self._title = None
self._text = None
self._state = STATE_LOADING
if self._state == STATE_LOADING:
dic = self.load_json(DEFAULT_LEVEL)
hitdic = self.load_json(OBJECT_DATA)
self._level = Level(dic, hitdic)
self.width = self._level.getWidth()
self.height = self._level.getHeight()
self._state = STATE_ACTIVE
if self._state == STATE_ACTIVE and not self.isPaused():
self._level.update(dt, self.input)
if self._state == STATE_PAUSED:
if self._level.noLives():
self.youLoseText(self._level)
self._state = STATE_COMPLETE
elif self._level.pauseGame():
self.pausedTexts(self._level)
if self.input.is_key_down('c'):
self._state = STATE_CONTINUE
elif self._level.endGame():
self.youWinText(self._level)
self._state = STATE_COMPLETE
if self._state == STATE_CONTINUE:
self._level.resetFrog()
self._state = STATE_ACTIVE
def draw(self):
"""
Draws the game objects to the view.
Every single thing you want to draw in this game is a GObject. To draw a
GObject g, simply use the method g.draw(self.view). It is that easy!
Many of the GObjects (such as the cars, logs, and exits) are attributes
in either Level or Lane. In order to draw them, you either need to add
getters for these attributes or you need to add a draw method to
those two classes. We suggest the latter. See the example subcontroller.py
from the lesson videos.
"""
        if self._text is not None and self._title is not None:
self._title.draw(self.view)
self._text.draw(self.view)
if self._state != STATE_INACTIVE:
self._level.draw(self.view)
if self._state == STATE_PAUSED or self._state == STATE_COMPLETE:
self._cover.draw(self.view)
self._text.draw(self.view)
# HELPER METHODS FOR THE STATES GO HERE
    def isPaused(self):
        """
        Pauses the game when pauseGame or endGame is prompted.
        Returns True if the state was changed to STATE_PAUSED and False
        otherwise, so the update method can use it as a predicate.
        """
        if self._level.pauseGame() or self._level.endGame():
            self._state = STATE_PAUSED
            return True
        return False
def pausedTexts(self, level):
"""
Initializes the messages on the pause screen
Parameter level: Represents a single level of the game
Precondition: level is a Level object
"""
self._text = GLabel(height=GRID_SIZE,x= level.getCenter().x,\
y = level.getCenter().y, text="PRESS 'C' TO CONTINUE",\
font_name=ALLOY_FONT,font_size=ALLOY_SMALL, linecolor = "white")
self._cover = GLabel(width=self.width,height=GRID_SIZE,x=self._text.x,\
y = self._text.y, fillcolor="dark green")
def youLoseText(self, level):
"""
Initializes the messages on the you lose screen
Parameter level: Represents a single level of the game
Precondition: level is a Level object
"""
self._text = GLabel(height=GRID_SIZE,x= level.getCenter().x,\
y = level.getCenter().y, text="YOU LOSE",\
font_name=ALLOY_FONT,font_size=ALLOY_SMALL, linecolor = "white")
self._cover = GLabel(width=self.width,height=GRID_SIZE,x=self._text.x,\
y = self._text.y, fillcolor="dark green")
def youWinText(self, level):
"""
Initializes the messages on the you win screen
Parameter level: Represents a single level of the game
Precondition: level is a Level object
"""
self._text = GLabel(height=GRID_SIZE,x= level.getCenter().x,\
y = level.getCenter().y, text="YOU WIN!",\
font_name=ALLOY_FONT,font_size=ALLOY_SMALL, linecolor = "white")
self._cover = GLabel(width=self.width,height=GRID_SIZE,x=self._text.x,\
y = self._text.y, fillcolor="dark green")
| 41.443636
| 87
| 0.668246
| 1,643
| 11,397
| 4.537432
| 0.224589
| 0.022938
| 0.035681
| 0.019316
| 0.313213
| 0.245473
| 0.208719
| 0.176258
| 0.15721
| 0.146747
| 0
| 0.002167
| 0.271124
| 11,397
| 274
| 88
| 41.594891
| 0.895269
| 0.59542
| 0
| 0.35443
| 0
| 0
| 0.030877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088608
| false
| 0
| 0.063291
| 0
| 0.164557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f83ba25f5a20e6c46fa842756d48009b7d4b11f6
| 4,444
|
py
|
Python
|
neural_semigroups/mace4_semigroups_dataset.py
|
zarebulic/neural-semigroup-experiment
|
c554acb17d264ba810009f8b86c35ee9f8c4d1f4
|
[
"Apache-2.0"
] | 6
|
2020-04-05T23:24:54.000Z
|
2021-11-15T11:17:09.000Z
|
neural_semigroups/mace4_semigroups_dataset.py
|
zarebulic/neural-semigroup-experiment
|
c554acb17d264ba810009f8b86c35ee9f8c4d1f4
|
[
"Apache-2.0"
] | 23
|
2020-03-15T09:09:54.000Z
|
2022-03-29T22:32:23.000Z
|
neural_semigroups/mace4_semigroups_dataset.py
|
zarebulic/neural-semigroup-experiment
|
c554acb17d264ba810009f8b86c35ee9f8c4d1f4
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019-2021 Boris Shminke
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sqlite3
from typing import Callable, Optional
import torch
from tqdm import tqdm
from neural_semigroups.semigroups_dataset import SemigroupsDataset
from neural_semigroups.utils import connect_to_db
class Mace4Semigroups(SemigroupsDataset):
"""
    a ``torch.utils.data.Dataset`` wrapper for the data of ``mace4`` output
stored in a ``sqlite`` database
>>> import shutil
>>> from neural_semigroups.constants import TEST_TEMP_DATA
>>> import os
>>> from neural_semigroups.generate_data_with_mace4 import (
... generate_data_with_mace4)
>>> shutil.rmtree(TEST_TEMP_DATA, ignore_errors=True)
>>> os.mkdir(TEST_TEMP_DATA)
>>> database = os.path.join(TEST_TEMP_DATA,"test.db")
>>> torch.manual_seed(42) # doctest: +ELLIPSIS
<torch...
>>> generate_data_with_mace4([
... "--max_dim", "2",
... "--min_dim", "2",
... "--number_of_tasks", "1",
... "--database_name", database])
>>> mace4_semigroups = Mace4Semigroups(
... root=database,
... cardinality=2,
... transform=lambda x: x
... )
>>> mace4_semigroups[0][0]
tensor([[0, 0],
[0, 0]])
>>> mace4_semigroups.get_table_from_output("not a mace4 output file")
Traceback (most recent call last):
...
ValueError: wrong mace4 output file format!
"""
_where_clause = "WHERE output LIKE '%Process % exit (max_models)%'"
def __init__(
self,
cardinality: int,
root: str,
transform: Optional[Callable] = None,
):
"""
:param root: a full path to an ``sqlite`` database file
which has a table ``mace_output`` with a string column ``output``
:param cardinality: the cardinality of semigroups
:param transform: a function/transform that takes a Cayley table
and returns a transformed version.
"""
super().__init__(root, cardinality, transform)
self.load_data_from_mace_output()
def get_table_from_output(self, output: str) -> torch.Tensor:
"""
gets a Cayley table of a magma from the output of ``mace4``
:param output: output of ``mace4``
:returns: a Cayley table
"""
search_result = re.search(
r".*function\(\*\(_,_\), \[(.*)]\)\..*", output, re.DOTALL
)
if search_result is None:
raise ValueError("wrong mace4 output file format!")
input_lines = search_result.groups()[0]
# pylint: disable=not-callable
cayley_table = torch.tensor(
list(
map(
int,
input_lines.translate(
str.maketrans("", "", " \t\n])")
).split(","),
)
)
).view(self.cardinality, self.cardinality)
return cayley_table
def get_additional_info(self, cursor: sqlite3.Cursor) -> int:
"""
        counts the relevant rows in an SQLite database with ``mace4`` outputs
        :param cursor: an SQLite database cursor
        :returns: the total number of rows matching the filter clause
"""
cursor.execute(
f"SELECT COUNT(*) FROM mace_output {self._where_clause}"
)
row_count = cursor.fetchone()[0]
return row_count
def load_data_from_mace_output(self) -> None:
"""loads data generated by ``mace4`` from an ``sqlite`` database"""
cursor = connect_to_db(self.root)
row_count = self.get_additional_info(cursor)
cursor.execute(f"SELECT output FROM mace_output {self._where_clause}")
features = []
for _ in tqdm(range(row_count)):
output = cursor.fetchone()[0]
features.append(self.get_table_from_output(output))
self.tensors = (torch.stack(features),)
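# A quick standalone check of the regex-and-translate parsing used in
# get_table_from_output above (the sample string is made up, but follows the
# mace4 ``function(*(_,_), [...])`` shape):
#
#     >>> import re, torch
#     >>> sample = "interpretation( 2, ...).\nfunction(*(_,_), [0,0,0,0]).\n"
#     >>> m = re.search(r".*function\(\*\(_,_\), \[(.*)]\)\..*", sample, re.DOTALL)
#     >>> cells = m.groups()[0].translate(str.maketrans("", "", " \t\n])")).split(",")
#     >>> torch.tensor(list(map(int, cells))).view(2, 2)
#     tensor([[0, 0],
#             [0, 0]])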
| 34.71875
| 78
| 0.617912
| 532
| 4,444
| 4.994361
| 0.37782
| 0.022582
| 0.030109
| 0.023711
| 0.065487
| 0.048927
| 0
| 0
| 0
| 0
| 0
| 0.013863
| 0.269577
| 4,444
| 127
| 79
| 34.992126
| 0.804683
| 0.489424
| 0
| 0
| 0
| 0
| 0.115972
| 0.01119
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.14
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f83bb94361c259b35e4ff208fa028f2496100f01
| 7,501
|
py
|
Python
|
samples/data_inspect_utils.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 245
|
2019-11-29T02:55:25.000Z
|
2022-03-30T07:30:18.000Z
|
samples/data_inspect_utils.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 24
|
2019-11-29T10:05:00.000Z
|
2022-03-30T07:16:06.000Z
|
samples/data_inspect_utils.py
|
FishLiuabc/centerpose
|
555d753cd82693476f91f78c53aa4147f5a83015
|
[
"MIT"
] | 45
|
2019-11-29T05:12:02.000Z
|
2022-03-21T02:20:36.000Z
|
from __future__ import absolute_import, division, print_function
import cv2
import random
import numpy as np
import colorsys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.patches import Polygon
from skimage.measure import find_contours
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# centre
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
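# For instance, rotating a 100x200 (h x w) image by 90 degrees swaps the
# bounding dimensions, up to floating-point rounding in the rotation matrix:
#
#     >>> img = np.zeros((100, 200, 3), dtype=np.uint8)
#     >>> rotate_bound(img, 90).shape
#     (200, 100, 3)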
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
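# Worked example of the blend above with alpha=0.5: a channel value of 100
# under the mask, with color component 1.0, becomes
# 100 * (1 - 0.5) + 0.5 * 1.0 * 255 = 177.5.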
def apply_keypoint(image, keypoint, num_joints=17):
image = image.astype(np.uint8)
edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[3, 5], [4, 6], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[5, 11], [6, 12], [11, 12],
[11, 13], [13, 15], [12, 14], [14, 16]]
for j in range(num_joints):
if keypoint[j][2]>0.:
cv2.circle(image,
(keypoint[j, 0], keypoint[j, 1]), 3, (255,255,255), 2)
stickwidth = 2
for j, e in enumerate(edges):
if keypoint[e[0],2] > 0. and keypoint[e[1],2] > 0.:
centerA = keypoint[e[0],:2]
centerB = keypoint[e[1],:2]
cv2.line(image,(centerA[0], centerA[1]),(centerB[0], centerB[1]),(255, 255,255),2)
return image
def display_instances(image, boxes, masks, keypoints, class_id=1, class_name='person',
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
show_keypoint=True,
colors=None, captions=None):
"""
    boxes: [num_instance, (y1, x1, y2, x2)] in image coordinates.
masks: [height, width, num_instances]
class_ids: 1 for person
class_name: class name of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
    colors: (optional) An array of colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
score = scores[i] if scores is not None else None
label = class_name
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[i, :, :]
keypoint = keypoints[i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
if show_keypoint:
masked_image = apply_keypoint(masked_image, keypoint)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.show()
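# Hypothetical call, assuming N detected instances with matching array shapes
# (boxes: [N, 4], masks: [N, H, W], keypoints: [N, 17, 3]):
#
#     display_instances(image, boxes, masks, keypoints,
#                       scores=scores, title="person detections")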
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [num_instances, height, width]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[0], 4], dtype=np.int32)
for i in range(mask.shape[0]):
m = mask[i, :, :]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
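# Sanity check: a single 1-pixel mask at (row 2, col 3) yields an exclusive
# bottom-right corner, i.e. [y1, x1, y2, x2] = [2, 3, 3, 4]:
#
#     >>> m = np.zeros((1, 5, 6), dtype=np.uint8); m[0, 2, 3] = 1
#     >>> extract_bboxes(m)
#     array([[2, 3, 3, 4]], dtype=int32)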
| 35.382075
| 94
| 0.55046
| 1,033
| 7,501
| 3.93514
| 0.264279
| 0.00861
| 0.005904
| 0.007872
| 0.032964
| 0.0123
| 0.0123
| 0
| 0
| 0
| 0
| 0.047572
| 0.321824
| 7,501
| 211
| 95
| 35.549763
| 0.751523
| 0.240101
| 0
| 0.059701
| 0
| 0
| 0.025332
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 1
| 0.052239
| false
| 0
| 0.067164
| 0
| 0.156716
| 0.022388
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f83c3a927ff9df79fe83f0ce7fdfd551b1c6f921
| 7,741
|
py
|
Python
|
dapy/filters/particle.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 11
|
2020-07-29T07:46:39.000Z
|
2022-03-17T01:28:07.000Z
|
dapy/filters/particle.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 1
|
2020-07-14T11:49:17.000Z
|
2020-07-29T07:43:22.000Z
|
dapy/filters/particle.py
|
hassaniqbal209/data-assimilation
|
ec52d655395dbed547edf4b4f3df29f017633f1b
|
[
"MIT"
] | 10
|
2020-07-14T11:34:24.000Z
|
2022-03-07T09:08:12.000Z
|
"""Particle filters for inference in state space models."""
import abc
from typing import Tuple, Dict, Callable, Any, Optional
import numpy as np
from numpy.random import Generator
from scipy.special import logsumexp
from scipy.sparse import csr_matrix
from dapy.filters.base import AbstractEnsembleFilter
from dapy.models.base import AbstractModel
import dapy.ot as optimal_transport
class AbstractParticleFilter(AbstractEnsembleFilter):
"""Abstract base class for particle filters."""
def _calculate_weights(
self,
model: AbstractModel,
states: np.ndarray,
observation: np.ndarray,
time_index: int,
) -> np.ndarray:
"""Calculate importance weights for particles given observations."""
log_weights = model.log_density_observation_given_state(
observation, states, time_index
)
log_sum_weights = logsumexp(log_weights)
return np.exp(log_weights - log_sum_weights)
@abc.abstractmethod
def _assimilation_transform(
self, rng: Generator, state_particles: np.ndarray, weights: np.ndarray
) -> np.ndarray:
pass
def _assimilation_update(
self,
model: AbstractModel,
rng: Generator,
state_particles: np.ndarray,
observation: np.ndarray,
time_index: int,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
weights = self._calculate_weights(
model, state_particles, observation, time_index
)
state_mean = (weights[:, None] * state_particles).sum(0)
state_std = (
np.sum(weights[:, None] * (state_particles - state_mean) ** 2, axis=0)
** 0.5
)
state_particles = self._assimilation_transform(rng, state_particles, weights)
return state_particles, state_mean, state_std
class BootstrapParticleFilter(AbstractParticleFilter):
"""Bootstrap particle filter (sequential importance resampling).
The filtering distribution at each observation time index is approximated by
alternating propagating an ensemble of state particles forward through time under
the model dynamics and resampling according to weights calculated from the
conditional probability densities of the observations at the current time index
given the state particle values. Here the resampling step uses multinomial
resampling.
References:
1. Gordon, N.J.; Salmond, D.J.; Smith, A.F.M. (1993). Novel approach to
nonlinear / non-Gaussian Bayesian state estimation. Radar and Signal
Processing, IEE Proceedings F. 140 (2): 107--113.
2. Del Moral, Pierre (1996). Non Linear Filtering: Interacting Particle
Solution. Markov Processes and Related Fields. 2 (4): 555--580.
"""
def _assimilation_transform(self, rng, state_particles, weights):
"""Perform multinomial particle resampling given computed weights."""
num_particle = state_particles.shape[0]
resampled_indices = rng.choice(num_particle, num_particle, True, weights)
return state_particles[resampled_indices]
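    # Toy illustration (weights made up): with weights [0.1, 0.1, 0.8] most of
    # the resampled indices come out as 2, duplicating the high-weight particle:
    #
    #     rng = np.random.default_rng(0)
    #     idx = rng.choice(3, 3, True, np.array([0.1, 0.1, 0.8]))  # mostly 2s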
class EnsembleTransformParticleFilter(AbstractParticleFilter):
"""Ensemble transform particle filter.
The filtering distribution at each observation time index is approximated by
alternating propagating an ensemble of state particles forward through time under
the model dynamics and linearly transforming the ensemble with an optimal transport
map computed to transform a uniform empirical distribution at the particle locations
to an empirical distribution at the particle locations weighted according to the
conditional probability densities of the observations at the current time index
given the state particle values [1].
References:
1. Reich, S. (2013). A nonparametric ensemble transform method for
Bayesian inference. SIAM Journal on Scientific Computing, 35(4),
A2013-A2024.
"""
def __init__(
self,
optimal_transport_solver: Callable[
[np.ndarray, np.ndarray, np.ndarray], np.ndarray
] = optimal_transport.solve_optimal_transport_exact,
optimal_transport_solver_kwargs: Optional[Dict[str, Any]] = None,
transport_cost: Callable[
[np.ndarray, np.ndarray], np.ndarray
] = optimal_transport.pairwise_euclidean_distance,
weight_threshold: float = 1e-8,
use_sparse_matrix_multiply: bool = False,
):
"""
Args:
optimal_transport_solver: Optimal transport solver function with signature
transport_matrix = optimal_transport_solver(
source_dist, target_dist, cost_matrix,
**optimal_transport_solver_kwargs)
where `source_dist` and `target_dist` are the source and target
distribution weights respectively as 1D arrays, `cost_matrix` is a 2D
array of the transport costs for each particle pair.
optimal_transport_solver_kwargs: Any additional keyword parameters values
for the optimal transport solver.
transport_cost: Function calculating transport cost matrix with signature
cost_matrix = transport_cost(source_particles, target_particles)
                where `source_particles` and `target_particles` are the particle
                values of the source and target empirical distributions
                respectively.
weight_threshold: Threshold below which to set any particle weights to zero
prior to solving the optimal transport problem. Using a small non-zero
value can both improve the numerical stability of the optimal transport
                solves, as problems with many small weights sometimes fail to
                converge, and also improve performance, since some solvers
                (including the default network simplex based algorithm) are
                able to exploit sparsity in the source / target distributions.
            use_sparse_matrix_multiply: Whether to convert the optimal transport based
transform matrix used in the assimilation update to a sparse CSR format
before multiplying by the state particle ensemble matrix. This may
improve performance when the computed transport plan is sparse and the
number of particles is large.
"""
self.optimal_transport_solver = optimal_transport_solver
self.optimal_transport_solver_kwargs = (
{}
if optimal_transport_solver_kwargs is None
else optimal_transport_solver_kwargs
)
self.transport_cost = transport_cost
self.weight_threshold = weight_threshold
self.use_sparse_matrix_multiply = use_sparse_matrix_multiply
def _assimilation_transform(self, rng, state_particles, weights):
"""Solve optimal transport problem and transform ensemble."""
num_particle = state_particles.shape[0]
source_dist = np.ones(num_particle) / num_particle
        target_dist = weights.copy()  # copy so thresholding does not mutate the caller's weights
if self.weight_threshold > 0:
target_dist[target_dist < self.weight_threshold] = 0
target_dist /= target_dist.sum()
cost_matrix = self.transport_cost(state_particles, state_particles)
transform_matrix = num_particle * self.optimal_transport_solver(
source_dist,
target_dist,
cost_matrix,
**self.optimal_transport_solver_kwargs
)
if self.use_sparse_matrix_multiply:
transform_matrix = csr_matrix(transform_matrix)
return transform_matrix @ state_particles
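    # Shape sketch: with P particles in D state dimensions, `transform_matrix`
    # is (P, P) and `state_particles` is (P, D), so the product above is a
    # (P, P) @ (P, D) ensemble transform that moves the equally weighted
    # particles toward the importance-weighted posterior ensemble.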
| 44.745665
| 88
| 0.689058
| 879
| 7,741
| 5.88851
| 0.302617
| 0.074189
| 0.063756
| 0.027821
| 0.283037
| 0.267195
| 0.191461
| 0.186631
| 0.118624
| 0.098532
| 0
| 0.010064
| 0.255523
| 7,741
| 172
| 89
| 45.005814
| 0.888079
| 0.463764
| 0
| 0.168539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067416
| false
| 0.011236
| 0.101124
| 0
| 0.247191
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f83d223baea30c7408f539bf887906161d4b99ea
| 1,477
|
py
|
Python
|
pokemon.py
|
bran-almeida/Pokemon_Game
|
061c9e1b53d8cbaa7366634535288bb2868d6885
|
[
"MIT"
] | null | null | null |
pokemon.py
|
bran-almeida/Pokemon_Game
|
061c9e1b53d8cbaa7366634535288bb2868d6885
|
[
"MIT"
] | null | null | null |
pokemon.py
|
bran-almeida/Pokemon_Game
|
061c9e1b53d8cbaa7366634535288bb2868d6885
|
[
"MIT"
] | null | null | null |
import random
class Pokemon:
    # Default type, overridden by each subclass; a class-level default keeps
    # __str__ from raising AttributeError on a plain Pokemon.
    tipo = "Normal"
    def __init__(self, especie, level=None, nome=None):
self.especie = especie
if nome:
self.nome = nome
else:
self.nome = especie
if level:
self.level = level
else:
self.level = random.randint(1,100)
self.ataque = self.level * 5
self.vida = self.level * 10
def __str__(self):
return f"Especie: {self.especie} | Level: {self.level} | Tipo: {self.tipo}"
def atacar(self, alvo):
        ataque_efetivo = int(self.ataque * random.random() * 1.3)
        alvo.vida -= ataque_efetivo
        print(f"{alvo.especie} lost {ataque_efetivo} hit points")
if alvo.vida <= 0:
print(f"{alvo.especie}, foi derrotado.")
return True
else:
return False
class PokemonEletrico(Pokemon):
    tipo = "Electric"
    def atacar(self, alvo):
        print(f"{self.especie} launched an electric attack at {alvo.especie}")
        return super().atacar(alvo)
class PokemonFogo(Pokemon):
    tipo = "Fire"
    def atacar(self, alvo):
        print(f"{self.especie} launched a fire attack at {alvo.especie}")
        return super().atacar(alvo)
class PokemonAgua(Pokemon):
    tipo = "Water"
    def atacar(self, alvo):
        print(f"{self.especie} launched a water attack at {alvo.especie}")
        return super().atacar(alvo)
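# Minimal battle sketch (hypothetical usage; the names are made up):
#
#     pikachu = PokemonEletrico("Pikachu", level=10)
#     squirtle = PokemonAgua("Squirtle", level=10)
#     defeated = False
#     while not defeated:
#         defeated = pikachu.atacar(squirtle) or squirtle.atacar(pikachu)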
| 27.867925
| 83
| 0.564658
| 176
| 1,477
| 4.676136
| 0.272727
| 0.080194
| 0.063183
| 0.082625
| 0.315917
| 0.315917
| 0.315917
| 0.274605
| 0.17983
| 0.17983
| 0
| 0.00995
| 0.319567
| 1,477
| 53
| 84
| 27.867925
| 0.808955
| 0
| 0
| 0.25
| 0
| 0.025
| 0.227334
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.025
| 0.025
| 0.5
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f83f6977354074227de8507f3a2a55a87f9d6abe
| 5,752
|
py
|
Python
|
sdks/python/appcenter_sdk/models/BranchConfigurationToolsets.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | null | null | null |
sdks/python/appcenter_sdk/models/BranchConfigurationToolsets.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 6
|
2019-10-23T06:38:53.000Z
|
2022-01-22T07:57:58.000Z
|
sdks/python/appcenter_sdk/models/BranchConfigurationToolsets.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 2
|
2019-10-23T06:31:05.000Z
|
2021-08-21T17:32:47.000Z
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class BranchConfigurationToolsets(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'xcode': '',
'javascript': '',
'xamarin': '',
'android': ''
}
attribute_map = {
'xcode': 'xcode',
'javascript': 'javascript',
'xamarin': 'xamarin',
'android': 'android'
}
def __init__(self, xcode=None, javascript=None, xamarin=None, android=None): # noqa: E501
"""BranchConfigurationToolsets - a model defined in Swagger""" # noqa: E501
self._xcode = None
self._javascript = None
self._xamarin = None
self._android = None
self.discriminator = None
if xcode is not None:
self.xcode = xcode
if javascript is not None:
self.javascript = javascript
if xamarin is not None:
self.xamarin = xamarin
if android is not None:
self.android = android
@property
def xcode(self):
"""Gets the xcode of this BranchConfigurationToolsets. # noqa: E501
Build configuration when Xcode is part of the build steps # noqa: E501
:return: The xcode of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._xcode
@xcode.setter
def xcode(self, xcode):
"""Sets the xcode of this BranchConfigurationToolsets.
Build configuration when Xcode is part of the build steps # noqa: E501
:param xcode: The xcode of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._xcode = xcode
@property
def javascript(self):
"""Gets the javascript of this BranchConfigurationToolsets. # noqa: E501
Build configuration when React Native, or other JavaScript tech, is part of the build steps # noqa: E501
:return: The javascript of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._javascript
@javascript.setter
def javascript(self, javascript):
"""Sets the javascript of this BranchConfigurationToolsets.
Build configuration when React Native, or other JavaScript tech, is part of the build steps # noqa: E501
:param javascript: The javascript of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._javascript = javascript
@property
def xamarin(self):
"""Gets the xamarin of this BranchConfigurationToolsets. # noqa: E501
Build configuration for Xamarin projects # noqa: E501
:return: The xamarin of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._xamarin
@xamarin.setter
def xamarin(self, xamarin):
"""Sets the xamarin of this BranchConfigurationToolsets.
Build configuration for Xamarin projects # noqa: E501
:param xamarin: The xamarin of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._xamarin = xamarin
@property
def android(self):
"""Gets the android of this BranchConfigurationToolsets. # noqa: E501
Build configuration for Android projects # noqa: E501
:return: The android of this BranchConfigurationToolsets. # noqa: E501
:rtype:
"""
return self._android
@android.setter
def android(self, android):
"""Sets the android of this BranchConfigurationToolsets.
Build configuration for Android projects # noqa: E501
:param android: The android of this BranchConfigurationToolsets. # noqa: E501
:type:
"""
self._android = android
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BranchConfigurationToolsets):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.497436
| 113
| 0.595271
| 611
| 5,752
| 5.518822
| 0.212766
| 0.054567
| 0.156584
| 0.131673
| 0.449585
| 0.433867
| 0.394721
| 0.262159
| 0.113286
| 0.11032
| 0
| 0.019653
| 0.318846
| 5,752
| 194
| 114
| 29.649485
| 0.84099
| 0.410814
| 0
| 0.071429
| 0
| 0
| 0.039344
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.035714
| 0
| 0.357143
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f840464edc80ddc50844d1de4a6669b63272a7ea
| 1,156
|
py
|
Python
|
tests/cli/version_test.py
|
longhuei/floyd-cli
|
82709f1e301d7a56ac354e4615a354e2c36d71b8
|
[
"Apache-2.0"
] | 162
|
2017-01-27T02:54:17.000Z
|
2022-03-03T09:06:28.000Z
|
tests/cli/version_test.py
|
longhuei/floyd-cli
|
82709f1e301d7a56ac354e4615a354e2c36d71b8
|
[
"Apache-2.0"
] | 79
|
2017-02-17T08:58:39.000Z
|
2021-05-29T09:24:31.000Z
|
tests/cli/version_test.py
|
longhuei/floyd-cli
|
82709f1e301d7a56ac354e4615a354e2c36d71b8
|
[
"Apache-2.0"
] | 43
|
2017-02-23T10:58:42.000Z
|
2022-01-17T10:29:31.000Z
|
from click.testing import CliRunner
import unittest
from mock import patch, Mock, PropertyMock
from floyd.cli.version import upgrade
class TestFloydVersion(unittest.TestCase):
"""
    Tests for the floyd upgrade cli command
"""
def setUp(self):
self.runner = CliRunner()
@patch('floyd.cli.version.pip_upgrade')
@patch('floyd.cli.version.conda_upgrade')
@patch('floyd.cli.utils.sys')
def test_floyd_upgrade_with_standard_python(self, mock_sys, conda_upgrade, pip_upgrade):
mock_sys.version = '2.7.13 (default, Jan 19 2017, 14:48:08) \n[GCC 6.3.0 20170118]'
self.runner.invoke(upgrade)
conda_upgrade.assert_not_called()
pip_upgrade.assert_called_once()
@patch('floyd.cli.version.pip_upgrade')
@patch('floyd.cli.version.conda_upgrade')
@patch('floyd.cli.utils.sys')
def test_floyd_upgrade_with_anaconda_python(self, mock_sys, conda_upgrade, pip_upgrade):
mock_sys.version = '3.6.3 |Anaconda, Inc.| (default, Oct 13 2017, 12:02:49) \n[GCC 7.2.0]'
self.runner.invoke(upgrade)
pip_upgrade.assert_not_called()
conda_upgrade.assert_called_once()
| 32.111111
| 98
| 0.702422
| 163
| 1,156
| 4.773006
| 0.355828
| 0.071979
| 0.100257
| 0.102828
| 0.40874
| 0.40874
| 0.40874
| 0.40874
| 0.40874
| 0.40874
| 0
| 0.047269
| 0.176471
| 1,156
| 35
| 99
| 33.028571
| 0.769958
| 0.027682
| 0
| 0.347826
| 0
| 0.086957
| 0.26083
| 0.108303
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.130435
| false
| 0
| 0.173913
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8423088619bdfe61a95a3f318f27fab6ca0c75a
| 4,181
|
py
|
Python
|
offthedialbot/help.py
|
DJam98/bot
|
366a46bcca55098e1030a4f05d63e8872a791bf8
|
[
"MIT"
] | 2
|
2020-08-31T15:45:07.000Z
|
2021-09-26T22:15:43.000Z
|
offthedialbot/help.py
|
DJam98/bot
|
366a46bcca55098e1030a4f05d63e8872a791bf8
|
[
"MIT"
] | 17
|
2020-06-02T02:29:48.000Z
|
2021-10-13T23:47:44.000Z
|
offthedialbot/help.py
|
DJam98/bot
|
366a46bcca55098e1030a4f05d63e8872a791bf8
|
[
"MIT"
] | 3
|
2020-05-31T23:17:10.000Z
|
2022-03-09T22:23:22.000Z
|
"""Contains HelpCommand class."""
import discord
from discord.ext import commands
from offthedialbot import utils
class HelpCommand(commands.DefaultHelpCommand):
"""Set up help command for the bot."""
async def send_bot_help(self, mapping):
"""Send bot command page."""
list_commands = [
command for cog in [
await self.filter_commands(cog_commands)
for cog, cog_commands in mapping.items()
if cog is not None and await self.filter_commands(cog_commands)
] for command in cog
]
embed = self.create_embed(
title="`$help`",
description="All the commands for Off the Dial Bot!",
fields=[{
"name": "Commands:",
"value": "\n".join([
self.short(command)
for command in await self.filter_commands(mapping[None]) if command.help])
}, {
"name": "Misc Commands:",
"value": "\n".join([
self.short(command)
for command in list_commands])
}]
)
await self.get_destination().send(embed=embed)
async def send_cog_help(self, cog):
"""Send cog command page."""
embed = self.create_embed(
title=cog.qualified_name.capitalize(),
description=cog.description,
**({"fields": [{
"name": f"{cog.qualified_name.capitalize()} Commands:",
"value": "\n".join([
self.short(command)
for command in cog.get_commands()])
}]} if cog.get_commands() else {}))
await self.get_destination().send(embed=embed)
async def send_group_help(self, group):
"""Send command group page."""
embed = self.create_embed(
title=self.short(group, False),
description=group.help,
fields=[{
"name": f"Subcommands:",
"value": "\n".join([
self.short(command)
for command in await self.filter_commands(group.commands)
])
}]
)
await self.get_destination().send(embed=embed)
async def send_command_help(self, command):
"""Send command page."""
embed = self.create_embed(
title=self.short(command, False),
description=command.help,
)
await self.get_destination().send(embed=embed)
async def command_not_found(self, string):
"""Returns message when command is not found."""
return f"Command {self.short(string, False)} does not exist."
async def subcommand_not_found(self, command, string):
"""Returns message when subcommand is not found."""
if isinstance(command, commands.Group) and len(command.all_commands) > 0:
return f"Command {self.short(command, False)} has no subcommand named `{string}`."
else:
return f"Command {self.short(command, False)} has no subcommands."
async def send_error_message(self, error):
"""Send error message, override to support sending embeds."""
await self.get_destination().send(
embed=utils.Alert.create_embed(utils.Alert.Style.DANGER,
title="Command/Subcommand not found.", description=error))
def create_embed(self, fields: list = (), **kwargs):
"""Create help embed."""
embed = discord.Embed(color=utils.Alert.Style.DANGER, **kwargs)
for field in fields:
embed.add_field(**field, inline=False)
embed.set_footer(
text=f"Type {self.clean_prefix}help command for more info on a command. You can also type {self.clean_prefix}help category for more info on a category.")
return embed
def short(self, command, doc=True):
"""List the command as a one-liner."""
sig = self.get_command_signature(command) if not doc else f'{self.clean_prefix}{command}'
return f'`{sig[:-1] if sig.endswith(" ") else sig}` {(command.short_doc if doc else "")}'
help_command = HelpCommand()
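# Typical wiring (assumed here, not shown in this module): pass the instance
# to the bot constructor so that "$help" renders the embeds defined above.
#
#     bot = commands.Bot(command_prefix="$", help_command=help_command)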
| 38.712963
| 165
| 0.577613
| 479
| 4,181
| 4.941545
| 0.227557
| 0.034221
| 0.047317
| 0.048585
| 0.362907
| 0.31052
| 0.297
| 0.247571
| 0.215463
| 0.162653
| 0
| 0.000688
| 0.304233
| 4,181
| 107
| 166
| 39.074766
| 0.812994
| 0.026788
| 0
| 0.25
| 0
| 0.025
| 0.167284
| 0.028322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.0375
| 0
| 0.1375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8470708904f8b5b4aa1dabc0a1785bf58a61c23
| 7,178
|
py
|
Python
|
qpricesim/model_code/QLearningAgent.py
|
ToFeWe/qpricesim
|
2d4312ed1d1356449f0c168835a0662b238a27bb
|
[
"MIT"
] | 2
|
2022-03-22T12:16:37.000Z
|
2022-03-22T12:48:46.000Z
|
qpricesim/model_code/QLearningAgent.py
|
ToFeWe/qpricesim
|
2d4312ed1d1356449f0c168835a0662b238a27bb
|
[
"MIT"
] | null | null | null |
qpricesim/model_code/QLearningAgent.py
|
ToFeWe/qpricesim
|
2d4312ed1d1356449f0c168835a0662b238a27bb
|
[
"MIT"
] | null | null | null |
"""
A module that defines the QLearning Agent for the pricing game as a class.
Note that we have a numba version (for speed) which inherits everything from
QLearningAgentBase.
"""
import numpy as np
from numba import float64
from numba import int32
from numba import njit
from numba.experimental import jitclass
from .utils_q_learning import numba_argmax
from .utils_q_learning import numba_max
class QLearningAgentBase:
"""
A simple Q-Learning Agent based on numpy. Actions and state are assumed
to be represented by integer numbers/an index and corresponds to the
respective rows / columns in the Q-Matrix.
We assume that the agent can choose every action in every state.
The random seed will be set by a helper function outside this class.
Args:
self.epsilon (float): Exploration probability
self.alpha (float): Learning rate
self.discount (float): Discount rate
self.n_actions (int): Number of actions the agent can pick
"""
def __init__(self, alpha, epsilon, discount, n_actions, n_states):
self.n_actions = n_actions
self.n_states = n_states
self._qvalues = np.random.rand(self.n_states, self.n_actions)
self.alpha = alpha
self.epsilon = epsilon
self.discount = discount
def set_qmatrix(self, new_matrix):
self._qvalues = new_matrix
def get_qvalue(self, state, action):
"""
Returns the Q-value for the given state action
Args:
state (integer): Index representation of a state
action (integer): Index representation of an action
Returns:
float: Q-value for the state-action combination
"""
return self._qvalues[state, action]
def set_qvalue(self, state, action, value):
"""Sets the Qvalue for [state,action] to the given value
Args:
state (integer): Index representation of a state
action (integer): Index representation of an action
value (float): Q-value that is being assigned
"""
self._qvalues[state, action] = value
def get_value(self, state):
"""
Compute the agents estimate of V(s) using current q-values.
Args:
state (integer): Index representation of a state
Returns:
float: Value of the state
"""
value = numba_max(
self._qvalues[
state,
]
)
return value
def get_qmatrix(self):
"""
Returns the qmatrix of the agent
Returns:
array (float): Full Q-Matrix
"""
return self._qvalues
def update(self, state, action, reward, next_state):
"""
Update Q-Value:
Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))
Args:
state (integer): Index representation of the current state (Row of the Q-matrix)
action (integer): Index representation of the picked action (Column of the Q-matrix)
            reward (float): Reward from picking the action in the given state
next_state (integer): Index representation of the next state (Column of the Q-matrix)
"""
# Calculate the updated Q-value
c_q_value = (1 - self.alpha) * self.get_qvalue(state, action) + self.alpha * (
reward + self.discount * self.get_value(next_state)
)
# Update the Q-values for the next iteration
self.set_qvalue(state, action, c_q_value)
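        # Worked example with alpha=0.5 and discount=0.9: if Q(s,a)=1.0,
        # reward=2.0 and max_a' Q(s',a')=3.0, the new value is
        # 0.5 * 1.0 + 0.5 * (2.0 + 0.9 * 3.0) = 2.85.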
def get_best_action(self, state):
"""
Compute the best action to take in a state (using current q-values).
Args:
state (integer): Index representation of the current state (Row of the Q-matrix)
Returns:
integer: Index representation of the best action (Column of the Q-matrix)
for the given state (Row of the Q-matrix)
"""
# Pick the Action (Row of the Q-matrix) with the highest q-value
best_action = numba_argmax(self._qvalues[state, :])
return best_action
def get_action(self, state):
"""
Compute the action to take in the current state, including exploration.
With probability self.epsilon, we take a random action.
Returns both, the chosen action (with exploration) and the best action (argmax).
If the chosen action is the same as the best action, both returns will be
the same.
Args:
state (integer): Integer representation of the current state (Row of the Q-matrix)
Returns:
tuple: chosen_action, best_action
            chosen_action (integer): Index representation of the actually picked action
(Column of the Q-matrix)
best_action (integer): Index representation of the current best action
(Column of the Q-matrix) in the given state.
"""
# agent parameters:
epsilon = self.epsilon
e_threshold = np.random.random()
# Get the best action.
best_action = self.get_best_action(state)
if e_threshold < epsilon:
# In the numpy.random module randint() is exclusive for the upper
# bound and inclusive for the lower bound -> Actions are array
# indices for us.
chosen_action = np.random.randint(0, self.n_actions)
else:
chosen_action = best_action
return chosen_action, best_action
spec = [
("n_actions", int32),
("n_states", int32),
("_qvalues", float64[:, :]),
("alpha", float64),
("epsilon", float64),
("discount", float64),
]
@jitclass(spec)
class QLearningAgent(QLearningAgentBase):
"""
Wrapper class to create a jitclass for the QLearningAgent.
    Note that this class cannot be serialized. Hence, if you want
to save the trained agent as a pickle file, use the base class.
Note that for the random seed to work, you need to do it in
a njit wrapper function. From the numba documentation:
"Calling numpy.random.seed() from non-Numba code (or from object mode code)
will seed the Numpy random generator, not the Numba random generator."
"""
def jitclass_to_baseclass(agent_jit):
"""
A helper function to create a new QLearningAgentBase
object from the jitclass equivalent. This is needed
as we cannot serialize jitclasses in the current
numba version.
The function takes all parameters from the QLearningAgent
*agent_jit* and rewrites it to a new QLearningAgentBase
object.
Args:
agent_jit (QLearningAgent): jitclass instance of agent
Returns:
QLearningAgentBase: Serializable version of the agent
"""
agent_nojit = QLearningAgentBase(
alpha=agent_jit.alpha,
epsilon=agent_jit.epsilon,
discount=agent_jit.discount,
n_actions=agent_jit.n_actions,
n_states=agent_jit.n_states,
)
agent_nojit.set_qmatrix(new_matrix=agent_jit.get_qmatrix())
return agent_nojit
| 31.621145
| 97
| 0.633185
| 923
| 7,178
| 4.827736
| 0.208017
| 0.023564
| 0.070018
| 0.075404
| 0.212074
| 0.177962
| 0.125898
| 0.099865
| 0.098519
| 0.098519
| 0
| 0.003759
| 0.295765
| 7,178
| 226
| 98
| 31.761062
| 0.877745
| 0.563249
| 0
| 0
| 0
| 0
| 0.017907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.149254
| false
| 0
| 0.104478
| 0
| 0.373134
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f84a7601115fccffa87d1679d8be58c1f83890a1
| 1,561
|
py
|
Python
|
stanCode_Projects/my_photoshop/shrink.py
|
wilson51678/sc-projects
|
a4b9a0c542449372181f6bd20d4ad81b87bfcb46
|
[
"MIT"
] | null | null | null |
stanCode_Projects/my_photoshop/shrink.py
|
wilson51678/sc-projects
|
a4b9a0c542449372181f6bd20d4ad81b87bfcb46
|
[
"MIT"
] | null | null | null |
stanCode_Projects/my_photoshop/shrink.py
|
wilson51678/sc-projects
|
a4b9a0c542449372181f6bd20d4ad81b87bfcb46
|
[
"MIT"
] | null | null | null |
"""
File: shrink.py
Name: Wilson Wang 2020/08/05
-------------------------------
Create a new "out" image half the width and height of the original.
Set pixels at x=0 1 2 3 in out , from x=0 2 4 6 in original,
and likewise in the y direction.
"""
from simpleimage import SimpleImage
def shrink(filename):
"""
    This function shrinks the image in 'filename' into a new half-size image.
    :param filename: str, the path of the original image file
    :return: SimpleImage, a new image at half the width and height of the original
"""
img = SimpleImage(filename)
    # Make a blank image at half the width and height of the original
new_img = SimpleImage.blank(img.width//2,img.height//2)
for y in range(new_img.height):
for x in range(new_img.width):
            # Sample every second pixel of the original: x = 0, 2, 4, 6, ...
img_pixel = img.get_pixel(x*2,y*2)
new_img_pixel = new_img.get_pixel(x,y)
            # Copy the RGB channels from the original pixel into the new pixel
new_img_pixel.red = img_pixel.red
new_img_pixel.green = img_pixel.green
new_img_pixel.blue = img_pixel.blue
return new_img
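# For example, a 4x4 original maps its pixels at (x, y) = (0, 0), (2, 0),
# (0, 2) and (2, 2) onto the 2x2 output pixels (0, 0), (1, 0), (0, 1), (1, 1).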
def main():
"""
This program should shrink any image into a half size photo. 'without code:make_as_big_as'
"""
original = SimpleImage("images/poppy.png")
original.show()
after_shrink = shrink("images/poppy.png")
after_shrink.show()
if __name__ == '__main__':
main()
| 31.857143
| 95
| 0.632287
| 240
| 1,561
| 3.970833
| 0.35
| 0.062959
| 0.04617
| 0.008395
| 0.06086
| 0.050367
| 0
| 0
| 0
| 0
| 0
| 0.022609
| 0.263293
| 1,561
| 48
| 96
| 32.520833
| 0.806087
| 0.474055
| 0
| 0
| 0
| 0
| 0.055172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f84a986b558a36ee9782c5da91c77b0601aa7b43
| 15,349
|
py
|
Python
|
src/genie/libs/parser/iosxe/show_ip_dhcp.py
|
komurzak-cisco/genieparser
|
e6cd6bb133bab7260b2b82da198fd14a4dec66c7
|
[
"Apache-2.0"
] | 1
|
2021-07-26T02:56:27.000Z
|
2021-07-26T02:56:27.000Z
|
src/genie/libs/parser/iosxe/show_ip_dhcp.py
|
zhangineer/genieparser
|
d6abcb49bf6d39092d835d9490d817452920ae98
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/show_ip_dhcp.py
|
zhangineer/genieparser
|
d6abcb49bf6d39092d835d9490d817452920ae98
|
[
"Apache-2.0"
] | null | null | null |
"""
show ip dhcp database
show ip dhcp snooping database
show ip dhcp snooping database detail
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import (Schema, Any, Optional,
Or, And, Default, Use)
# Parser Utils
from genie.libs.parser.utils.common import Common
# =======================================
# Schema for 'show ip dhcp database'
# =======================================
class ShowIpDhcpDatabaseSchema(MetaParser):
"""
Schema for show ip dhcp database
"""
schema = {
'url': {
str: {
'read': str,
'written': str,
'status': str,
'delay_in_secs': int,
'timeout_in_secs': int,
'failures': int,
'successes': int
}
}
}
# =======================================
# Parser for 'show ip dhcp database'
# =======================================
class ShowIpDhcpDatabase(ShowIpDhcpDatabaseSchema):
"""
Parser for show ip dhcp database
"""
cli_command = 'show ip dhcp database'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
# URL : ftp://user:password@172.16.4.253/router-dhcp
p1 = re.compile(r'^URL +: +(?P<url>(\S+))$')
# Read : Dec 01 1997 12:01 AM
p2 = re.compile(r'^Read +: +(?P<read>(.+))$')
# Written : Never
p3 = re.compile(r'^Written +: +(?P<written>(\S+))$')
# Status : Last read succeeded. Bindings have been loaded in RAM.
p4 = re.compile(r'^Status +: +(?P<status>(.+))$')
# Delay : 300 seconds
p5 = re.compile(r'^Delay +: +(?P<delay>(\d+))')
# Timeout : 300 seconds
p6 = re.compile(r'^Timeout +: +(?P<timeout>(\d+))')
# Failures : 0
p7 = re.compile(r'^Failures +: +(?P<failures>(\d+))$')
# Successes : 1
p8 = re.compile(r'^Successes +: +(?P<successes>(\d+))$')
ret_dict = {}
for line in out.splitlines():
            line = line.strip()
# URL : ftp://user:password@172.16.4.253/router-dhcp
m = p1.match(line)
if m:
url_dict = ret_dict.setdefault('url', {}).setdefault(m.groupdict()['url'], {})
continue
# Read : Dec 01 1997 12:01 AM
m = p2.match(line)
if m:
url_dict.update({'read': m.groupdict()['read']})
continue
# Written : Never
m = p3.match(line)
if m:
url_dict.update({'written': m.groupdict()['written']})
continue
# Status : Last read succeeded. Bindings have been loaded in RAM.
m = p4.match(line)
if m:
url_dict.update({'status': m.groupdict()['status']})
continue
# Delay : 300 seconds
m = p5.match(line)
if m:
url_dict.update({'delay_in_secs': int(m.groupdict()['delay'])})
continue
# Timeout : 300 seconds
m = p6.match(line)
if m:
url_dict.update({'timeout_in_secs': int(m.groupdict()['timeout'])})
continue
# Failures : 0
m = p7.match(line)
if m:
url_dict.update({'failures': int(m.groupdict()['failures'])})
continue
# Successes : 1
m = p8.match(line)
if m:
url_dict.update({'successes': int(m.groupdict()['successes'])})
continue
return ret_dict
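    # For the sample lines quoted in the comments above, the parser returns a
    # dict shaped like:
    #
    #     {'url': {'ftp://user:password@172.16.4.253/router-dhcp': {
    #         'read': 'Dec 01 1997 12:01 AM',
    #         'written': 'Never',
    #         'status': 'Last read succeeded. Bindings have been loaded in RAM.',
    #         'delay_in_secs': 300, 'timeout_in_secs': 300,
    #         'failures': 0, 'successes': 1}}}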
# ===================================================
# Schema for 'show ip dhcp snooping database'
# 'show ip dhcp snooping database detail'
# ===================================================
class ShowIpDhcpSnoopingDatabaseSchema(MetaParser):
"""
Schema for show ip dhcp snooping database
show ip dhcp snooping database detail
"""
schema = {
'agent_url': str,
'write_delay_secs': int,
'abort_timer_secs': int,
'agent_running': str,
'delay_timer_expiry': str,
'abort_timer_expiry': str,
'last_succeeded_time': str,
'last_failed_time': str,
'last_failed_reason': str,
'total_attempts': int,
'startup_failures': int,
'successful_transfers': int,
'failed_transfers': int,
'successful_reads': int,
'failed_reads': int,
'successful_writes': int,
'failed_writes': int,
'media_failures': int,
Optional('detail'): {
'first_successful_access': str,
'last_ignored_bindings_counters': {
'binding_collisions': int,
'expired_leases': int,
'invalid_interfaces': int,
'unsupported_vlans': int,
'parse_failures': int
},
'last_ignored_time': str,
'total_ignored_bindings_counters': {
'binding_collisions': int,
'expired_leases': int,
'invalid_interfaces': int,
'unsupported_vlans': int,
'parse_failures': int
}
}
}
# ===================================================
# Parser for 'show ip dhcp snooping database'
# ===================================================
class ShowIpDhcpSnoopingDatabase(ShowIpDhcpSnoopingDatabaseSchema):
"""
Parser for show ip dhcp snooping database
"""
cli_command = 'show ip dhcp snooping database'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Initializes the Python dictionary variable
ret_dict = {}
# Agent URL :
p1 = re.compile(r'^Agent URL +: +(?P<agent_url>\S*)$')
# Write delay Timer : 300 seconds
p2 = re.compile(r'^Write delay Timer +: +(?P<write_delay_secs>\d+) seconds$')
# Abort Timer : 300 seconds
p3 = re.compile(r'^Abort Timer +: +(?P<abort_timer_secs>\d+) seconds$')
# Agent Running : No
p4 = re.compile(r'^Agent Running +: +(?P<agent_running>\w+)$')
# Delay Timer Expiry : Not Running
p5 = re.compile(r'^Delay Timer Expiry +: +(?P<delay_timer_expiry>.+)$')
# Abort Timer Expiry : Not Running
p6 = re.compile(r'^Abort Timer Expiry +: +(?P<abort_timer_expiry>.+)$')
# Last Succeded Time : None
p7 = re.compile(r'^Last Succee?ded Time +: +(?P<last_succeeded_time>.+)$')
# Last Failed Time : None
p8 = re.compile(r'^Last Failed Time +: +(?P<last_failed_time>.+)$')
# Last Failed Reason : No failure recorded.
p9 = re.compile(r'^Last Failed Reason +: +(?P<last_failed_reason>[\w ]+)\.?$')
# Total Attempts : 0 Startup Failures : 0
p10 = re.compile(r'^Total Attempts +: +(?P<total_attempts>\d+) +Startup Failures +: +(?P<startup_failures>\d+)$')
# Successful Transfers : 0 Failed Transfers : 0
p11 = re.compile(r'^Successful Transfers +: +(?P<successful_transfers>\d+) +Failed Transfers +: +(?P<failed_transfers>\d+)$')
# Successful Reads : 0 Failed Reads : 0
p12 = re.compile(r'^Successful Reads +: +(?P<successful_reads>\d+) +Failed Reads +: +(?P<failed_reads>\d+)$')
# Successful Writes : 0 Failed Writes : 0
p13 = re.compile(r'^Successful Writes +: +(?P<successful_writes>\d+) +Failed Writes +: +(?P<failed_writes>\d+)$')
# Media Failures : 0
p14 = re.compile(r'^Media Failures +: +(?P<media_failures>\d+)$')
# First successful access: Read
p15 = re.compile(r'^First successful access *: +(?P<first_successful_access>\w+)$')
# Last ignored bindings counters :
p16 = re.compile(r'^Last ignored bindings counters *:$')
# Binding Collisions : 0 Expired leases : 0
p17 = re.compile(r'^Binding Collisions +: +(?P<binding_collisions>\d+) +Expired leases +: +(?P<expired_leases>\d+)$')
# Invalid interfaces : 0 Unsupported vlans : 0
p18 = re.compile(r'^Invalid interfaces +: +(?P<invalid_interfaces>\d+) +Unsupported vlans : +(?P<unsupported_vlans>\d+)$')
# Parse failures : 0
p19 = re.compile(r'^Parse failures +: +(?P<parse_failures>\d+)$')
# Last Ignored Time : None
p20 = re.compile(r'^Last Ignored Time +: +(?P<last_ignored_time>.+)$')
# Total ignored bindings counters :
p21 = re.compile(r'^Total ignored bindings counters *:$')
# Processes the matched patterns
for line in out.splitlines():
            line = line.strip()
# Agent URL :
m = p1.match(line)
if m:
ret_dict['agent_url'] = m.groupdict()['agent_url']
continue
# Write delay Timer : 300 seconds
m = p2.match(line)
if m:
ret_dict['write_delay_secs'] = int(m.groupdict()['write_delay_secs'])
continue
# Abort Timer : 300 seconds
m = p3.match(line)
if m:
ret_dict['abort_timer_secs'] = int(m.groupdict()['abort_timer_secs'])
continue
# Agent Running : No
m = p4.match(line)
if m:
ret_dict['agent_running'] = m.groupdict()['agent_running']
continue
# Delay Timer Expiry : Not Running
m = p5.match(line)
if m:
ret_dict['delay_timer_expiry'] = m.groupdict()['delay_timer_expiry']
continue
# Abort Timer Expiry : Not Running
m = p6.match(line)
if m:
ret_dict['abort_timer_expiry'] = m.groupdict()['abort_timer_expiry']
continue
# Last Succeded Time : None
m = p7.match(line)
if m:
ret_dict['last_succeeded_time'] = m.groupdict()['last_succeeded_time']
continue
# Last Failed Time : None
m = p8.match(line)
if m:
ret_dict['last_failed_time'] = m.groupdict()['last_failed_time']
continue
# Last Failed Reason : No failure recorded.
m = p9.match(line)
if m:
ret_dict['last_failed_reason'] = m.groupdict()['last_failed_reason']
continue
# Total Attempts : 0 Startup Failures : 0
m = p10.match(line)
if m:
ret_dict['total_attempts'] = int(m.groupdict()['total_attempts'])
ret_dict['startup_failures'] = int(m.groupdict()['startup_failures'])
continue
# Successful Transfers : 0 Failed Transfers : 0
m = p11.match(line)
if m:
ret_dict['successful_transfers'] = int(m.groupdict()['successful_transfers'])
ret_dict['failed_transfers'] = int(m.groupdict()['failed_transfers'])
continue
# Successful Reads : 0 Failed Reads : 0
m = p12.match(line)
if m:
ret_dict['successful_reads'] = int(m.groupdict()['successful_reads'])
ret_dict['failed_reads'] = int(m.groupdict()['failed_reads'])
continue
# Successful Writes : 0 Failed Writes : 0
m = p13.match(line)
if m:
ret_dict['successful_writes'] = int(m.groupdict()['successful_writes'])
ret_dict['failed_writes'] = int(m.groupdict()['failed_writes'])
continue
# Media Failures : 0
m = p14.match(line)
if m:
ret_dict['media_failures'] = int(m.groupdict()['media_failures'])
continue
# First successful access: Read
m = p15.match(line)
if m:
detail_dict = ret_dict.setdefault('detail', {})
detail_dict['first_successful_access'] = m.groupdict()['first_successful_access']
continue
# Last ignored bindings counters :
m = p16.match(line)
if m:
bindings_dict = detail_dict.setdefault('last_ignored_bindings_counters', {})
continue
# Binding Collisions : 0 Expired leases : 0
m = p17.match(line)
if m:
bindings_dict['binding_collisions'] = int(m.groupdict()['binding_collisions'])
bindings_dict['expired_leases'] = int(m.groupdict()['expired_leases'])
continue
# Invalid interfaces : 0 Unsupported vlans : 0
m = p18.match(line)
if m:
bindings_dict['invalid_interfaces'] = int(m.groupdict()['invalid_interfaces'])
bindings_dict['unsupported_vlans'] = int(m.groupdict()['unsupported_vlans'])
continue
# Parse failures : 0
m = p19.match(line)
if m:
bindings_dict['parse_failures'] = int(m.groupdict()['parse_failures'])
continue
# Last Ignored Time : None
m = p20.match(line)
if m:
detail_dict['last_ignored_time'] = m.groupdict()['last_ignored_time']
continue
# Total ignored bindings counters :
m = p21.match(line)
if m:
bindings_dict = detail_dict.setdefault('total_ignored_bindings_counters', {})
continue
return ret_dict
# ===================================================
# Parser for 'show ip dhcp snooping database detail'
# ===================================================
class ShowIpDhcpSnoopingDatabaseDetail(ShowIpDhcpSnoopingDatabase):
"""
Parser for show ip dhcp snooping database detail
"""
cli_command = 'show ip dhcp snooping database detail'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
return super().cli(output=output)
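# A minimal usage sketch (assumes a connected pyATS `device` object, which is
# not constructed in this module):
#
#     parser = ShowIpDhcpSnoopingDatabaseDetail(device=device)
#     parsed = parser.parse()
#     print(parsed.get('agent_url'), parsed.get('successful_reads'))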
f84d7afc084777032cfb27a9f3d492736584d51d | 1,051 | py | Python | backend/flaskr/__init__.py | DakyungAndEunji/2021-ICE-Capstone-Project | ["MIT"]
### flaskr/__init__.py
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:toor!@localhost:3306/tps?charset=utf8'
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.secret_key = 'manyrandombyte'
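    # NOTE: the hardcoded database credentials and secret key above are assumed
    # to be development-only values; in production, load them from config.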
if test_config is None:
# Load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# Load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
with app.app_context():
db.create_all()
from flaskr.view import productController
app.register_blueprint(productController.bp)
return app
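# A minimal run sketch (assumes this package is importable as `flaskr` and the
# MySQL server named in the URI above is reachable):
#
#     from flaskr import create_app
#     app = create_app()
#     app.run(debug=True)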
f84fd6a36061acc80024ef6237230dcd9e8feabc | 7,228 | py | Python | backend/ec2.py | yubinhong/AutoAws | ["MIT"]
import boto3
import time
class AwsEc2(object):
def __init__(self, access_key, secret_key):
self.access_key = access_key
self.secret_key = secret_key
self.client = boto3.client(service_name='ec2', region_name="ap-northeast-1", aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_key)
self.resource = boto3.resource(service_name='ec2', region_name="ap-northeast-1",
aws_access_key_id=self.access_key, aws_secret_access_key=self.secret_key)
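        # NOTE: the region is hardcoded to ap-northeast-1; pass region_name in
        # as a parameter if other regions ever need to be supported.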
def get_instance(self, vpc_id, servername):
res = self.client.describe_instances(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
},
{
'Name': 'tag:Name',
'Values': [
servername
]
}
],
)
return res
def get_instance_by_resource(self, vpc_id):
instance_list = self.resource.instances.all()
res_list = []
for i in instance_list:
if i.vpc_id == vpc_id:
res_list.append(i)
return res_list
def get_vpc(self):
res = self.client.describe_vpcs()
return res
def get_subnet(self, vpc_id):
res = self.client.describe_subnets(
Filters=[
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
},
]
)
return res
def get_security_group(self, **kwargs):
filter_dict = {}
if len(kwargs.keys()) > 0:
for key in kwargs.keys():
if kwargs[key] != '':
filter_dict[key] = kwargs[key]
filter_list = [{'Name': key, 'Values': [value]} for key, value in filter_dict.items()]
res = self.client.describe_security_groups(
Filters=filter_list
)
else:
res = self.client.describe_security_groups()
return res
def create_security_group(self, name, vpc_id):
res = self.client.create_security_group(
Description=name,
GroupName=name,
VpcId=vpc_id,
)
return res
def security_group(self, name, vpc_id):
try:
res = self.create_security_group(name, vpc_id)
        except Exception:  # the group likely already exists; fall back to looking it up
param_dict = {'group-name': name}
res = self.get_security_group(**param_dict)['SecurityGroups'][0]
return res
def modified_security_group(self, instance_id, groups):
try:
res = self.client.modify_instance_attribute(InstanceId=instance_id, Groups=groups)
result = {'code': 0, 'msg': res}
except Exception as e:
print(e)
result = {'code': 1, 'msg': str(e)}
return result
def create_instance_from_template(self, instance_template_list, vpc_id, subnet_id):
res_list = []
for instance_template in instance_template_list:
res1 = self.security_group(instance_template['name'], vpc_id)
res = self.resource.create_instances(
BlockDeviceMappings=[
{
'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': False,
'VolumeSize': instance_template['disk'],
'VolumeType': 'gp2',
'Encrypted': False
}
},
],
ImageId=instance_template['image_id'],
InstanceType=instance_template['instance_type'],
KeyName=instance_template['key_name'],
NetworkInterfaces=[
{
'AssociatePublicIpAddress': True,
'DeleteOnTermination': True,
'DeviceIndex': 0,
'Groups': [
res1['GroupId'],
],
'SubnetId': subnet_id,
'InterfaceType': 'interface'
},
],
MaxCount=instance_template['count'],
MinCount=instance_template['count'],
)
for instance in res:
status = instance.state
while status['Code'] != 16:
time.sleep(6)
instance.load()
status = instance.state
if status['Code'] == 16:
instance.create_tags(
Tags=[{
'Key': 'Name',
'Value': instance_template['name']
}]
)
res_list.append(instance)
return res_list
def create_instance(self, instance_dict, vpc_id, subnet_id):
res1 = self.security_group(instance_dict['name'], vpc_id)
try:
res = self.resource.create_instances(
BlockDeviceMappings=[
{
'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': False,
'VolumeSize': instance_dict['disk'],
'VolumeType': 'gp2',
'Encrypted': False
}
},
],
ImageId=instance_dict['image_id'],
InstanceType=instance_dict['instance_type'],
KeyName=instance_dict['key_name'],
NetworkInterfaces=[
{
'AssociatePublicIpAddress': True,
'DeleteOnTermination': True,
'DeviceIndex': 0,
'Groups': [
res1['GroupId'],
],
'SubnetId': subnet_id,
'InterfaceType': 'interface'
},
],
MaxCount=instance_dict['count'],
MinCount=instance_dict['count'],
)
except Exception as e:
result = {'code': 1, 'msg': str(e)}
return result
for instance in res:
status = instance.state
while status['Code'] != 16:
time.sleep(6)
instance.load()
status = instance.state
if status['Code'] == 16:
instance.create_tags(
Tags=[{
'Key': 'Name',
'Value': instance_dict['name']
}]
)
result = {'code': 0}
return result
if __name__ == "__main__":
ec2 = AwsEc2("", "")
res = ec2.get_instance_by_resource('xxxxxx')
for i in res:
print(i.placement)
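# NOTE: the manual state-polling loops above could be replaced with boto3's
# built-in waiters, e.g. (sketch, reusing the same client object):
#
#     waiter = ec2.client.get_waiter('instance_running')
#     waiter.wait(InstanceIds=[i.id for i in res])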
f851380879e61799e28a7ffd91239a32f370bf71 | 2,299 | py | Python | control/voiceControl.py | Lluxent/CorporateClashUtility | ["MIT"]
import control
import speech_recognition as sr
def recognize_speech_from_mic(recognizer, microphone):
"""Transcribe speech from recorded from `microphone`.
Returns a dictionary with three keys:
"success": a boolean indicating whether or not the API request was
successful
"error": `None` if no error occured, otherwise a string containing
an error message if the API could not be reached or
speech was unrecognizable
"transcription": `None` if speech could not be transcribed,
otherwise a string containing the transcribed text
"""
# check that recognizer and microphone arguments are appropriate type
if not isinstance(recognizer, sr.Recognizer):
raise TypeError("`recognizer` must be `Recognizer` instance")
if not isinstance(microphone, sr.Microphone):
raise TypeError("`microphone` must be `Microphone` instance")
# adjust the recognizer sensitivity to ambient noise and record audio
# from the microphone
with microphone as source:
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
# set up the response object
response = {
"success" : True,
"error" : None,
"transcription" : None
}
# try recognizing the speech in the recording
# if a RequestError or UnknownValueError exception is caught, update the response object accordingly
try:
response["transcription"] = recognizer.recognize_google(audio)
except sr.RequestError:
# API was unreachable or unresponsive
response["success"] = False
response["error"] = "API unavailable"
except sr.UnknownValueError:
# speech was unintelligible
response["error"] = "Unable to recognize speech"
return response
r = sr.Recognizer()
m = sr.Microphone()
while True:
    # keep listening until a transcription is produced or the API fails
    while True:
        print('Listening... ')
        arg = recognize_speech_from_mic(r, m)
        if arg["transcription"]:
            break
        if not arg["success"]:
            break
    if arg["error"]:
        # nothing usable was transcribed; report the error and listen again
        print('Error! {}'.format(arg["error"]))
        continue
    print('Heard: {}'.format(arg["transcription"]))
    control.doAction(str.lower(arg["transcription"]))
f85295b6cbccfde4504d51121948d6ed5ff3e3c4 | 6,721 | py | Python | lookatweb/rules/objects.py | ivbeg/lookatweb | ["BSD-3-Clause"]
from .consts import *
# Object matching by classid
OBJECTS_CLSID_RULES = [
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:D27CDB6E-AE6D-11cf-96B8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:d27cdb6e-ae6d-11cf-96b8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:-D27CDB6E-AE6D-11cf-96B8-444553540000',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:22D6F312-B0F6-11D0-94AB-0080C74C7E95',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:6BF52A52-394A-11D3-B153-00C04F79FAA6',
'entities' : [
{'name' : 'web:tech:activex/wmplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'CLSID:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA',
'entities' : [
{'name' : 'web:tech:activex/realplayer'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'clsid:CFCDAA03-8BE4-11cf-B84B-0020AFBBCCFA',
'entities' : [
{'name' : 'web:tech:activex/realplayer'}
]
},
]
# match object tags by type
OBJECTS_TYPE_RULES = [
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-silverlight-2',
'entities' : [
{'name' : 'web:tech/silverlight'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-shockwave-flash',
'entities' : [
{'name' : 'web:tech/flash'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'application/x-oleobject',
'entities' : [
{'name' : 'web:tech/activex'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'image/svg+xml',
'entities' : [
{'name' : 'web:tech/svg'}
]
},
]
# match object tags by data
OBJECTS_DATA_RULES = [
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.yandex\.net/i/time/clock\.swf',
'entities' : [
{'name' : 'web:widgets:clock/yandexclock'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com',
'entities' : [
{'name' : 'web:media:video/vimeo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com',
'entities' : [
{'name' : 'web:media:video/youtube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://cdn\.last\.fm/widgets/chart',
'entities' : [
{'name' : 'web:widgets:audio/lastfm'}
]
},
]
# match object tags by embed src
EMBED_SRC_RULES = [
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.mail\.ru/r/video2/player_v2\.swf',
'entities' : [
{'name' : 'web:media:video/mailru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://flv\.video\.yandex\.ru',
'entities' : [
{'name' : 'web:media:video/yandex'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://img\.gismeteo\.ru/flash',
'entities' : [
{'name' : 'web:widgets:meteo/gismeteo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.clocklink\.com/clocks/',
'entities' : [
{'name' : 'web:widgets:time/clocklink'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'http://iii.ru/static/Vishnu.swf',
'entities' : [
{'name' : 'web:widgets:chat/iiiru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://[a-z0-9]{1,3}\.videos\.sapo\.pt/play',
'entities' : [
{'name' : 'web:media:video/sapovideos'}
]
},
{'type' : RULETYPE_EQUAL, 'text' : 'http://pub.tvigle.ru/swf/tvigle_single_v2.swf',
'entities' : [
{'name' : 'web:media:video/twigle'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://rpod\.ru/i/b/listen_240x400_01/core\.swf',
'entities' : [
{'name' : 'web:media:audio/rpodru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vision\.rambler\.ru/i/e\.swf',
'entities' : [
{'name' : 'web:media:video/ramblervision'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.smotri\.com/scrubber_custom8\.swf',
'entities' : [
{'name' : 'web:media:video/smotricom'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.russia\.ru/player/main\.swf',
'entities' : [
{'name' : 'web:media:video/russiaru'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://video\.google\.(com|ru|ca|de)/googleplayer.swf',
'entities' : [
{'name' : 'web:media:video/googlevideo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www\.youtube\.com/v/',
'entities' : [
{'name' : 'web:media:video/youtube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/templates/',
'entities' : [
{'name' : 'web:cms/bitrix'},
{'name' : 'web:tech:lang/php'},
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^/bitrix/components/',
'entities' : [
{'name' : 'web:cms/bitrix'},
{'name' : 'web:tech:lang/php'},
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://developer\.truveo\.com/apps/listWidget',
'entities' : [
{'name' : 'web:media:video/truveo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://pics\.rbc\.ru/informer',
'entities' : [
{'name' : 'web:widgets:fin/rbcinformer'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://video\.rutube\.ru',
'entities' : [
{'name' : 'web:media:video/rutube'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://static\.twitter\.com/flash/widgets/profile/TwitterWidget\.swf',
'entities' : [
{'name' : 'web:widgets:blog/twitter'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://vimeo\.com/moogaloop.swf',
'entities' : [
{'name' : 'web:media:video/vimeo'}
]
},
{'type' : RULETYPE_REGEXP, 'text' : '^http://www.1tv.ru/(n|p)video',
'entities' : [
{'name' : 'web:media:video/1tvru'}
]
},
]
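# A minimal matching sketch (the `match_rules` helper below is hypothetical,
# not part of this module; RULETYPE_EQUAL / RULETYPE_REGEXP come from .consts):
import re

def match_rules(rules, text):
    """Return the entity names of every rule that matches `text`."""
    names = []
    for rule in rules:
        if rule['type'] == RULETYPE_EQUAL:
            matched = (text == rule['text'])
        else:  # assume RULETYPE_REGEXP
            matched = re.match(rule['text'], text) is not None
        if matched:
            names.extend(entity['name'] for entity in rule['entities'])
    return names

# Example: match_rules(OBJECTS_TYPE_RULES, 'application/x-shockwave-flash')
# would return ['web:tech/flash'].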
f856a06399d0483aa5762d750435935c90b3dd55 | 6,020 | py | Python | src/failprint/cli.py | pawamoy/woof | ["0BSD"]
# Why does this file exist, and why not put this in `__main__`?
#
# You might be tempted to import things from `__main__` later,
# but that will cause problems: the code will get executed twice:
#
# - When you run `python -m failprint` python will execute
# `__main__.py` as a script. That means there won't be any
# `failprint.__main__` in `sys.modules`.
# - When you import `__main__` it will get executed again (as a module) because
# there's no `failprint.__main__` in `sys.modules`.
"""Module that contains the command line application."""
import argparse
from typing import List, Optional, Sequence
from failprint.capture import Capture
from failprint.formats import accept_custom_format, formats
from failprint.runners import run
class ArgParser(argparse.ArgumentParser):
"""A custom argument parser with a helper method to add boolean flags."""
def add_bool_argument(
self,
truthy: Sequence[str],
falsy: Sequence[str],
truthy_help: str = "",
falsy_help: str = "",
**kwargs,
) -> None:
"""
Add a boolean flag/argument to the parser.
Arguments:
truthy: Values that will store true in the destination.
falsy: Values that will store false in the destination.
truthy_help: Help for the truthy arguments.
falsy_help: Help for the falsy arguments.
**kwargs: Remaining keyword arguments passed to `argparse.ArgumentParser.add_argument`.
"""
truthy_kwargs = {**kwargs, "help": truthy_help, "action": "store_true"}
falsy_kwargs = {**kwargs, "help": falsy_help, "action": "store_false"}
mxg = self.add_mutually_exclusive_group()
mxg.add_argument(*truthy, **truthy_kwargs) # type: ignore # mypy is confused by arguments position
mxg.add_argument(*falsy, **falsy_kwargs) # type: ignore
def add_flags(parser: ArgParser, set_defaults: bool = True) -> ArgParser:
"""
Add some boolean flags to the parser.
We made this method separate and public
for its use in [duty](https://github.com/pawamoy/duty).
Arguments:
parser: The parser to add flags to.
set_defaults: Whether to set default values on arguments.
Returns:
The augmented parser.
"""
# IMPORTANT: the arguments destinations should match
# the parameters names of the failprint.runners.run function.
# As long as names are consistent between the two,
# it's very easy to pass CLI args to the function,
# and it also allows to avoid duplicating the parser arguments
# in dependent projects like duty (https://github.com/pawamoy/duty) :)
parser.add_argument(
"-c",
"--capture",
choices=list(Capture),
type=Capture,
help="Which output to capture. Colors are supported with 'both' only, unless the command has a 'force color' option.",
)
parser.add_argument(
"-f",
"--fmt",
"--format",
dest="fmt",
choices=formats.keys(),
type=accept_custom_format,
default=None,
help="Output format. Pass your own Jinja2 template as a string with '-f custom=TEMPLATE'. "
"Available variables: command, title (command or title passed with -t), code (exit status), "
"success (boolean), failure (boolean), number (command number passed with -n), "
"output (command output), nofail (boolean), quiet (boolean), silent (boolean). "
"Available filters: indent (textwrap.indent).",
)
parser.add_bool_argument(
["-y", "--pty"],
["-Y", "--no-pty"],
dest="pty",
default=True if set_defaults else None,
truthy_help="Enable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.",
falsy_help="Disable the use of a pseudo-terminal. PTY doesn't allow programs to use standard input.",
)
parser.add_bool_argument(
["-p", "--progress"],
["-P", "--no-progress"],
dest="progress",
default=True if set_defaults else None,
truthy_help="Print progress while running a command.",
falsy_help="Don't print progress while running a command.",
)
parser.add_bool_argument(
["-q", "--quiet"],
["-Q", "--no-quiet"],
dest="quiet",
default=False if set_defaults else None,
truthy_help="Don't print the command output, even if it failed.",
falsy_help="Print the command output when it fails.",
)
parser.add_bool_argument(
["-s", "--silent"],
["-S", "--no-silent"],
dest="silent",
default=False if set_defaults else None,
truthy_help="Don't print anything.",
falsy_help="Print output as usual.",
)
parser.add_bool_argument(
["-z", "--zero", "--nofail"],
["-Z", "--no-zero", "--strict"],
dest="nofail",
default=False if set_defaults else None,
truthy_help="Don't fail. Always return a success (0) exit code.",
falsy_help="Return the original exit code.",
)
return parser
def get_parser() -> ArgParser:
"""
Return the CLI argument parser.
Returns:
An argparse parser.
"""
parser = add_flags(ArgParser(prog="failprint"))
parser.add_argument("-n", "--number", type=int, default=1, help="Command number. Useful for the 'tap' format.")
parser.add_argument("-t", "--title", help="Command title. Default is the command itself.")
parser.add_argument("cmd", metavar="COMMAND", nargs="+")
return parser
def main(args: Optional[List[str]] = None) -> int:
"""
Run the main program.
This function is executed when you type `failprint` or `python -m failprint`.
Arguments:
args: Arguments passed from the command line.
Returns:
An exit code.
"""
parser = get_parser()
opts = parser.parse_args(args).__dict__.items() # noqa: WPS609
return run(**{_: value for _, value in opts if value is not None}).code
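# A minimal usage sketch (assumes the `failprint` package is installed):
#
#     from failprint.cli import main
#     exit_code = main(["-t", "Greeting", "echo", "hello"])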
f85a24e0d9a829e5ba4097a173e5c180ffe2795f | 1,410 | py | Python | Summarizing-Data-with-statistics-/code.py | Tushar23dhongade/ga-learner-dsmp-repo | ["MIT"]
# --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#path of the data file- path
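# NOTE: `path` is assumed to be injected by the exercise environment;
# it is not defined anywhere in this script.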
data=pd.read_csv(path)
data["Gender"].replace("-","Agender",inplace=True)
gender_count=data.Gender.value_counts()
gender_count.plot(kind="bar")
#Code starts here
# --------------
#Code starts here
alignment=data.Alignment.value_counts()
plt.pie(alignment, labels=["good", "bad", "neutral"])
# --------------
#Code starts here
sc_df=data[["Strength","Combat"]]
sc_covariance=sc_df.cov().iloc[0,1]
sc_strength=sc_df.Strength.std()
sc_combat=sc_df.Combat.std()
sc_pearson=sc_covariance/(sc_strength*sc_combat)
print(sc_pearson)
ic_df=data[["Intelligence","Combat"]]
ic_covariance=ic_df.cov().iloc[0,1]
ic_intelligence=ic_df.Intelligence.std()
ic_combat=ic_df.Combat.std()
ic_pearson=ic_covariance/(ic_intelligence*ic_combat)
print(ic_pearson)
# --------------
#Code starts here
total_high=data.Total.quantile(0.99)
super_best=data[data.Total>total_high]
super_best_names=list(super_best.Name)
print(super_best_names)
# --------------
#Code starts here
Intelligence, ax_1 = plt.subplots()
ax_1.boxplot(data.Intelligence)
ax_1.set_title('Intelligence')
Speed, ax_2 = plt.subplots()
ax_2.boxplot(data.Speed)
ax_2.set_title('Speed')
Power, ax_3 = plt.subplots()
ax_3.boxplot(data.Power)
ax_3.set_title('Power')
f85c11db5b31e7e4088a63d0697d91e4986e3c85 | 6,962 | py | Python | soc/python/checkDB.py | idea-fasoc/fasoc | ["MIT"]
#!/usr/bin/env python3
#MIT License
#Copyright (c) 2018 The University of Michigan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import json  # json parsing
import zipfile

from modifyDBFiles import modifyDBFiles
def checkDB(moduleJson,databaseDir,outputDir,ipXactDir,module_number,designName):
genJson = moduleJson['generator']
searchDir = os.path.join(databaseDir,'JSN',genJson)
excluded_name = ['LDO_CONTROLLER','decoder_3to8','mux_8to1','ANALOG_CORE','bu_dco_8stg','dco_8stg','dco_10drv_10cc_30fc_18stg','dco_CC','dco_FC','DCO_MODEL','FUNCTIONS','PLL_CONTROLLER','PLL_CONTROLLER_TDC_COUNTER','SSC_GENERATOR','synth_dco','synth_pll_dco_interp','synth_pll_dco_outbuff','TB_synth_pll','TDC_COUNTER','test_synth_pll','counter','TEMP_ANALOG.nl','TEMP_ANALOG_test.nl','TEMP_AUTO_def','tempsenseInst']
if 'specifications' in moduleJson:
target_specsJson = moduleJson['specifications']
if os.path.exists(searchDir):
if len(os.listdir(searchDir)) != 0:
for file in os.listdir(searchDir):
overlap_tag = True
with open(os.path.join(searchDir,file), 'r') as search_file:
srchJson = json.load(search_file)
if 'specifications' in srchJson:
srch_specifications= srchJson['specifications']
for target_specName, target_specVal in target_specsJson.items():
if target_specVal != "" and isinstance(target_specVal, str) != True:
if target_specName in srch_specifications:
srch_specVal = srch_specifications[target_specName]
if srch_specVal != "" and isinstance(srch_specVal, str) != True:
if isinstance(target_specVal, dict):
if "min" in target_specVal:
if isinstance(srch_specVal, dict):
if srch_specVal["min"] < target_specVal["min"]:
overlap_tag = False
break
else:
if srch_specVal < target_specVal["min"]:
overlap_tag = False
break
if "max" in target_specVal:
if isinstance(srch_specVal, dict):
if srch_specVal["max"] > target_specVal["max"]:
overlap_tag = False
break
else:
if srch_specVal > target_specVal["max"]:
overlap_tag = False
break
else:
if "min" in target_specName:
if isinstance(srch_specVal, dict):
if srch_specVal["min"] < target_specVal:
overlap_tag = False
break
else:
if srch_specVal < target_specVal:
overlap_tag = False
break
elif "max" in target_specName:
if isinstance(srch_specVal, dict):
if srch_specVal["max"] > target_specVal:
overlap_tag = False
break
else:
if srch_specVal > target_specVal:
overlap_tag = False
break
else:
if isinstance(srch_specVal, dict):
if srch_specVal["min"] != target_specVal:
overlap_tag = False
break
if srch_specVal["max"] != target_specVal:
overlap_tag = False
break
else:
if srch_specVal != target_specVal:
overlap_tag = False
break
if overlap_tag:
found_Filename = os.path.join(databaseDir,'ZIP',(file.split('.'))[0]+'.zip')
if os.path.exists(found_Filename):
print(moduleJson['module_name'] + " has been found at the database")
zip_ref = zipfile.ZipFile(found_Filename, 'r')
zip_ref.extractall(outputDir)
zip_ref.close()
for output_file in os.listdir(outputDir):
output_file_name = (output_file.split('.'))[0]
postfix = (output_file.split(output_file_name))[-1]
if (not postfix == '.v') or (postfix == '.v' and output_file_name not in excluded_name):
os.rename(os.path.join(outputDir,output_file),os.path.join(outputDir,moduleJson['module_name'] + postfix))
modifyDBFiles(os.path.join(outputDir,moduleJson['module_name'] + postfix),postfix,moduleJson['module_name'],srchJson["module_name"])
return True
                        else:  # no zip file was found, so the search was unsuccessful
                            return False
                return False  # reaching this point means no matching file was found
            else:  # the database is empty, so the search was unsuccessful
                return False
        else:  # the database does not exist, so the search was unsuccessful
            return False
    else:  # the target file has no specifications, so all stored files are acceptable
if os.path.exists(searchDir):
if len(os.listdir(searchDir)) != 0:
with open(os.path.join(searchDir,os.listdir(searchDir)[0]), 'r') as search_file:
srchJson = json.load(search_file)
found_Filename = os.path.join(databaseDir,'ZIP',(os.listdir(searchDir)[0].split('.'))[0]+'.zip')
if os.path.exists(found_Filename):
print(moduleJson['module_name'] + " has been found at the database")
zip_ref = zipfile.ZipFile(found_Filename, 'r')
zip_ref.extractall(outputDir)
zip_ref.close()
for output_file in os.listdir(outputDir):
output_file_name = (output_file.split('.'))[0]
postfix = (output_file.split(output_file_name))[-1]
if (not postfix == '.v') or (postfix == '.v' and output_file_name not in excluded_name):
os.rename(os.path.join(outputDir,output_file),os.path.join(outputDir,moduleJson['module_name'] + postfix))
modifyDBFiles(os.path.join(outputDir,moduleJson['module_name'] + postfix),postfix,moduleJson['module_name'],srchJson["module_name"])
return True
            else:  # no zip file was found, so the search was unsuccessful
                return False
        else:  # the database is empty, so the search was unsuccessful
            return False
    else:  # the database does not exist, so the search was unsuccessful
        return False
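# A minimal call sketch (all paths below are hypothetical):
#
#     with open('module.json') as f:
#         module_json = json.load(f)
#     found = checkDB(module_json, 'database', 'outputs', 'ipxact', 0, 'design')
#     print('reused from database' if found else 'needs generation')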
f85e27ad10e7814b11be2c93c0c4dca76deac4ea | 2,222 | py | Python | Piquant/Debug/script/matlplotlib_pyplot实操代码.py | QuantPengPeng/Piquant | ["MIT"]
# coding: utf-8
# In[35]:
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
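# NOTE: `from pylab import *` is generally discouraged; it is kept here because
# the script calls pylab's `subplots_adjust`, `floor`, and `ceil` unqualified.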
main_image=plt.figure(figsize=(10,10))
subplots_adjust(hspace=0.3, wspace=0.3)  # row and column spacing between subplots
# Subplot 1: a single line
x_0 = np.linspace(0, 2*np.pi, 20)  # range of the independent variable x
sub_image_1=plt.subplot(2,2,1)
plt.xlabel('X value')
plt.ylabel('Sin value')
plt.grid(True)
sub_image_1.plot(x_0, np.sin(x_0), 'r--o', label='Sin(x)')
sub_image_1.legend()  # show the legend
sub_image_1.annotate('sin wave', xy=(3, 0.25), xytext=(4, 0.5), arrowprops=dict(facecolor='black', shrink=0.05))  # annotate a specific point with text
sub_image_1.set_title('Sin Waves')
# Subplot 2: multiple lines
x_1=np.linspace(0,2*np.pi,20)
sub_image_2=plt.subplot(2,2,2)
plt.xlabel('X value')
plt.ylabel('Cos and Sin value')
plt.grid(True)
sub_image_2.plot(x_1, np.cos(x_1), color='blue', linestyle='--', linewidth=1, marker='o', markerfacecolor='red', markersize='6', label='Cos(x)')
sub_image_2.plot(x_1, np.sin(x_1), color='green', linestyle='-.', linewidth=3, marker='^', markerfacecolor='yellow', markersize='8', label='Sin(x)')
sub_image_2.legend()
sub_image_2.set_title('Cos and Sin Waves')
# Subplot 3: histogram
bins_count=10
mu,sigma=100,20
x_hist = mu + sigma*np.random.randn(1000, 1)  # randn draws 1000 standard-normal samples as a column vector
sub_image_3=plt.subplot(2,2,3)
plt.xlabel('value')
plt.ylabel('count')
plt.grid(False)
tuple_return = sub_image_3.hist(x_hist, bins=bins_count, facecolor='red', alpha=0.8, edgecolor='black', normed=0)  # normed=0 plots counts, normed=1 plots frequencies (newer matplotlib uses `density` instead of `normed`)
sub_image_3.set_title('Frequency Histogram')
plt.xlim((floor(x_hist.min()),ceil(x_hist.max())))
bar_width=(x_hist.max()-x_hist.min())/bins_count
plt.xticks(np.arange(floor(x_hist.min()), ceil(x_hist.max()), round(bar_width)))  # tick placement
for i in range(bins_count):
sub_image_3.text(x_hist.min()+(bar_width*i)+(bar_width/2), tuple_return[0][i], str(tuple_return[0][i]), horizontalalignment='center', verticalalignment='bottom')
# Subplot 4: a piecewise function
x_part_1 = np.linspace(-10, -1, 10)  # discrete sample points for the piecewise function
x_part_2=np.linspace(0,10,11)
sub_image_4=plt.subplot(2,2,4)
plt.xlabel('X value')
plt.ylabel('Y value')
plt.grid(False)
sub_image_4.plot(x_part_1,x_part_1*2+1,'b--o',label='y=2x+1')
sub_image_4.plot(x_part_2,x_part_2**2,'r--o',label='y=x^2')
sub_image_4.legend()
sub_image_4.set_title('PieceWise Function')
# display the figure
plt.show()
f85f1ff5fdc55f6eaa86305ff1243afdf2c3c231 | 7,624 | py | Python | colour/models/rgb.py | canavandl/colour | ["BSD-3-Clause"]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RGB Colourspace Transformations
===============================
Defines the *RGB* colourspace transformations:
- :func:`XYZ_to_RGB`
- :func:`RGB_to_XYZ`
- :func:`RGB_to_RGB`
See Also
--------
`RGB Colourspaces IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.models import xy_to_XYZ
from colour.adaptation import chromatic_adaptation_matrix
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['XYZ_to_RGB',
'RGB_to_XYZ',
'RGB_to_RGB']
def XYZ_to_RGB(XYZ,
illuminant_XYZ,
illuminant_RGB,
to_RGB,
chromatic_adaptation_method='CAT02',
transfer_function=None):
"""
Converts from *CIE XYZ* colourspace to *RGB* colourspace using given
*CIE XYZ* colourspace matrix, *illuminants*, *chromatic adaptation* method,
*normalised primary matrix* and *transfer function*.
Parameters
----------
XYZ : array_like, (3,)
*CIE XYZ* colourspace matrix.
illuminant_XYZ : array_like
*CIE XYZ* colourspace *illuminant* *xy* chromaticity coordinates.
illuminant_RGB : array_like
*RGB* colourspace *illuminant* *xy* chromaticity coordinates.
to_RGB : array_like, (3, 3)
*Normalised primary matrix*.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
transfer_function : object, optional
*Transfer function*.
Returns
-------
ndarray, (3,)
*RGB* colourspace matrix.
Notes
-----
- Input *CIE XYZ* colourspace matrix is in domain [0, 1].
- Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain
[0, 1].
- Input *illuminant_RGB* *xy* chromaticity coordinates are in domain
[0, 1].
- Output *RGB* colourspace matrix is in domain [0, 1].
Examples
--------
>>> XYZ = np.array([0.1151847498, 0.1008, 0.0508937252])
>>> illuminant_XYZ = (0.34567, 0.35850)
>>> illuminant_RGB = (0.31271, 0.32902)
>>> chromatic_adaptation_method = 'Bradford'
>>> to_RGB = np.array([
... [3.24100326, -1.53739899, -0.49861587],
... [-0.96922426, 1.87592999, 0.04155422],
... [0.05563942, -0.2040112, 1.05714897]])
>>> XYZ_to_RGB(
... XYZ,
... illuminant_XYZ,
... illuminant_RGB,
... to_RGB,
... chromatic_adaptation_method) # doctest: +ELLIPSIS
array([ 0.1730350..., 0.0821103..., 0.0567249...])
"""
cat = chromatic_adaptation_matrix(xy_to_XYZ(illuminant_XYZ),
xy_to_XYZ(illuminant_RGB),
method=chromatic_adaptation_method)
adapted_XYZ = np.dot(cat, XYZ)
RGB = np.dot(to_RGB.reshape((3, 3)), adapted_XYZ.reshape((3, 1)))
if transfer_function is not None:
RGB = np.array([transfer_function(x) for x in np.ravel(RGB)])
return np.ravel(RGB)
def RGB_to_XYZ(RGB,
illuminant_RGB,
illuminant_XYZ,
to_XYZ,
chromatic_adaptation_method='CAT02',
inverse_transfer_function=None):
"""
Converts from *RGB* colourspace to *CIE XYZ* colourspace using given
*RGB* colourspace matrix, *illuminants*, *chromatic adaptation* method,
*normalised primary matrix* and *transfer function*.
Parameters
----------
RGB : array_like, (3,)
*RGB* colourspace matrix.
illuminant_RGB : array_like
*RGB* colourspace *illuminant* chromaticity coordinates.
illuminant_XYZ : array_like
*CIE XYZ* colourspace *illuminant* chromaticity coordinates.
to_XYZ : array_like, (3, 3)
*Normalised primary matrix*.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
inverse_transfer_function : object, optional
*Inverse transfer function*.
Returns
-------
ndarray, (3,)
*CIE XYZ* colourspace matrix.
Notes
-----
- Input *RGB* colourspace matrix is in domain [0, 1].
- Input *illuminant_RGB* *xy* chromaticity coordinates are in domain
[0, 1].
- Input *illuminant_XYZ* *xy* chromaticity coordinates are in domain
[0, 1].
- Output *CIE XYZ* colourspace matrix is in domain [0, 1].
Examples
--------
>>> RGB = np.array([0.17303501, 0.08211033, 0.05672498])
>>> illuminant_RGB = (0.31271, 0.32902)
>>> illuminant_XYZ = (0.34567, 0.35850)
>>> chromatic_adaptation_method = 'Bradford'
>>> to_XYZ = np.array([
... [0.41238656, 0.35759149, 0.18045049],
... [0.21263682, 0.71518298, 0.0721802],
... [0.01933062, 0.11919716, 0.95037259]])
>>> RGB_to_XYZ(
... RGB,
... illuminant_RGB,
... illuminant_XYZ,
... to_XYZ,
... chromatic_adaptation_method) # doctest: +ELLIPSIS
array([ 0.1151847..., 0.1008 , 0.0508937...])
"""
if inverse_transfer_function is not None:
RGB = np.array([inverse_transfer_function(x)
for x in np.ravel(RGB)])
XYZ = np.dot(to_XYZ.reshape((3, 3)), RGB.reshape((3, 1)))
cat = chromatic_adaptation_matrix(
xy_to_XYZ(illuminant_RGB),
xy_to_XYZ(illuminant_XYZ),
method=chromatic_adaptation_method)
adapted_XYZ = np.dot(cat, XYZ.reshape((3, 1)))
return np.ravel(adapted_XYZ)
def RGB_to_RGB(RGB,
input_colourspace,
output_colourspace,
chromatic_adaptation_method='CAT02'):
"""
Converts from given input *RGB* colourspace to output *RGB* colourspace
using given *chromatic adaptation* method.
Parameters
----------
RGB : array_like, (3,)
*RGB* colourspace matrix.
input_colourspace : RGB_Colourspace
*RGB* input colourspace.
output_colourspace : RGB_Colourspace
*RGB* output colourspace.
chromatic_adaptation_method : unicode, optional
('XYZ Scaling', 'Bradford', 'Von Kries', 'Fairchild', 'CAT02')
*Chromatic adaptation* method.
    Returns
    -------
    ndarray, (3,)
        *RGB* colourspace matrix.
Notes
-----
- *RGB* colourspace matrices are in domain [0, 1].
Examples
--------
>>> from colour import sRGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE
>>> RGB = np.array([0.35521588, 0.41, 0.24177934])
>>> RGB_to_RGB(
... RGB,
... sRGB_COLOURSPACE,
... PROPHOTO_RGB_COLOURSPACE) # doctest: +ELLIPSIS
array([ 0.3579334..., 0.4007138..., 0.2615704...])
"""
cat = chromatic_adaptation_matrix(
xy_to_XYZ(input_colourspace.whitepoint),
xy_to_XYZ(output_colourspace.whitepoint),
chromatic_adaptation_method)
trs_matrix = np.dot(output_colourspace.to_RGB,
np.dot(cat, input_colourspace.to_XYZ))
return np.dot(trs_matrix, RGB)
f85f4b7c7b491177a0f091a1844ac24655fff102 | 1,768 | py | Python | tests/assign_folds_test.py | turku-rad-ai/pe-detection | ["Apache-2.0"]
from typing import List
import pandas as pd
import pytest
from preprocessing.assign_folds import assign_folds
testdata = [
[
[
"patient1",
"patient2",
"patient3",
"patient4",
"patient5",
"patient6",
"patient7",
"patient8",
"patient9",
"patient1", # second 1
"patient3", # second 3
"patient10",
],
[
"image1.dcm",
"image2.dcm",
"image3.dcm",
"image4.dcm",
"image5.dcm",
"image6.dcm",
"image7.dcm",
"image8.dcm",
"image9.dcm",
"image10.dcm",
"image11.dcm",
"image12.dcm",
],
[1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1],
3,
]
]
@pytest.mark.parametrize("patient_ids,dcm_filenames,dataset_labels,folds", testdata)
def test_assign_folds(
patient_ids: List[str],
dcm_filenames: List[str],
dataset_labels: List[int],
folds: int,
):
data = {
"PatientID": patient_ids,
"dcm_filename": dcm_filenames,
"dataset_label": dataset_labels,
}
df = pd.DataFrame(data=data)
df = assign_folds(df, fold_count=folds)
    # the "pat_fold" column must have been added
assert "pat_fold" in df.columns
# Check that folds are on proper range
assert df["pat_fold"].min() == 0
assert df["pat_fold"].max() == folds - 1
# Test that each patient belongs to one and only one fold
assert min([item.shape[0] for item in list(df.groupby("PatientID")["pat_fold"].unique())]) == 1
assert max([item.shape[0] for item in list(df.groupby("PatientID")["pat_fold"].unique())]) == 1
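# Run this module with pytest (sketch):
#
#     pytest tests/assign_folds_test.py -q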
f86011337ef051c071ef0fd89e5bf4792bb54439 | 1,116 | py | Python | tests/test_main.py | dadaloop82/viseron | ["MIT"]
"""Tests for __main__.py."""
# import logging
from unittest.mock import MagicMock, patch
import pytest
import viseron.__main__
@pytest.fixture
def mocked_viseron(mocker):
"""Mock Viseron class."""
mocker.patch("viseron.__main__.Viseron", return_value="Testing")
def test_init(simple_config, mocked_viseron):
"""Test init."""
viseron.__main__.main()
# viseron.__main__.LOGGER.info("testing")
with patch.object(viseron.__main__, "main", MagicMock()) as mock_main:
with patch.object(viseron.__main__, "__name__", "__main__"):
viseron.__main__.init()
mock_main.assert_called_once()
# class TestMyFormatter:
# """Tests for class MyFormatter."""
# def test_format(self):
# """Test formatter."""
# formatter = viseron.__main__.MyFormatter()
# record = logging.makeLogRecord(
# {
# "name": "test_logger",
# "level": 10,
# "pathname": "test_main.py",
# "msg": "Testing, message repeated 2 times",
# }
# )
# formatter.format(record)